-rw-r--r--  gcc/ada/gcc-interface/cuintp.c  2
-rw-r--r--  gcc/alias.c  10
-rw-r--r--  gcc/builtins.c  39
-rw-r--r--  gcc/c-family/c-ada-spec.c  2
-rw-r--r--  gcc/c-family/c-common.c  10
-rw-r--r--  gcc/c-family/c-lex.c  4
-rw-r--r--  gcc/c-family/c-pretty-print.c  2
-rw-r--r--  gcc/cgraph.c  2
-rw-r--r--  gcc/combine.c  9
-rw-r--r--  gcc/config/bfin/bfin.c  2
-rw-r--r--  gcc/config/i386/i386.c  6
-rw-r--r--  gcc/config/rs6000/rs6000-c.c  4
-rw-r--r--  gcc/cp/decl.c  9
-rw-r--r--  gcc/cp/init.c  26
-rw-r--r--  gcc/cp/mangle.c  4
-rw-r--r--  gcc/cp/typeck2.c  9
-rw-r--r--  gcc/dbxout.c  8
-rw-r--r--  gcc/dojump.c  2
-rw-r--r--  gcc/double-int.h  34
-rw-r--r--  gcc/dwarf2out.c  12
-rw-r--r--  gcc/dwarf2out.h  4
-rw-r--r--  gcc/emit-rtl.c  6
-rw-r--r--  gcc/explow.c  3
-rw-r--r--  gcc/expmed.c  43
-rw-r--r--  gcc/expr.c  55
-rw-r--r--  gcc/fixed-value.c  13
-rw-r--r--  gcc/fold-const.c  206
-rw-r--r--  gcc/fortran/target-memory.c  2
-rw-r--r--  gcc/fortran/trans-array.c  4
-rw-r--r--  gcc/fortran/trans-const.c  5
-rw-r--r--  gcc/fortran/trans-expr.c  2
-rw-r--r--  gcc/fortran/trans-intrinsic.c  12
-rw-r--r--  gcc/fortran/trans-types.c  4
-rw-r--r--  gcc/gengtype-parse.c  47
-rw-r--r--  gcc/gengtype-state.c  1
-rw-r--r--  gcc/gengtype.c  7
-rw-r--r--  gcc/genmodes.c  4
-rw-r--r--  gcc/gimple-fold.c  39
-rw-r--r--  gcc/gimple-ssa-strength-reduction.c  61
-rw-r--r--  gcc/graphite-clast-to-gimple.c  2
-rw-r--r--  gcc/graphite-sese-to-poly.c  4
-rw-r--r--  gcc/ipa-prop.c  3
-rw-r--r--  gcc/java/boehm.c  10
-rw-r--r--  gcc/java/jcf-parse.c  4
-rw-r--r--  gcc/loop-doloop.c  12
-rw-r--r--  gcc/loop-unroll.c  24
-rw-r--r--  gcc/lto-streamer-in.c  6
-rw-r--r--  gcc/lto/lto.c  2
-rw-r--r--  gcc/machmode.def  3
-rw-r--r--  gcc/objc/objc-act.c  2
-rw-r--r--  gcc/optabs.c  19
-rw-r--r--  gcc/postreload.c  2
-rw-r--r--  gcc/predict.c  24
-rw-r--r--  gcc/real.c  29
-rw-r--r--  gcc/real.h  11
-rw-r--r--  gcc/rtl.h  102
-rw-r--r--  gcc/simplify-rtx.c  103
-rw-r--r--  gcc/stor-layout.c  12
-rw-r--r--  gcc/system.h  4
-rw-r--r--  gcc/tree-affine.c  46
-rw-r--r--  gcc/tree-chrec.c  10
-rw-r--r--  gcc/tree-data-ref.c  2
-rw-r--r--  gcc/tree-dfa.c  34
-rw-r--r--  gcc/tree-object-size.c  4
-rw-r--r--  gcc/tree-predcom.c  12
-rw-r--r--  gcc/tree-pretty-print.c  8
-rw-r--r--  gcc/tree-ssa-address.c  22
-rw-r--r--  gcc/tree-ssa-alias.c  30
-rw-r--r--  gcc/tree-ssa-ccp.c  102
-rw-r--r--  gcc/tree-ssa-forwprop.c  5
-rw-r--r--  gcc/tree-ssa-loop-ivcanon.c  8
-rw-r--r--  gcc/tree-ssa-loop-ivopts.c  22
-rw-r--r--  gcc/tree-ssa-loop-niter.c  108
-rw-r--r--  gcc/tree-ssa-phiopt.c  2
-rw-r--r--  gcc/tree-ssa-pre.c  2
-rw-r--r--  gcc/tree-ssa-sccvn.c  20
-rw-r--r--  gcc/tree-ssa-structalias.c  6
-rw-r--r--  gcc/tree-ssa.c  6
-rw-r--r--  gcc/tree-switch-conversion.c  6
-rw-r--r--  gcc/tree-vect-loop.c  8
-rw-r--r--  gcc/tree-vrp.c  250
-rw-r--r--  gcc/tree.c  137
-rw-r--r--  gcc/tree.h  100
-rw-r--r--  gcc/varasm.c  4
-rw-r--r--  gcc/wide-int-print.cc  8
-rw-r--r--  gcc/wide-int-print.h  2
-rw-r--r--  gcc/wide-int.cc  2554
-rw-r--r--  gcc/wide-int.h  5230
88 files changed, 3777 insertions, 6063 deletions
diff --git a/gcc/ada/gcc-interface/cuintp.c b/gcc/ada/gcc-interface/cuintp.c
index f535c4bdb02..807a15132e2 100644
--- a/gcc/ada/gcc-interface/cuintp.c
+++ b/gcc/ada/gcc-interface/cuintp.c
@@ -177,7 +177,7 @@ UI_From_gnu (tree Input)
in a signed 64-bit integer. */
if (tree_fits_shwi_p (Input))
return UI_From_Int (tree_to_shwi (Input));
- else if (wide_int::lts_p (Input, 0) && TYPE_UNSIGNED (gnu_type))
+ else if (wi::lts_p (Input, 0) && TYPE_UNSIGNED (gnu_type))
return No_Uint;
#endif
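The cuintp.c change above shows the pattern repeated throughout this patch:
operations that were static member functions or methods of wide_int become
free functions in the wi namespace, and they accept trees (and rtx/mode pairs)
directly rather than requiring an explicit wide_int temporary.  A minimal
sketch of the two styles, assuming the branch's wide-int.h is in scope; the
helper name is hypothetical:

  /* Hypothetical helper: true if INTEGER_CST T is negative when read
     as a signed value.  */
  static bool
  signed_negative_p (const_tree t)
  {
    /* Old style: wide_int::lts_p (t, 0).
       New style: a free function in namespace wi, no temporary needed.  */
    return wi::lts_p (t, 0);
  }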
diff --git a/gcc/alias.c b/gcc/alias.c
index c52a9864ae2..aa79248d471 100644
--- a/gcc/alias.c
+++ b/gcc/alias.c
@@ -340,8 +340,8 @@ ao_ref_from_mem (ao_ref *ref, const_rtx mem)
|| (DECL_P (ref->base)
&& (DECL_SIZE (ref->base) == NULL_TREE
|| TREE_CODE (DECL_SIZE (ref->base)) != INTEGER_CST
- || wide_int::ltu_p (DECL_SIZE (ref->base),
- ref->offset + ref->size)))))
+ || wi::ltu_p (DECL_SIZE (ref->base),
+ ref->offset + ref->size)))))
return false;
return true;
@@ -2285,10 +2285,10 @@ adjust_offset_for_component_ref (tree x, bool *known_p,
}
woffset = xoffset;
- woffset += (addr_wide_int (DECL_FIELD_BIT_OFFSET (field))
- .udiv_trunc (BITS_PER_UNIT));
+ woffset += wi::udiv_trunc (addr_wide_int (DECL_FIELD_BIT_OFFSET (field)),
+ BITS_PER_UNIT);
- if (!woffset.fits_uhwi_p ())
+ if (!wi::fits_uhwi_p (woffset))
{
*known_p = false;
return;
diff --git a/gcc/builtins.c b/gcc/builtins.c
index 4f8676714c0..718128160e9 100644
--- a/gcc/builtins.c
+++ b/gcc/builtins.c
@@ -5042,8 +5042,7 @@ expand_builtin_signbit (tree exp, rtx target)
if (bitpos < GET_MODE_BITSIZE (rmode))
{
- wide_int mask = wide_int::set_bit_in_zero (bitpos,
- GET_MODE_PRECISION (rmode));
+ wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (rmode));
if (GET_MODE_SIZE (imode) > GET_MODE_SIZE (rmode))
temp = gen_lowpart (rmode, temp);
@@ -8139,39 +8138,39 @@ fold_builtin_bitop (tree fndecl, tree arg)
if (TREE_CODE (arg) == INTEGER_CST && !TREE_OVERFLOW (arg))
{
wide_int warg = arg;
- wide_int result;
+ int result;
switch (DECL_FUNCTION_CODE (fndecl))
{
CASE_INT_FN (BUILT_IN_FFS):
- result = warg.ffs ();
+ result = wi::ffs (warg);
break;
CASE_INT_FN (BUILT_IN_CLZ):
- result = warg.clz ();
+ result = wi::clz (warg);
break;
CASE_INT_FN (BUILT_IN_CTZ):
- result = warg.ctz ();
+ result = wi::ctz (warg);
break;
CASE_INT_FN (BUILT_IN_CLRSB):
- result = warg.clrsb ();
+ result = wi::clrsb (warg);
break;
CASE_INT_FN (BUILT_IN_POPCOUNT):
- result = warg.popcount ();
+ result = wi::popcount (warg);
break;
CASE_INT_FN (BUILT_IN_PARITY):
- result = warg.parity ();
+ result = wi::parity (warg);
break;
default:
gcc_unreachable ();
}
- return wide_int_to_tree (TREE_TYPE (TREE_TYPE (fndecl)), result);
+ return build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), result);
}
return NULL_TREE;
@@ -8198,9 +8197,9 @@ fold_builtin_bswap (tree fndecl, tree arg)
{
signop sgn = TYPE_SIGN (type);
tree result =
- wide_int_to_tree (type,
- wide_int (arg)
- .force_to_size (TYPE_PRECISION (type), sgn).bswap ());
+ wide_int_to_tree (type,
+ wide_int::from (arg, TYPE_PRECISION (type),
+ sgn).bswap ());
return result;
}
default:
@@ -8797,12 +8796,12 @@ fold_builtin_memory_op (location_t loc, tree dest, tree src,
TREE_OPERAND (dest_base, 0), 0))
return NULL_TREE;
off = mem_ref_offset (src_base) + src_offset;
- if (!off.fits_shwi_p ())
+ if (!wi::fits_shwi_p (off))
return NULL_TREE;
src_offset = off.to_shwi ();
off = mem_ref_offset (dest_base) + dest_offset;
- if (!off.fits_shwi_p ())
+ if (!wi::fits_shwi_p (off))
return NULL_TREE;
dest_offset = off.to_shwi ();
if (ranges_overlap_p (src_offset, maxsize,
@@ -12734,9 +12733,9 @@ fold_builtin_object_size (tree ptr, tree ost)
{
wide_int wbytes
- = wide_int::from_uhwi (compute_builtin_object_size (ptr, object_size_type),
- precision);
- if (wbytes.fits_to_tree_p (size_type_node))
+ = wi::uhwi (compute_builtin_object_size (ptr, object_size_type),
+ precision);
+ if (wi::fits_to_tree_p (wbytes, size_type_node))
return wide_int_to_tree (size_type_node, wbytes);
}
else if (TREE_CODE (ptr) == SSA_NAME)
@@ -12746,9 +12745,9 @@ fold_builtin_object_size (tree ptr, tree ost)
it. */
wide_int wbytes;
bytes = compute_builtin_object_size (ptr, object_size_type);
- wbytes = wide_int::from_uhwi (bytes, precision);
+ wbytes = wi::uhwi (bytes, precision);
if (bytes != (unsigned HOST_WIDE_INT) (object_size_type < 2 ? -1 : 0)
- && wbytes.fits_to_tree_p (size_type_node))
+ && wi::fits_to_tree_p (wbytes, size_type_node))
return wide_int_to_tree (size_type_node, wbytes);
}
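Note the type change in fold_builtin_bitop above: the bit-counting routines
(wi::ffs, wi::clz, wi::ctz, wi::clrsb, wi::popcount, wi::parity) now return a
plain int rather than a wide_int, so the folded constant is built with
build_int_cst instead of wide_int_to_tree.  A sketch under those assumptions,
with a hypothetical function name:

  /* Hypothetical example: fold __builtin_popcount of a constant ARG.  */
  static tree
  fold_popcount_cst (tree restype, tree arg)
  {
    wide_int warg = arg;               /* ARG must be an INTEGER_CST.  */
    int result = wi::popcount (warg);  /* int, not wide_int.  */
    return build_int_cst (restype, result);
  }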
diff --git a/gcc/c-family/c-ada-spec.c b/gcc/c-family/c-ada-spec.c
index e13c08bd040..89379dbe4c8 100644
--- a/gcc/c-family/c-ada-spec.c
+++ b/gcc/c-family/c-ada-spec.c
@@ -2197,7 +2197,7 @@ dump_generic_ada_node (pretty_printer *buffer, tree node, tree type,
{
wide_int val = node;
int i;
- if (val.neg_p ())
+ if (wi::neg_p (val))
{
pp_minus (buffer);
val = -val;
diff --git a/gcc/c-family/c-common.c b/gcc/c-family/c-common.c
index 8bde6270e76..3d049ac8f52 100644
--- a/gcc/c-family/c-common.c
+++ b/gcc/c-family/c-common.c
@@ -4054,8 +4054,10 @@ shorten_compare (tree *op0_ptr, tree *op1_ptr, tree *restype_ptr,
/* Convert primop1 to target type, but do not introduce
additional overflow. We know primop1 is an int_cst. */
primop1 = force_fit_type (*restype_ptr,
- wide_int (primop1).force_to_size (TYPE_PRECISION (*restype_ptr),
- TYPE_SIGN (TREE_TYPE (primop1))),
+ wide_int::from
+ (primop1,
+ TYPE_PRECISION (*restype_ptr),
+ TYPE_SIGN (TREE_TYPE (primop1))),
0, TREE_OVERFLOW (primop1));
}
if (type != *restype_ptr)
@@ -7925,8 +7927,8 @@ handle_alloc_size_attribute (tree *node, tree ARG_UNUSED (name), tree args,
wide_int p;
if (TREE_CODE (position) != INTEGER_CST
- || (p = wide_int (position)).ltu_p (1)
- || p.gtu_p (arg_count) )
+ || wi::ltu_p (p = wide_int (position), 1)
+ || wi::gtu_p (p, arg_count))
{
warning (OPT_Wattributes,
"alloc_size parameter outside range");
diff --git a/gcc/c-family/c-lex.c b/gcc/c-family/c-lex.c
index bbf7ee5c849..62f738d7bed 100644
--- a/gcc/c-family/c-lex.c
+++ b/gcc/c-family/c-lex.c
@@ -545,7 +545,7 @@ narrowest_unsigned_type (const wide_int &val, unsigned int flags)
continue;
upper = TYPE_MAX_VALUE (integer_types[itk]);
- if (wide_int::geu_p (upper, val))
+ if (wi::geu_p (upper, val))
return (enum integer_type_kind) itk;
}
@@ -573,7 +573,7 @@ narrowest_signed_type (const wide_int &val, unsigned int flags)
continue;
upper = TYPE_MAX_VALUE (integer_types[itk]);
- if (wide_int::geu_p (upper, val))
+ if (wi::geu_p (upper, val))
return (enum integer_type_kind) itk;
}
diff --git a/gcc/c-family/c-pretty-print.c b/gcc/c-family/c-pretty-print.c
index 315b46db886..6f0581dc2ff 100644
--- a/gcc/c-family/c-pretty-print.c
+++ b/gcc/c-family/c-pretty-print.c
@@ -915,7 +915,7 @@ pp_c_integer_constant (c_pretty_printer *pp, tree i)
{
wide_int wi = i;
- if (wi.lt_p (i, 0, TYPE_SIGN (TREE_TYPE (i))))
+ if (wi::lt_p (i, 0, TYPE_SIGN (TREE_TYPE (i))))
{
pp_minus (pp);
wi = -wi;
diff --git a/gcc/cgraph.c b/gcc/cgraph.c
index 1526788dae0..c3992a6cc4c 100644
--- a/gcc/cgraph.c
+++ b/gcc/cgraph.c
@@ -624,7 +624,7 @@ cgraph_add_thunk (struct cgraph_node *decl_node ATTRIBUTE_UNUSED,
node = cgraph_create_node (alias);
gcc_checking_assert (!virtual_offset
- || wide_int::eq_p (virtual_offset, virtual_value));
+ || wi::eq_p (virtual_offset, virtual_value));
node->thunk.fixed_offset = fixed_offset;
node->thunk.this_adjusting = this_adjusting;
node->thunk.virtual_value = virtual_value;
diff --git a/gcc/combine.c b/gcc/combine.c
index b06818a7832..a72b927fbaa 100644
--- a/gcc/combine.c
+++ b/gcc/combine.c
@@ -2674,10 +2674,11 @@ try_combine (rtx i3, rtx i2, rtx i1, rtx i0, int *new_direct_jump_p,
wide_int o;
rtx inner = SET_SRC (PATTERN (i3));
rtx outer = SET_SRC (temp);
-
- o = (wide_int (std::make_pair (outer, GET_MODE (SET_DEST (temp))))
- .insert (std::make_pair (inner, GET_MODE (dest)),
- offset, width));
+
+ o = wi::insert (std::make_pair (outer, GET_MODE (SET_DEST (temp))),
+ std::make_pair (inner, GET_MODE (dest)),
+ offset, width);
+
combine_merges++;
subst_insn = i3;
subst_low_luid = DF_INSN_LUID (i2);
diff --git a/gcc/config/bfin/bfin.c b/gcc/config/bfin/bfin.c
index 356408c4776..92ce3b7489b 100644
--- a/gcc/config/bfin/bfin.c
+++ b/gcc/config/bfin/bfin.c
@@ -3285,7 +3285,7 @@ bfin_local_alignment (tree type, unsigned align)
memcpy can use 32 bit loads/stores. */
if (TYPE_SIZE (type)
&& TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
- && (!wide_int::gtu_p (TYPE_SIZE (type), 8))
+ && !wi::gtu_p (TYPE_SIZE (type), 8)
&& align < 32)
return 32;
return align;
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 74bda2e58e0..ee97486bca8 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -25555,7 +25555,7 @@ ix86_data_alignment (tree type, int align, bool opt)
&& AGGREGATE_TYPE_P (type)
&& TYPE_SIZE (type)
&& TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
- && (wide_int::geu_p (TYPE_SIZE (type), max_align))
+ && wi::geu_p (TYPE_SIZE (type), max_align)
&& align < max_align)
align = max_align;
@@ -25566,7 +25566,7 @@ ix86_data_alignment (tree type, int align, bool opt)
if ((opt ? AGGREGATE_TYPE_P (type) : TREE_CODE (type) == ARRAY_TYPE)
&& TYPE_SIZE (type)
&& TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
- && (wide_int::geu_p (TYPE_SIZE (type), 128))
+ && wi::geu_p (TYPE_SIZE (type), 128)
&& align < 128)
return 128;
}
@@ -25681,7 +25681,7 @@ ix86_local_alignment (tree exp, enum machine_mode mode,
!= TYPE_MAIN_VARIANT (va_list_type_node)))
&& TYPE_SIZE (type)
&& TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
- && (wide_int::geu_p (TYPE_SIZE (type), 16))
+ && wi::geu_p (TYPE_SIZE (type), 16)
&& align < 128)
return 128;
}
diff --git a/gcc/config/rs6000/rs6000-c.c b/gcc/config/rs6000/rs6000-c.c
index ef3fc29f562..4cf85dbab36 100644
--- a/gcc/config/rs6000/rs6000-c.c
+++ b/gcc/config/rs6000/rs6000-c.c
@@ -4196,7 +4196,7 @@ altivec_resolve_overloaded_builtin (location_t loc, tree fndecl,
mode = TYPE_MODE (arg1_type);
if ((mode == V2DFmode || mode == V2DImode) && VECTOR_MEM_VSX_P (mode)
&& TREE_CODE (arg2) == INTEGER_CST
- && wide_int::ltu_p (arg2, 2))
+ && wi::ltu_p (arg2, 2))
{
tree call = NULL_TREE;
@@ -4281,7 +4281,7 @@ altivec_resolve_overloaded_builtin (location_t loc, tree fndecl,
mode = TYPE_MODE (arg1_type);
if ((mode == V2DFmode || mode == V2DImode) && VECTOR_UNIT_VSX_P (mode)
&& tree_fits_uhwi_p (arg2)
- && wide_int::ltu_p (arg2, 2))
+ && wi::ltu_p (arg2, 2))
{
tree call = NULL_TREE;
diff --git a/gcc/cp/decl.c b/gcc/cp/decl.c
index a3636c55b6a..393c889a747 100644
--- a/gcc/cp/decl.c
+++ b/gcc/cp/decl.c
@@ -12804,12 +12804,11 @@ build_enumerator (tree name, tree value, tree enumtype, location_t loc)
{
tree type = TREE_TYPE (prev_value);
signop sgn = TYPE_SIGN (type);
- wide_int wi = (max_wide_int (prev_value)
- .add (1, sgn, &overflowed));
+ wide_int wi = wi::add (prev_value, 1, sgn, &overflowed);
if (!overflowed)
{
- bool pos = !wi.neg_p (sgn);
- if (!wi.fits_to_tree_p (type))
+ bool pos = !wi::neg_p (wi, sgn);
+ if (!wi::fits_to_tree_p (wi, type))
{
unsigned int itk;
for (itk = itk_int; itk != itk_none; itk++)
@@ -12817,7 +12816,7 @@ build_enumerator (tree name, tree value, tree enumtype, location_t loc)
type = integer_types[itk];
if (type != NULL_TREE
&& (pos || !TYPE_UNSIGNED (type))
- && wi.fits_to_tree_p (type))
+ && wi::fits_to_tree_p (wi, type))
break;
}
if (type && cxx_dialect < cxx11
diff --git a/gcc/cp/init.c b/gcc/cp/init.c
index b8fa248a182..5cb9acd6574 100644
--- a/gcc/cp/init.c
+++ b/gcc/cp/init.c
@@ -2271,8 +2271,9 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts,
if (TREE_CODE (inner_nelts_cst) == INTEGER_CST)
{
bool overflow;
- addr_wide_int result = (addr_wide_int (inner_nelts_cst)
- .mul (inner_nelts_count, SIGNED, &overflow));
+ addr_wide_int result = wi::mul (addr_wide_int (inner_nelts_cst),
+ inner_nelts_count, SIGNED,
+ &overflow);
if (overflow)
{
if (complain & tf_error)
@@ -2375,7 +2376,7 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts,
/* Maximum available size in bytes. Half of the address space
minus the cookie size. */
addr_wide_int max_size
- = addr_wide_int::set_bit_in_zero (TYPE_PRECISION (sizetype) - 1);
+ = wi::set_bit_in_zero <addr_wide_int> (TYPE_PRECISION (sizetype) - 1);
/* Maximum number of outer elements which can be allocated. */
addr_wide_int max_outer_nelts;
tree max_outer_nelts_tree;
@@ -2383,28 +2384,29 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts,
gcc_assert (TREE_CODE (size) == INTEGER_CST);
cookie_size = targetm.cxx.get_cookie_size (elt_type);
gcc_assert (TREE_CODE (cookie_size) == INTEGER_CST);
- gcc_checking_assert (addr_wide_int (cookie_size).ltu_p(max_size));
+ gcc_checking_assert (wi::ltu_p (cookie_size, max_size));
/* Unconditionally subtract the cookie size. This decreases the
maximum object size and is safe even if we choose not to use
a cookie after all. */
max_size -= cookie_size;
bool overflow;
- inner_size = addr_wide_int (size)
- .mul (inner_nelts_count, SIGNED, &overflow);
- if (overflow || inner_size.gtu_p (max_size))
+ inner_size = wi::mul (addr_wide_int (size), inner_nelts_count, SIGNED,
+ &overflow);
+ if (overflow || wi::gtu_p (inner_size, max_size))
{
if (complain & tf_error)
error ("size of array is too large");
return error_mark_node;
}
- max_outer_nelts = max_size.udiv_trunc (inner_size);
+ max_outer_nelts = wi::udiv_trunc (max_size, inner_size);
/* Only keep the top-most seven bits, to simplify encoding the
constant in the instruction stream. */
{
unsigned shift = (max_outer_nelts.get_precision ()) - 7
- - max_outer_nelts.clz ().to_shwi ();
- max_outer_nelts = max_outer_nelts.rshiftu (shift).lshift (shift);
+ - wi::clz (max_outer_nelts);
+ max_outer_nelts = wi::lshift (wi::lrshift (max_outer_nelts, shift),
+ shift);
}
max_outer_nelts_tree = wide_int_to_tree (sizetype, max_outer_nelts);
@@ -2480,7 +2482,7 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts,
cookie_size = NULL_TREE;
/* No size arithmetic necessary, so the size check is
not needed. */
- if (outer_nelts_check != NULL && inner_size.one_p ())
+ if (outer_nelts_check != NULL && inner_size == 1)
outer_nelts_check = NULL_TREE;
}
/* Perform the overflow check. */
@@ -2525,7 +2527,7 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts,
cookie_size = NULL_TREE;
/* No size arithmetic necessary, so the size check is
not needed. */
- if (outer_nelts_check != NULL && inner_size.one_p ())
+ if (outer_nelts_check != NULL && inner_size == 1)
outer_nelts_check = NULL_TREE;
}
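The build_new_1 changes show the overflow-reporting arithmetic style: wi::mul
and friends take a signop and a bool out-parameter rather than being chained
as methods.  A sketch of the size check above, assuming the branch API; the
helper name is hypothetical:

  /* Hypothetical helper: does ELT_SIZE * NELTS fit at or below MAX_SIZE?  */
  static bool
  array_size_in_range_p (const addr_wide_int &elt_size,
                         const addr_wide_int &nelts,
                         const addr_wide_int &max_size)
  {
    bool overflow;
    addr_wide_int total = wi::mul (elt_size, nelts, SIGNED, &overflow);
    return !overflow && !wi::gtu_p (total, max_size);
  }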
diff --git a/gcc/cp/mangle.c b/gcc/cp/mangle.c
index f9b399f7e94..e70cf776521 100644
--- a/gcc/cp/mangle.c
+++ b/gcc/cp/mangle.c
@@ -3226,8 +3226,8 @@ write_array_type (const tree type)
addr_wide_int wmax = addr_wide_int (max) + 1;
/* Truncate the result - this will mangle [0, SIZE_INT_MAX]
number of elements as zero. */
- wmax = wmax.zext (TYPE_PRECISION (TREE_TYPE (max)));
- gcc_assert (wmax.fits_uhwi_p ());
+ wmax = wi::zext (wmax, TYPE_PRECISION (TREE_TYPE (max)));
+ gcc_assert (wi::fits_uhwi_p (wmax));
write_unsigned_number (wmax.to_uhwi ());
}
else
diff --git a/gcc/cp/typeck2.c b/gcc/cp/typeck2.c
index 31cd45d9ffe..e02fe093e9e 100644
--- a/gcc/cp/typeck2.c
+++ b/gcc/cp/typeck2.c
@@ -1119,11 +1119,10 @@ process_init_constructor_array (tree type, tree init,
{
tree domain = TYPE_DOMAIN (type);
if (domain && TREE_CONSTANT (TYPE_MAX_VALUE (domain)))
- len = (addr_wide_int (TYPE_MAX_VALUE (domain))
- - TYPE_MIN_VALUE (domain) + 1)
- .ext (TYPE_PRECISION (TREE_TYPE (domain)),
- TYPE_SIGN (TREE_TYPE (domain)))
- .to_uhwi ();
+ len = wi::ext (addr_wide_int (TYPE_MAX_VALUE (domain))
+ - TYPE_MIN_VALUE (domain) + 1,
+ TYPE_PRECISION (TREE_TYPE (domain)),
+ TYPE_SIGN (TREE_TYPE (domain))).to_uhwi ();
else
unbounded = true; /* Take as many as there are. */
}
diff --git a/gcc/dbxout.c b/gcc/dbxout.c
index 2c6a59fbed8..b311e8dcd3d 100644
--- a/gcc/dbxout.c
+++ b/gcc/dbxout.c
@@ -701,7 +701,7 @@ stabstr_O (tree cst)
/* If the value is zero, the base indicator will serve as the value
all by itself. */
- if (wcst.zero_p ())
+ if (wcst == 0)
return;
/* GDB wants constants with no extra leading "1" bits, so
@@ -709,19 +709,19 @@ stabstr_O (tree cst)
present. */
if (res_pres == 1)
{
- digit = wcst.extract_to_hwi (prec - 1, 1) & 0x1;
+ digit = wi::extract_uhwi (wcst, prec - 1, 1);
stabstr_C ('0' + digit);
}
else if (res_pres == 2)
{
- digit = wcst.extract_to_hwi (prec - 2, 2) & 0x3;
+ digit = wi::extract_uhwi (wcst, prec - 2, 2);
stabstr_C ('0' + digit);
}
prec -= res_pres;
for (i = prec - 3; i >= 0; i = i - 3)
{
- digit = wcst.extract_to_hwi (i, 3) & 0x7;
+ digit = wi::extract_uhwi (wcst, i, 3);
stabstr_C ('0' + digit);
}
}
diff --git a/gcc/dojump.c b/gcc/dojump.c
index 6eedecdaac0..700a17c9653 100644
--- a/gcc/dojump.c
+++ b/gcc/dojump.c
@@ -142,7 +142,7 @@ static bool
prefer_and_bit_test (enum machine_mode mode, int bitnum)
{
bool speed_p;
- wide_int mask = wide_int::set_bit_in_zero (bitnum, GET_MODE_PRECISION (mode));
+ wide_int mask = wi::set_bit_in_zero (bitnum, GET_MODE_PRECISION (mode));
if (and_test == 0)
{
diff --git a/gcc/double-int.h b/gcc/double-int.h
index 650520ba052..50ca182b83c 100644
--- a/gcc/double-int.h
+++ b/gcc/double-int.h
@@ -20,6 +20,8 @@ along with GCC; see the file COPYING3. If not see
#ifndef DOUBLE_INT_H
#define DOUBLE_INT_H
+#include "wide-int.h"
+
/* A large integer is currently represented as a pair of HOST_WIDE_INTs.
It therefore represents a number with precision of
2 * HOST_BITS_PER_WIDE_INT bits (it is however possible that the
@@ -435,4 +437,36 @@ void mpz_set_double_int (mpz_t, double_int, bool);
double_int mpz_get_double_int (const_tree, mpz_t, bool);
#endif
+namespace wi
+{
+ template <>
+ struct int_traits <double_int>
+ {
+ static const enum precision_type precision_type = CONST_PRECISION;
+ static const bool host_dependent_precision = true;
+ static const unsigned int precision = HOST_BITS_PER_DOUBLE_INT;
+ static unsigned int get_precision (const double_int &);
+ static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
+ const double_int &);
+ };
+}
+
+inline unsigned int
+wi::int_traits <double_int>::get_precision (const double_int &)
+{
+ return precision;
+}
+
+inline wi::storage_ref
+wi::int_traits <double_int>::decompose (HOST_WIDE_INT *scratch, unsigned int p,
+ const double_int &x)
+{
+ gcc_checking_assert (precision == p);
+ scratch[0] = x.low;
+ if ((x.high == 0 && scratch[0] >= 0) || (x.high == -1 && scratch[0] < 0))
+ return wi::storage_ref (scratch, 1, precision);
+ scratch[1] = x.high;
+ return wi::storage_ref (scratch, 2, precision);
+}
+
#endif /* DOUBLE_INT_H */
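The wi::int_traits <double_int> specialization added above is what lets
double_int values flow straight into wi:: routines at a fixed
HOST_BITS_PER_DOUBLE_INT precision: decompose canonicalizes to a single
HOST_WIDE_INT block when the high word is only sign extension.  A sketch of
the effect, assuming the branch headers:

  /* Sketch: with the traits above, no explicit conversion such as the
     old wide_int::from_double_int is needed at call sites.  */
  double_int d = double_int::from_shwi (-42);
  bool neg = wi::lts_p (d, 0);    /* decompose runs implicitly */
  bool big = wi::gtu_p (d, 100);  /* unsigned view of the same bits */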
diff --git a/gcc/dwarf2out.c b/gcc/dwarf2out.c
index 6c870dd4e07..5c564c31b6b 100644
--- a/gcc/dwarf2out.c
+++ b/gcc/dwarf2out.c
@@ -11841,8 +11841,8 @@ clz_loc_descriptor (rtx rtl, enum machine_mode mode,
<< (GET_MODE_BITSIZE (mode) - 1));
else
msb = immed_wide_int_const
- (wide_int::set_bit_in_zero (GET_MODE_PRECISION (mode) - 1,
- GET_MODE_PRECISION (mode)), mode);
+ (wi::set_bit_in_zero (GET_MODE_PRECISION (mode) - 1,
+ GET_MODE_PRECISION (mode)), mode);
if (GET_CODE (msb) == CONST_INT && INTVAL (msb) < 0)
tmp = new_loc_descr (HOST_BITS_PER_WIDE_INT == 32
? DW_OP_const4u : HOST_BITS_PER_WIDE_INT == 64
@@ -14671,7 +14671,7 @@ static inline addr_wide_int
round_up_to_align (addr_wide_int t, unsigned int align)
{
t += align - 1;
- t = t.udiv_trunc (align);
+ t = wi::udiv_trunc (t, align);
t *= align;
return t;
}
@@ -14791,7 +14791,7 @@ field_byte_offset (const_tree decl)
object_offset_in_bits
= round_up_to_align (object_offset_in_bits, type_align_in_bits);
- if (object_offset_in_bits.gtu_p (bitpos_int))
+ if (wi::gtu_p (object_offset_in_bits, bitpos_int))
{
object_offset_in_bits = deepest_bitpos - type_size_in_bits;
@@ -14805,7 +14805,7 @@ field_byte_offset (const_tree decl)
object_offset_in_bits = bitpos_int;
object_offset_in_bytes
- = object_offset_in_bits.udiv_trunc (BITS_PER_UNIT);
+ = wi::udiv_trunc (object_offset_in_bits, BITS_PER_UNIT);
return object_offset_in_bytes.to_shwi ();
}
@@ -16226,7 +16226,7 @@ add_bound_info (dw_die_ref subrange_die, enum dwarf_attribute bound_attr, tree b
zext_hwi (tree_to_hwi (bound), prec));
}
else if (prec == HOST_BITS_PER_WIDE_INT
- || (cst_fits_uhwi_p (bound) && wide_int (bound).ges_p (0)))
+ || (cst_fits_uhwi_p (bound) && wi::ges_p (bound, 0)))
add_AT_unsigned (subrange_die, bound_attr, tree_to_hwi (bound));
else
add_AT_wide (subrange_die, bound_attr, wide_int (bound));
diff --git a/gcc/dwarf2out.h b/gcc/dwarf2out.h
index d6af85befb3..78d8cc0a80c 100644
--- a/gcc/dwarf2out.h
+++ b/gcc/dwarf2out.h
@@ -30,7 +30,7 @@ typedef struct dw_val_struct *dw_val_ref;
typedef struct dw_cfi_struct *dw_cfi_ref;
typedef struct dw_loc_descr_struct *dw_loc_descr_ref;
typedef struct dw_loc_list_struct *dw_loc_list_ref;
-typedef struct wide_int *wide_int_ref;
+typedef wide_int *wide_int_ptr;
/* Call frames are described using a sequence of Call Frame
@@ -183,7 +183,7 @@ typedef struct GTY(()) dw_val_struct {
HOST_WIDE_INT GTY ((default)) val_int;
unsigned HOST_WIDE_INT GTY ((tag ("dw_val_class_unsigned_const"))) val_unsigned;
double_int GTY ((tag ("dw_val_class_const_double"))) val_double;
- wide_int_ref GTY ((tag ("dw_val_class_wide_int"))) val_wide;
+ wide_int_ptr GTY ((tag ("dw_val_class_wide_int"))) val_wide;
dw_vec_const GTY ((tag ("dw_val_class_vec"))) val_vec;
struct dw_val_die_union
{
diff --git a/gcc/emit-rtl.c b/gcc/emit-rtl.c
index 143f29882c3..d72ba98b3dd 100644
--- a/gcc/emit-rtl.c
+++ b/gcc/emit-rtl.c
@@ -579,6 +579,8 @@ immed_wide_int_const (const wide_int &v, enum machine_mode mode)
if (len < 2 || prec <= HOST_BITS_PER_WIDE_INT)
return gen_int_mode (v.elt (0), mode);
+ wide_int copy = v;
+ wi::clear_undef (copy, SIGNED);
#if TARGET_SUPPORTS_WIDE_INT
{
unsigned int i;
@@ -597,12 +599,12 @@ immed_wide_int_const (const wide_int &v, enum machine_mode mode)
CWI_PUT_NUM_ELEM (value, len);
for (i = 0; i < len; i++)
- CONST_WIDE_INT_ELT (value, i) = v.elt (i);
+ CONST_WIDE_INT_ELT (value, i) = copy.elt (i);
return lookup_const_wide_int (value);
}
#else
- return immed_double_const (v.elt (0), v.elt (1), mode);
+ return immed_double_const (copy.elt (0), copy.elt (1), mode);
#endif
}
diff --git a/gcc/explow.c b/gcc/explow.c
index aa3d971cae7..a893c07ff01 100644
--- a/gcc/explow.c
+++ b/gcc/explow.c
@@ -96,7 +96,8 @@ plus_constant (enum machine_mode mode, rtx x, HOST_WIDE_INT c)
switch (code)
{
CASE_CONST_SCALAR_INT:
- return immed_wide_int_const (wide_int (std::make_pair (x, mode)) + c, mode);
+ return immed_wide_int_const (wi::add (std::make_pair (x, mode), c),
+ mode);
case MEM:
/* If this is a reference to the constant pool, try replacing it with
a reference to a new constant. If the resulting address isn't
diff --git a/gcc/expmed.c b/gcc/expmed.c
index 6d69e4135b2..d42341c6536 100644
--- a/gcc/expmed.c
+++ b/gcc/expmed.c
@@ -71,8 +71,8 @@ static inline rtx
mask_rtx (enum machine_mode mode, int bitpos, int bitsize, bool complement)
{
return immed_wide_int_const
- (wide_int::shifted_mask (bitpos, bitsize, complement,
- GET_MODE_PRECISION (mode)), mode);
+ (wi::shifted_mask (bitpos, bitsize, complement,
+ GET_MODE_PRECISION (mode)), mode);
}
/* Test whether a value is zero or a power of two. */
@@ -1859,8 +1859,8 @@ static rtx
lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
{
return
- immed_wide_int_const (wide_int (std::make_pair (value, mode))
- .zext (bitsize).lshift (bitpos), mode);
+ immed_wide_int_const (wi::lshift (wi::zext (std::make_pair (value, mode),
+ bitsize), bitpos), mode);
}
/* Extract a bit field that is split across two words
@@ -3101,7 +3101,7 @@ expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
{
int p = GET_MODE_PRECISION (mode);
wide_int val = std::make_pair (scalar_op1, mode);
- int shift = val.exact_log2 ().to_shwi ();
+ int shift = wi::exact_log2 (val);
/* Perfect power of 2. */
is_neg = false;
if (shift > 0)
@@ -3119,7 +3119,7 @@ expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
/* Any positive number that fits in a word. */
coeff = CONST_WIDE_INT_ELT (scalar_op1, 0);
}
- else if (val.sign_mask () == 0)
+ else if (wi::sign_mask (val) == 0)
{
/* Any positive number that fits in a word. */
coeff = CONST_WIDE_INT_ELT (scalar_op1, 0);
@@ -3313,12 +3313,12 @@ choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
pow2 = n + lgup - precision;
/* mlow = 2^(N + lgup)/d */
- wide_int val = wide_int::set_bit_in_zero (pow, HOST_BITS_PER_DOUBLE_INT);
- mlow = val.udiv_trunc (d);
+ wide_int val = wi::set_bit_in_zero (pow, HOST_BITS_PER_DOUBLE_INT);
+ mlow = wi::udiv_trunc (val, d);
/* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
- val |= wide_int::set_bit_in_zero(pow2, HOST_BITS_PER_DOUBLE_INT);
- mhigh = val.udiv_trunc (d);
+ val |= wi::set_bit_in_zero (pow2, HOST_BITS_PER_DOUBLE_INT);
+ mhigh = wi::udiv_trunc (val, d);
/* If precision == N, then mlow, mhigh exceed 2^N
(but they do not exceed 2^(N+1)). */
@@ -3326,13 +3326,15 @@ choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
/* Reduce to lowest terms. */
for (post_shift = lgup; post_shift > 0; post_shift--)
{
- unsigned HOST_WIDE_INT ml_lo = mlow.extract_to_hwi (1, HOST_BITS_PER_WIDE_INT);
- unsigned HOST_WIDE_INT mh_lo = mhigh.extract_to_hwi (1, HOST_BITS_PER_WIDE_INT);
+ unsigned HOST_WIDE_INT ml_lo = wi::extract_uhwi (mlow, 1,
+ HOST_BITS_PER_WIDE_INT);
+ unsigned HOST_WIDE_INT mh_lo = wi::extract_uhwi (mhigh, 1,
+ HOST_BITS_PER_WIDE_INT);
if (ml_lo >= mh_lo)
break;
- mlow = wide_int::from_uhwi (ml_lo, HOST_BITS_PER_DOUBLE_INT);
- mhigh = wide_int::from_uhwi (mh_lo, HOST_BITS_PER_DOUBLE_INT);
+ mlow = wi::uhwi (ml_lo, HOST_BITS_PER_DOUBLE_INT);
+ mhigh = wi::uhwi (mh_lo, HOST_BITS_PER_DOUBLE_INT);
}
*post_shift_ptr = post_shift;
@@ -3346,7 +3348,7 @@ choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
else
{
*multiplier_ptr = mhigh.to_uhwi ();
- return mhigh.extract_to_hwi (HOST_BITS_PER_WIDE_INT, 1);
+ return wi::extract_uhwi (mhigh, HOST_BITS_PER_WIDE_INT, 1);
}
}
@@ -3675,8 +3677,8 @@ expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
modulus. By including the signbit in the operation, many targets
can avoid an explicit compare operation in the following comparison
against zero. */
- mask = wide_int::mask (logd, false, GET_MODE_PRECISION (mode));
- mask = mask.set_bit (prec - 1);
+ mask = wi::mask (logd, false, GET_MODE_PRECISION (mode));
+ mask = wi::set_bit (mask, prec - 1);
temp = expand_binop (mode, and_optab, op0,
immed_wide_int_const (mask, mode),
@@ -3690,7 +3692,7 @@ expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
0, OPTAB_LIB_WIDEN);
- mask = wide_int::mask (logd, true, GET_MODE_PRECISION (mode));
+ mask = wi::mask (logd, true, GET_MODE_PRECISION (mode));
temp = expand_binop (mode, ior_optab, temp,
immed_wide_int_const (mask, mode),
result, 1, OPTAB_LIB_WIDEN);
@@ -4937,8 +4939,9 @@ make_tree (tree type, rtx x)
case CONST_DOUBLE:
if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (x) == VOIDmode)
- t = wide_int_to_tree (type, wide_int::from_array (&CONST_DOUBLE_LOW (x), 2,
- HOST_BITS_PER_WIDE_INT * 2));
+ t = wide_int_to_tree (type,
+ wide_int::from_array (&CONST_DOUBLE_LOW (x), 2,
+ HOST_BITS_PER_WIDE_INT * 2));
else
{
REAL_VALUE_TYPE d;
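Mask construction follows the same move: wide_int::mask, shifted_mask,
set_bit and set_bit_in_zero become wi:: free functions that take the target
precision explicitly.  A sketch of the expand_smod_pow2 mask built earlier in
this file, assuming the branch API (LOGD and PREC as in that function):

  /* Sketch: MASK selects the low LOGD bits plus the sign bit of a
     PREC-bit value.  */
  wide_int mask = wi::mask (logd, false, prec);  /* low LOGD bits set */
  mask = wi::set_bit (mask, prec - 1);           /* include the sign bit */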
diff --git a/gcc/expr.c b/gcc/expr.c
index d212dc22c60..1d154ec6281 100644
--- a/gcc/expr.c
+++ b/gcc/expr.c
@@ -719,7 +719,8 @@ convert_modes (enum machine_mode mode, enum machine_mode oldmode, rtx x, int uns
not much to do with respect to canonization. */
if (oldmode != VOIDmode
&& GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (oldmode))
- w = w.ext (GET_MODE_PRECISION (oldmode), unsignedp ? UNSIGNED : SIGNED);
+ w = wi::ext (w, GET_MODE_PRECISION (oldmode),
+ unsignedp ? UNSIGNED : SIGNED);
return immed_wide_int_const (w, mode);
}
@@ -6684,8 +6685,8 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize,
if (!integer_zerop (off))
{
addr_wide_int boff, coff = mem_ref_offset (exp);
- boff = coff.lshift (BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT));
+ boff = wi::lshift (coff, (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT)));
bit_offset += boff;
}
exp = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
@@ -6709,10 +6710,12 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize,
this conversion. */
if (TREE_CODE (offset) == INTEGER_CST)
{
- addr_wide_int tem = addr_wide_int (offset).sext (TYPE_PRECISION (sizetype));
- tem = tem.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT));
+ addr_wide_int tem = wi::sext (addr_wide_int (offset),
+ TYPE_PRECISION (sizetype));
+ tem = wi::lshift (tem, (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT)));
tem += bit_offset;
- if (tem.fits_shwi_p ())
+ if (wi::fits_shwi_p (tem))
{
*pbitpos = tem.to_shwi ();
*poffset = offset = NULL_TREE;
@@ -6723,18 +6726,18 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize,
if (offset)
{
/* Avoid returning a negative bitpos as this may wreak havoc later. */
- if (bit_offset.neg_p ())
+ if (wi::neg_p (bit_offset))
{
addr_wide_int mask
- = addr_wide_int::mask (BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT),
- false);
+ = wi::mask <addr_wide_int> (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT),
+ false);
addr_wide_int tem = bit_offset.and_not (mask);
/* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf.
Subtract it to BIT_OFFSET and add it (scaled) to OFFSET. */
bit_offset -= tem;
- tem = tem.rshifts (BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT));
+ tem = wi::arshift (tem, (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT)));
offset = size_binop (PLUS_EXPR, offset,
wide_int_to_tree (sizetype, tem));
}
@@ -8205,13 +8208,13 @@ expand_expr_real_2 (sepops ops, rtx target, enum machine_mode tmode,
op1 = expand_expr (treeop1, subtarget, VOIDmode,
EXPAND_SUM);
- /* Use wide_int::from_shwi to ensure that the constant is
+ /* Use wi::shwi to ensure that the constant is
truncated according to the mode of OP1, then sign extended
to a HOST_WIDE_INT. Using the constant directly can result
in non-canonical RTL in a 64x32 cross compile. */
wc = tree_to_hwi (treeop0);
- constant_part
- = immed_wide_int_const (wide_int::from_shwi (wc, wmode), wmode);
+ constant_part =
+ immed_wide_int_const (wi::shwi (wc, wmode), wmode);
op1 = plus_constant (mode, op1, INTVAL (constant_part));
if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER)
op1 = force_operand (op1, target);
@@ -8239,13 +8242,13 @@ expand_expr_real_2 (sepops ops, rtx target, enum machine_mode tmode,
return simplify_gen_binary (PLUS, mode, op0, op1);
goto binop2;
}
- /* Use wide_int::from_shwi to ensure that the constant is
+ /* Use wi::shwi to ensure that the constant is
truncated according to the mode of OP1, then sign extended
to a HOST_WIDE_INT. Using the constant directly can result
in non-canonical RTL in a 64x32 cross compile. */
wc = tree_to_hwi (treeop1);
- constant_part
- = immed_wide_int_const (wide_int::from_shwi (wc, wmode), wmode);
+ constant_part
+ = immed_wide_int_const (wi::shwi (wc, wmode), wmode);
op0 = plus_constant (mode, op0, INTVAL (constant_part));
if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER)
op0 = force_operand (op0, target);
@@ -8774,8 +8777,8 @@ expand_expr_real_2 (sepops ops, rtx target, enum machine_mode tmode,
instead. */
if (reduce_bit_field && TYPE_UNSIGNED (type))
{
- wide_int mask = wide_int::mask (TYPE_PRECISION (type),
- false, GET_MODE_PRECISION (mode));
+ wide_int mask = wi::mask (TYPE_PRECISION (type),
+ false, GET_MODE_PRECISION (mode));
temp = expand_binop (mode, xor_optab, op0,
immed_wide_int_const (mask, mode),
@@ -9419,9 +9422,10 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode,
should always be the same as TYPE_PRECISION (type).
However, it is not. Since we are converting from tree to
rtl, we have to expose this ugly truth here. */
- temp = immed_wide_int_const (wide_int (exp)
- .force_to_size (GET_MODE_PRECISION (TYPE_MODE (type)),
- TYPE_SIGN (type)),
+ temp = immed_wide_int_const (wide_int::from
+ (exp,
+ GET_MODE_PRECISION (TYPE_MODE (type)),
+ TYPE_SIGN (type)),
TYPE_MODE (type));
return temp;
}
@@ -10516,7 +10520,7 @@ reduce_to_bit_field_precision (rtx exp, rtx target, tree type)
{
enum machine_mode mode = GET_MODE (exp);
rtx mask = immed_wide_int_const
- (wide_int::mask (prec, false, GET_MODE_PRECISION (mode)), mode);
+ (wi::mask (prec, false, GET_MODE_PRECISION (mode)), mode);
return expand_and (mode, exp, mask, target);
}
else
@@ -11091,8 +11095,7 @@ const_vector_from_tree (tree exp)
inner);
else
RTVEC_ELT (v, i)
- = immed_wide_int_const (wide_int (elt),
- TYPE_MODE (TREE_TYPE (elt)));
+ = immed_wide_int_const (elt, TYPE_MODE (TREE_TYPE (elt)));
}
return gen_rtx_CONST_VECTOR (mode, v);
diff --git a/gcc/fixed-value.c b/gcc/fixed-value.c
index 65a5deb454d..98689e91dad 100644
--- a/gcc/fixed-value.c
+++ b/gcc/fixed-value.c
@@ -157,11 +157,12 @@ fixed_to_decimal (char *str, const FIXED_VALUE_TYPE *f_orig,
{
REAL_VALUE_TYPE real_value, base_value, fixed_value;
+ signop sgn = UNSIGNED_FIXED_POINT_MODE_P (f_orig->mode) ? UNSIGNED : SIGNED;
real_2expN (&base_value, GET_MODE_FBIT (f_orig->mode), f_orig->mode);
real_from_integer (&real_value, VOIDmode,
- wide_int::from_double_int (f_orig->data,
- GET_MODE_PRECISION (f_orig->mode)),
- UNSIGNED_FIXED_POINT_MODE_P (f_orig->mode) ? UNSIGNED : SIGNED);
+ wide_int::from (f_orig->data,
+ GET_MODE_PRECISION (f_orig->mode), sgn),
+ sgn);
real_arithmetic (&fixed_value, RDIV_EXPR, &real_value, &base_value);
real_to_decimal (str, &fixed_value, buf_size, 0, 1);
}
@@ -1102,11 +1103,11 @@ real_convert_from_fixed (REAL_VALUE_TYPE *r, enum machine_mode mode,
{
REAL_VALUE_TYPE base_value, fixed_value, real_value;
+ signop sgn = UNSIGNED_FIXED_POINT_MODE_P (f->mode) ? UNSIGNED : SIGNED;
real_2expN (&base_value, GET_MODE_FBIT (f->mode), f->mode);
real_from_integer (&fixed_value, VOIDmode,
- wide_int::from_double_int (f->data,
- GET_MODE_PRECISION (f->mode)),
- UNSIGNED_FIXED_POINT_MODE_P (f->mode) ? UNSIGNED : SIGNED);
+ wide_int::from (f->data, GET_MODE_PRECISION (f->mode),
+ sgn), sgn);
real_arithmetic (&real_value, RDIV_EXPR, &fixed_value, &base_value);
real_convert (r, mode, &real_value);
}
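Conversions are unified the same way: wide_int::from takes the source value,
an explicit target precision and a signop, replacing the separate
from_double_int, force_to_size and zforce_to_size entry points.  A sketch
matching the fixed-value.c change above:

  /* Sketch: widen or truncate F->data to the mode's precision, letting
     the mode's signedness pick sign versus zero extension.  */
  signop sgn = UNSIGNED_FIXED_POINT_MODE_P (f->mode) ? UNSIGNED : SIGNED;
  wide_int w = wide_int::from (f->data, GET_MODE_PRECISION (f->mode), sgn);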
diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index e15a3cf2e5d..e87a31cd8ec 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -170,7 +170,7 @@ protected_set_expr_location_unshare (tree x, location_t loc)
tree
div_if_zero_remainder (const_tree arg1, const_tree arg2)
{
- wide_int quo, rem;
+ wide_int quo;
wide_int warg1 = arg1;
wide_int warg2 = arg2;
signop sgn = TYPE_SIGN (TREE_TYPE (arg1));
@@ -183,20 +183,18 @@ div_if_zero_remainder (const_tree arg1, const_tree arg2)
precision by 1 bit, iff the top bit is set. */
if (sgn == UNSIGNED)
{
- if (warg1.neg_p ())
- warg1 = warg1.force_to_size (warg1.get_precision () + 1, sgn);
+ if (wi::neg_p (warg1))
+ warg1 = wide_int::from (warg1, warg1.get_precision () + 1, sgn);
sgn = SIGNED;
}
else
{
- if (warg2.neg_p ())
- warg2 = warg2.force_to_size (warg2.get_precision () + 1, sgn2);
+ if (wi::neg_p (warg2))
+ warg2 = wide_int::from (warg2, warg2.get_precision () + 1, sgn2);
}
}
- quo = warg1.divmod_trunc (warg2, &rem, sgn);
-
- if (rem.zero_p ())
+ if (wi::multiple_of_p (warg1, warg2, sgn, &quo))
return wide_int_to_tree (TREE_TYPE (arg1), quo);
return NULL_TREE;
@@ -379,7 +377,7 @@ may_negate_without_overflow_p (const_tree t)
if (TYPE_UNSIGNED (type))
return false;
- return !wide_int (t).only_sign_bit_p ();
+ return !wi::only_sign_bit_p (t);
}
/* Determine whether an expression T can be cheaply negated using
@@ -523,7 +521,7 @@ negate_expr_p (tree t)
if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST)
{
tree op1 = TREE_OPERAND (t, 1);
- if (wide_int::eq_p (op1, TYPE_PRECISION (type) - 1))
+ if (wi::eq_p (op1, TYPE_PRECISION (type) - 1))
return true;
}
break;
@@ -738,7 +736,7 @@ fold_negate_expr (location_t loc, tree t)
if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST)
{
tree op1 = TREE_OPERAND (t, 1);
- if (wide_int::eq_p (op1, TYPE_PRECISION (type) - 1))
+ if (wi::eq_p (op1, TYPE_PRECISION (type) - 1))
{
tree ntype = TYPE_UNSIGNED (type)
? signed_type_for (type)
@@ -978,7 +976,8 @@ int_const_binop_1 (enum tree_code code, const_tree arg1, const_tree parg2,
bool overflow = false;
op1 = arg1;
- arg2 = wide_int (parg2).force_to_size (TYPE_PRECISION (type), TYPE_SIGN (TREE_TYPE (parg2)));
+ arg2 = wide_int::from (parg2, TYPE_PRECISION (type),
+ TYPE_SIGN (TREE_TYPE (parg2)));
switch (code)
{
@@ -996,7 +995,7 @@ int_const_binop_1 (enum tree_code code, const_tree arg1, const_tree parg2,
case RSHIFT_EXPR:
case LSHIFT_EXPR:
- if (arg2.neg_p ())
+ if (wi::neg_p (arg2))
{
arg2 = -arg2;
if (code == RSHIFT_EXPR)
@@ -1009,14 +1008,14 @@ int_const_binop_1 (enum tree_code code, const_tree arg1, const_tree parg2,
/* It's unclear from the C standard whether shifts can overflow.
The following code ignores overflow; perhaps a C standard
interpretation ruling is needed. */
- res = op1.rshift (arg2, sign, GET_MODE_BITSIZE (TYPE_MODE (type)));
+ res = wi::rshift (op1, arg2, sign, GET_MODE_BITSIZE (TYPE_MODE (type)));
else
- res = op1.lshift (arg2, GET_MODE_BITSIZE (TYPE_MODE (type)));
+ res = wi::lshift (op1, arg2, GET_MODE_BITSIZE (TYPE_MODE (type)));
break;
case RROTATE_EXPR:
case LROTATE_EXPR:
- if (arg2.neg_p ())
+ if (wi::neg_p (arg2))
{
arg2 = -arg2;
if (code == RROTATE_EXPR)
@@ -1026,82 +1025,82 @@ int_const_binop_1 (enum tree_code code, const_tree arg1, const_tree parg2,
}
if (code == RROTATE_EXPR)
- res = op1.rrotate (arg2);
+ res = wi::rrotate (op1, arg2);
else
- res = op1.lrotate (arg2);
+ res = wi::lrotate (op1, arg2);
break;
case PLUS_EXPR:
- res = op1.add (arg2, sign, &overflow);
+ res = wi::add (op1, arg2, sign, &overflow);
break;
case MINUS_EXPR:
- res = op1.sub (arg2, sign, &overflow);
+ res = wi::sub (op1, arg2, sign, &overflow);
break;
case MULT_EXPR:
- res = op1.mul (arg2, sign, &overflow);
+ res = wi::mul (op1, arg2, sign, &overflow);
break;
case MULT_HIGHPART_EXPR:
- res = op1.mul_high (arg2, sign);
+ res = wi::mul_high (op1, arg2, sign);
break;
case TRUNC_DIV_EXPR:
case EXACT_DIV_EXPR:
- res = op1.div_trunc (arg2, sign, &overflow);
+ res = wi::div_trunc (op1, arg2, sign, &overflow);
if (overflow)
return NULL_TREE;
break;
case FLOOR_DIV_EXPR:
- res = op1.div_floor (arg2, sign, &overflow);
+ res = wi::div_floor (op1, arg2, sign, &overflow);
if (overflow)
return NULL_TREE;
break;
case CEIL_DIV_EXPR:
- res = op1.div_ceil (arg2, sign, &overflow);
+ res = wi::div_ceil (op1, arg2, sign, &overflow);
if (overflow)
return NULL_TREE;
break;
case ROUND_DIV_EXPR:
- res = op1.div_round (arg2, sign, &overflow);
+ res = wi::div_round (op1, arg2, sign, &overflow);
if (overflow)
return NULL_TREE;
break;
case TRUNC_MOD_EXPR:
- res = op1.mod_trunc (arg2, sign, &overflow);
+ res = wi::mod_trunc (op1, arg2, sign, &overflow);
if (overflow)
return NULL_TREE;
break;
case FLOOR_MOD_EXPR:
- res = op1.mod_floor (arg2, sign, &overflow);
+ res = wi::mod_floor (op1, arg2, sign, &overflow);
if (overflow)
return NULL_TREE;
break;
case CEIL_MOD_EXPR:
- res = op1.mod_ceil (arg2, sign, &overflow);
+ res = wi::mod_ceil (op1, arg2, sign, &overflow);
if (overflow)
return NULL_TREE;
break;
case ROUND_MOD_EXPR:
- res = op1.mod_round (arg2, sign, &overflow);
+ res = wi::mod_round (op1, arg2, sign, &overflow);
if (overflow)
return NULL_TREE;
break;
case MIN_EXPR:
- res = op1.min (arg2, sign);
+ res = wi::min (op1, arg2, sign);
break;
case MAX_EXPR:
- res = op1.max (arg2, sign);
+ res = wi::max (op1, arg2, sign);
break;
default:
@@ -1579,18 +1578,12 @@ size_diffop_loc (location_t loc, tree arg0, tree arg1)
static tree
fold_convert_const_int_from_int (tree type, const_tree arg1)
{
- tree t;
- /* Extend the value coming in to something so large that there is no
- edge conditions on conversion of unsigned to signed numbers. */
- max_wide_int wt = arg1;
-
/* Given an integer constant, make new constant with new type,
- appropriately sign-extended or truncated. */
- t = force_fit_type (type, wt,
- !POINTER_TYPE_P (TREE_TYPE (arg1)),
- TREE_OVERFLOW (arg1));
-
- return t;
+ appropriately sign-extended or truncated. Use max_wide_int
+ so that any extension is done according to ARG1's type. */
+ return force_fit_type (type, max_wide_int (arg1),
+ !POINTER_TYPE_P (TREE_TYPE (arg1)),
+ TREE_OVERFLOW (arg1));
}
/* A subroutine of fold_convert_const handling conversions a REAL_CST
@@ -1706,9 +1699,7 @@ fold_convert_const_int_from_fixed (tree type, const_tree arg1)
/* Given a fixed-point constant, make new constant with new type,
appropriately sign-extended or truncated. */
- t = force_fit_type (type, wide_int::from_double_int (temp,
- HOST_BITS_PER_DOUBLE_INT),
- -1,
+ t = force_fit_type (type, temp, -1,
(temp.is_negative ()
&& (TYPE_UNSIGNED (type)
< TYPE_UNSIGNED (TREE_TYPE (arg1))))
@@ -3732,7 +3723,7 @@ all_ones_mask_p (const_tree mask, unsigned int size)
if (size > precision || TYPE_SIGN (type) == UNSIGNED)
return false;
- return wide_int::mask (size, false, precision) == mask;
+ return wi::mask (size, false, precision) == mask;
}
/* Subroutine for fold: determine if VAL is the INTEGER_CONST that
@@ -3758,7 +3749,7 @@ sign_bit_p (tree exp, const_tree val)
return NULL_TREE;
width = TYPE_PRECISION (t);
- if (wide_int (val).only_sign_bit_p (width))
+ if (wi::only_sign_bit_p (val, width))
return exp;
/* Handle extension from a narrower type. */
@@ -4314,7 +4305,7 @@ build_range_check (location_t loc, tree type, tree exp, int in_p,
if (integer_onep (low) && TREE_CODE (high) == INTEGER_CST)
{
int prec = TYPE_PRECISION (etype);
- wide_int osb = wide_int::set_bit_in_zero (prec - 1, prec) - 1;
+ wide_int osb = wi::set_bit_in_zero (prec - 1, prec) - 1;
if (osb == high)
{
@@ -5047,7 +5038,7 @@ unextend (tree c, int p, int unsignedp, tree mask)
/* We work by getting just the sign bit into the low-order bit, then
into the high-order bit, then sign-extend. We then XOR that value
with C. */
- temp = wide_int_to_tree (TREE_TYPE (c), (wide_int (c).rshiftu (p - 1)) & 1);
+ temp = build_int_cst (TREE_TYPE (c), wi::extract_uhwi (c, p - 1, 1));
/* We must use a signed type in order to get an arithmetic right shift.
However, we must also avoid introducing accidental overflows, so that
@@ -5853,7 +5844,7 @@ extract_muldiv_1 (tree t, tree c, enum tree_code code, tree wide_type,
&& (tcode == RSHIFT_EXPR || TYPE_UNSIGNED (TREE_TYPE (op0)))
/* const_binop may not detect overflow correctly,
so check for it explicitly here. */
- && wide_int::gtu_p (TYPE_PRECISION (TREE_TYPE (size_one_node)), op1)
+ && wi::gtu_p (TYPE_PRECISION (TREE_TYPE (size_one_node)), op1)
&& 0 != (t1 = fold_convert (ctype,
const_binop (LSHIFT_EXPR,
size_one_node,
@@ -6002,9 +5993,9 @@ extract_muldiv_1 (tree t, tree c, enum tree_code code, tree wide_type,
wide_int mul;
bool overflow_p;
signop sign = TYPE_SIGN (ctype);
- mul = wide_int (op1).mul_full (wide_int (c), sign);
+ mul = wi::mul_full (op1, c, sign);
overflow_p = TREE_OVERFLOW (c) | TREE_OVERFLOW (op1);
- if (!mul.fits_to_tree_p (ctype)
+ if (!wi::fits_to_tree_p (mul, ctype)
&& ((sign == UNSIGNED && tcode != MULT_EXPR) || sign == SIGNED))
overflow_p = true;
if (!overflow_p)
@@ -6415,7 +6406,7 @@ fold_div_compare (location_t loc,
/* We have to do this the hard way to detect unsigned overflow.
prod = int_const_binop (MULT_EXPR, arg01, arg1); */
- val = wide_int (arg01).mul (arg1, sign, &overflow);
+ val = wi::mul (arg01, arg1, sign, &overflow);
prod = force_fit_type (TREE_TYPE (arg00), val, -1, overflow);
neg_overflow = false;
@@ -6426,7 +6417,7 @@ fold_div_compare (location_t loc,
lo = prod;
/* Likewise hi = int_const_binop (PLUS_EXPR, prod, tmp). */
- val = wide_int (prod).add (tmp, sign, &overflow);
+ val = wi::add (prod, tmp, sign, &overflow);
hi = force_fit_type (TREE_TYPE (arg00), val,
-1, overflow | TREE_OVERFLOW (prod));
}
@@ -6619,7 +6610,8 @@ fold_single_bit_test (location_t loc, enum tree_code code,
not overflow, adjust BITNUM and INNER. */
if (TREE_CODE (inner) == RSHIFT_EXPR
&& TREE_CODE (TREE_OPERAND (inner, 1)) == INTEGER_CST
- && (max_wide_int (TREE_OPERAND (inner, 1)) + bitnum).ltu_p (TYPE_PRECISION (type)))
+ && wi::ltu_p (wi::add (TREE_OPERAND (inner, 1), bitnum),
+ TYPE_PRECISION (type)))
{
bitnum += tree_to_hwi (TREE_OPERAND (inner, 1));
inner = TREE_OPERAND (inner, 0);
@@ -6879,8 +6871,7 @@ fold_sign_changed_comparison (location_t loc, enum tree_code code, tree type,
return NULL_TREE;
if (TREE_CODE (arg1) == INTEGER_CST)
- arg1 = force_fit_type (inner_type, wide_int (arg1),
- 0, TREE_OVERFLOW (arg1));
+ arg1 = force_fit_type (inner_type, arg1, 0, TREE_OVERFLOW (arg1));
else
arg1 = fold_convert_loc (loc, inner_type, arg1);
@@ -7197,7 +7188,7 @@ fold_plusminus_mult_expr (location_t loc, enum tree_code code, tree type,
/* As we canonicalize A - 2 to A + -2 get rid of that sign for
the purpose of this canonicalization. */
if (TYPE_SIGN (TREE_TYPE (arg1)) == SIGNED
- && wide_int (arg1).neg_p ()
+ && wi::neg_p (arg1)
&& negate_expr_p (arg1)
&& code == PLUS_EXPR)
{
@@ -7287,7 +7278,6 @@ native_encode_int (const_tree expr, unsigned char *ptr, int len)
int total_bytes = GET_MODE_SIZE (TYPE_MODE (type));
int byte, offset, word, words;
unsigned char value;
- wide_int wexpr = wide_int (expr);
if (total_bytes > len)
return 0;
@@ -7296,7 +7286,7 @@ native_encode_int (const_tree expr, unsigned char *ptr, int len)
for (byte = 0; byte < total_bytes; byte++)
{
int bitpos = byte * BITS_PER_UNIT;
- value = wexpr.extract_to_hwi (bitpos, BITS_PER_UNIT);
+ value = wi::extract_uhwi (expr, bitpos, BITS_PER_UNIT);
if (total_bytes > UNITS_PER_WORD)
{
@@ -7524,7 +7514,7 @@ native_interpret_int (tree type, const unsigned char *ptr, int len)
|| total_bytes * BITS_PER_UNIT > HOST_BITS_PER_DOUBLE_INT)
return NULL_TREE;
- result = wide_int::from_buffer (ptr, total_bytes);
+ result = wi::from_buffer (ptr, total_bytes);
return wide_int_to_tree (type, result);
}
@@ -8091,10 +8081,9 @@ fold_unary_loc (location_t loc, enum tree_code code, tree type, tree op0)
}
if (change)
{
- tem = force_fit_type (type, max_wide_int (and1),
- 0, TREE_OVERFLOW (and1));
+ tem = force_fit_type (type, and1, 0, TREE_OVERFLOW (and1));
return fold_build2_loc (loc, BIT_AND_EXPR, type,
- fold_convert_loc (loc, type, and0), tem);
+ fold_convert_loc (loc, type, and0), tem);
}
}
@@ -8882,20 +8871,21 @@ pointer_may_wrap_p (tree base, tree offset, HOST_WIDE_INT bitpos)
if (bitpos < 0)
return true;
+ int precision = TYPE_PRECISION (TREE_TYPE (base));
if (offset == NULL_TREE)
- wi_offset = wide_int::zero (TYPE_PRECISION (TREE_TYPE (base)));
+ wi_offset = wi::zero (precision);
else if (TREE_CODE (offset) != INTEGER_CST || TREE_OVERFLOW (offset))
return true;
else
wi_offset = offset;
bool overflow;
- wide_int units = wide_int::from_shwi (bitpos / BITS_PER_UNIT);
- total = wi_offset.add (units, UNSIGNED, &overflow);
+ wide_int units = wi::shwi (bitpos / BITS_PER_UNIT, precision);
+ total = wi::add (wi_offset, units, UNSIGNED, &overflow);
if (overflow)
return true;
- if (!total.fits_uhwi_p ())
+ if (!wi::fits_uhwi_p (total))
return true;
HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (TREE_TYPE (base)));
@@ -10443,7 +10433,7 @@ fold_binary_loc (location_t loc,
code11 = TREE_CODE (tree11);
if (code01 == INTEGER_CST
&& code11 == INTEGER_CST
- && ((max_wide_int (tree01) + tree11)
+ && (wi::add (tree01, tree11)
== element_precision (TREE_TYPE (TREE_OPERAND (arg0, 0)))))
{
tem = build2_loc (loc, LROTATE_EXPR,
@@ -11235,10 +11225,10 @@ fold_binary_loc (location_t loc,
return omit_one_operand_loc (loc, type, arg1,
TREE_OPERAND (arg0, 0));
- msk = wide_int::mask (width, false, TYPE_PRECISION (TREE_TYPE (arg1)));
+ msk = wi::mask (width, false, TYPE_PRECISION (TREE_TYPE (arg1)));
/* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
- if (msk.and_not (c1 | c2).zero_p ())
+ if (msk.and_not (c1 | c2) == 0)
return fold_build2_loc (loc, BIT_IOR_EXPR, type,
TREE_OPERAND (arg0, 0), arg1);
@@ -11250,8 +11240,9 @@ fold_binary_loc (location_t loc,
c3 = c1.and_not (c2);
for (w = BITS_PER_UNIT; w <= width; w <<= 1)
{
- wide_int mask = wide_int::mask (width - w, false, TYPE_PRECISION (type));
- if (((c1 | c2) & mask) == mask && c1.and_not (mask).zero_p ())
+ wide_int mask = wi::mask (width - w, false,
+ TYPE_PRECISION (type));
+ if (((c1 | c2) & mask) == mask && c1.and_not (mask) == 0)
{
c3 = mask;
break;
@@ -11646,14 +11637,13 @@ fold_binary_loc (location_t loc,
&& TREE_CODE (arg0) == MULT_EXPR
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
{
- int arg1tz
- = wide_int (TREE_OPERAND (arg0, 1)).ctz ().to_shwi ();
+ int arg1tz = wi::ctz (TREE_OPERAND (arg0, 1));
if (arg1tz > 0)
{
wide_int arg1mask, masked;
- arg1mask = wide_int::mask (arg1tz, true, TYPE_PRECISION (type));
- masked = arg1mask & wide_int (arg1);
- if (masked.zero_p ())
+ arg1mask = wi::mask (arg1tz, true, TYPE_PRECISION (type));
+ masked = arg1mask & arg1;
+ if (masked == 0)
return omit_two_operands_loc (loc, type, build_zero_cst (type),
arg0, arg1);
else if (masked != arg1)
@@ -11695,7 +11685,7 @@ fold_binary_loc (location_t loc,
which = 1;
}
- if ((wide_int::max_value (TREE_TYPE (arg0)) & cst1) != cst1)
+ if ((wi::max_value (TREE_TYPE (arg0)) & cst1) != cst1)
which = -1;
for (; which >= 0; which--)
@@ -11726,7 +11716,7 @@ fold_binary_loc (location_t loc,
omitted (assumed 0). */
if ((TREE_CODE (arg0) == PLUS_EXPR
|| (TREE_CODE (arg0) == MINUS_EXPR && which == 0))
- && (wide_int (pmop[which]) & cst1) == 0)
+ && (cst1 & pmop[which]) == 0)
pmop[which] = NULL;
break;
default:
@@ -11788,8 +11778,8 @@ fold_binary_loc (location_t loc,
wide_int mask;
prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)));
- mask = wide_int (arg1).zforce_to_size (prec);
- if (mask.minus_one_p ())
+ mask = wide_int::from (arg1, prec, UNSIGNED);
+ if (mask == -1)
return
fold_convert_loc (loc, type, TREE_OPERAND (arg0, 0));
}
@@ -12193,9 +12183,8 @@ fold_binary_loc (location_t loc,
tree sum = fold_binary_loc (loc, PLUS_EXPR, TREE_TYPE (arg1),
arg1, TREE_OPERAND (arg0, 1));
if (sum && integer_zerop (sum)) {
- tree pow2
- = wide_int_to_tree (integer_type_node,
- wide_int (arg1).exact_log2 ());
+ tree pow2 = build_int_cst (integer_type_node,
+ wi::exact_log2 (arg1));
return fold_build2_loc (loc, RSHIFT_EXPR, type,
TREE_OPERAND (arg0, 0), pow2);
}
@@ -12215,9 +12204,8 @@ fold_binary_loc (location_t loc,
if (integer_pow2p (sval) && tree_int_cst_sgn (sval) > 0)
{
tree sh_cnt = TREE_OPERAND (arg1, 1);
- tree pow2
- = wide_int_to_tree (TREE_TYPE (sh_cnt),
- wide_int (sval).exact_log2 ());
+ tree pow2 = build_int_cst (TREE_TYPE (sh_cnt),
+ wi::exact_log2 (sval));
if (strict_overflow_p)
fold_overflow_warning (("assuming signed overflow does not "
@@ -12250,7 +12238,7 @@ fold_binary_loc (location_t loc,
/* X / -1 is -X. */
if (!TYPE_UNSIGNED (type)
&& TREE_CODE (arg1) == INTEGER_CST
- && wide_int (arg1).minus_one_p ())
+ && wi::eq_p (arg1, -1))
return fold_convert_loc (loc, type, negate_expr (arg0));
/* Convert -A / -B to A / B when the type is signed and overflow is
@@ -12332,7 +12320,7 @@ fold_binary_loc (location_t loc,
/* X % -1 is zero. */
if (!TYPE_UNSIGNED (type)
&& TREE_CODE (arg1) == INTEGER_CST
- && wide_int (arg1).minus_one_p ())
+ && wi::eq_p (arg1, -1))
return omit_one_operand_loc (loc, type, integer_zero_node, arg0);
/* X % -C is the same as X % C. */
@@ -12340,7 +12328,7 @@ fold_binary_loc (location_t loc,
&& TYPE_SIGN (type) == SIGNED
&& TREE_CODE (arg1) == INTEGER_CST
&& !TREE_OVERFLOW (arg1)
- && wide_int (arg1).neg_p ()
+ && wi::neg_p (arg1)
&& !TYPE_OVERFLOW_TRAPS (type)
/* Avoid this transformation if C is INT_MIN, i.e. C == -C. */
&& !sign_bit_p (arg1, arg1))
@@ -12513,8 +12501,8 @@ fold_binary_loc (location_t loc,
if (code == RROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST
&& TREE_CODE (arg0) == RROTATE_EXPR
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
- && (wide_int (arg1) + TREE_OPERAND (arg0, 1))
- .umod_trunc (prec).zero_p ())
+ && wi::umod_trunc (wi::add (arg1, TREE_OPERAND (arg0, 1)),
+ prec) == 0)
return TREE_OPERAND (arg0, 0);
/* Fold (X & C2) << C1 into (X << C1) & (C2 << C1)
@@ -12836,7 +12824,7 @@ fold_binary_loc (location_t loc,
&& operand_equal_p (tree_strip_nop_conversions (TREE_OPERAND (arg0,
1)),
arg1, 0)
- && (wide_int (TREE_OPERAND (arg0, 0)) & 1) == 1)
+ && wi::bit_and (TREE_OPERAND (arg0, 0), 1) == 1)
{
return omit_two_operands_loc (loc, type,
code == NE_EXPR
@@ -12927,7 +12915,7 @@ fold_binary_loc (location_t loc,
prec = TYPE_PRECISION (itype);
/* Check for a valid shift count. */
- if (wide_int::ltu_p (arg001, prec))
+ if (wi::ltu_p (arg001, prec))
{
tree arg01 = TREE_OPERAND (arg0, 1);
tree arg000 = TREE_OPERAND (TREE_OPERAND (arg0, 0), 0);
@@ -13052,7 +13040,7 @@ fold_binary_loc (location_t loc,
tree arg00 = TREE_OPERAND (arg0, 0);
tree arg01 = TREE_OPERAND (arg0, 1);
tree itype = TREE_TYPE (arg00);
- if (wide_int::eq_p (arg01, TYPE_PRECISION (itype) - 1))
+ if (wi::eq_p (arg01, TYPE_PRECISION (itype) - 1))
{
if (TYPE_UNSIGNED (itype))
{
@@ -13459,9 +13447,9 @@ fold_binary_loc (location_t loc,
if (TREE_CODE (arg1) == INTEGER_CST
&& (INTEGRAL_TYPE_P (arg1_type) || POINTER_TYPE_P (arg1_type)))
{
- wide_int max = wide_int::max_value (arg1_type);
- wide_int signed_max = wide_int::max_value (prec, SIGNED);
- wide_int min = wide_int::min_value (arg1_type);
+ wide_int max = wi::max_value (arg1_type);
+ wide_int signed_max = wi::max_value (prec, SIGNED);
+ wide_int min = wi::min_value (arg1_type);
wide_int wi_arg1 = arg1;
if (wi_arg1 == max)
@@ -14075,7 +14063,7 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type,
if (outer_width > TYPE_PRECISION (type))
outer_width = TYPE_PRECISION (type);
- mask = wide_int::shifted_mask
+ mask = wi::shifted_mask
(inner_width, outer_width - inner_width, false,
TYPE_PRECISION (TREE_TYPE (arg1)));
@@ -14084,7 +14072,7 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type,
tem_type = signed_type_for (TREE_TYPE (tem));
tem = fold_convert_loc (loc, tem_type, tem);
}
- else if ((wi_arg1 & mask).zero_p ())
+ else if ((wi_arg1 & mask) == 0)
{
tem_type = unsigned_type_for (TREE_TYPE (tem));
tem = fold_convert_loc (loc, tem_type, tem);
@@ -14357,7 +14345,7 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type,
/* Make sure that the perm value is in an acceptable
range. */
t = val;
- if (t.gtu_p (nelts_cnt))
+ if (wi::gtu_p (t, nelts_cnt))
{
need_mask_canon = true;
sel[i] = t.to_uhwi () & (nelts_cnt - 1);
@@ -15179,7 +15167,7 @@ multiple_of_p (tree type, const_tree top, const_tree bottom)
op1 = TREE_OPERAND (top, 1);
/* const_binop may not detect overflow correctly,
so check for it explicitly here. */
- if (wide_int::gtu_p (TYPE_PRECISION (TREE_TYPE (size_one_node)), op1)
+ if (wi::gtu_p (TYPE_PRECISION (TREE_TYPE (size_one_node)), op1)
&& 0 != (t1 = fold_convert (type,
const_binop (LSHIFT_EXPR,
size_one_node,
@@ -16172,7 +16160,7 @@ fold_negate_const (tree arg0, tree type)
{
wide_int val = arg0;
bool overflow;
- val = val.neg (&overflow);
+ val = wi::neg (val, &overflow);
t = force_fit_type (type, val, 1,
(overflow | TREE_OVERFLOW (arg0))
&& !TYPE_UNSIGNED (type));
@@ -16221,7 +16209,7 @@ fold_abs_const (tree arg0, tree type)
/* If the value is unsigned or non-negative, then the absolute value
is the same as the ordinary value. */
- if (!val.neg_p (TYPE_SIGN (type)))
+ if (!wi::neg_p (val, TYPE_SIGN (type)))
t = arg0;
/* If the value is negative, then the absolute value is
@@ -16229,7 +16217,7 @@ fold_abs_const (tree arg0, tree type)
else
{
bool overflow;
- val = val.neg (&overflow);
+ val = wi::neg (val, &overflow);
t = force_fit_type (type, val, -1,
overflow | TREE_OVERFLOW (arg0));
}
@@ -16260,7 +16248,7 @@ fold_not_const (const_tree arg0, tree type)
gcc_assert (TREE_CODE (arg0) == INTEGER_CST);
- val = ~wide_int (arg0);
+ val = wi::bit_not (arg0);
return force_fit_type (type, val, 0, TREE_OVERFLOW (arg0));
}
@@ -16703,7 +16691,7 @@ round_up_loc (location_t loc, tree value, int divisor)
overflow_p = TREE_OVERFLOW (value);
val &= ~(divisor - 1);
val += divisor;
- if (val.zero_p ())
+ if (val == 0)
overflow_p = true;
return force_fit_type (TREE_TYPE (value), val, -1, overflow_p);
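The fold-const.c hunks above all follow one pattern: predicates that required constructing a wide_int from a tree (.minus_one_p (), .neg_p (), .zero_p ()) become wi:: free functions or plain comparisons applied to the operand directly. A minimal standalone sketch of why the free-function form is the natural fit -- illustrative C++ only, with a toy wi namespace and plain integers standing in for tree/wide_int operands:

    #include <cassert>

    namespace wi
    {
      /* A free function template can accept any two integer-like
         operands; a member such as wide_int::minus_one_p () would
         first force a wide_int temporary from the operand.  */
      template <typename T1, typename T2>
      bool eq_p (const T1 &x, const T2 &y) { return x == static_cast<T1> (y); }

      template <typename T>
      bool neg_p (const T &x) { return x < 0; }
    }

    int main ()
    {
      long arg1 = -1;
      assert (wi::eq_p (arg1, -1));  /* was: wide_int (arg1).minus_one_p () */
      assert (wi::neg_p (arg1));     /* was: wide_int (arg1).neg_p () */
      return 0;
    }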
diff --git a/gcc/fortran/target-memory.c b/gcc/fortran/target-memory.c
index 2c8ba379dad..937fda529b8 100644
--- a/gcc/fortran/target-memory.c
+++ b/gcc/fortran/target-memory.c
@@ -428,7 +428,7 @@ gfc_interpret_logical (int kind, unsigned char *buffer, size_t buffer_size,
{
tree t = native_interpret_expr (gfc_get_logical_type (kind), buffer,
buffer_size);
- *logical = wide_int (t).zero_p () ? 0 : 1;
+ *logical = wi::eq_p (t, 0) ? 0 : 1;
return size_logical (kind);
}
diff --git a/gcc/fortran/trans-array.c b/gcc/fortran/trans-array.c
index cd989523ff6..5c71e176b32 100644
--- a/gcc/fortran/trans-array.c
+++ b/gcc/fortran/trans-array.c
@@ -5386,9 +5386,9 @@ gfc_conv_array_initializer (tree type, gfc_expr * expr)
gfc_conv_structure (&se, expr, 1);
wtmp = addr_wide_int (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) + 1;
- gcc_assert (!wtmp.zero_p ());
+ gcc_assert (wtmp != 0);
/* This will probably eat buckets of memory for large arrays. */
- while (!wtmp.zero_p ())
+ while (wtmp != 0)
{
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, se.expr);
wtmp -= 1;
diff --git a/gcc/fortran/trans-const.c b/gcc/fortran/trans-const.c
index a801ce8c60a..1ff33e5c92a 100644
--- a/gcc/fortran/trans-const.c
+++ b/gcc/fortran/trans-const.c
@@ -200,7 +200,7 @@ gfc_init_constants (void)
tree
gfc_conv_mpz_to_tree (mpz_t i, int kind)
{
- wide_int val = wide_int::from_mpz (gfc_get_int_type (kind), i, true);
+ wide_int val = wi::from_mpz (gfc_get_int_type (kind), i, true);
return wide_int_to_tree (gfc_get_int_type (kind), val);
}
@@ -209,8 +209,7 @@ gfc_conv_mpz_to_tree (mpz_t i, int kind)
void
gfc_conv_tree_to_mpz (mpz_t i, tree source)
{
- wide_int val = source;
- val.to_mpz (i, TYPE_SIGN (TREE_TYPE (source)));
+ wi::to_mpz (source, i, TYPE_SIGN (TREE_TYPE (source)));
}
/* Converts a real constant into backend form. */
diff --git a/gcc/fortran/trans-expr.c b/gcc/fortran/trans-expr.c
index 3cb1effe31b..96951638250 100644
--- a/gcc/fortran/trans-expr.c
+++ b/gcc/fortran/trans-expr.c
@@ -2085,7 +2085,7 @@ gfc_conv_cst_int_power (gfc_se * se, tree lhs, tree rhs)
/* If exponent is too large, we won't expand it anyway, so don't bother
with large integer values. */
- if (!wrhs.fits_shwi_p ())
+ if (!wi::fits_shwi_p (wrhs))
return 0;
m = wrhs.to_shwi ();
diff --git a/gcc/fortran/trans-intrinsic.c b/gcc/fortran/trans-intrinsic.c
index bdfc2d78a05..eceabfa072e 100644
--- a/gcc/fortran/trans-intrinsic.c
+++ b/gcc/fortran/trans-intrinsic.c
@@ -986,8 +986,9 @@ trans_this_image (gfc_se * se, gfc_expr *expr)
{
wide_int wdim_arg = dim_arg;
- if (wdim_arg.ltu_p (1)
- || wdim_arg.gtu_p (GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))))
+ if (wi::ltu_p (wdim_arg, 1)
+ || wi::gtu_p (wdim_arg,
+ GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))))
gfc_error ("'dim' argument of %s intrinsic at %L is not a valid "
"dimension index", expr->value.function.isym->name,
&expr->where);
@@ -1346,8 +1347,8 @@ gfc_conv_intrinsic_bound (gfc_se * se, gfc_expr * expr, int upper)
{
wide_int wbound = bound;
if (((!as || as->type != AS_ASSUMED_RANK)
- && wbound.geu_p (GFC_TYPE_ARRAY_RANK (TREE_TYPE (desc))))
- || wbound.gtu_p (GFC_MAX_DIMENSIONS))
+ && wi::geu_p (wbound, GFC_TYPE_ARRAY_RANK (TREE_TYPE (desc))))
+ || wi::gtu_p (wbound, GFC_MAX_DIMENSIONS))
gfc_error ("'dim' argument of %s intrinsic at %L is not a valid "
"dimension index", upper ? "UBOUND" : "LBOUND",
&expr->where);
@@ -1543,7 +1544,8 @@ conv_intrinsic_cobound (gfc_se * se, gfc_expr * expr)
if (INTEGER_CST_P (bound))
{
wide_int wbound = bound;
- if (wbound.ltu_p (1) || wbound.gtu_p (GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))))
+ if (wi::ltu_p (wbound, 1)
+ || wi::gtu_p (wbound, GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))))
gfc_error ("'dim' argument of %s intrinsic at %L is not a valid "
"dimension index", expr->value.function.isym->name,
&expr->where);
diff --git a/gcc/fortran/trans-types.c b/gcc/fortran/trans-types.c
index 0d85ce1fd1e..48ab6545071 100644
--- a/gcc/fortran/trans-types.c
+++ b/gcc/fortran/trans-types.c
@@ -954,8 +954,8 @@ gfc_init_types (void)
n = TYPE_PRECISION (gfc_array_index_type) - GFC_DTYPE_SIZE_SHIFT;
gfc_max_array_element_size
= wide_int_to_tree (long_unsigned_type_node,
- wide_int::max_value (n, UNSIGNED,
- TYPE_PRECISION (long_unsigned_type_node)));
+ wi::mask (n, UNSIGNED,
+ TYPE_PRECISION (long_unsigned_type_node)));
boolean_type_node = gfc_get_logical_type (gfc_default_logical_kind);
boolean_true_node = build_int_cst (boolean_type_node, 1);
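In the trans-types.c hunk, wide_int::max_value (n, UNSIGNED, prec) becomes wi::mask (n, UNSIGNED, prec); both denote the prec-bit value with the low n bits set, i.e. 2^n - 1. A standalone sketch of that identity (uint64_t stands in for wide_int; n < 64 assumed):

    #include <cassert>
    #include <cstdint>

    /* Low N bits set: the largest unsigned N-bit value.  */
    static uint64_t
    mask (unsigned n)
    {
      return ((uint64_t) 1 << n) - 1;
    }

    int main ()
    {
      assert (mask (3) == 7);
      assert (mask (16) == 0xffff);
      return 0;
    }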
diff --git a/gcc/gengtype-parse.c b/gcc/gengtype-parse.c
index acd0bb03df1..4c1a4b3f9f3 100644
--- a/gcc/gengtype-parse.c
+++ b/gcc/gengtype-parse.c
@@ -182,6 +182,23 @@ require2 (int t1, int t2)
return v;
}
+/* If the next token does not have one of the codes T1, T2 or T3, report a
+ parse error; otherwise return the token's value. */
+static const char *
+require3 (int t1, int t2, int t3)
+{
+ int u = token ();
+ const char *v = advance ();
+ if (u != t1 && u != t2 && u != t3)
+ {
+ parse_error ("expected %s, %s or %s, have %s",
+ print_token (t1, 0), print_token (t2, 0),
+ print_token (t3, 0), print_token (u, v));
+ return 0;
+ }
+ return v;
+}
+
/* Near-terminals. */
/* C-style string constant concatenation: STRING+
@@ -228,7 +245,8 @@ require_template_declaration (const char *tmpl_name)
str = concat (tmpl_name, "<", (char *) 0);
/* Read the comma-separated list of identifiers. */
- while (token () != '>')
+ int depth = 1;
+ while (depth > 0)
{
if (token () == ENUM)
{
@@ -241,16 +259,31 @@ require_template_declaration (const char *tmpl_name)
str = concat (str, advance (), (char *) 0);
continue;
}
- const char *id = require2 (ID, ',');
+ if (token () == ':')
+ {
+ advance ();
+ str = concat (str, ":", (char *) 0);
+ continue;
+ }
+ if (token () == '<')
+ {
+ advance ();
+ str = concat (str, "<", (char *) 0);
+ depth += 1;
+ continue;
+ }
+ if (token () == '>')
+ {
+ advance ();
+ str = concat (str, ">", (char *) 0);
+ depth -= 1;
+ continue;
+ }
+ const char *id = require3 (SCALAR, ID, ',');
if (id == NULL)
id = ",";
str = concat (str, id, (char *) 0);
}
-
- /* Recognize the closing '>'. */
- require ('>');
- str = concat (str, ">", (char *) 0);
-
return str;
}
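The gengtype-parse.c change replaces "scan to the first '>'" with explicit depth tracking, so nested template argument lists (and '::'-qualified names) are consumed whole. A self-contained character-level sketch of the same idea -- illustrative only, since gengtype itself walks tokens, not raw characters:

    #include <cassert>
    #include <string>

    /* Consume a balanced template argument list starting at '<',
       tracking nesting depth instead of stopping at the first '>'.  */
    static std::string
    read_template_args (const char *p)
    {
      std::string out ("<");
      int depth = 1;
      ++p;                       /* skip the opening '<' */
      while (depth > 0 && *p)
        {
          if (*p == '<')
            depth++;
          else if (*p == '>')
            depth--;
          out += *p++;
        }
      assert (depth == 0);       /* input must balance */
      return out;
    }

    int main ()
    {
      /* A nested list that a single scan-to-'>' loop would cut short.  */
      assert (read_template_args ("<pair<int,2>,T>") == "<pair<int,2>,T>");
      return 0;
    }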
diff --git a/gcc/gengtype-state.c b/gcc/gengtype-state.c
index ba7948a1ff0..8eaa729c311 100644
--- a/gcc/gengtype-state.c
+++ b/gcc/gengtype-state.c
@@ -30,7 +30,6 @@
#endif
#include "system.h"
#include "errors.h" /* For fatal. */
-#include "double-int.h"
#include "hashtab.h"
#include "version.h" /* For version_string & pkgversion_string. */
#include "obstack.h"
diff --git a/gcc/gengtype.c b/gcc/gengtype.c
index 08f8a58e90f..1dd1c611249 100644
--- a/gcc/gengtype.c
+++ b/gcc/gengtype.c
@@ -25,7 +25,6 @@
#include "system.h"
#include "errors.h" /* for fatal */
#include "getopt.h"
-#include "double-int.h"
#include "version.h" /* for version_string & pkgversion_string. */
#include "hashtab.h"
#include "xregex.h"
@@ -525,7 +524,7 @@ do_typedef (const char *s, type_p t, struct fileloc *pos)
for (p = typedefs; p != NULL; p = p->next)
if (strcmp (p->name, s) == 0)
{
- if (p->type != t)
+ if (p->type != t && strcmp (s, "result_type") != 0)
{
error_at_line (pos, "type `%s' previously defined", s);
error_at_line (&p->line, "previously defined here");
@@ -5468,7 +5467,9 @@ main (int argc, char **argv)
POS_HERE (do_scalar_typedef ("REAL_VALUE_TYPE", &pos));
POS_HERE (do_scalar_typedef ("FIXED_VALUE_TYPE", &pos));
POS_HERE (do_scalar_typedef ("double_int", &pos));
- POS_HERE (do_scalar_typedef ("wide_int", &pos));
+ POS_HERE (do_scalar_typedef ("double_int_storage", &pos));
+ POS_HERE (do_scalar_typedef ("addr_wide_int", &pos));
+ POS_HERE (do_scalar_typedef ("max_wide_int", &pos));
POS_HERE (do_scalar_typedef ("uint64_t", &pos));
POS_HERE (do_scalar_typedef ("uint8", &pos));
POS_HERE (do_scalar_typedef ("uintptr_t", &pos));
diff --git a/gcc/genmodes.c b/gcc/genmodes.c
index 7be633ac5ba..7bb16d3c61e 100644
--- a/gcc/genmodes.c
+++ b/gcc/genmodes.c
@@ -868,14 +868,14 @@ emit_max_int (void)
max = i->bytesize;
if (max > mmax)
mmax = max;
- printf ("#define MAX_BITSIZE_MODE_ANY_INT (%d*BITS_PER_UNIT)\n", mmax);
+ printf ("#define MAX_BITSIZE_MODE_ANY_INT %d\n", mmax * MAX_BITS_PER_UNIT);
mmax = 0;
for (j = 0; j < MAX_MODE_CLASS; j++)
for (i = modes[j]; i; i = i->next)
if (mmax < i->bytesize)
mmax = i->bytesize;
- printf ("#define MAX_BITSIZE_MODE_ANY_MODE (%d*BITS_PER_UNIT)\n", mmax);
+ printf ("#define MAX_BITSIZE_MODE_ANY_MODE %d\n", mmax * MAX_BITS_PER_UNIT);
}
static void
diff --git a/gcc/gimple-fold.c b/gcc/gimple-fold.c
index 65a075a83ef..c125ba486e0 100644
--- a/gcc/gimple-fold.c
+++ b/gcc/gimple-fold.c
@@ -2800,15 +2800,16 @@ fold_array_ctor_reference (tree type, tree ctor,
be larger than size of array element. */
if (!TYPE_SIZE_UNIT (type)
|| TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST
- || elt_size.lts_p (addr_wide_int (TYPE_SIZE_UNIT (type))))
+ || wi::lts_p (elt_size, TYPE_SIZE_UNIT (type)))
return NULL_TREE;
/* Compute the array index we look for. */
- access_index = addr_wide_int (offset / BITS_PER_UNIT).udiv_trunc (elt_size);
+ access_index = wi::udiv_trunc (addr_wide_int (offset / BITS_PER_UNIT),
+ elt_size);
access_index += low_bound;
if (index_type)
- access_index = access_index.ext (TYPE_PRECISION (index_type),
- TYPE_SIGN (index_type));
+ access_index = wi::ext (access_index, TYPE_PRECISION (index_type),
+ TYPE_SIGN (index_type));
/* And offset within the access. */
inner_offset = offset % (elt_size.to_uhwi () * BITS_PER_UNIT);
@@ -2820,7 +2821,8 @@ fold_array_ctor_reference (tree type, tree ctor,
index = low_bound - 1;
if (index_type)
- index = index.ext (TYPE_PRECISION (index_type), TYPE_SIGN (index_type));
+ index = wi::ext (index, TYPE_PRECISION (index_type),
+ TYPE_SIGN (index_type));
FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), cnt, cfield, cval)
{
@@ -2842,14 +2844,14 @@ fold_array_ctor_reference (tree type, tree ctor,
{
index += 1;
if (index_type)
- index = index.ext (TYPE_PRECISION (index_type),
- TYPE_SIGN (index_type));
+ index = wi::ext (index, TYPE_PRECISION (index_type),
+ TYPE_SIGN (index_type));
max_index = index;
}
/* Do we have match? */
- if (access_index.cmpu (index) >= 0
- && access_index.cmpu (max_index) <= 0)
+ if (wi::cmpu (access_index, index) >= 0
+ && wi::cmpu (access_index, max_index) <= 0)
return fold_ctor_reference (type, cval, inner_offset, size,
from_decl);
}
@@ -2889,11 +2891,10 @@ fold_nonarray_ctor_reference (tree type, tree ctor,
: TREE_CODE (TREE_TYPE (cfield)) == ARRAY_TYPE));
/* Compute bit offset of the field. */
- bitoffset = (addr_wide_int (field_offset)
- + byte_offset_cst * addr_wide_int (BITS_PER_UNIT));
+ bitoffset = wi::add (field_offset, byte_offset_cst * BITS_PER_UNIT);
/* Compute bit offset where the field ends. */
if (field_size != NULL_TREE)
- bitoffset_end = bitoffset + addr_wide_int (field_size);
+ bitoffset_end = bitoffset + field_size;
else
bitoffset_end = 0;
@@ -2901,17 +2902,17 @@ fold_nonarray_ctor_reference (tree type, tree ctor,
/* Is there any overlap between [OFFSET, OFFSET+SIZE) and
[BITOFFSET, BITOFFSET_END)? */
- if (access_end.cmps (bitoffset) > 0
+ if (wi::cmps (access_end, bitoffset) > 0
&& (field_size == NULL_TREE
- || addr_wide_int (offset).lts_p (bitoffset_end)))
+ || wi::lts_p (offset, bitoffset_end)))
{
addr_wide_int inner_offset = addr_wide_int (offset) - bitoffset;
/* We do have overlap. Now see if field is large enough to
cover the access. Give up for accesses spanning multiple
fields. */
- if (access_end.cmps (bitoffset_end) > 0)
+ if (wi::cmps (access_end, bitoffset_end) > 0)
return NULL_TREE;
- if (addr_wide_int (offset).lts_p (bitoffset))
+ if (wi::lts_p (offset, bitoffset))
return NULL_TREE;
return fold_ctor_reference (type, cval,
inner_offset.to_uhwi (), size,
@@ -3009,10 +3010,10 @@ fold_const_aggregate_ref_1 (tree t, tree (*valueize) (tree))
&& (tree_fits_uhwi_p (unit_size)))
{
addr_wide_int woffset
- = (addr_wide_int (idx) - addr_wide_int (low_bound))
- .sext (TYPE_PRECISION (TREE_TYPE (idx)));
+ = wi::sext (addr_wide_int (idx) - low_bound,
+ TYPE_PRECISION (TREE_TYPE (idx)));
- if (woffset.fits_shwi_p ())
+ if (wi::fits_shwi_p (woffset))
{
offset = woffset.to_shwi ();
/* TODO: This code seems wrong, multiply then check
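The fold_array_ctor_reference hunks keep the same arithmetic in wi:: form: the element index is the bit offset scaled down to bytes, divided (truncating, unsigned) by the element size, plus the array's low bound, then extended to the index type's precision. A standalone sketch with plain 64-bit integers (the wi::ext extension step is omitted):

    #include <cassert>
    #include <cstdint>

    static const uint64_t BITS_PER_UNIT = 8;

    /* access_index = udiv_trunc (offset / BITS_PER_UNIT, elt_size)
                        + low_bound  */
    static uint64_t
    array_access_index (uint64_t bit_offset, uint64_t elt_size_bytes,
                        uint64_t low_bound)
    {
      return bit_offset / BITS_PER_UNIT / elt_size_bytes + low_bound;
    }

    int main ()
    {
      /* Bits 64..95 of an int array whose domain starts at 1: index 3.  */
      assert (array_access_index (64, 4, 1) == 3);
      return 0;
    }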
diff --git a/gcc/gimple-ssa-strength-reduction.c b/gcc/gimple-ssa-strength-reduction.c
index 796dcbd7d25..024dffe15eb 100644
--- a/gcc/gimple-ssa-strength-reduction.c
+++ b/gcc/gimple-ssa-strength-reduction.c
@@ -777,7 +777,6 @@ restructure_reference (tree *pbase, tree *poffset, max_wide_int *pindex,
{
tree base = *pbase, offset = *poffset;
max_wide_int index = *pindex;
- wide_int bpu = BITS_PER_UNIT;
tree mult_op0, t1, t2, type;
max_wide_int c1, c2, c3, c4;
@@ -786,11 +785,11 @@ restructure_reference (tree *pbase, tree *poffset, max_wide_int *pindex,
|| TREE_CODE (base) != MEM_REF
|| TREE_CODE (offset) != MULT_EXPR
|| TREE_CODE (TREE_OPERAND (offset, 1)) != INTEGER_CST
- || !index.umod_floor (bpu).zero_p ())
+ || wi::umod_floor (index, BITS_PER_UNIT) != 0)
return false;
t1 = TREE_OPERAND (base, 0);
- c1 = max_wide_int::from_wide_int (mem_ref_offset (base));
+ c1 = max_wide_int::from (mem_ref_offset (base), SIGNED);
type = TREE_TYPE (TREE_OPERAND (base, 1));
mult_op0 = TREE_OPERAND (offset, 0);
@@ -822,7 +821,7 @@ restructure_reference (tree *pbase, tree *poffset, max_wide_int *pindex,
c2 = 0;
}
- c4 = index.udiv_floor (bpu);
+ c4 = wi::udiv_floor (index, BITS_PER_UNIT);
*pbase = t1;
*poffset = fold_build2 (MULT_EXPR, sizetype, t2,
@@ -968,7 +967,7 @@ create_mul_imm_cand (gimple gs, tree base_in, tree stride_in, bool speed)
X = (B + i') * (S * c) */
base = base_cand->base_expr;
index = base_cand->index;
- temp = (max_wide_int (base_cand->stride) * stride_in);
+ temp = wi::mul (base_cand->stride, stride_in);
stride = wide_int_to_tree (TREE_TYPE (stride_in), temp);
ctype = base_cand->cand_type;
if (has_single_use (base_in))
@@ -990,7 +989,7 @@ create_mul_imm_cand (gimple gs, tree base_in, tree stride_in, bool speed)
+ stmt_cost (base_cand->cand_stmt, speed));
}
else if (base_cand->kind == CAND_ADD
- && base_cand->index.one_p ()
+ && base_cand->index == 1
&& TREE_CODE (base_cand->stride) == INTEGER_CST)
{
/* Y = B + (1 * S), S constant
@@ -1091,7 +1090,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in,
while (addend_cand && !base && addend_cand->kind != CAND_PHI)
{
if (addend_cand->kind == CAND_MULT
- && addend_cand->index.zero_p ()
+ && addend_cand->index == 0
&& TREE_CODE (addend_cand->stride) == INTEGER_CST)
{
/* Z = (B + 0) * S, S constant
@@ -1118,7 +1117,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in,
while (base_cand && !base && base_cand->kind != CAND_PHI)
{
if (base_cand->kind == CAND_ADD
- && (base_cand->index.zero_p ()
+ && (base_cand->index == 0
|| operand_equal_p (base_cand->stride,
integer_zero_node, 0)))
{
@@ -1141,7 +1140,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in,
while (subtrahend_cand && !base && subtrahend_cand->kind != CAND_PHI)
{
if (subtrahend_cand->kind == CAND_MULT
- && subtrahend_cand->index.zero_p ()
+ && subtrahend_cand->index == 0
&& TREE_CODE (subtrahend_cand->stride) == INTEGER_CST)
{
/* Z = (B + 0) * S, S constant
@@ -1205,7 +1204,7 @@ create_add_imm_cand (gimple gs, tree base_in, max_wide_int index_in, bool speed)
signop sign = TYPE_SIGN (TREE_TYPE (base_cand->stride));
if (TREE_CODE (base_cand->stride) == INTEGER_CST
- && index_in.multiple_of_p (base_cand->stride, sign, &multiple))
+ && wi::multiple_of_p (index_in, base_cand->stride, sign, &multiple))
{
/* Y = (B + i') * S, S constant, c = kS for some integer k
X = Y + c
@@ -1824,7 +1823,7 @@ cand_abs_increment (slsr_cand_t c)
{
max_wide_int increment = cand_increment (c);
- if (!address_arithmetic_p && increment.neg_p ())
+ if (!address_arithmetic_p && wi::neg_p (increment))
increment = -increment;
return increment;
@@ -1854,7 +1853,7 @@ replace_mult_candidate (slsr_cand_t c, tree basis_name, const max_wide_int &bump
in this case. This does not affect siblings or dependents
of C. Restriction to signed HWI is conservative for unsigned
types but allows for safe negation without twisted logic. */
- if (bump.fits_shwi_p ()
+ if (wi::fits_shwi_p (bump)
&& bump.to_shwi () != HOST_WIDE_INT_MIN
/* It is not useful to replace casts, copies, or adds of
an SSA name and a constant. */
@@ -1872,7 +1871,7 @@ replace_mult_candidate (slsr_cand_t c, tree basis_name, const max_wide_int &bump
types, introduce a cast. */
if (!useless_type_conversion_p (target_type, TREE_TYPE (basis_name)))
basis_name = introduce_cast_before_cand (c, target_type, basis_name);
- if (bump.neg_p ())
+ if (wi::neg_p (bump))
{
code = MINUS_EXPR;
bump = -bump;
@@ -1886,7 +1885,7 @@ replace_mult_candidate (slsr_cand_t c, tree basis_name, const max_wide_int &bump
print_gimple_stmt (dump_file, c->cand_stmt, 0, 0);
}
- if (bump.zero_p ())
+ if (bump == 0)
{
tree lhs = gimple_assign_lhs (c->cand_stmt);
gimple copy_stmt = gimple_build_assign (lhs, basis_name);
@@ -1994,7 +1993,7 @@ create_add_on_incoming_edge (slsr_cand_t c, tree basis_name,
/* If the add candidate along this incoming edge has the same
index as C's hidden basis, the hidden basis represents this
edge correctly. */
- if (increment.zero_p ())
+ if (increment == 0)
return basis_name;
basis_type = TREE_TYPE (basis_name);
@@ -2005,7 +2004,7 @@ create_add_on_incoming_edge (slsr_cand_t c, tree basis_name,
tree bump_tree;
enum tree_code code = PLUS_EXPR;
max_wide_int bump = increment * c->stride;
- if (bump.neg_p ())
+ if (wi::neg_p (bump))
{
code = MINUS_EXPR;
bump = -bump;
@@ -2018,7 +2017,7 @@ create_add_on_incoming_edge (slsr_cand_t c, tree basis_name,
else
{
int i;
- bool negate_incr = (!address_arithmetic_p && increment.neg_p ());
+ bool negate_incr = (!address_arithmetic_p && wi::neg_p (increment));
i = incr_vec_index (negate_incr ? -increment : increment);
gcc_assert (i >= 0);
@@ -2028,10 +2027,10 @@ create_add_on_incoming_edge (slsr_cand_t c, tree basis_name,
new_stmt = gimple_build_assign_with_ops (code, lhs, basis_name,
incr_vec[i].initializer);
}
- else if (increment.one_p ())
+ else if (increment == 1)
new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, lhs, basis_name,
c->stride);
- else if (increment.minus_one_p ())
+ else if (increment == -1)
new_stmt = gimple_build_assign_with_ops (MINUS_EXPR, lhs, basis_name,
c->stride);
else
@@ -2092,7 +2091,7 @@ create_phi_basis (slsr_cand_t c, gimple from_phi, tree basis_name,
/* If the phi argument is the base name of the CAND_PHI, then
this incoming arc should use the hidden basis. */
if (operand_equal_p (arg, phi_cand->base_expr, 0))
- if (basis->index.zero_p ())
+ if (basis->index == 0)
feeding_def = gimple_assign_lhs (basis->cand_stmt);
else
{
@@ -2312,7 +2311,7 @@ record_increment (slsr_cand_t c, const max_wide_int &increment_in, bool is_phi_a
/* Treat increments that differ only in sign as identical so as to
share initializers, unless we are generating pointer arithmetic. */
- if (!address_arithmetic_p && increment.neg_p ())
+ if (!address_arithmetic_p && wi::neg_p (increment))
increment = -increment;
for (i = 0; i < incr_vec_len; i++)
@@ -2356,8 +2355,8 @@ record_increment (slsr_cand_t c, const max_wide_int &increment_in, bool is_phi_a
if (c->kind == CAND_ADD
&& !is_phi_adjust
&& c->index == increment
- && (increment.gts_p (1)
- || increment.lts_p (-1))
+ && (wi::gts_p (increment, 1)
+ || wi::lts_p (increment, -1))
&& (gimple_assign_rhs_code (c->cand_stmt) == PLUS_EXPR
|| gimple_assign_rhs_code (c->cand_stmt) == POINTER_PLUS_EXPR))
{
@@ -2656,7 +2655,7 @@ analyze_increments (slsr_cand_t first_dep, enum machine_mode mode, bool speed)
/* If somehow this increment is bigger than a HWI, we won't
be optimizing candidates that use it. And if the increment
has a count of zero, nothing will be done with it. */
- if (!incr_vec[i].incr.fits_shwi_p () || !incr_vec[i].count)
+ if (!wi::fits_shwi_p (incr_vec[i].incr) || !incr_vec[i].count)
incr_vec[i].cost = COST_INFINITE;
/* Increments of 0, 1, and -1 are always profitable to replace,
@@ -2953,10 +2952,10 @@ insert_initializers (slsr_cand_t c)
max_wide_int incr = incr_vec[i].incr;
if (!profitable_increment_p (i)
- || incr.one_p ()
- || (incr.minus_one_p ()
+ || incr == 1
+ || (incr == -1
&& gimple_assign_rhs_code (c->cand_stmt) != POINTER_PLUS_EXPR)
- || incr.zero_p ())
+ || incr == 0)
continue;
/* We may have already identified an existing initializer that
@@ -3044,7 +3043,7 @@ all_phi_incrs_profitable (slsr_cand_t c, gimple phi)
slsr_cand_t arg_cand = base_cand_from_table (arg);
max_wide_int increment = arg_cand->index - basis->index;
- if (!address_arithmetic_p && increment.neg_p ())
+ if (!address_arithmetic_p && wi::neg_p (increment))
increment = -increment;
j = incr_vec_index (increment);
@@ -3198,7 +3197,7 @@ replace_one_candidate (slsr_cand_t c, unsigned i, tree basis_name)
from the basis name, or an add of the stride to the basis
name, respectively. It may be necessary to introduce a
cast (or reuse an existing cast). */
- else if (cand_incr.one_p ())
+ else if (cand_incr == 1)
{
tree stride_type = TREE_TYPE (c->stride);
tree orig_type = TREE_TYPE (orig_rhs2);
@@ -3213,7 +3212,7 @@ replace_one_candidate (slsr_cand_t c, unsigned i, tree basis_name)
c);
}
- else if (cand_incr.minus_one_p ())
+ else if (cand_incr == -1)
{
tree stride_type = TREE_TYPE (c->stride);
tree orig_type = TREE_TYPE (orig_rhs2);
@@ -3240,7 +3239,7 @@ replace_one_candidate (slsr_cand_t c, unsigned i, tree basis_name)
fputs (" (duplicate, not actually replacing)\n", dump_file);
}
- else if (cand_incr.zero_p ())
+ else if (cand_incr == 0)
{
tree lhs = gimple_assign_lhs (c->cand_stmt);
tree lhs_type = TREE_TYPE (lhs);
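Several strength-reduction hunks share the wi::neg_p (bump) test: a negative bump is applied as a MINUS of its absolute value rather than a PLUS. A toy sketch of that normalization, with hypothetical names standing in for the gimple machinery:

    #include <cassert>
    #include <cstdint>

    enum op_code { PLUS_EXPR, MINUS_EXPR };

    /* Negate a negative bump and report which operation to build,
       mirroring the PLUS_EXPR/MINUS_EXPR switch above.  */
    static op_code
    normalize_bump (int64_t &bump)
    {
      if (bump < 0)              /* wi::neg_p (bump) */
        {
          bump = -bump;
          return MINUS_EXPR;
        }
      return PLUS_EXPR;
    }

    int main ()
    {
      int64_t bump = -12;
      assert (normalize_bump (bump) == MINUS_EXPR && bump == 12);
      return 0;
    }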
diff --git a/gcc/graphite-clast-to-gimple.c b/gcc/graphite-clast-to-gimple.c
index aed82a7005d..76197040d1d 100644
--- a/gcc/graphite-clast-to-gimple.c
+++ b/gcc/graphite-clast-to-gimple.c
@@ -67,7 +67,7 @@ gmp_cst_to_tree (tree type, mpz_t val)
mpz_init (tmp);
mpz_set (tmp, val);
- wi = wide_int::from_mpz (t, tmp, true);
+ wi = wi::from_mpz (t, tmp, true);
mpz_clear (tmp);
return wide_int_to_tree (t, wi);
diff --git a/gcc/graphite-sese-to-poly.c b/gcc/graphite-sese-to-poly.c
index 56be4caf0dd..ebf03ddf640 100644
--- a/gcc/graphite-sese-to-poly.c
+++ b/gcc/graphite-sese-to-poly.c
@@ -53,7 +53,7 @@ static inline void
tree_int_to_gmp (tree t, mpz_t res)
{
wide_int wi = t;
- wi.to_mpz (res, TYPE_SIGN (TREE_TYPE (t)));
+ wi::to_mpz (wi, res, TYPE_SIGN (TREE_TYPE (t)));
}
/* Returns the index of the PHI argument defined in the outermost
@@ -1041,7 +1041,7 @@ build_loop_iteration_domains (scop_p scop, struct loop *loop,
isl_constraint *c;
mpz_init (g);
- nit.to_mpz (g, SIGNED);
+ wi::to_mpz (nit, g, SIGNED);
mpz_sub_ui (g, g, 1);
approx = extract_affine_gmp (g, isl_set_get_space (inner));
x = isl_pw_aff_ge_set (approx, aff);
diff --git a/gcc/ipa-prop.c b/gcc/ipa-prop.c
index 44c1c51408b..3b75b4a485e 100644
--- a/gcc/ipa-prop.c
+++ b/gcc/ipa-prop.c
@@ -3540,7 +3540,8 @@ ipa_modify_call_arguments (struct cgraph_edge *cs, gimple stmt,
if (TYPE_ALIGN (type) > align)
align = TYPE_ALIGN (type);
}
- misalign += (addr_wide_int (off).sext (TYPE_PRECISION (TREE_TYPE (off)))
+ misalign += (wi::sext (addr_wide_int (off),
+ TYPE_PRECISION (TREE_TYPE (off)))
* BITS_PER_UNIT).to_short_addr ();
misalign = misalign & (align - 1);
if (misalign != 0)
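The ipa-prop.c hunk sign-extends the byte offset, scales it to bits, and folds it into the misalignment modulo the (power-of-two) alignment. The same computation with int64_t standing in for addr_wide_int:

    #include <cassert>
    #include <cstdint>

    static unsigned
    update_misalign (unsigned misalign, int64_t off_bytes, unsigned align)
    {
      misalign += (unsigned) (off_bytes * 8);   /* * BITS_PER_UNIT */
      return misalign & (align - 1);            /* align is a power of two */
    }

    int main ()
    {
      /* A 4-byte offset against 64-bit alignment lands on bit 32...  */
      assert (update_misalign (0, 4, 64) == 32);
      /* ...and a -4-byte offset wraps to the same residue.  */
      assert (update_misalign (0, -4, 64) == 32);
      return 0;
    }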
diff --git a/gcc/java/boehm.c b/gcc/java/boehm.c
index 7e7bbd1b6da..158c8ebd274 100644
--- a/gcc/java/boehm.c
+++ b/gcc/java/boehm.c
@@ -108,7 +108,7 @@ mark_reference_fields (tree field,
bits for all words in the record. This is conservative, but the
size_words != 1 case is impossible in regular java code. */
for (i = 0; i < size_words; ++i)
- *mask = (*mask).set_bit (ubit - count - i - 1);
+ *mask = wi::set_bit (*mask, ubit - count - i - 1);
if (count >= ubit - 2)
*pointer_after_end = 1;
@@ -146,7 +146,7 @@ get_boehm_type_descriptor (tree type)
value_type = java_type_for_mode (ptr_mode, 1);
- mask = wide_int::zero (TYPE_PRECISION (value_type));
+ mask = wi::zero (TYPE_PRECISION (value_type));
/* If we have a type of unknown size, use a proc. */
if (int_size_in_bytes (type) == -1)
@@ -196,12 +196,12 @@ get_boehm_type_descriptor (tree type)
that we don't have to emit reflection data for run time
marking. */
count = 0;
- mask = wide_int::zero (TYPE_PRECISION (value_type));
+ mask = wi::zero (TYPE_PRECISION (value_type));
++last_set_index;
while (last_set_index)
{
if ((last_set_index & 1))
- mask = mask.set_bit (log2_size + count);
+ mask = wi::set_bit (mask, log2_size + count);
last_set_index >>= 1;
++count;
}
@@ -210,7 +210,7 @@ get_boehm_type_descriptor (tree type)
else if (! pointer_after_end)
{
/* Bottom two bits for bitmap mark type are 01. */
- mask = mask.set_bit (0);
+ mask = wi::set_bit (mask, 0);
value = wide_int_to_tree (value_type, mask);
}
else
diff --git a/gcc/java/jcf-parse.c b/gcc/java/jcf-parse.c
index e217f24852c..d9025ced492 100644
--- a/gcc/java/jcf-parse.c
+++ b/gcc/java/jcf-parse.c
@@ -1043,9 +1043,9 @@ get_constant (JCF *jcf, int index)
wide_int val;
num = JPOOL_UINT (jcf, index);
- val = wide_int (num).sforce_to_size (32).lshift_widen (32, 64);
+ val = wi::lshift (wide_int::from (num, 64, SIGNED), 32);
num = JPOOL_UINT (jcf, index + 1);
- val |= wide_int (num);
+ val |= num;
value = wide_int_to_tree (long_type_node, val);
break;
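The jcf-parse.c hunk builds a Java long from two 32-bit constant-pool words: widen the first word to 64 bits, shift it into the high half, then OR in the second. The same arithmetic with plain integers standing in for wide_int (two's complement conversion assumed):

    #include <cassert>
    #include <cstdint>

    /* wi::lshift (wide_int::from (hi, 64, SIGNED), 32) | lo  */
    static int64_t
    make_long (uint32_t hi, uint32_t lo)
    {
      return (int64_t) (((uint64_t) hi << 32) | lo);
    }

    int main ()
    {
      assert (make_long (0x1, 0x2) == 0x100000002LL);
      assert (make_long (0xffffffffu, 0xffffffffu) == -1);  /* all bits set */
      return 0;
    }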
diff --git a/gcc/loop-doloop.c b/gcc/loop-doloop.c
index 17e4cb1c7fa..daeb26cb66f 100644
--- a/gcc/loop-doloop.c
+++ b/gcc/loop-doloop.c
@@ -461,9 +461,9 @@ doloop_modify (struct loop *loop, struct niter_desc *desc,
/* Determine if the iteration counter will be non-negative.
Note that the maximum value loaded is iterations_max - 1. */
if (max_loop_iterations (loop, &iterations)
- && (iterations.leu_p (wide_int::set_bit_in_zero
- (GET_MODE_PRECISION (mode) - 1,
- GET_MODE_PRECISION (mode)))))
+ && wi::leu_p (iterations,
+ wi::set_bit_in_zero (GET_MODE_PRECISION (mode) - 1,
+ GET_MODE_PRECISION (mode))))
nonneg = 1;
break;
@@ -553,7 +553,7 @@ doloop_modify (struct loop *loop, struct niter_desc *desc,
rtx iter_rtx;
if (!max_loop_iterations (loop, &iter)
- || !iter.fits_shwi_p ())
+ || !wi::fits_shwi_p (iter))
iter_rtx = const0_rtx;
else
iter_rtx = GEN_INT (iter.to_shwi ());
@@ -671,7 +671,7 @@ doloop_optimize (struct loop *loop)
count = copy_rtx (desc->niter_expr);
iterations = desc->const_iter ? desc->niter_expr : const0_rtx;
if (!max_loop_iterations (loop, &iter)
- || !iter.fits_shwi_p ())
+ || !wi::fits_shwi_p (iter))
iterations_max = const0_rtx;
else
iterations_max = GEN_INT (iter.to_shwi ());
@@ -697,7 +697,7 @@ doloop_optimize (struct loop *loop)
computed, we must be sure that the number of iterations fits into
the new mode. */
&& (word_mode_size >= GET_MODE_PRECISION (mode)
- || iter.leu_p (word_mode_max)))
+ || wi::leu_p (iter, word_mode_max)))
{
if (word_mode_size > GET_MODE_PRECISION (mode))
{
diff --git a/gcc/loop-unroll.c b/gcc/loop-unroll.c
index 810449d6ec7..0b46f1d8399 100644
--- a/gcc/loop-unroll.c
+++ b/gcc/loop-unroll.c
@@ -696,7 +696,7 @@ decide_unroll_constant_iterations (struct loop *loop, int flags)
if (desc->niter < 2 * nunroll
|| ((estimated_loop_iterations (loop, &iterations)
|| max_loop_iterations (loop, &iterations))
- && iterations.ltu_p (2 * nunroll)))
+ && wi::ltu_p (iterations, 2 * nunroll)))
{
if (dump_file)
fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n");
@@ -819,8 +819,7 @@ unroll_loop_constant_iterations (struct loop *loop)
desc->niter -= exit_mod;
loop->nb_iterations_upper_bound -= exit_mod;
if (loop->any_estimate
- && wide_int (exit_mod).leu_p
- (loop->nb_iterations_estimate))
+ && wi::leu_p (exit_mod, loop->nb_iterations_estimate))
loop->nb_iterations_estimate -= exit_mod;
else
loop->any_estimate = false;
@@ -863,8 +862,7 @@ unroll_loop_constant_iterations (struct loop *loop)
desc->niter -= exit_mod + 1;
loop->nb_iterations_upper_bound -= exit_mod + 1;
if (loop->any_estimate
- && wide_int (exit_mod + 1).leu_p
- (loop->nb_iterations_estimate))
+ && wi::leu_p (exit_mod + 1, loop->nb_iterations_estimate))
loop->nb_iterations_estimate -= exit_mod + 1;
else
loop->any_estimate = false;
@@ -917,10 +915,10 @@ unroll_loop_constant_iterations (struct loop *loop)
desc->niter /= max_unroll + 1;
loop->nb_iterations_upper_bound
- = loop->nb_iterations_upper_bound.udiv_trunc (max_unroll + 1);
+ = wi::udiv_trunc (loop->nb_iterations_upper_bound, max_unroll + 1);
if (loop->any_estimate)
loop->nb_iterations_estimate
- = loop->nb_iterations_estimate.udiv_trunc (max_unroll + 1);
+ = wi::udiv_trunc (loop->nb_iterations_estimate, max_unroll + 1);
desc->niter_expr = GEN_INT (desc->niter);
/* Remove the edges. */
@@ -997,7 +995,7 @@ decide_unroll_runtime_iterations (struct loop *loop, int flags)
/* Check whether the loop rolls. */
if ((estimated_loop_iterations (loop, &iterations)
|| max_loop_iterations (loop, &iterations))
- && iterations.ltu_p (2 * nunroll))
+ && wi::ltu_p (iterations, 2 * nunroll))
{
if (dump_file)
fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n");
@@ -1308,10 +1306,10 @@ unroll_loop_runtime_iterations (struct loop *loop)
simplify_gen_binary (UDIV, desc->mode, old_niter,
GEN_INT (max_unroll + 1));
loop->nb_iterations_upper_bound
- = loop->nb_iterations_upper_bound.udiv_trunc (max_unroll + 1);
+ = wi::udiv_trunc (loop->nb_iterations_upper_bound, max_unroll + 1);
if (loop->any_estimate)
loop->nb_iterations_estimate
- = loop->nb_iterations_estimate.udiv_trunc (max_unroll + 1);
+ = wi::udiv_trunc (loop->nb_iterations_estimate, max_unroll + 1);
if (exit_at_end)
{
desc->niter_expr =
@@ -1384,7 +1382,7 @@ decide_peel_simple (struct loop *loop, int flags)
if (estimated_loop_iterations (loop, &iterations))
{
/* TODO: unsigned/signed confusion */
- if (wide_int::from_shwi (npeel).leu_p (iterations))
+ if (wi::leu_p (npeel, iterations))
{
if (dump_file)
{
@@ -1401,7 +1399,7 @@ decide_peel_simple (struct loop *loop, int flags)
/* If we have small enough bound on iterations, we can still peel (completely
unroll). */
else if (max_loop_iterations (loop, &iterations)
- && iterations.ltu_p (npeel))
+ && wi::ltu_p (iterations, npeel))
npeel = iterations.to_shwi () + 1;
else
{
@@ -1552,7 +1550,7 @@ decide_unroll_stupid (struct loop *loop, int flags)
/* Check whether the loop rolls. */
if ((estimated_loop_iterations (loop, &iterations)
|| max_loop_iterations (loop, &iterations))
- && iterations.ltu_p (2 * nunroll))
+ && wi::ltu_p (iterations, 2 * nunroll))
{
if (dump_file)
fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n");
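Each loop-unroll.c hunk performs the same bound update: after unrolling by a factor of max_unroll + 1, the recorded iteration bounds shrink by a truncating unsigned division (wi::udiv_trunc). A standalone sketch:

    #include <cassert>
    #include <cstdint>

    int main ()
    {
      uint64_t nb_iterations_upper_bound = 1001;
      unsigned max_unroll = 3;

      /* wi::udiv_trunc (bound, max_unroll + 1): 1001 / 4 == 250.  */
      nb_iterations_upper_bound /= max_unroll + 1;
      assert (nb_iterations_upper_bound == 250);
      return 0;
    }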
diff --git a/gcc/lto-streamer-in.c b/gcc/lto-streamer-in.c
index bb5de529b1d..453c2445073 100644
--- a/gcc/lto-streamer-in.c
+++ b/gcc/lto-streamer-in.c
@@ -702,8 +702,7 @@ input_cfg (struct lto_input_block *ib, struct function *fn,
for (i = 0; i < len; i++)
a[i] = streamer_read_hwi (ib);
- loop->nb_iterations_upper_bound
- = max_wide_int::from_array (a, len);
+ loop->nb_iterations_upper_bound = max_wide_int::from_array (a, len);
}
loop->any_estimate = streamer_read_hwi (ib);
if (loop->any_estimate)
@@ -715,8 +714,7 @@ input_cfg (struct lto_input_block *ib, struct function *fn,
for (i = 0; i < len; i++)
a[i] = streamer_read_hwi (ib);
- loop->nb_iterations_estimate
- = max_wide_int::from_array (a, len);
+ loop->nb_iterations_estimate = max_wide_int::from_array (a, len);
}
place_new_loop (fn, loop);
diff --git a/gcc/lto/lto.c b/gcc/lto/lto.c
index 5842c43de45..1b755acc4a2 100644
--- a/gcc/lto/lto.c
+++ b/gcc/lto/lto.c
@@ -1795,7 +1795,7 @@ compare_tree_sccs_1 (tree t1, tree t2, tree **map)
if (CODE_CONTAINS_STRUCT (code, TS_INT_CST))
{
- if (!wide_int::eq_p (t1, t2))
+ if (!wi::eq_p (t1, t2))
return false;
}
diff --git a/gcc/machmode.def b/gcc/machmode.def
index 1062f186e8a..9ca8b799e97 100644
--- a/gcc/machmode.def
+++ b/gcc/machmode.def
@@ -229,6 +229,9 @@ UACCUM_MODE (USA, 4, 16, 16); /* 16.16 */
UACCUM_MODE (UDA, 8, 32, 32); /* 32.32 */
UACCUM_MODE (UTA, 16, 64, 64); /* 64.64 */
+/* Should be overridden by EXTRA_MODES_FILE if wrong. */
+#define MAX_BITS_PER_UNIT 8
+
/* Allow the target to specify additional modes of various kinds. */
#if HAVE_EXTRA_MODES
# include EXTRA_MODES_FILE
diff --git a/gcc/objc/objc-act.c b/gcc/objc/objc-act.c
index 0d17eb28ea0..3839f5e8ed6 100644
--- a/gcc/objc/objc-act.c
+++ b/gcc/objc/objc-act.c
@@ -4924,7 +4924,7 @@ objc_decl_method_attributes (tree *node, tree attributes, int flags)
/* Get the value of the argument and add 2. */
tree number = TREE_VALUE (argument);
if (number && TREE_CODE (number) == INTEGER_CST
- && !wide_int (number).zero_p ())
+ && !wi::eq_p (number, 0))
TREE_VALUE (argument)
= wide_int_to_tree (TREE_TYPE (number), wide_int (number) + 2);
argument = TREE_CHAIN (argument);
diff --git a/gcc/optabs.c b/gcc/optabs.c
index a351257a47c..b47692146a5 100644
--- a/gcc/optabs.c
+++ b/gcc/optabs.c
@@ -851,8 +851,8 @@ expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
{
carries = outof_input;
- tmp = immed_wide_int_const (wide_int::from_shwi (BITS_PER_WORD,
- op1_mode), op1_mode);
+ tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD,
+ op1_mode), op1_mode);
tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
0, true, methods);
}
@@ -868,14 +868,14 @@ expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
if (shift_mask == BITS_PER_WORD - 1)
{
tmp = immed_wide_int_const
- (wide_int::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode);
+ (wi::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode);
tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
0, true, methods);
}
else
{
- tmp = immed_wide_int_const (wide_int::from_shwi (BITS_PER_WORD - 1,
- op1_mode), op1_mode);
+ tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD - 1,
+ op1_mode), op1_mode);
tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
0, true, methods);
}
@@ -1038,8 +1038,7 @@ expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
is true when the effective shift value is less than BITS_PER_WORD.
Set SUPERWORD_OP1 to the shift count that should be used to shift
OUTOF_INPUT into INTO_TARGET when the condition is false. */
- tmp = immed_wide_int_const (wide_int::from_shwi (BITS_PER_WORD, op1_mode),
- op1_mode);
+ tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, op1_mode), op1_mode);
if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
{
/* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
@@ -2925,7 +2924,7 @@ expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
}
- mask = wide_int::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
+ mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
if (code == ABS)
mask = ~mask;
@@ -3586,7 +3585,7 @@ expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
op1 = operand_subword_force (op1, word, mode);
}
- mask = wide_int::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
+ mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
sign = expand_binop (imode, and_optab, op1,
immed_wide_int_const (mask, imode),
NULL_RTX, 1, OPTAB_LIB_WIDEN);
@@ -3656,7 +3655,7 @@ expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
}
- mask = wide_int::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
+ mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
if (target == 0
|| target == op0
diff --git a/gcc/postreload.c b/gcc/postreload.c
index 2d0f320a7c7..6ac5bb32a80 100644
--- a/gcc/postreload.c
+++ b/gcc/postreload.c
@@ -310,7 +310,7 @@ reload_cse_simplify_set (rtx set, rtx insn)
case SIGN_EXTEND:
result = wide_int (std::make_pair (this_rtx, GET_MODE (src)));
if (GET_MODE_PRECISION (GET_MODE (src)) > GET_MODE_PRECISION (word_mode))
- result = result.sext (GET_MODE_PRECISION (word_mode));
+ result = wi::sext (result, GET_MODE_PRECISION (word_mode));
break;
default:
gcc_unreachable ();
diff --git a/gcc/predict.c b/gcc/predict.c
index f6f55d912b7..986284744bd 100644
--- a/gcc/predict.c
+++ b/gcc/predict.c
@@ -1285,44 +1285,44 @@ predict_iv_comparison (struct loop *loop, basic_block bb,
max_wide_int compare_step = compare_step_var;
/* (loop_bound - base) / compare_step */
- tem = loop_bound.sub (base, SIGNED, &overflow);
+ tem = wi::sub (loop_bound, base, SIGNED, &overflow);
overall_overflow |= overflow;
- loop_count = tem.div_trunc (compare_step, SIGNED, &overflow);
+ loop_count = wi::div_trunc (tem, compare_step, SIGNED, &overflow);
overall_overflow |= overflow;
- if ((!compare_step.neg_p ())
+ if (!wi::neg_p (compare_step)
^ (compare_code == LT_EXPR || compare_code == LE_EXPR))
{
/* (loop_bound - compare_bound) / compare_step */
- tem = loop_bound.sub (compare_bound, SIGNED, &overflow);
+ tem = wi::sub (loop_bound, compare_bound, SIGNED, &overflow);
overall_overflow |= overflow;
- compare_count = tem.div_trunc (compare_step, SIGNED, &overflow);
+ compare_count = wi::div_trunc (tem, compare_step, SIGNED, &overflow);
overall_overflow |= overflow;
}
else
{
/* (compare_bound - base) / compare_step */
- tem = compare_bound.sub (base, SIGNED, &overflow);
+ tem = wi::sub (compare_bound, base, SIGNED, &overflow);
overall_overflow |= overflow;
- compare_count = tem.div_trunc (compare_step, SIGNED, &overflow);
+ compare_count = wi::div_trunc (tem, compare_step, SIGNED, &overflow);
overall_overflow |= overflow;
}
if (compare_code == LE_EXPR || compare_code == GE_EXPR)
++compare_count;
if (loop_bound_code == LE_EXPR || loop_bound_code == GE_EXPR)
++loop_count;
- if (compare_count.neg_p ())
+ if (wi::neg_p (compare_count))
compare_count = 0;
- if (loop_count.neg_p ())
+ if (wi::neg_p (loop_count))
loop_count = 0;
- if (loop_count.zero_p ())
+ if (loop_count == 0)
probability = 0;
- else if (compare_count.cmps (loop_count) == 1)
+ else if (wi::cmps (compare_count, loop_count) == 1)
probability = REG_BR_PROB_BASE;
else
{
tem = compare_count * REG_BR_PROB_BASE;
- tem = tem.udiv_trunc (loop_count);
+ tem = wi::udiv_trunc (tem, loop_count);
probability = tem.to_uhwi ();
}
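The predict.c hunk ends by scaling the comparison count into a branch probability; the multiply happens in the wide type so compare_count * REG_BR_PROB_BASE cannot wrap before the divide. A standalone sketch using GCC's REG_BR_PROB_BASE of 10000:

    #include <cassert>
    #include <cstdint>

    static const int64_t REG_BR_PROB_BASE = 10000;

    static int
    branch_probability (int64_t compare_count, int64_t loop_count)
    {
      if (loop_count == 0)
        return 0;
      if (compare_count > loop_count)      /* wi::cmps (...) == 1 */
        return (int) REG_BR_PROB_BASE;
      /* wi::udiv_trunc (compare_count * REG_BR_PROB_BASE, loop_count) */
      return (int) (compare_count * REG_BR_PROB_BASE / loop_count);
    }

    int main ()
    {
      assert (branch_probability (1, 4) == 2500);
      assert (branch_probability (5, 4) == 10000);
      return 0;
    }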
diff --git a/gcc/real.c b/gcc/real.c
index 559670a634f..ffb7213aedf 100644
--- a/gcc/real.c
+++ b/gcc/real.c
@@ -1395,7 +1395,7 @@ real_to_integer (const REAL_VALUE_TYPE *r, bool *fail, int precision)
{
case rvc_zero:
underflow:
- return wide_int::zero (precision);
+ return wi::zero (precision);
case rvc_inf:
case rvc_nan:
@@ -1403,9 +1403,9 @@ real_to_integer (const REAL_VALUE_TYPE *r, bool *fail, int precision)
*fail = true;
if (r->sign)
- return wide_int::set_bit_in_zero (precision - 1, precision);
+ return wi::set_bit_in_zero (precision - 1, precision);
else
- return ~wide_int::set_bit_in_zero (precision - 1, precision);
+ return ~wi::set_bit_in_zero (precision - 1, precision);
case rvc_normal:
if (r->decimal)
@@ -1450,8 +1450,8 @@ real_to_integer (const REAL_VALUE_TYPE *r, bool *fail, int precision)
w = SIGSZ * HOST_BITS_PER_LONG + words * HOST_BITS_PER_WIDE_INT;
result = wide_int::from_array (val,
(w + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT, w, w);
- result = result.rshiftu ((words * HOST_BITS_PER_WIDE_INT) - exp);
- result = result.zforce_to_size (precision);
+ result = wi::lrshift (result, (words * HOST_BITS_PER_WIDE_INT) - exp);
+ result = wide_int::from (result, precision, UNSIGNED);
if (r->sign)
return -result;
@@ -2191,26 +2191,26 @@ real_from_integer (REAL_VALUE_TYPE *r, enum machine_mode mode,
void
real_from_integer (REAL_VALUE_TYPE *r, enum machine_mode mode,
- wide_int val, signop sgn)
+ const wide_int_ref &val_in, signop sgn)
{
- if (val.zero_p ())
+ if (val_in == 0)
get_zero (r, 0);
else
{
- unsigned int len = val.get_precision ();
+ unsigned int len = val_in.get_precision ();
int i, j, e=0;
int maxbitlen = MAX_BITSIZE_MODE_ANY_INT + HOST_BITS_PER_WIDE_INT;
const unsigned int realmax = SIGNIFICAND_BITS/HOST_BITS_PER_WIDE_INT * HOST_BITS_PER_WIDE_INT;
memset (r, 0, sizeof (*r));
r->cl = rvc_normal;
- r->sign = val.neg_p (sgn);
+ r->sign = wi::neg_p (val_in, sgn);
if (len == 0)
len = 1;
/* We have to ensure we can negate the largest negative number. */
- val = val.force_to_size (maxbitlen, sgn);
+ wide_int val = wide_int::from (val_in, maxbitlen, sgn);
if (r->sign)
val = -val;
@@ -2231,7 +2231,7 @@ real_from_integer (REAL_VALUE_TYPE *r, enum machine_mode mode,
if (len > realmax)
{
HOST_WIDE_INT cnt_l_z;
- cnt_l_z = val.clz ().to_shwi ();
+ cnt_l_z = wi::clz (val);
if (maxbitlen - cnt_l_z > realmax)
{
@@ -2240,14 +2240,15 @@ real_from_integer (REAL_VALUE_TYPE *r, enum machine_mode mode,
/* This value is too large, we must shift it right to
preserve all the bits we can, and then bump the
exponent up by that amount. */
- val = val.rshiftu (e);
+ val = wi::lrshift (val, e);
}
len = realmax;
}
/* Clear out top bits so elt will work with precisions that aren't
a multiple of HOST_BITS_PER_WIDE_INT. */
- val = val.force_to_size (len, sgn);
+ val = wide_int::from (val, len, sgn);
+ wi::clear_undef (val, sgn);
len = len / HOST_BITS_PER_WIDE_INT;
SET_REAL_EXP (r, len * HOST_BITS_PER_WIDE_INT + e);
@@ -2401,7 +2402,7 @@ real_digit (int n)
gcc_assert (n <= 9);
if (n > 0 && num[n].cl == rvc_zero)
- real_from_integer (&num[n], VOIDmode, wide_int (n), UNSIGNED);
+ real_from_integer (&num[n], VOIDmode, n, UNSIGNED);
return &num[n];
}
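In real_from_integer, a value with more significant bits than the significand can hold is shifted right and the shift count carried in the exponent, preserving value == mantissa * 2^e. A standalone sketch with a toy 40-bit significand (uint64_t stands in for wide_int; __builtin_clzll plays wi::clz):

    #include <cassert>
    #include <cstdint>

    int main ()
    {
      uint64_t val = 0xffffffffff000000ull;  /* 64 significant bits */
      int e = 0;
      const int realmax = 40;                /* toy significand width */

      int bits = 64 - __builtin_clzll (val); /* maxbitlen - cnt_l_z */
      if (bits > realmax)
        {
          e = bits - realmax;
          val >>= e;                         /* wi::lrshift (val, e) */
        }
      /* The represented value is still val * 2^e; nothing was lost here
         because the low 24 bits were zero.  */
      assert (e == 24 && val == 0xffffffffffull);
      return 0;
    }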
diff --git a/gcc/real.h b/gcc/real.h
index 9d93989fa49..336e934209f 100644
--- a/gcc/real.h
+++ b/gcc/real.h
@@ -22,6 +22,8 @@
#include "machmode.h"
#include "signop.h"
+#include "wide-int.h"
+#include "insn-modes.h"
/* An expanded form of the represented number. */
@@ -482,4 +484,13 @@ extern bool real_isinteger (const REAL_VALUE_TYPE *c, enum machine_mode mode);
number, (1 - b**-p) * b**emax for a given FP format FMT as a hex
float string. BUF must be large enough to contain the result. */
extern void get_max_float (const struct real_format *, char *, size_t);
+
+#ifndef GENERATOR_FILE
+/* real related routines. */
+extern wide_int real_to_integer (const REAL_VALUE_TYPE *, bool *, int);
+extern void real_from_integer (REAL_VALUE_TYPE *, enum machine_mode,
+ const wide_int_ref &, signop);
+extern wide_int decimal_real_to_integer (const REAL_VALUE_TYPE *, bool *, int);
+#endif
+
#endif /* ! GCC_REAL_H */
diff --git a/gcc/rtl.h b/gcc/rtl.h
index de5f8a4d16c..fe78797c6fc 100644
--- a/gcc/rtl.h
+++ b/gcc/rtl.h
@@ -1397,82 +1397,82 @@ struct address_info {
bool autoinc_p;
};
-#ifndef GENERATOR_FILE
+/* This is used to bundle an rtx and a mode together so that the pair
+ can be used as the second operand of a wide int expression. If we
+ ever put modes into rtx integer constants, this should go away and
+ then just pass an rtx in. */
+typedef std::pair <rtx, enum machine_mode> rtx_mode_t;
-/* Accessors for rtx_mode. */
-static inline rtx
-get_rtx (const rtx_mode_t p)
+namespace wi
{
- return p.first;
+ template <>
+ struct int_traits <rtx_mode_t>
+ {
+ static const enum precision_type precision_type = VAR_PRECISION;
+ static const bool host_dependent_precision = false;
+ static unsigned int get_precision (const rtx_mode_t &);
+ static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
+ const rtx_mode_t &);
+ };
}
-static inline enum machine_mode
-get_mode (const rtx_mode_t p)
+inline unsigned int
+wi::int_traits <rtx_mode_t>::get_precision (const rtx_mode_t &x)
{
- return p.second;
+ return GET_MODE_PRECISION (x.second);
}
-/* Specialization of to_shwi1 function in wide-int.h for rtl. This
- cannot be in wide-int.h because of circular includes. */
-template<>
-inline const HOST_WIDE_INT*
-wide_int_ro::to_shwi1 (HOST_WIDE_INT *s ATTRIBUTE_UNUSED,
- unsigned int *l, unsigned int *p, const rtx_mode_t& rp)
+inline wi::storage_ref
+wi::int_traits <rtx_mode_t>::decompose (HOST_WIDE_INT *,
+ unsigned int precision,
+ const rtx_mode_t &x)
{
- const rtx rcst = get_rtx (rp);
- enum machine_mode mode = get_mode (rp);
-
- *p = GET_MODE_PRECISION (mode);
-
- switch (GET_CODE (rcst))
+ gcc_checking_assert (precision == get_precision (x));
+ switch (GET_CODE (x.first))
{
case CONST_INT:
- *l = 1;
- return &INTVAL (rcst);
+ return wi::storage_ref (&INTVAL (x.first), 1, precision);
case CONST_WIDE_INT:
- *l = CONST_WIDE_INT_NUNITS (rcst);
- return &CONST_WIDE_INT_ELT (rcst, 0);
+ return wi::storage_ref (&CONST_WIDE_INT_ELT (x.first, 0),
+ CONST_WIDE_INT_NUNITS (x.first), precision);
case CONST_DOUBLE:
- *l = 2;
- return &CONST_DOUBLE_LOW (rcst);
+ return wi::storage_ref (&CONST_DOUBLE_LOW (x.first), 2, precision);
default:
gcc_unreachable ();
}
}
-/* Specialization of to_shwi2 function in wide-int.h for rtl. This
- cannot be in wide-int.h because of circular includes. */
-template<>
-inline const HOST_WIDE_INT*
-wide_int_ro::to_shwi2 (HOST_WIDE_INT *s ATTRIBUTE_UNUSED,
- unsigned int *l, const rtx_mode_t& rp)
+namespace wi
{
- const rtx rcst = get_rtx (rp);
+ hwi_with_prec shwi (HOST_WIDE_INT, enum machine_mode mode);
+ wide_int min_value (enum machine_mode, signop);
+ wide_int max_value (enum machine_mode, signop);
+}
- switch (GET_CODE (rcst))
- {
- case CONST_INT:
- *l = 1;
- return &INTVAL (rcst);
-
- case CONST_WIDE_INT:
- *l = CONST_WIDE_INT_NUNITS (rcst);
- return &CONST_WIDE_INT_ELT (rcst, 0);
-
- case CONST_DOUBLE:
- *l = 2;
- return &CONST_DOUBLE_LOW (rcst);
-
- default:
- gcc_unreachable ();
- }
+inline wi::hwi_with_prec
+wi::shwi (HOST_WIDE_INT val, enum machine_mode mode)
+{
+ return shwi (val, GET_MODE_PRECISION (mode));
}
-#endif
+/* Produce the smallest number that is represented in MODE. The precision
+ is taken from MODE and the sign from SGN. */
+inline wide_int
+wi::min_value (enum machine_mode mode, signop sgn)
+{
+ return min_value (GET_MODE_PRECISION (mode), sgn);
+}
+/* Produce the largest number that is represented in MODE. The precision
+ is taken from MODE and the sign from SGN. */
+inline wide_int
+wi::max_value (enum machine_mode mode, signop sgn)
+{
+ return max_value (GET_MODE_PRECISION (mode), sgn);
+}
extern void init_rtlanal (void);
extern int rtx_cost (rtx, enum rtx_code, int, bool);
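The rewritten rtl.h shows the wide-int tree/rtx bridge in its new shape: instead of to_shwi1/to_shwi2 template specializations, a wi::int_traits specialization teaches generic wi:: routines how to read a (rtx, mode) pair. A reduced standalone model of that trait protocol -- toy types only, not GCC's actual classes:

    #include <cassert>
    #include <utility>

    typedef long hwi;                     /* stand-in for HOST_WIDE_INT */

    struct storage_ref
    {
      const hwi *val;                     /* block array */
      unsigned len;                       /* number of blocks */
      unsigned precision;                 /* precision in bits */
    };

    template <typename T> struct int_traits;

    /* The rtx_mode_t stand-in: a value paired with its precision.  */
    typedef std::pair <hwi, unsigned> val_and_prec;

    template <>
    struct int_traits <val_and_prec>
    {
      static unsigned get_precision (const val_and_prec &x)
      { return x.second; }
      static storage_ref decompose (const val_and_prec &x)
      {
        storage_ref r = { &x.first, 1, x.second };
        return r;
      }
    };

    /* A generic operation written once against the trait interface,
       the way wi::eq_p and friends are.  */
    template <typename T>
    bool
    eq_zero_p (const T &x)
    {
      storage_ref r = int_traits <T>::decompose (x);
      for (unsigned i = 0; i < r.len; i++)
        if (r.val[i] != 0)
          return false;
      return true;
    }

    int main ()
    {
      val_and_prec x (0, 32);
      assert (eq_zero_p (x));
      return 0;
    }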
diff --git a/gcc/simplify-rtx.c b/gcc/simplify-rtx.c
index 93b688b04e8..3f263535cb3 100644
--- a/gcc/simplify-rtx.c
+++ b/gcc/simplify-rtx.c
@@ -1670,31 +1670,31 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
break;
case ABS:
- result = op0.abs ();
+ result = wi::abs (op0);
break;
case FFS:
- result = op0.ffs ();
+ result = wi::shwi (wi::ffs (op0), mode);
break;
case CLZ:
- result = op0.clz ();
+ result = wi::shwi (wi::clz (op0), mode);
break;
case CLRSB:
- result = op0.clrsb ();
+ result = wi::shwi (wi::clrsb (op0), mode);
break;
case CTZ:
- result = op0.ctz ();
+ result = wi::shwi (wi::ctz (op0), mode);
break;
case POPCOUNT:
- result = op0.popcount ();
+ result = wi::shwi (wi::popcount (op0), mode);
break;
case PARITY:
- result = op0.parity ();
+ result = wi::shwi (wi::parity (op0), mode);
break;
case BSWAP:
@@ -1702,15 +1702,12 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
break;
case TRUNCATE:
- result = op0.zforce_to_size (width);
- break;
-
case ZERO_EXTEND:
- result = op0.zforce_to_size (width);
+ result = wide_int::from (op0, width, UNSIGNED);
break;
case SIGN_EXTEND:
- result = op0.sforce_to_size (width);
+ result = wide_int::from (op0, width, SIGNED);
break;
case SQRT:
@@ -1796,13 +1793,13 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
return const0_rtx;
/* Test against the signed upper bound. */
- wmax = wide_int::max_value (width, SIGNED);
+ wmax = wi::max_value (width, SIGNED);
real_from_integer (&t, VOIDmode, wmax, SIGNED);
if (REAL_VALUES_LESS (t, x))
return immed_wide_int_const (wmax, mode);
/* Test against the signed lower bound. */
- wmin = wide_int::min_value (width, SIGNED);
+ wmin = wi::min_value (width, SIGNED);
real_from_integer (&t, VOIDmode, wmin, SIGNED);
if (REAL_VALUES_LESS (x, t))
return immed_wide_int_const (wmin, mode);
@@ -1815,7 +1812,7 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
return const0_rtx;
/* Test against the unsigned upper bound. */
- wmax = wide_int::max_value (width, UNSIGNED);
+ wmax = wi::max_value (width, UNSIGNED);
real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
if (REAL_VALUES_LESS (t, x))
return immed_wide_int_const (wmax, mode);
@@ -2018,12 +2015,12 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
wide_int coeff1;
rtx lhs = op0, rhs = op1;
- coeff0 = wide_int::one (GET_MODE_PRECISION (mode));
- coeff1 = wide_int::one (GET_MODE_PRECISION (mode));
+ coeff0 = wi::one (GET_MODE_PRECISION (mode));
+ coeff1 = wi::one (GET_MODE_PRECISION (mode));
if (GET_CODE (lhs) == NEG)
{
- coeff0 = wide_int::minus_one (GET_MODE_PRECISION (mode));
+ coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
lhs = XEXP (lhs, 0);
}
else if (GET_CODE (lhs) == MULT
@@ -2037,14 +2034,14 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
&& INTVAL (XEXP (lhs, 1)) >= 0
&& INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
{
- coeff0 = wide_int::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
- GET_MODE_PRECISION (mode));
+ coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
+ GET_MODE_PRECISION (mode));
lhs = XEXP (lhs, 0);
}
if (GET_CODE (rhs) == NEG)
{
- coeff1 = wide_int::minus_one (GET_MODE_PRECISION (mode));
+ coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
rhs = XEXP (rhs, 0);
}
else if (GET_CODE (rhs) == MULT
@@ -2058,8 +2055,8 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
&& INTVAL (XEXP (rhs, 1)) >= 0
&& INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
{
- coeff1 = wide_int::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
- GET_MODE_PRECISION (mode));
+ coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
+ GET_MODE_PRECISION (mode));
rhs = XEXP (rhs, 0);
}
@@ -2195,12 +2192,12 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
wide_int negcoeff1;
rtx lhs = op0, rhs = op1;
- coeff0 = wide_int::one (GET_MODE_PRECISION (mode));
- negcoeff1 = wide_int::minus_one (GET_MODE_PRECISION (mode));
+ coeff0 = wi::one (GET_MODE_PRECISION (mode));
+ negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
if (GET_CODE (lhs) == NEG)
{
- coeff0 = wide_int::minus_one (GET_MODE_PRECISION (mode));
+ coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
lhs = XEXP (lhs, 0);
}
else if (GET_CODE (lhs) == MULT
@@ -2214,14 +2211,14 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
&& INTVAL (XEXP (lhs, 1)) >= 0
&& INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
{
- coeff0 = wide_int::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
- GET_MODE_PRECISION (mode));
+ coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
+ GET_MODE_PRECISION (mode));
lhs = XEXP (lhs, 0);
}
if (GET_CODE (rhs) == NEG)
{
- negcoeff1 = wide_int::one (GET_MODE_PRECISION (mode));
+ negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
rhs = XEXP (rhs, 0);
}
else if (GET_CODE (rhs) == MULT
@@ -2235,8 +2232,8 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
&& INTVAL (XEXP (rhs, 1)) >= 0
&& INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
{
- negcoeff1 = wide_int::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
- GET_MODE_PRECISION (mode));
+ negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
+ GET_MODE_PRECISION (mode));
negcoeff1 = -negcoeff1;
rhs = XEXP (rhs, 0);
}
@@ -2402,8 +2399,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
/* Convert multiply by constant power of two into shift. */
if (CONST_SCALAR_INT_P (trueop1))
{
- val = wide_int (std::make_pair (trueop1, mode))
- .exact_log2 ().to_shwi ();
+ val = wi::exact_log2 (std::make_pair (trueop1, mode));
if (val >= 0 && val < GET_MODE_BITSIZE (mode))
return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
}
@@ -3733,25 +3729,25 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
break;
case DIV:
- result = wop0.div_trunc (pop1, SIGNED, &overflow);
+ result = wi::div_trunc (wop0, pop1, SIGNED, &overflow);
if (overflow)
return NULL_RTX;
break;
case MOD:
- result = wop0.mod_trunc (pop1, SIGNED, &overflow);
+ result = wi::mod_trunc (wop0, pop1, SIGNED, &overflow);
if (overflow)
return NULL_RTX;
break;
case UDIV:
- result = wop0.div_trunc (pop1, UNSIGNED, &overflow);
+ result = wi::div_trunc (wop0, pop1, UNSIGNED, &overflow);
if (overflow)
return NULL_RTX;
break;
case UMOD:
- result = wop0.mod_trunc (pop1, UNSIGNED, &overflow);
+ result = wi::mod_trunc (wop0, pop1, UNSIGNED, &overflow);
if (overflow)
return NULL_RTX;
break;
@@ -3769,19 +3765,19 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
break;
case SMIN:
- result = wop0.smin (pop1);
+ result = wi::smin (wop0, pop1);
break;
case SMAX:
- result = wop0.smax (pop1);
+ result = wi::smax (wop0, pop1);
break;
case UMIN:
- result = wop0.umin (pop1);
+ result = wi::umin (wop0, pop1);
break;
case UMAX:
- result = wop0.umax (pop1);
+ result = wi::umax (wop0, pop1);
break;
case LSHIFTRT:
@@ -3791,32 +3787,32 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
case ROTATERT:
{
wide_int wop1 = pop1;
- if (wop1.neg_p ())
+ if (wi::neg_p (wop1))
return NULL_RTX;
if (SHIFT_COUNT_TRUNCATED)
- wop1 = wop1.umod_trunc (width);
+ wop1 = wi::umod_trunc (wop1, width);
switch (code)
{
case LSHIFTRT:
- result = wop0.rshiftu (wop1, bitsize);
+ result = wi::lrshift (wop0, wop1, bitsize);
break;
case ASHIFTRT:
- result = wop0.rshifts (wop1, bitsize);
+ result = wi::arshift (wop0, wop1, bitsize);
break;
case ASHIFT:
- result = wop0.lshift (wop1, bitsize);
+ result = wi::lshift (wop0, wop1, bitsize);
break;
case ROTATE:
- result = wop0.lrotate (wop1);
+ result = wi::lrotate (wop0, wop1);
break;
case ROTATERT:
- result = wop0.rrotate (wop1);
+ result = wi::rrotate (wop0, wop1);
break;
default:
@@ -4652,8 +4648,8 @@ simplify_const_relational_operation (enum rtx_code code,
return comparison_result (code, CMP_EQ);
else
{
- int cr = wo0.lts_p (ptrueop1) ? CMP_LT : CMP_GT;
- cr |= wo0.ltu_p (ptrueop1) ? CMP_LTU : CMP_GTU;
+ int cr = wi::lts_p (wo0, ptrueop1) ? CMP_LT : CMP_GT;
+ cr |= wi::ltu_p (wo0, ptrueop1) ? CMP_LTU : CMP_GTU;
return comparison_result (code, cr);
}
}
@@ -5203,10 +5199,10 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
case CONST_WIDE_INT:
{
wide_int val = std::make_pair (el, innermode);
- unsigned char extend = val.sign_mask ();
+ unsigned char extend = wi::sign_mask (val);
for (i = 0; i < elem_bitsize; i += value_bit)
- *vp++ = val.extract_to_hwi (i, value_bit);
+ *vp++ = wi::extract_uhwi (val, i, value_bit);
for (; i < elem_bitsize; i += value_bit)
*vp++ = extend;
}
@@ -5376,7 +5372,8 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
tmp[u] = buf;
base += HOST_BITS_PER_WIDE_INT;
}
- r = wide_int::from_array (tmp, units, GET_MODE_PRECISION (outer_submode));
+ r = wide_int::from_array (tmp, units,
+ GET_MODE_PRECISION (outer_submode));
elems[elem] = immed_wide_int_const (r, outer_submode);
}
break;
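The shift block in simplify_const_binary_operation keeps its guards in wi:: form: reject negative counts, truncate the count modulo the width on SHIFT_COUNT_TRUNCATED targets, then shift. A standalone sketch for the LSHIFTRT case, with uint32_t standing in for a 32-bit wide_int:

    #include <cassert>
    #include <cstdint>

    static bool
    simplify_lshiftrt (uint32_t op0, int count, bool truncate, uint32_t *out)
    {
      if (count < 0)             /* wi::neg_p (wop1): no simplification */
        return false;
      if (truncate)
        count %= 32;             /* wi::umod_trunc (wop1, width) */
      if (count >= 32)
        return false;            /* out-of-range shift left alone */
      *out = op0 >> count;       /* wi::lrshift (wop0, wop1) */
      return true;
    }

    int main ()
    {
      uint32_t r;
      assert (simplify_lshiftrt (0x80000000u, 33, true, &r)
              && r == 0x40000000u);
      assert (!simplify_lshiftrt (1, -1, false, &r));
      return 0;
    }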
diff --git a/gcc/stor-layout.c b/gcc/stor-layout.c
index a9ee566d604..b816fa215c6 100644
--- a/gcc/stor-layout.c
+++ b/gcc/stor-layout.c
@@ -2208,8 +2208,10 @@ layout_type (tree type)
&& tree_int_cst_lt (ub, lb))
{
unsigned prec = TYPE_PRECISION (TREE_TYPE (lb));
- lb = wide_int_to_tree (ssizetype, addr_wide_int (lb).sext (prec));
- ub = wide_int_to_tree (ssizetype, addr_wide_int (ub).sext (prec));
+ lb = wide_int_to_tree (ssizetype,
+ wi::sext (addr_wide_int (lb), prec));
+ ub = wide_int_to_tree (ssizetype,
+ wi::sext (addr_wide_int (ub), prec));
}
length
= fold_convert (sizetype,
@@ -2514,8 +2516,10 @@ set_min_and_max_values_for_integral_type (tree type,
int precision,
signop sgn)
{
- TYPE_MIN_VALUE (type) = wide_int_to_tree (type, wide_int::min_value (precision, sgn));
- TYPE_MAX_VALUE (type) = wide_int_to_tree (type, wide_int::max_value (precision, sgn));
+ TYPE_MIN_VALUE (type)
+ = wide_int_to_tree (type, wi::min_value (precision, sgn));
+ TYPE_MAX_VALUE (type)
+ = wide_int_to_tree (type, wi::max_value (precision, sgn));
}
/* Set the extreme values of TYPE based on its precision in bits,
diff --git a/gcc/system.h b/gcc/system.h
index b735a96c10b..6be17088534 100644
--- a/gcc/system.h
+++ b/gcc/system.h
@@ -713,6 +713,10 @@ extern void fancy_abort (const char *, int, const char *) ATTRIBUTE_NORETURN;
#define gcc_unreachable() (fancy_abort (__FILE__, __LINE__, __FUNCTION__))
#endif
+/* Until we can rely on C++11's static_assert.  */
+#define STATIC_ASSERT(X) \
+ typedef int assertion1[(X) ? 1 : -1] ATTRIBUTE_UNUSED
+
/* Provide a fake boolean type. We make no attempt to use the
C99 _Bool, as it may not be available in the bootstrap compiler,
and even if it is, it is liable to be buggy.
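
The array-typedef in the new STATIC_ASSERT is the classic pre-C++11 compile-time assertion: when the condition holds, the typedef declares a harmless one-element array type; when it fails, the array size is -1, which every C and C++ compiler rejects. A self-contained illustration (the macro name is a stand-in; ATTRIBUTE_UNUSED is dropped for brevity, and note that, like the original, two uses in one scope would clash on the fixed typedef name):

#define TOY_STATIC_ASSERT(X) typedef int toy_assertion[(X) ? 1 : -1]

TOY_STATIC_ASSERT (sizeof (int) >= 2);      /* OK: typedef int toy_assertion[1] */
/* TOY_STATIC_ASSERT (sizeof (char) == 2);     error: array size is -1 */

int main (void) { return 0; }
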
diff --git a/gcc/tree-affine.c b/gcc/tree-affine.c
index ec56f8bab0d..3e83344d8f0 100644
--- a/gcc/tree-affine.c
+++ b/gcc/tree-affine.c
@@ -35,7 +35,7 @@ along with GCC; see the file COPYING3. If not see
max_wide_int
wide_int_ext_for_comb (max_wide_int cst, aff_tree *comb)
{
- return cst.sext (TYPE_PRECISION (comb->type));
+ return wi::sext (cst, TYPE_PRECISION (comb->type));
}
/* Initializes affine combination COMB so that its value is zero in TYPE. */
@@ -80,10 +80,10 @@ aff_combination_scale (aff_tree *comb, max_wide_int scale)
unsigned i, j;
scale = wide_int_ext_for_comb (scale, comb);
- if (scale.one_p ())
+ if (scale == 1)
return;
- if (scale.zero_p ())
+ if (scale == 0)
{
aff_combination_zero (comb, comb->type);
return;
@@ -97,7 +97,7 @@ aff_combination_scale (aff_tree *comb, max_wide_int scale)
new_coef = wide_int_ext_for_comb (scale * comb->elts[i].coef, comb);
/* A coefficient may become zero due to overflow. Remove the zero
elements. */
- if (new_coef.zero_p ())
+ if (new_coef == 0)
continue;
comb->elts[j].coef = new_coef;
comb->elts[j].val = comb->elts[i].val;
@@ -132,7 +132,7 @@ aff_combination_add_elt (aff_tree *comb, tree elt, max_wide_int scale)
tree type;
scale = wide_int_ext_for_comb (scale, comb);
- if (scale.zero_p ())
+ if (scale == 0)
return;
for (i = 0; i < comb->n; i++)
@@ -141,7 +141,7 @@ aff_combination_add_elt (aff_tree *comb, tree elt, max_wide_int scale)
max_wide_int new_coef;
new_coef = wide_int_ext_for_comb (comb->elts[i].coef + scale, comb);
- if (!new_coef.zero_p ())
+ if (new_coef != 0)
{
comb->elts[i].coef = new_coef;
return;
@@ -172,7 +172,7 @@ aff_combination_add_elt (aff_tree *comb, tree elt, max_wide_int scale)
if (POINTER_TYPE_P (type))
type = sizetype;
- if (scale.one_p ())
+ if (scale == 1)
elt = fold_convert (type, elt);
else
elt = fold_build2 (MULT_EXPR, type,
@@ -234,7 +234,7 @@ aff_combination_convert (aff_tree *comb, tree type)
for (i = j = 0; i < comb->n; i++)
{
max_wide_int new_coef = comb->elts[i].coef;
- if (new_coef.zero_p ())
+ if (new_coef == 0)
continue;
comb->elts[j].coef = new_coef;
comb->elts[j].val = fold_convert (type, comb->elts[i].val);
@@ -378,7 +378,7 @@ add_elt_to_tree (tree expr, tree type, tree elt, max_wide_int scale,
scale = wide_int_ext_for_comb (scale, comb);
- if (scale.minus_one_p ()
+ if (scale == -1
&& POINTER_TYPE_P (TREE_TYPE (elt)))
{
elt = convert_to_ptrofftype (elt);
@@ -386,7 +386,7 @@ add_elt_to_tree (tree expr, tree type, tree elt, max_wide_int scale,
scale = max_wide_int (1);
}
- if (scale.one_p ())
+ if (scale == 1)
{
if (!expr)
{
@@ -404,7 +404,7 @@ add_elt_to_tree (tree expr, tree type, tree elt, max_wide_int scale,
expr, fold_convert (type1, elt));
}
- if (scale.minus_one_p ())
+ if (scale == -1)
{
if (!expr)
return fold_build1 (NEGATE_EXPR, type1,
@@ -425,7 +425,7 @@ add_elt_to_tree (tree expr, tree type, tree elt, max_wide_int scale,
return fold_build2 (MULT_EXPR, type1, elt,
wide_int_to_tree (type1, scale));
- if (scale.neg_p ())
+ if (wi::neg_p (scale))
{
code = MINUS_EXPR;
scale = -scale;
@@ -468,7 +468,7 @@ aff_combination_to_tree (aff_tree *comb)
/* Ensure that we get x - 1, not x + (-1) or x + 0xff..f if x is
unsigned. */
- if (comb->offset.neg_p ())
+ if (wi::neg_p (comb->offset))
{
off = -comb->offset;
sgn = -1;
@@ -765,20 +765,19 @@ wide_int_constant_multiple_p (max_wide_int val, max_wide_int div,
{
max_wide_int rem, cst;
- if (val.zero_p ())
+ if (val == 0)
{
- if (*mult_set && !mult->zero_p ())
+ if (*mult_set && mult != 0)
return false;
*mult_set = true;
*mult = 0;
return true;
}
- if (div.zero_p ())
+ if (div == 0)
return false;
- cst = val.sdivmod_floor (div, &rem);
- if (!rem.zero_p ())
+ if (!wi::multiple_of_p (val, div, SIGNED, &cst))
return false;
if (*mult_set && *mult != cst)
@@ -799,7 +798,7 @@ aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div,
bool mult_set = false;
unsigned i;
- if (val->n == 0 && val->offset.zero_p ())
+ if (val->n == 0 && val->offset == 0)
{
*mult = 0;
return true;
@@ -899,8 +898,7 @@ get_inner_reference_aff (tree ref, aff_tree *addr, max_wide_int *size)
aff_combination_add (addr, &tmp);
}
- aff_combination_const (&tmp, sizetype,
- max_wide_int (bitpos / BITS_PER_UNIT));
+ aff_combination_const (&tmp, sizetype, bitpos / BITS_PER_UNIT);
aff_combination_add (addr, &tmp);
*size = (bitsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT;
@@ -919,17 +917,17 @@ aff_comb_cannot_overlap_p (aff_tree *diff, const max_wide_int &size1, const max_
return false;
d = diff->offset;
- if (d.neg_p ())
+ if (wi::neg_p (d))
{
/* The second object is before the first one, we succeed if the last
element of the second object is before the start of the first one. */
bound = d + size2 - 1;
- return bound.neg_p ();
+ return wi::neg_p (bound);
}
else
{
/* We succeed if the second object starts after the first one ends. */
- return size1.les_p (d);
+ return wi::les_p (size1, d);
}
}
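
Throughout tree-affine.c the combination's coefficients are kept canonical by sign-extending from the combination type's precision, now spelled wi::sext (cst, prec). On a flat 64-bit integer the same operation is a shift up and an arithmetic shift back down — a sketch of the semantics only, assuming the usual arithmetic right shift for signed types:

#include <stdint.h>
#include <assert.h>

/* Sign-extend the low PREC bits of X (1 <= PREC <= 63).  */
static int64_t toy_sext (uint64_t x, unsigned prec)
{
  unsigned shift = 64 - prec;
  return (int64_t) (x << shift) >> shift;
}

int main (void)
{
  assert (toy_sext (0xff, 8) == -1);    /* 0xff is -1 in 8 bits */
  assert (toy_sext (0x7f, 8) == 127);   /* sign bit clear: unchanged */
  assert (toy_sext (0x1ff, 8) == -1);   /* bits above PREC are discarded */
  return 0;
}
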
diff --git a/gcc/tree-chrec.c b/gcc/tree-chrec.c
index 8aeefb90072..7c3960ecbbe 100644
--- a/gcc/tree-chrec.c
+++ b/gcc/tree-chrec.c
@@ -475,17 +475,17 @@ tree_fold_binomial (tree type, tree n, unsigned int k)
num = n;
/* Check that k <= n. */
- if (num.ltu_p (k))
+ if (wi::ltu_p (num, k))
return NULL_TREE;
/* Denominator = 2. */
- denom = wide_int::two (TYPE_PRECISION (TREE_TYPE (n)));
+ denom = wi::two (TYPE_PRECISION (TREE_TYPE (n)));
/* Index = Numerator-1. */
idx = num - 1;
/* Numerator = Numerator*Index = n*(n-1). */
- num = num.smul (idx, &overflow);
+ num = wi::smul (num, idx, &overflow);
if (overflow)
return NULL_TREE;
@@ -495,7 +495,7 @@ tree_fold_binomial (tree type, tree n, unsigned int k)
--idx;
/* Numerator *= Index. */
- num = num.smul (idx, &overflow);
+ num = wi::smul (num, idx, &overflow);
if (overflow)
return NULL_TREE;
@@ -504,7 +504,7 @@ tree_fold_binomial (tree type, tree n, unsigned int k)
}
/* Result = Numerator / Denominator. */
- di_res = num.udiv_trunc (denom);
+ di_res = wi::udiv_trunc (num, denom);
res = wide_int_to_tree (type, di_res);
return int_fits_type_p (res, type) ? res : NULL_TREE;
}
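
tree_fold_binomial computes the numerator of C(n,k) by repeated signed multiplication, giving up as soon as wi::smul reports overflow. The same control flow on host integers, using GCC/Clang's __builtin_mul_overflow as the overflow probe (an assumption about the available compiler, not part of the patch):

#include <stdint.h>
#include <assert.h>

/* C(n,k) for small k, or 0 if the numerator overflows int64_t.  */
static int64_t toy_binomial (int64_t n, unsigned k)
{
  int64_t num = 1, denom = 1;
  for (unsigned i = 0; i < k; i++)
    {
      if (__builtin_mul_overflow (num, n - i, &num))
        return 0;               /* mirrors the overflow -> NULL_TREE bail-out */
      denom *= i + 1;
    }
  return num / denom;
}

int main (void)
{
  assert (toy_binomial (10, 2) == 45);
  assert (toy_binomial (INT64_MAX, 2) == 0);   /* overflow detected */
  return 0;
}
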
diff --git a/gcc/tree-data-ref.c b/gcc/tree-data-ref.c
index f7623a22d64..dad8cbeea60 100644
--- a/gcc/tree-data-ref.c
+++ b/gcc/tree-data-ref.c
@@ -1752,7 +1752,7 @@ max_stmt_executions_tree (struct loop *loop)
if (!max_stmt_executions (loop, &nit))
return chrec_dont_know;
- if (!nit.fits_to_tree_p (unsigned_type_node))
+ if (!wi::fits_to_tree_p (nit, unsigned_type_node))
return chrec_dont_know;
return wide_int_to_tree (unsigned_type_node, nit);
diff --git a/gcc/tree-dfa.c b/gcc/tree-dfa.c
index 204b57c9202..ec77264950a 100644
--- a/gcc/tree-dfa.c
+++ b/gcc/tree-dfa.c
@@ -433,8 +433,9 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
if (this_offset && TREE_CODE (this_offset) == INTEGER_CST)
{
addr_wide_int woffset = this_offset;
- woffset = woffset.lshift (BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT));
+ woffset = wi::lshift (woffset,
+ (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT)));
woffset += DECL_FIELD_BIT_OFFSET (field);
bit_offset += woffset;
@@ -455,7 +456,7 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
tree ssize = TYPE_SIZE_UNIT (stype);
if (tree_fits_shwi_p (fsize)
&& tree_fits_shwi_p (ssize)
- && woffset.fits_shwi_p ())
+ && wi::fits_shwi_p (woffset))
maxsize += ((tree_to_shwi (ssize)
- tree_to_shwi (fsize))
* BITS_PER_UNIT
@@ -474,7 +475,7 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
if (maxsize != -1
&& csize
&& tree_fits_uhwi_p (csize)
- && bit_offset.fits_shwi_p ())
+ && wi::fits_shwi_p (bit_offset))
maxsize = tree_to_shwi (csize)
- bit_offset.to_shwi ();
else
@@ -497,11 +498,12 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
TREE_CODE (unit_size) == INTEGER_CST))
{
addr_wide_int woffset
- = (addr_wide_int (index) - low_bound)
- .sext (TYPE_PRECISION (TREE_TYPE (index)));
+ = wi::sext (addr_wide_int (index) - low_bound,
+ TYPE_PRECISION (TREE_TYPE (index)));
woffset *= addr_wide_int (unit_size);
- woffset = woffset.lshift (BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT));
+ woffset = wi::lshift (woffset,
+ (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT)));
bit_offset += woffset;
/* An array ref with a constant index up in the structure
@@ -518,7 +520,7 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
if (maxsize != -1
&& asize
&& tree_fits_uhwi_p (asize)
- && bit_offset.fits_shwi_p ())
+ && wi::fits_shwi_p (bit_offset))
maxsize = tree_to_uhwi (asize) - bit_offset.to_shwi ();
else
maxsize = -1;
@@ -549,10 +551,10 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
else
{
addr_wide_int off = mem_ref_offset (exp);
- off = off.lshift (BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT));
+ off = wi::lshift (off, (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT)));
off += bit_offset;
- if (off.fits_shwi_p ())
+ if (wi::fits_shwi_p (off))
{
bit_offset = off;
exp = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
@@ -579,10 +581,10 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
else
{
addr_wide_int off = mem_ref_offset (exp);
- off = off.lshift (BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT));
+ off = wi::lshift (off, (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT)));
off += bit_offset;
- if (off.fits_shwi_p ())
+ if (wi::fits_shwi_p (off))
{
bit_offset = off;
exp = TREE_OPERAND (TMR_BASE (exp), 0);
@@ -599,7 +601,7 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
}
done:
- if (!bit_offset.fits_shwi_p ())
+ if (!wi::fits_shwi_p (bit_offset))
{
*poffset = 0;
*psize = bitsize;
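
The repeated "BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT)" idiom in these hunks converts a byte offset to a bit offset with a shift rather than a multiply, hard-coding the shift amount 3 for the ubiquitous 8-bit unit. In isolation, with a hypothetical exact_log2 built on __builtin_ctz:

#include <assert.h>

#define BITS_PER_UNIT 8

/* Valid only when X is a power of two, like GCC's exact_log2.  */
static int toy_exact_log2 (unsigned x) { return __builtin_ctz (x); }

static long byte_to_bit_offset (long byte_off)
{
  int shift = BITS_PER_UNIT == 8 ? 3 : toy_exact_log2 (BITS_PER_UNIT);
  return byte_off << shift;     /* byte_off * BITS_PER_UNIT */
}

int main (void)
{
  assert (byte_to_bit_offset (4) == 32);
  return 0;
}
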
diff --git a/gcc/tree-object-size.c b/gcc/tree-object-size.c
index fa14235a9bf..a0cdf829745 100644
--- a/gcc/tree-object-size.c
+++ b/gcc/tree-object-size.c
@@ -192,9 +192,9 @@ addr_object_size (struct object_size_info *osi, const_tree ptr,
if (sz != unknown[object_size_type])
{
addr_wide_int dsz = addr_wide_int (sz) - mem_ref_offset (pt_var);
- if (dsz.neg_p ())
+ if (wi::neg_p (dsz))
sz = 0;
- else if (dsz.fits_uhwi_p ())
+ else if (wi::fits_uhwi_p (dsz))
sz = dsz.to_uhwi ();
else
sz = unknown[object_size_type];
diff --git a/gcc/tree-predcom.c b/gcc/tree-predcom.c
index dbc3bf48098..11c66235eff 100644
--- a/gcc/tree-predcom.c
+++ b/gcc/tree-predcom.c
@@ -618,7 +618,7 @@ aff_combination_dr_offset (struct data_reference *dr, aff_tree *offset)
tree_to_aff_combination_expand (DR_OFFSET (dr), type, offset,
&name_expansions);
- aff_combination_const (&delta, type, max_wide_int (DR_INIT (dr)));
+ aff_combination_const (&delta, type, DR_INIT (dr));
aff_combination_add (offset, &delta);
}
@@ -897,7 +897,7 @@ order_drefs (const void *a, const void *b)
{
const dref *const da = (const dref *) a;
const dref *const db = (const dref *) b;
- int offcmp = (*da)->offset.cmps ((*db)->offset);
+ int offcmp = wi::cmps ((*da)->offset, (*db)->offset);
if (offcmp != 0)
return offcmp;
@@ -921,14 +921,14 @@ add_ref_to_chain (chain_p chain, dref ref)
dref root = get_chain_root (chain);
max_wide_int dist;
- gcc_assert (root->offset.les_p (ref->offset));
+ gcc_assert (wi::les_p (root->offset, ref->offset));
dist = ref->offset - root->offset;
- if (max_wide_int::from_uhwi (MAX_DISTANCE).leu_p (dist))
+ if (wi::leu_p (MAX_DISTANCE, dist))
{
free (ref);
return;
}
- gcc_assert (dist.fits_uhwi_p ());
+ gcc_assert (wi::fits_uhwi_p (dist));
chain->refs.safe_push (ref);
@@ -1194,7 +1194,7 @@ determine_roots_comp (struct loop *loop,
FOR_EACH_VEC_ELT (comp->refs, i, a)
{
if (!chain || DR_IS_WRITE (a->ref)
- || max_wide_int (MAX_DISTANCE).leu_p (a->offset - last_ofs))
+ || wi::leu_p (MAX_DISTANCE, a->offset - last_ofs))
{
if (nontrivial_chain_p (chain))
{
diff --git a/gcc/tree-pretty-print.c b/gcc/tree-pretty-print.c
index e6fb0370fac..dae0b9c6fd2 100644
--- a/gcc/tree-pretty-print.c
+++ b/gcc/tree-pretty-print.c
@@ -1071,7 +1071,7 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags,
{
wide_int val = node;
- if (val.neg_p (TYPE_SIGN (TREE_TYPE (node))))
+ if (wi::neg_p (val, TYPE_SIGN (TREE_TYPE (node))))
{
pp_minus (buffer);
val = -val;
@@ -1324,7 +1324,7 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags,
tree field, val;
bool is_struct_init = false;
bool is_array_init = false;
- wide_int curidx = 0;
+ max_wide_int curidx;
pp_left_brace (buffer);
if (TREE_CLOBBER_P (node))
pp_string (buffer, "CLOBBER");
@@ -1339,7 +1339,7 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags,
{
tree minv = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (node)));
is_array_init = true;
- curidx = max_wide_int (minv);
+ curidx = minv;
}
FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (node), ix, field, val)
{
@@ -1353,7 +1353,7 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags,
}
else if (is_array_init
&& (TREE_CODE (field) != INTEGER_CST
- || max_wide_int (field) != curidx))
+ || curidx != field))
{
pp_left_bracket (buffer);
if (TREE_CODE (field) == RANGE_EXPR)
diff --git a/gcc/tree-ssa-address.c b/gcc/tree-ssa-address.c
index 94fe59f6ce7..d2a17e825df 100644
--- a/gcc/tree-ssa-address.c
+++ b/gcc/tree-ssa-address.c
@@ -195,8 +195,8 @@ addr_for_mem_ref (struct mem_address *addr, addr_space_t as,
if (addr->offset && !integer_zerop (addr->offset))
{
- addr_wide_int dc = addr_wide_int (addr->offset)
- .sext (TYPE_PRECISION (TREE_TYPE (addr->offset)));
+ addr_wide_int dc = wi::sext (addr_wide_int (addr->offset),
+ TYPE_PRECISION (TREE_TYPE (addr->offset)));
off = immed_wide_int_const (dc, pointer_mode);
}
else
@@ -395,7 +395,7 @@ move_fixed_address_to_symbol (struct mem_address *parts, aff_tree *addr)
for (i = 0; i < addr->n; i++)
{
- if (!addr->elts[i].coef.one_p ())
+ if (addr->elts[i].coef != 1)
continue;
val = addr->elts[i].val;
@@ -423,7 +423,7 @@ move_hint_to_base (tree type, struct mem_address *parts, tree base_hint,
for (i = 0; i < addr->n; i++)
{
- if (!addr->elts[i].coef.one_p ())
+ if (addr->elts[i].coef != 1)
continue;
val = addr->elts[i].val;
@@ -455,7 +455,7 @@ move_pointer_to_base (struct mem_address *parts, aff_tree *addr)
for (i = 0; i < addr->n; i++)
{
- if (!addr->elts[i].coef.one_p ())
+ if (addr->elts[i].coef != 1)
continue;
val = addr->elts[i].val;
@@ -543,7 +543,7 @@ most_expensive_mult_to_index (tree type, struct mem_address *parts,
best_mult = 0;
for (i = 0; i < addr->n; i++)
{
- if (!addr->elts[i].coef.fits_shwi_p ())
+ if (!wi::fits_shwi_p (addr->elts[i].coef))
continue;
coef = addr->elts[i].coef.to_shwi ();
@@ -556,7 +556,7 @@ most_expensive_mult_to_index (tree type, struct mem_address *parts,
if (acost > best_mult_cost)
{
best_mult_cost = acost;
- best_mult = addr_wide_int::from_wide_int (addr->elts[i].coef);
+ best_mult = addr_wide_int::from (addr->elts[i].coef, SIGNED);
}
}
@@ -566,8 +566,8 @@ most_expensive_mult_to_index (tree type, struct mem_address *parts,
/* Collect elements multiplied by best_mult. */
for (i = j = 0; i < addr->n; i++)
{
- amult = addr_wide_int::from_wide_int (addr->elts[i].coef);
- amult_neg = -amult.sext (TYPE_PRECISION (addr->type));
+ amult = addr_wide_int::from (addr->elts[i].coef, SIGNED);
+ amult_neg = -wi::sext (amult, TYPE_PRECISION (addr->type));
if (amult == best_mult)
op_code = PLUS_EXPR;
@@ -619,7 +619,7 @@ addr_to_parts (tree type, aff_tree *addr, tree iv_cand,
parts->index = NULL_TREE;
parts->step = NULL_TREE;
- if (!addr->offset.zero_p ())
+ if (addr->offset != 0)
parts->offset = wide_int_to_tree (sizetype, addr->offset);
else
parts->offset = NULL_TREE;
@@ -651,7 +651,7 @@ addr_to_parts (tree type, aff_tree *addr, tree iv_cand,
for (i = 0; i < addr->n; i++)
{
part = fold_convert (sizetype, addr->elts[i].val);
- if (!addr->elts[i].coef.one_p ())
+ if (addr->elts[i].coef != 1)
part = fold_build2 (MULT_EXPR, sizetype, part,
wide_int_to_tree (sizetype, addr->elts[i].coef));
add_to_parts (parts, part);
diff --git a/gcc/tree-ssa-alias.c b/gcc/tree-ssa-alias.c
index 8a53fa72775..8ed18e2e604 100644
--- a/gcc/tree-ssa-alias.c
+++ b/gcc/tree-ssa-alias.c
@@ -882,8 +882,9 @@ indirect_ref_may_alias_decl_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
/* The offset embedded in MEM_REFs can be negative. Bias them
so that the resulting offset adjustment is positive. */
moff = mem_ref_offset (base1);
- moff = moff.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT));
- if (moff.neg_p ())
+ moff = wi::lshift (moff, (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT)));
+ if (wi::neg_p (moff))
offset2p += (-moff).to_short_addr ();
else
offset1p += moff.to_short_addr ();
@@ -958,8 +959,9 @@ indirect_ref_may_alias_decl_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
|| TREE_CODE (dbase2) == TARGET_MEM_REF)
{
addr_wide_int moff = mem_ref_offset (dbase2);
- moff = moff.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT));
- if (moff.neg_p ())
+ moff = wi::lshift (moff, (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT)));
+ if (wi::neg_p (moff))
doffset1 -= (-moff).to_short_addr ();
else
doffset2 -= moff.to_short_addr ();
@@ -1052,14 +1054,16 @@ indirect_refs_may_alias_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
/* The offset embedded in MEM_REFs can be negative. Bias them
so that the resulting offset adjustment is positive. */
moff = mem_ref_offset (base1);
- moff = moff.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT));
- if (moff.neg_p ())
+ moff = wi::lshift (moff, (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT)));
+ if (wi::neg_p (moff))
offset2 += (-moff).to_short_addr ();
else
offset1 += moff.to_shwi ();
moff = mem_ref_offset (base2);
- moff = moff.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT));
- if (moff.neg_p ())
+ moff = wi::lshift (moff, (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT)));
+ if (wi::neg_p (moff))
offset1 += (-moff).to_short_addr ();
else
offset2 += moff.to_short_addr ();
@@ -2006,14 +2010,14 @@ stmt_kills_ref_p_1 (gimple stmt, ao_ref *ref)
TREE_OPERAND (ref->base, 1)))
{
addr_wide_int off1 = mem_ref_offset (base);
- off1 = off1.lshift (BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT));
+ off1 = wi::lshift (off1, (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT)));
off1 += offset;
addr_wide_int off2 = mem_ref_offset (ref->base);
- off2 = off2.lshift (BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT));
+ off2 = wi::lshift (off2, (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT)));
off2 += ref_offset;
- if (off1.fits_shwi_p () && off2.fits_shwi_p ())
+ if (wi::fits_shwi_p (off1) && wi::fits_shwi_p (off2))
{
offset = off1.to_shwi ();
ref_offset = off2.to_shwi ();
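
The biasing trick the comments above describe is worth spelling out: a MEM_REF offset moff may be negative, and instead of letting one running offset go negative, its negation is credited to the opposite object, which leaves the difference of the two offsets — and hence the overlap test — unchanged. In plain integers:

#include <assert.h>

/* Apply a possibly negative base adjustment MOFF so that both running
   offsets only ever grow; offset1 - offset2 is preserved either way.  */
static void bias_offsets (long moff, long *offset1, long *offset2)
{
  if (moff < 0)
    *offset2 += -moff;
  else
    *offset1 += moff;
}

int main (void)
{
  long o1 = 100, o2 = 40;
  long diff_before = o1 - o2;
  bias_offsets (-16, &o1, &o2);
  assert (o1 - o2 == diff_before - 16);   /* same effect as o1 += -16 */
  return 0;
}
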
diff --git a/gcc/tree-ssa-ccp.c b/gcc/tree-ssa-ccp.c
index 5320a909d07..556021b92d3 100644
--- a/gcc/tree-ssa-ccp.c
+++ b/gcc/tree-ssa-ccp.c
@@ -196,15 +196,14 @@ dump_lattice_value (FILE *outf, const char *prefix, prop_value_t val)
break;
case CONSTANT:
if (TREE_CODE (val.value) != INTEGER_CST
- || val.mask.zero_p ())
+ || val.mask == 0)
{
fprintf (outf, "%sCONSTANT ", prefix);
print_generic_expr (outf, val.value, dump_flags);
}
else
{
- wide_int cval = (max_wide_int (val.value)
- .and_not (val.mask));
+ wide_int cval = wi::bit_and_not (val.value, val.mask);
fprintf (outf, "%sCONSTANT ", prefix);
print_hex (cval, outf);
fprintf (outf, " (");
@@ -343,7 +342,7 @@ get_constant_value (tree var)
if (val
&& val->lattice_val == CONSTANT
&& (TREE_CODE (val->value) != INTEGER_CST
- || val->mask.zero_p ()))
+ || val->mask == 0))
return val->value;
return NULL_TREE;
}
@@ -434,8 +433,8 @@ valid_lattice_transition (prop_value_t old_val, prop_value_t new_val)
/* Bit-lattices have to agree in the still valid bits. */
if (TREE_CODE (old_val.value) == INTEGER_CST
&& TREE_CODE (new_val.value) == INTEGER_CST)
- return (max_wide_int (old_val.value).and_not (new_val.mask)
- == max_wide_int (new_val.value).and_not (new_val.mask));
+ return (wi::bit_and_not (old_val.value, new_val.mask)
+ == wi::bit_and_not (new_val.value, new_val.mask));
/* Otherwise constant values have to agree. */
return operand_equal_p (old_val.value, new_val.value, 0);
@@ -460,7 +459,7 @@ set_lattice_value (tree var, prop_value_t new_val)
&& TREE_CODE (new_val.value) == INTEGER_CST
&& TREE_CODE (old_val->value) == INTEGER_CST)
{
- max_wide_int diff = max_wide_int (new_val.value) ^ old_val->value;
+ max_wide_int diff = wi::bit_xor (new_val.value, old_val->value);
new_val.mask = new_val.mask | old_val->mask | diff;
}
@@ -527,11 +526,11 @@ get_value_from_alignment (tree expr)
get_pointer_alignment_1 (expr, &align, &bitpos);
val.mask = (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
- ? max_wide_int::mask (TYPE_PRECISION (type), false)
+ ? wi::mask <max_wide_int> (TYPE_PRECISION (type), false)
: -1).and_not (align / BITS_PER_UNIT - 1);
- val.lattice_val = val.mask.minus_one_p () ? VARYING : CONSTANT;
+ val.lattice_val = val.mask == -1 ? VARYING : CONSTANT;
if (val.lattice_val == CONSTANT)
- val.value = wide_int_to_tree (type, bitpos / BITS_PER_UNIT);
+ val.value = build_int_cstu (type, bitpos / BITS_PER_UNIT);
else
val.value = NULL_TREE;
@@ -910,9 +909,8 @@ ccp_lattice_meet (prop_value_t *val1, prop_value_t *val2)
For INTEGER_CSTs mask unequal bits. If no equal bits remain,
drop to varying. */
val1->mask = (val1->mask | val2->mask
- | (max_wide_int (val1->value)
- ^ val2->value));
- if (val1->mask.minus_one_p ())
+ | (wi::bit_xor (val1->value, val2->value)));
+ if (val1->mask == -1)
{
val1->lattice_val = VARYING;
val1->value = NULL_TREE;
@@ -1126,13 +1124,13 @@ bit_value_unop_1 (enum tree_code code, tree type,
/* First extend mask and value according to the original type. */
sgn = TYPE_SIGN (rtype);
- *mask = rmask.ext (TYPE_PRECISION (rtype), sgn);
- *val = rval.ext (TYPE_PRECISION (rtype), sgn);
+ *mask = wi::ext (rmask, TYPE_PRECISION (rtype), sgn);
+ *val = wi::ext (rval, TYPE_PRECISION (rtype), sgn);
/* Then extend mask and value according to the target type. */
sgn = TYPE_SIGN (type);
- *mask = (*mask).ext (TYPE_PRECISION (type), sgn);
- *val = (*val).ext (TYPE_PRECISION (type), sgn);
+ *mask = wi::ext (*mask, TYPE_PRECISION (type), sgn);
+ *val = wi::ext (*val, TYPE_PRECISION (type), sgn);
break;
}
@@ -1184,17 +1182,17 @@ bit_value_binop_1 (enum tree_code code, tree type,
case LROTATE_EXPR:
case RROTATE_EXPR:
- if (r2mask.zero_p ())
+ if (r2mask == 0)
{
wide_int shift = r2val;
- if (shift.zero_p ())
+ if (shift == 0)
{
*mask = r1mask;
*val = r1val;
}
else
{
- if (shift.neg_p ())
+ if (wi::neg_p (shift))
{
shift = -shift;
if (code == RROTATE_EXPR)
@@ -1204,13 +1202,13 @@ bit_value_binop_1 (enum tree_code code, tree type,
}
if (code == RROTATE_EXPR)
{
- *mask = r1mask.rrotate (shift, width);
- *val = r1val.rrotate (shift, width);
+ *mask = wi::rrotate (r1mask, shift, width);
+ *val = wi::rrotate (r1val, shift, width);
}
else
{
- *mask = r1mask.lrotate (shift, width);
- *val = r1val.lrotate (shift, width);
+ *mask = wi::lrotate (r1mask, shift, width);
+ *val = wi::lrotate (r1val, shift, width);
}
}
}
@@ -1221,17 +1219,17 @@ bit_value_binop_1 (enum tree_code code, tree type,
/* ??? We can handle partially known shift counts if we know
their sign. That way we can tell that (x << (y | 8)) & 255
is zero. */
- if (r2mask.zero_p ())
+ if (r2mask == 0)
{
wide_int shift = r2val;
- if (shift.zero_p ())
+ if (shift == 0)
{
*mask = r1mask;
*val = r1val;
}
else
{
- if (shift.neg_p ())
+ if (wi::neg_p (shift))
{
shift = -shift;
if (code == RSHIFT_EXPR)
@@ -1241,13 +1239,13 @@ bit_value_binop_1 (enum tree_code code, tree type,
}
if (code == RSHIFT_EXPR)
{
- *mask = r1mask.ext (width, sgn).rshift (shift, sgn);
- *val = r1val.ext (width, sgn).rshift (shift, sgn);
+ *mask = wi::rshift (wi::ext (r1mask, width, sgn), shift, sgn);
+ *val = wi::rshift (wi::ext (r1val, width, sgn), shift, sgn);
}
else
{
- *mask = r1mask.lshift (shift).sext (width);
- *val = r1val.lshift (shift).sext (width);
+ *mask = wi::sext (wi::lshift (r1mask, shift), width);
+ *val = wi::sext (wi::lshift (r1val, shift), width);
}
}
}
@@ -1260,17 +1258,17 @@ bit_value_binop_1 (enum tree_code code, tree type,
/* Do the addition with unknown bits set to zero, to give carry-ins of
zero wherever possible. */
lo = r1val.and_not (r1mask) + r2val.and_not (r2mask);
- lo = lo.ext (width, sgn);
+ lo = wi::ext (lo, width, sgn);
/* Do the addition with unknown bits set to one, to give carry-ins of
one wherever possible. */
hi = (r1val | r1mask) + (r2val | r2mask);
- hi = hi.ext (width, sgn);
+ hi = wi::ext (hi, width, sgn);
/* Each bit in the result is known if (a) the corresponding bits in
both inputs are known, and (b) the carry-in to that bit position
is known. We can check condition (b) by seeing if we got the same
result with minimised carries as with maximised carries. */
*mask = r1mask | r2mask | (lo ^ hi);
- *mask = (*mask).ext (width, sgn);
+ *mask = wi::ext (*mask, width, sgn);
/* It shouldn't matter whether we choose lo or hi here. */
*val = lo;
break;
@@ -1291,8 +1289,8 @@ bit_value_binop_1 (enum tree_code code, tree type,
{
/* Just track trailing zeros in both operands and transfer
them to the other. */
- int r1tz = (r1val | r1mask).ctz ().to_shwi ();
- int r2tz = (r2val | r2mask).ctz ().to_shwi ();
+ int r1tz = wi::ctz (r1val | r1mask);
+ int r2tz = wi::ctz (r2val | r2mask);
if (r1tz + r2tz >= width)
{
*mask = 0;
@@ -1300,7 +1298,8 @@ bit_value_binop_1 (enum tree_code code, tree type,
}
else if (r1tz + r2tz > 0)
{
- *mask = max_wide_int::mask (r1tz + r2tz, true).ext (width, sgn);
+ *mask = wi::ext (wi::mask <max_wide_int> (r1tz + r2tz, true),
+ width, sgn);
*val = 0;
}
break;
@@ -1348,7 +1347,7 @@ bit_value_binop_1 (enum tree_code code, tree type,
o2mask = r2mask;
}
/* If the most significant bits are not known we know nothing. */
- if (o1mask.neg_p () || o2mask.neg_p ())
+ if (wi::neg_p (o1mask) || wi::neg_p (o2mask))
break;
/* For comparisons the signedness is in the comparison operands. */
@@ -1357,8 +1356,8 @@ bit_value_binop_1 (enum tree_code code, tree type,
/* If we know the most significant bits we know the value
ranges by means of treating varying bits as zero
or one. Do a cross comparison of the max/min pairs. */
- maxmin = (o1val | o1mask).cmp (o2val.and_not (o2mask), sgn);
- minmax = o1val.and_not (o1mask).cmp (o2val | o2mask, sgn);
+ maxmin = wi::cmp (o1val | o1mask, o2val.and_not (o2mask), sgn);
+ minmax = wi::cmp (o1val.and_not (o1mask), o2val | o2mask, sgn);
if (maxmin < 0) /* o1 is less than o2. */
{
*mask = 0;
@@ -1404,10 +1403,10 @@ bit_value_unop (enum tree_code code, tree type, tree rhs)
gcc_assert ((rval.lattice_val == CONSTANT
&& TREE_CODE (rval.value) == INTEGER_CST)
- || rval.mask.minus_one_p ());
+ || rval.mask == -1);
bit_value_unop_1 (code, type, &value, &mask,
TREE_TYPE (rhs), value_to_wide_int (rval), rval.mask);
- if (!mask.minus_one_p ())
+ if (mask != -1)
{
val.lattice_val = CONSTANT;
val.mask = mask;
@@ -1445,10 +1444,10 @@ bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
gcc_assert ((r1val.lattice_val == CONSTANT
&& TREE_CODE (r1val.value) == INTEGER_CST)
- || r1val.mask.minus_one_p ());
+ || r1val.mask == -1);
gcc_assert ((r2val.lattice_val == CONSTANT
&& TREE_CODE (r2val.value) == INTEGER_CST)
- || r2val.mask.minus_one_p ());
+ || r2val.mask == -1);
bit_value_binop_1 (code, type, &value, &mask,
TREE_TYPE (rhs1), value_to_wide_int (r1val), r1val.mask,
TREE_TYPE (rhs2), value_to_wide_int (r2val), r2val.mask);
@@ -1486,7 +1485,7 @@ bit_value_assume_aligned (gimple stmt)
return ptrval;
gcc_assert ((ptrval.lattice_val == CONSTANT
&& TREE_CODE (ptrval.value) == INTEGER_CST)
- || ptrval.mask.minus_one_p ());
+ || ptrval.mask == -1);
align = gimple_call_arg (stmt, 1);
if (!tree_fits_uhwi_p (align))
return ptrval;
@@ -1508,7 +1507,7 @@ bit_value_assume_aligned (gimple stmt)
bit_value_binop_1 (BIT_AND_EXPR, type, &value, &mask,
type, value_to_wide_int (ptrval), ptrval.mask,
type, value_to_wide_int (alignval), alignval.mask);
- if (!mask.minus_one_p ())
+ if (mask != -1)
{
val.lattice_val = CONSTANT;
val.mask = mask;
@@ -1670,8 +1669,8 @@ evaluate_stmt (gimple stmt)
case BUILT_IN_STRNDUP:
val.lattice_val = CONSTANT;
val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
- val.mask = max_wide_int (~(((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT)
- / BITS_PER_UNIT - 1));
+ val.mask = ~((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT
+ / BITS_PER_UNIT - 1);
break;
case BUILT_IN_ALLOCA:
@@ -1681,8 +1680,7 @@ evaluate_stmt (gimple stmt)
: BIGGEST_ALIGNMENT);
val.lattice_val = CONSTANT;
val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
- val.mask = max_wide_int (~(((HOST_WIDE_INT) align)
- / BITS_PER_UNIT - 1));
+ val.mask = ~((HOST_WIDE_INT) align / BITS_PER_UNIT - 1);
break;
/* These builtins return their first argument, unmodified. */
@@ -1901,7 +1899,7 @@ ccp_fold_stmt (gimple_stmt_iterator *gsi)
fold more conditionals here. */
val = evaluate_stmt (stmt);
if (val.lattice_val != CONSTANT
- || !val.mask.zero_p ())
+ || val.mask != 0)
return false;
if (dump_file)
@@ -2081,7 +2079,7 @@ visit_cond_stmt (gimple stmt, edge *taken_edge_p)
block = gimple_bb (stmt);
val = evaluate_stmt (stmt);
if (val.lattice_val != CONSTANT
- || !val.mask.zero_p ())
+ || val.mask != 0)
return SSA_PROP_VARYING;
/* Find which edge out of the conditional block will be taken and add it
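
The CCP changes above all manipulate (val, mask) pairs in which a set mask bit means "unknown". The PLUS_EXPR comments describe the nicest trick in the file: add once with every unknown bit as 0 (minimum carries) and once with every unknown bit as 1 (maximum carries); a result bit is known exactly when both inputs are known there and both sums agree, i.e. the carry-in was determined. A standalone 32-bit sketch of that rule:

#include <stdint.h>
#include <assert.h>

/* Known-bits addition: MASK bits are unknown; VAL gives known bits.  */
static void add_known_bits (uint32_t v1, uint32_t m1,
                            uint32_t v2, uint32_t m2,
                            uint32_t *val, uint32_t *mask)
{
  uint32_t lo = (v1 & ~m1) + (v2 & ~m2);   /* carry-ins minimised */
  uint32_t hi = (v1 | m1) + (v2 | m2);     /* carry-ins maximised */
  *mask = m1 | m2 | (lo ^ hi);             /* unknown input bit or carry */
  *val = lo & ~*mask;                      /* known bits agree in lo and hi */
}

int main (void)
{
  uint32_t val, mask;
  /* 0b10?0 + 0b0001: bit 1 of the first operand is unknown.  */
  add_known_bits (0x8, 0x2, 0x1, 0x0, &val, &mask);
  assert ((mask & 0x1) == 0 && (val & 0x1) == 1);   /* low bit: known 1 */
  assert ((mask & 0x2) != 0);                       /* bit 1 still unknown */
  return 0;
}
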
diff --git a/gcc/tree-ssa-forwprop.c b/gcc/tree-ssa-forwprop.c
index 863a3ef18af..d6623f2e237 100644
--- a/gcc/tree-ssa-forwprop.c
+++ b/gcc/tree-ssa-forwprop.c
@@ -2936,8 +2936,9 @@ combine_conversions (gimple_stmt_iterator *gsi)
tem = fold_build2 (BIT_AND_EXPR, inside_type,
defop0,
wide_int_to_tree
- (inside_type, wide_int::mask (inter_prec, false,
- TYPE_PRECISION (inside_type))));
+ (inside_type,
+ wi::mask (inter_prec, false,
+ TYPE_PRECISION (inside_type))));
if (!useless_type_conversion_p (type, inside_type))
{
tem = force_gimple_operand_gsi (gsi, tem, true, NULL_TREE, true,
diff --git a/gcc/tree-ssa-loop-ivcanon.c b/gcc/tree-ssa-loop-ivcanon.c
index e203caf9536..101130188d6 100644
--- a/gcc/tree-ssa-loop-ivcanon.c
+++ b/gcc/tree-ssa-loop-ivcanon.c
@@ -488,7 +488,7 @@ remove_exits_and_undefined_stmts (struct loop *loop, unsigned int npeeled)
into unreachable (or trap when debugging experience is supposed
to be good). */
if (!elt->is_exit
- && elt->bound.ltu_p (max_wide_int (npeeled)))
+ && wi::ltu_p (elt->bound, npeeled))
{
gimple_stmt_iterator gsi = gsi_for_stmt (elt->stmt);
gimple stmt = gimple_build_call
@@ -505,7 +505,7 @@ remove_exits_and_undefined_stmts (struct loop *loop, unsigned int npeeled)
}
/* If we know the exit will be taken after peeling, update. */
else if (elt->is_exit
- && elt->bound.leu_p (max_wide_int (npeeled)))
+ && wi::leu_p (elt->bound, npeeled))
{
basic_block bb = gimple_bb (elt->stmt);
edge exit_edge = EDGE_SUCC (bb, 0);
@@ -545,7 +545,7 @@ remove_redundant_iv_tests (struct loop *loop)
/* Exit is pointless if it won't be taken before loop reaches
upper bound. */
if (elt->is_exit && loop->any_upper_bound
- && loop->nb_iterations_upper_bound.ltu_p (elt->bound))
+ && wi::ltu_p (loop->nb_iterations_upper_bound, elt->bound))
{
basic_block bb = gimple_bb (elt->stmt);
edge exit_edge = EDGE_SUCC (bb, 0);
@@ -562,7 +562,7 @@ remove_redundant_iv_tests (struct loop *loop)
|| !integer_zerop (niter.may_be_zero)
|| !niter.niter
|| TREE_CODE (niter.niter) != INTEGER_CST
- || !loop->nb_iterations_upper_bound.ltu_p (niter.niter))
+ || !wi::ltu_p (loop->nb_iterations_upper_bound, niter.niter))
continue;
if (dump_file && (dump_flags & TDF_DETAILS))
diff --git a/gcc/tree-ssa-loop-ivopts.c b/gcc/tree-ssa-loop-ivopts.c
index d56874b13ca..5fa1804f52c 100644
--- a/gcc/tree-ssa-loop-ivopts.c
+++ b/gcc/tree-ssa-loop-ivopts.c
@@ -1616,7 +1616,7 @@ constant_multiple_of (tree top, tree bot, max_wide_int *mul)
if (!constant_multiple_of (TREE_OPERAND (top, 0), bot, &res))
return false;
- *mul = (res * mby).sext (precision);
+ *mul = wi::sext (res * mby, precision);
return true;
case PLUS_EXPR:
@@ -1627,19 +1627,19 @@ constant_multiple_of (tree top, tree bot, max_wide_int *mul)
if (code == MINUS_EXPR)
p1 = -p1;
- *mul = (p0 + p1).sext (precision);
+ *mul = wi::sext (p0 + p1, precision);
return true;
case INTEGER_CST:
if (TREE_CODE (bot) != INTEGER_CST)
return false;
- p0 = max_wide_int (top).sext (precision);
- p1 = max_wide_int (bot).sext (precision);
- if (p1.zero_p ())
+ p0 = wi::sext (top, precision);
+ p1 = wi::sext (bot, precision);
+ if (p1 == 0)
return false;
- *mul = p0.sdivmod_floor (p1, &res).sext (precision);
- return res.zero_p ();
+ *mul = wi::sext (wi::divmod_trunc (p0, p1, SIGNED, &res), precision);
+ return res == 0;
default:
return false;
@@ -4053,7 +4053,7 @@ get_computation_cost_at (struct ivopts_data *data,
if (!constant_multiple_of (ustep, cstep, &rat))
return infinite_cost;
- if (rat.fits_shwi_p ())
+ if (wi::fits_shwi_p (rat))
ratio = rat.to_shwi ();
else
return infinite_cost;
@@ -4567,7 +4567,7 @@ iv_elimination_compare_lt (struct ivopts_data *data,
aff_combination_scale (&tmpa, -1);
aff_combination_add (&tmpb, &tmpa);
aff_combination_add (&tmpb, &nit);
- if (tmpb.n != 0 || !tmpb.offset.one_p ())
+ if (tmpb.n != 0 || tmpb.offset != 1)
return false;
/* Finally, check that CAND->IV->BASE - CAND->IV->STEP * A does not
@@ -4659,7 +4659,7 @@ may_eliminate_iv (struct ivopts_data *data,
if (stmt_after_increment (loop, cand, use->stmt))
max_niter += 1;
period_value = period;
- if (max_niter.gtu_p (period_value))
+ if (wi::gtu_p (max_niter, period_value))
{
/* See if we can take advantage of inferred loop bound information. */
if (data->loop_single_exit_p)
@@ -4667,7 +4667,7 @@ may_eliminate_iv (struct ivopts_data *data,
if (!max_loop_iterations (loop, &max_niter))
return false;
/* The loop bound is already adjusted by adding 1. */
- if (max_niter.gtu_p (period_value))
+ if (wi::gtu_p (max_niter, period_value))
return false;
}
else
diff --git a/gcc/tree-ssa-loop-niter.c b/gcc/tree-ssa-loop-niter.c
index 1a6cce10a59..49d2e3d0eff 100644
--- a/gcc/tree-ssa-loop-niter.c
+++ b/gcc/tree-ssa-loop-niter.c
@@ -91,8 +91,8 @@ split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
*var = op0;
off = op1;
/* Always sign extend the offset. */
- off = off.sext (TYPE_PRECISION (type));
- off.to_mpz (offset, SIGNED);
+ off = wi::sext (off, TYPE_PRECISION (type));
+ wi::to_mpz (off, offset, SIGNED);
if (negate)
mpz_neg (offset, offset);
break;
@@ -100,7 +100,7 @@ split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
case INTEGER_CST:
*var = build_int_cst_type (type, 0);
off = expr;
- off.to_mpz (offset, TYPE_SIGN (type));
+ wi::to_mpz (off, offset, TYPE_SIGN (type));
break;
default:
@@ -170,7 +170,7 @@ bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y,
}
mpz_init (m);
- wide_int::minus_one (TYPE_PRECISION (type)).to_mpz (m, UNSIGNED);
+ wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), m, UNSIGNED);
mpz_add_ui (m, m, 1);
mpz_sub (bnds->up, x, y);
mpz_set (bnds->below, bnds->up);
@@ -454,10 +454,10 @@ bounds_add (bounds *bnds, max_wide_int delta, tree type)
mpz_t mdelta, max;
mpz_init (mdelta);
- delta.to_mpz (mdelta, SIGNED);
+ wi::to_mpz (delta, mdelta, SIGNED);
mpz_init (max);
- max_wide_int::minus_one (TYPE_PRECISION (type)).to_mpz (max, UNSIGNED);
+ wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);
mpz_add (bnds->up, bnds->up, mdelta);
mpz_add (bnds->below, bnds->below, mdelta);
@@ -560,7 +560,7 @@ number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
if (integer_onep (s)
|| (TREE_CODE (c) == INTEGER_CST
&& TREE_CODE (s) == INTEGER_CST
- && wide_int (c).mod_trunc (s, TYPE_SIGN (type)).zero_p ())
+ && wi::mod_trunc (c, s, TYPE_SIGN (type)) == 0)
|| (TYPE_OVERFLOW_UNDEFINED (type)
&& multiple_of_p (type, c, s)))
{
@@ -579,16 +579,15 @@ number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
the whole # of iterations analysis will fail). */
if (!no_overflow)
{
- max = max_wide_int::mask (TYPE_PRECISION (type)
- - max_wide_int (s).ctz ().to_uhwi (),
- false);
- max.to_mpz (bnd, UNSIGNED);
+ max = wi::mask <max_wide_int> (TYPE_PRECISION (type) - wi::ctz (s),
+ false);
+ wi::to_mpz (max, bnd, UNSIGNED);
return;
}
/* Now we know that the induction variable does not overflow, so the loop
iterates at most (range of type / S) times. */
- wide_int::minus_one (TYPE_PRECISION (type)).to_mpz (bnd, UNSIGNED);
+ wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), bnd, UNSIGNED);
/* If the induction variable is guaranteed to reach the value of C before
overflow, ... */
@@ -597,13 +596,13 @@ number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
/* ... then we can strengthen this to C / S, and possibly we can use
the upper bound on C given by BNDS. */
if (TREE_CODE (c) == INTEGER_CST)
- wide_int (c).to_mpz (bnd, UNSIGNED);
+ wi::to_mpz (c, bnd, UNSIGNED);
else if (bnds_u_valid)
mpz_set (bnd, bnds->up);
}
mpz_init (d);
- wide_int (s).to_mpz (d, UNSIGNED);
+ wi::to_mpz (s, d, UNSIGNED);
mpz_fdiv_q (bnd, bnd, d);
mpz_clear (d);
}
@@ -654,7 +653,8 @@ number_of_iterations_ne (tree type, affine_iv *iv, tree final,
mpz_init (max);
number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds,
exit_must_be_taken);
- niter->max = max_wide_int::from_mpz (niter_type, max, false);
+ niter->max = max_wide_int::from (wi::from_mpz (niter_type, max, false),
+ TYPE_SIGN (niter_type));
mpz_clear (max);
/* First the trivial cases -- when the step is 1. */
@@ -727,7 +727,7 @@ number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
tmod = fold_convert (type1, mod);
mpz_init (mmod);
- wide_int (mod).to_mpz (mmod, UNSIGNED);
+ wi::to_mpz (mod, mmod, UNSIGNED);
mpz_neg (mmod, mmod);
/* If the induction variable does not overflow and the exit is taken,
@@ -809,7 +809,7 @@ number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
niter->may_be_zero,
noloop);
- bounds_add (bnds, max_wide_int (mod), type);
+ bounds_add (bnds, mod, type);
*delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod);
ret = true;
@@ -928,19 +928,19 @@ assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
dstep = iv0->step;
else
{
- dstep = max_wide_int (iv1->step).sext (TYPE_PRECISION (type));
+ dstep = wi::sext (iv1->step, TYPE_PRECISION (type));
dstep = -dstep;
}
mpz_init (mstep);
- dstep.to_mpz (mstep, UNSIGNED);
+ wi::to_mpz (dstep, mstep, UNSIGNED);
mpz_neg (mstep, mstep);
mpz_add_ui (mstep, mstep, 1);
rolls_p = mpz_cmp (mstep, bnds->below) <= 0;
mpz_init (max);
- wide_int::minus_one (TYPE_PRECISION (type)).to_mpz (max, UNSIGNED);
+ wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);
mpz_add (max, max, mstep);
no_overflow_p = (mpz_cmp (bnds->up, max) <= 0
/* For pointers, only values lying inside a single object
@@ -1067,7 +1067,9 @@ number_of_iterations_lt (tree type, affine_iv *iv0, affine_iv *iv1,
niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
iv1->base, iv0->base);
niter->niter = delta;
- niter->max = max_wide_int::from_mpz (niter_type, bnds->up, false);
+ niter->max = max_wide_int::from (wi::from_mpz (niter_type, bnds->up,
+ false),
+ TYPE_SIGN (niter_type));
return true;
}
@@ -1110,11 +1112,12 @@ number_of_iterations_lt (tree type, affine_iv *iv0, affine_iv *iv1,
mpz_init (mstep);
mpz_init (tmp);
- wide_int (step).to_mpz (mstep, UNSIGNED);
+ wi::to_mpz (step, mstep, UNSIGNED);
mpz_add (tmp, bnds->up, mstep);
mpz_sub_ui (tmp, tmp, 1);
mpz_fdiv_q (tmp, tmp, mstep);
- niter->max = max_wide_int::from_mpz (niter_type, tmp, false);
+ niter->max = max_wide_int::from (wi::from_mpz (niter_type, tmp, false),
+ TYPE_SIGN (niter_type));
mpz_clear (mstep);
mpz_clear (tmp);
@@ -2388,7 +2391,7 @@ derive_constant_upper_bound_ops (tree type, tree op0,
switch (code)
{
case INTEGER_CST:
- return max_wide_int (op0);
+ return op0;
CASE_CONVERT:
subtype = TREE_TYPE (op0);
@@ -2410,7 +2413,7 @@ derive_constant_upper_bound_ops (tree type, tree op0,
/* If the bound does not fit in TYPE, max. value of TYPE could be
attained. */
- if (max.ltu_p (bnd))
+ if (wi::ltu_p (max, bnd))
return max;
return bnd;
@@ -2426,24 +2429,24 @@ derive_constant_upper_bound_ops (tree type, tree op0,
choose the most logical way to treat this constant regardless
of the signedness of the type. */
cst = op1;
- cst = cst.sext (TYPE_PRECISION (type));
+ cst = wi::sext (cst, TYPE_PRECISION (type));
if (code != MINUS_EXPR)
cst = -cst;
bnd = derive_constant_upper_bound (op0);
- if (cst.neg_p ())
+ if (wi::neg_p (cst))
{
cst = -cst;
/* Avoid CST == 0x80000... */
- if (cst.neg_p ())
+ if (wi::neg_p (cst))
return max;
/* OP0 + CST. We need to check that
BND <= MAX (type) - CST. */
mmax -= cst;
- if (bnd.ltu_p (mmax))
+ if (wi::ltu_p (bnd, mmax))
return max;
return bnd + cst;
@@ -2463,7 +2466,7 @@ derive_constant_upper_bound_ops (tree type, tree op0,
/* This should only happen if the type is unsigned; however, for
buggy programs that use overflowing signed arithmetics even with
-fno-wrapv, this condition may also be true for signed values. */
- if (bnd.ltu_p (cst))
+ if (wi::ltu_p (bnd, cst))
return max;
if (TYPE_UNSIGNED (type))
@@ -2486,7 +2489,7 @@ derive_constant_upper_bound_ops (tree type, tree op0,
return max;
bnd = derive_constant_upper_bound (op0);
- return bnd.udiv_floor (max_wide_int (op1));
+ return wi::udiv_floor (bnd, op1);
case BIT_AND_EXPR:
if (TREE_CODE (op1) != INTEGER_CST
@@ -2519,14 +2522,14 @@ record_niter_bound (struct loop *loop, const max_wide_int &i_bound,
current estimation is smaller. */
if (upper
&& (!loop->any_upper_bound
- || i_bound.ltu_p (loop->nb_iterations_upper_bound)))
+ || wi::ltu_p (i_bound, loop->nb_iterations_upper_bound)))
{
loop->any_upper_bound = true;
loop->nb_iterations_upper_bound = i_bound;
}
if (realistic
&& (!loop->any_estimate
- || i_bound.ltu_p (loop->nb_iterations_estimate)))
+ || wi::ltu_p (i_bound, loop->nb_iterations_estimate)))
{
loop->any_estimate = true;
loop->nb_iterations_estimate = i_bound;
@@ -2536,7 +2539,8 @@ record_niter_bound (struct loop *loop, const max_wide_int &i_bound,
number of iterations, use the upper bound instead. */
if (loop->any_upper_bound
&& loop->any_estimate
- && loop->nb_iterations_upper_bound.ltu_p (loop->nb_iterations_estimate))
+ && wi::ltu_p (loop->nb_iterations_upper_bound,
+ loop->nb_iterations_estimate))
loop->nb_iterations_estimate = loop->nb_iterations_upper_bound;
}
@@ -2557,7 +2561,7 @@ do_warn_aggressive_loop_optimizations (struct loop *loop,
|| loop->warned_aggressive_loop_optimizations
/* Only warn if undefined behavior gives us lower estimate than the
known constant bound. */
- || i_bound.cmpu (loop->nb_iterations) >= 0
+ || wi::cmpu (i_bound, loop->nb_iterations) >= 0
/* And undefined behavior happens unconditionally. */
|| !dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (stmt)))
return;
@@ -2605,7 +2609,7 @@ record_estimate (struct loop *loop, tree bound, max_wide_int i_bound,
if (TREE_CODE (bound) != INTEGER_CST)
realistic = false;
else
- gcc_checking_assert (i_bound == max_wide_int (bound));
+ gcc_checking_assert (i_bound == bound);
if (!upper && !realistic)
return;
@@ -2642,7 +2646,7 @@ record_estimate (struct loop *loop, tree bound, max_wide_int i_bound,
i_bound += delta;
/* If an overflow occurred, ignore the result. */
- if (i_bound.ltu_p (delta))
+ if (wi::ltu_p (i_bound, delta))
return;
if (upper && !is_exit)
@@ -3031,7 +3035,7 @@ wide_int_cmp (const void *p1, const void *p2)
{
const max_wide_int *d1 = (const max_wide_int *)p1;
const max_wide_int *d2 = (const max_wide_int *)p2;
- return (*d1).cmpu (*d2);
+ return wi::cmpu (*d1, *d2);
}
/* Return index of BOUND in BOUNDS array sorted in increasing order.
@@ -3051,7 +3055,7 @@ bound_index (vec<max_wide_int> bounds, const max_wide_int &bound)
if (index == bound)
return middle;
- else if (index.ltu_p (bound))
+ else if (wi::ltu_p (index, bound))
begin = middle + 1;
else
end = middle;
@@ -3088,12 +3092,12 @@ discover_iteration_bound_by_body_walk (struct loop *loop)
{
bound += 1;
/* If an overflow occurred, ignore the result. */
- if (bound.zero_p ())
+ if (bound == 0)
continue;
}
if (!loop->any_upper_bound
- || bound.ltu_p (loop->nb_iterations_upper_bound))
+ || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
bounds.safe_push (bound);
}
@@ -3119,12 +3123,12 @@ discover_iteration_bound_by_body_walk (struct loop *loop)
{
bound += 1;
/* If an overflow occurred, ignore the result. */
- if (bound.zero_p ())
+ if (bound == 0)
continue;
}
if (!loop->any_upper_bound
- || bound.ltu_p (loop->nb_iterations_upper_bound))
+ || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
{
ptrdiff_t index = bound_index (bounds, bound);
void **entry = pointer_map_contains (bb_bounds,
@@ -3259,7 +3263,7 @@ maybe_lower_iteration_bound (struct loop *loop)
for (elt = loop->bounds; elt; elt = elt->next)
{
if (!elt->is_exit
- && elt->bound.ltu_p (loop->nb_iterations_upper_bound))
+ && wi::ltu_p (elt->bound, loop->nb_iterations_upper_bound))
{
if (!not_executed_last_iteration)
not_executed_last_iteration = pointer_set_create ();
@@ -3475,7 +3479,7 @@ estimated_loop_iterations_int (struct loop *loop)
if (!estimated_loop_iterations (loop, &nit))
return -1;
- if (!nit.fits_shwi_p ())
+ if (!wi::fits_shwi_p (nit))
return -1;
hwi_nit = nit.to_shwi ();
@@ -3495,7 +3499,7 @@ max_loop_iterations_int (struct loop *loop)
if (!max_loop_iterations (loop, &nit))
return -1;
- if (!nit.fits_shwi_p ())
+ if (!wi::fits_shwi_p (nit))
return -1;
hwi_nit = nit.to_shwi ();
@@ -3556,7 +3560,7 @@ max_stmt_executions (struct loop *loop, max_wide_int *nit)
*nit += 1;
- return (*nit).gtu_p (nit_minus_one);
+ return wi::gtu_p (*nit, nit_minus_one);
}
/* Sets NIT to the estimated number of executions of the latch of the
@@ -3575,7 +3579,7 @@ estimated_stmt_executions (struct loop *loop, max_wide_int *nit)
*nit += 1;
- return (*nit).gtu_p (nit_minus_one);
+ return wi::gtu_p (*nit, nit_minus_one);
}
/* Records estimates on numbers of iterations of loops. */
@@ -3655,7 +3659,7 @@ n_of_executions_at_most (gimple stmt,
/* If the bound does not even fit into NIT_TYPE, it cannot tell us that
the number of iterations is small. */
- if (!bound.fits_to_tree_p (nit_type))
+ if (!wi::fits_to_tree_p (bound, nit_type))
return false;
/* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
@@ -3699,8 +3703,8 @@ n_of_executions_at_most (gimple stmt,
if (gimple_has_side_effects (gsi_stmt (bsi)))
return false;
bound += 1;
- if (bound.zero_p ()
- || !bound.fits_to_tree_p (nit_type))
+ if (bound == 0
+ || !wi::fits_to_tree_p (bound, nit_type))
return false;
}
cmp = GT_EXPR;
@@ -3811,7 +3815,7 @@ scev_probably_wraps_p (tree base, tree step,
estimate_numbers_of_iterations_loop (loop);
if (max_loop_iterations (loop, &niter)
- && niter.fits_to_tree_p (TREE_TYPE (valid_niter))
+ && wi::fits_to_tree_p (niter, TREE_TYPE (valid_niter))
&& (e = fold_binary (GT_EXPR, boolean_type_node, valid_niter,
wide_int_to_tree (TREE_TYPE (valid_niter),
niter))) != NULL
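
number_of_iterations_ne_max relies on a compact fact when overflow cannot be ruled out: an induction variable stepping by S on a PREC-bit type cycles with period 2^(PREC - ctz (S)), so an i != C exit can be taken after at most wi::mask (PREC - ctz (S), false) further iterations. The claim is easy to verify exhaustively on an 8-bit type:

#include <stdint.h>
#include <assert.h>

int main (void)
{
  const unsigned prec = 8;
  for (unsigned s = 1; s < 256; s++)
    {
      /* Walk i += s until it returns to the start value.  */
      uint8_t i = 0;
      unsigned count = 0;
      do { i = (uint8_t) (i + s); count++; } while (i != 0);

      /* Period is exactly 2^(prec - ctz (s)), i.e. mask + 1.  */
      unsigned mask = (1u << (prec - __builtin_ctz (s))) - 1;
      assert (count == mask + 1);
    }
  return 0;
}
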
diff --git a/gcc/tree-ssa-phiopt.c b/gcc/tree-ssa-phiopt.c
index 8418648abb1..6b7dd3399d2 100644
--- a/gcc/tree-ssa-phiopt.c
+++ b/gcc/tree-ssa-phiopt.c
@@ -712,7 +712,7 @@ jump_function_from_stmt (tree *arg, gimple stmt)
&offset);
if (tem
&& TREE_CODE (tem) == MEM_REF
- && (mem_ref_offset (tem) + offset).zero_p ())
+ && (mem_ref_offset (tem) + offset) == 0)
{
*arg = TREE_OPERAND (tem, 0);
return true;
diff --git a/gcc/tree-ssa-pre.c b/gcc/tree-ssa-pre.c
index 9e762e051c1..24eda8a4844 100644
--- a/gcc/tree-ssa-pre.c
+++ b/gcc/tree-ssa-pre.c
@@ -1605,7 +1605,7 @@ phi_translate_1 (pre_expr expr, bitmap_set_t set1, bitmap_set_t set2,
addr_wide_int off = op[0];
off += -addr_wide_int (op[1]);
off *= addr_wide_int (op[2]);
- if (off.fits_shwi_p ())
+ if (wi::fits_shwi_p (off))
newop.off = off.to_shwi ();
}
newoperands[j] = newop;
diff --git a/gcc/tree-ssa-sccvn.c b/gcc/tree-ssa-sccvn.c
index 508042fd4ae..ef21995e9a5 100644
--- a/gcc/tree-ssa-sccvn.c
+++ b/gcc/tree-ssa-sccvn.c
@@ -803,10 +803,10 @@ copy_reference_ops_from_ref (tree ref, vec<vn_reference_op_s> *result)
{
addr_wide_int off
= (addr_wide_int (this_offset)
- + addr_wide_int (bit_offset)
- .rshiftu (BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT)));
- if (off.fits_shwi_p ())
+ + wi::lrshift (addr_wide_int (bit_offset),
+ BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT)));
+ if (wi::fits_shwi_p (off))
temp.off = off.to_shwi ();
}
}
@@ -826,7 +826,7 @@ copy_reference_ops_from_ref (tree ref, vec<vn_reference_op_s> *result)
addr_wide_int off = temp.op0;
off += -addr_wide_int (temp.op1);
off *= addr_wide_int (temp.op2);
- if (off.fits_shwi_p ())
+ if (wi::fits_shwi_p (off))
temp.off = off.to_shwi ();
}
break;
@@ -1147,8 +1147,8 @@ vn_reference_fold_indirect (vec<vn_reference_op_s> *ops,
gcc_checking_assert (addr_base && TREE_CODE (addr_base) != MEM_REF);
if (addr_base != TREE_OPERAND (op->op0, 0))
{
- addr_wide_int off = addr_wide_int (mem_op->op0)
- .sext (TYPE_PRECISION (TREE_TYPE (mem_op->op0)));
+ addr_wide_int off = wi::sext (addr_wide_int (mem_op->op0),
+ TYPE_PRECISION (TREE_TYPE (mem_op->op0)));
off += addr_offset;
mem_op->op0 = wide_int_to_tree (TREE_TYPE (mem_op->op0), off);
op->op0 = build_fold_addr_expr (addr_base);
@@ -1181,8 +1181,8 @@ vn_reference_maybe_forwprop_address (vec<vn_reference_op_s> *ops,
&& code != POINTER_PLUS_EXPR)
return;
- off = addr_wide_int (mem_op->op0)
- .sext (TYPE_PRECISION (TREE_TYPE (mem_op->op0)));
+ off = wi::sext (addr_wide_int (mem_op->op0),
+ TYPE_PRECISION (TREE_TYPE (mem_op->op0)));
/* The only thing we have to do is from &OBJ.foo.bar add the offset
from .foo.bar to the preceding MEM_REF offset and replace the
@@ -1373,7 +1373,7 @@ valueize_refs_1 (vec<vn_reference_op_s> orig, bool *valueized_anything)
addr_wide_int off = vro->op0;
off += -addr_wide_int (vro->op1);
off *= addr_wide_int (vro->op2);
- if (off.fits_shwi_p ())
+ if (wi::fits_shwi_p (off))
vro->off = off.to_shwi ();
}
}
diff --git a/gcc/tree-ssa-structalias.c b/gcc/tree-ssa-structalias.c
index 72bc5049dc7..c65b5251e00 100644
--- a/gcc/tree-ssa-structalias.c
+++ b/gcc/tree-ssa-structalias.c
@@ -3011,9 +3011,9 @@ get_constraint_for_ptr_offset (tree ptr, tree offset,
else
{
/* Sign-extend the offset. */
- addr_wide_int soffset = addr_wide_int (offset)
- .sext (TYPE_PRECISION (TREE_TYPE (offset)));
- if (!soffset.fits_shwi_p ())
+ addr_wide_int soffset = wi::sext (addr_wide_int (offset),
+ TYPE_PRECISION (TREE_TYPE (offset)));
+ if (!wi::fits_shwi_p (soffset))
rhsoffset = UNKNOWN_OFFSET;
else
{
diff --git a/gcc/tree-ssa.c b/gcc/tree-ssa.c
index aa3bff6699f..b2e5cf34671 100644
--- a/gcc/tree-ssa.c
+++ b/gcc/tree-ssa.c
@@ -1828,9 +1828,9 @@ non_rewritable_mem_ref_base (tree ref)
|| TREE_CODE (TREE_TYPE (decl)) == COMPLEX_TYPE)
&& useless_type_conversion_p (TREE_TYPE (base),
TREE_TYPE (TREE_TYPE (decl)))
- && mem_ref_offset (base).fits_uhwi_p ()
- && addr_wide_int (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
- .gtu_p (mem_ref_offset (base))
+ && wi::fits_uhwi_p (mem_ref_offset (base))
+ && wi::gtu_p (TYPE_SIZE_UNIT (TREE_TYPE (decl)),
+ mem_ref_offset (base))
&& multiple_of_p (sizetype, TREE_OPERAND (base, 1),
TYPE_SIZE_UNIT (TREE_TYPE (base))))
return NULL_TREE;
diff --git a/gcc/tree-switch-conversion.c b/gcc/tree-switch-conversion.c
index 49eb07fc0f2..a3a9ab0aded 100644
--- a/gcc/tree-switch-conversion.c
+++ b/gcc/tree-switch-conversion.c
@@ -974,14 +974,14 @@ array_value_type (gimple swtch, tree type, int num,
if (prec > HOST_BITS_PER_WIDE_INT)
return type;
- if (sign >= 0 && cst == cst.zext (prec))
+ if (sign >= 0 && cst == wi::zext (cst, prec))
{
- if (sign == 0 && cst == cst.sext (prec))
+ if (sign == 0 && cst == wi::sext (cst, prec))
break;
sign = 1;
break;
}
- if (sign <= 0 && cst == cst.sext (prec))
+ if (sign <= 0 && cst == wi::sext (cst, prec))
{
sign = -1;
break;
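
The round-trip tests above are how array_value_type asks whether a case value fits a narrower type: CST fits in PREC unsigned bits iff zero-extending its low PREC bits gives CST back, and fits in PREC signed bits iff sign-extending does. The same probes on a 64-bit host integer (PREC < 64 assumed):

#include <stdint.h>
#include <assert.h>

static int fits_unsigned (int64_t cst, unsigned prec)   /* cst == zext (cst) */
{
  return cst == (int64_t) ((uint64_t) cst & ((1ull << prec) - 1));
}

static int fits_signed (int64_t cst, unsigned prec)     /* cst == sext (cst) */
{
  unsigned s = 64 - prec;
  return cst == (((int64_t) ((uint64_t) cst << s)) >> s);
}

int main (void)
{
  assert (fits_unsigned (200, 8) && !fits_signed (200, 8));
  assert (fits_signed (-1, 8) && !fits_unsigned (-1, 8));
  assert (fits_signed (100, 8) && fits_unsigned (100, 8));
  return 0;
}
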
diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c
index 915b61eecc3..c51b3e2c316 100644
--- a/gcc/tree-vect-loop.c
+++ b/gcc/tree-vect-loop.c
@@ -5848,16 +5848,16 @@ vect_transform_loop (loop_vec_info loop_vinfo)
scale_loop_profile (loop, GCOV_COMPUTE_SCALE (1, vectorization_factor),
expected_iterations / vectorization_factor);
loop->nb_iterations_upper_bound
- = loop->nb_iterations_upper_bound.udiv_floor (vectorization_factor);
+ = wi::udiv_floor (loop->nb_iterations_upper_bound, vectorization_factor);
if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
- && !loop->nb_iterations_upper_bound.zero_p ())
+ && loop->nb_iterations_upper_bound != 0)
loop->nb_iterations_upper_bound = loop->nb_iterations_upper_bound - 1;
if (loop->any_estimate)
{
loop->nb_iterations_estimate
- = loop->nb_iterations_estimate.udiv_floor (vectorization_factor);
+ = wi::udiv_floor (loop->nb_iterations_estimate, vectorization_factor);
if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
- && !loop->nb_iterations_estimate.zero_p ())
+ && loop->nb_iterations_estimate != 0)
loop->nb_iterations_estimate = loop->nb_iterations_estimate - 1;
}
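
The two updates above shrink the recorded iteration bounds after vectorization: B scalar latch iterations become floor (B / VF) vector iterations (wi::udiv_floor), and peeling for gaps costs one more vector iteration when any remain. Restated as toy arithmetic, not vectorizer code:

#include <assert.h>

static unsigned long vector_bound (unsigned long scalar_bound,
                                   unsigned vf, int peel_for_gaps)
{
  unsigned long b = scalar_bound / vf;        /* wi::udiv_floor */
  if (peel_for_gaps && b != 0)
    b -= 1;
  return b;
}

int main (void)
{
  assert (vector_bound (17, 4, 0) == 4);
  assert (vector_bound (17, 4, 1) == 3);
  assert (vector_bound (3, 4, 1) == 0);       /* never goes negative */
  return 0;
}
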
diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
index 9d3f944da16..dfe423228ad 100644
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -1615,12 +1615,14 @@ extract_range_from_assert (value_range_t *vr_p, tree expr)
conversion. We are willingly interpreting large positive
unsigned values as negative signed values here. */
min = force_fit_type (TREE_TYPE (var),
- wide_int (min).force_to_size (TYPE_PRECISION (TREE_TYPE (var)),
- TYPE_SIGN (TREE_TYPE (min))),
+ wide_int::from (min,
+ TYPE_PRECISION (TREE_TYPE (var)),
+ TYPE_SIGN (TREE_TYPE (min))),
0, false);
max = force_fit_type (TREE_TYPE (var),
- wide_int (max).force_to_size (TYPE_PRECISION (TREE_TYPE (var)),
- TYPE_SIGN (TREE_TYPE (max))),
+ wide_int::from (max,
+ TYPE_PRECISION (TREE_TYPE (var)),
+ TYPE_SIGN (TREE_TYPE (max))),
0, false);
/* We can transform a max, min range to an anti-range or
@@ -1984,8 +1986,8 @@ zero_nonzero_bits_from_vr (const tree expr_type,
wide_int *may_be_nonzero,
wide_int *must_be_nonzero)
{
- *may_be_nonzero = wide_int::minus_one (TYPE_PRECISION (expr_type));
- *must_be_nonzero = wide_int::zero (TYPE_PRECISION (expr_type));
+ *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
+ *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
if (!range_int_cst_p (vr)
|| TREE_OVERFLOW (vr->min)
|| TREE_OVERFLOW (vr->max))
@@ -2004,11 +2006,10 @@ zero_nonzero_bits_from_vr (const tree expr_type,
wide_int xor_mask = wmin ^ wmax;
*may_be_nonzero = wmin | wmax;
*must_be_nonzero = wmin & wmax;
- if (!xor_mask.zero_p ())
+ if (xor_mask != 0)
{
- wide_int mask = wide_int::mask (xor_mask.floor_log2 ().to_shwi (),
- false,
- (*may_be_nonzero).get_precision ());
+ wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
+ (*may_be_nonzero).get_precision ());
*may_be_nonzero = (*may_be_nonzero) | mask;
*must_be_nonzero = (*must_be_nonzero).and_not (mask);
}
@@ -2396,10 +2397,8 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
wide_int max0 = wide_int (vr0.max);
wide_int min1 = wide_int (vr1.min);
wide_int max1 = wide_int (vr1.max);
- wide_int type_min
- = wide_int::min_value (TYPE_PRECISION (expr_type), sgn);
- wide_int type_max
- = wide_int::max_value (TYPE_PRECISION (expr_type), sgn);
+ wide_int type_min = wi::min_value (TYPE_PRECISION (expr_type), sgn);
+ wide_int type_max = wi::max_value (TYPE_PRECISION (expr_type), sgn);
wide_int wmin, wmax;
int min_ovf = 0;
int max_ovf = 0;
@@ -2410,20 +2409,20 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
wmax = max0 + max1;
/* Check for overflow. */
- if (min1.cmp (0, sgn) != wmin.cmp (min0, sgn))
- min_ovf = min0.cmp (wmin, sgn);
- if (max1.cmp (0, sgn) != wmax.cmp (max0, sgn))
- max_ovf = max0.cmp (wmax, sgn);
+ if (wi::cmp (min1, 0, sgn) != wi::cmp (wmin, min0, sgn))
+ min_ovf = wi::cmp (min0, wmin, sgn);
+ if (wi::cmp (max1, 0, sgn) != wi::cmp (wmax, max0, sgn))
+ max_ovf = wi::cmp (max0, wmax, sgn);
}
else /* if (code == MINUS_EXPR) */
{
wmin = min0 - max1;
wmax = max0 - min1;
- if (wide_int (0).cmp (max1, sgn) != wmin.cmp (min0, sgn))
- min_ovf = min0.cmp (max1, sgn);
- if (wide_int (0).cmp (min1, sgn) != wmax.cmp (max0, sgn))
- max_ovf = max0.cmp (min1, sgn);
+ if (wi::cmp (0, max1, sgn) != wi::cmp (wmin, min0, sgn))
+ min_ovf = wi::cmp (min0, max1, sgn);
+ if (wi::cmp (0, min1, sgn) != wi::cmp (wmax, max0, sgn))
+ max_ovf = wi::cmp (max0, min1, sgn);
}
/* For non-wrapping arithmetic look at possibly smaller
@@ -2439,16 +2438,16 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
/* Check for type overflow. */
if (min_ovf == 0)
{
- if (wmin.cmp (type_min, sgn) == -1)
+ if (wi::cmp (wmin, type_min, sgn) == -1)
min_ovf = -1;
- else if (wmin.cmp (type_max, sgn) == 1)
+ else if (wi::cmp (wmin, type_max, sgn) == 1)
min_ovf = 1;
}
if (max_ovf == 0)
{
- if (wmax.cmp (type_min, sgn) == -1)
+ if (wi::cmp (wmax, type_min, sgn) == -1)
max_ovf = -1;
- else if (wmax.cmp (type_max, sgn) == 1)
+ else if (wi::cmp (wmax, type_max, sgn) == 1)
max_ovf = 1;
}
@@ -2456,8 +2455,8 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
{
/* If overflow wraps, truncate the values and adjust the
range kind and bounds appropriately. */
- wide_int tmin = wmin.force_to_size (prec, sgn);
- wide_int tmax = wmax.force_to_size (prec, sgn);
+ wide_int tmin = wide_int::from (wmin, prec, sgn);
+ wide_int tmax = wide_int::from (wmax, prec, sgn);
if (min_ovf == max_ovf)
{
/* No overflow or both overflow or underflow. The
@@ -2482,15 +2481,15 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
|| (max_ovf == 1 && min_ovf == 0));
type = VR_ANTI_RANGE;
tmin = tmax + 1;
- if (tmin.cmp (tmax, sgn) < 0)
+ if (wi::cmp (tmin, tmax, sgn) < 0)
covers = true;
tmax = tem - 1;
- if (tmax.cmp (tem, sgn) > 0)
+ if (wi::cmp (tmax, tem, sgn) > 0)
covers = true;
/* If the anti-range would cover nothing, drop to varying.
Likewise if the anti-range bounds are outside of the
	     type's values. */
- if (covers || tmin.cmp (tmax, sgn) > 0)
+ if (covers || wi::cmp (tmin, tmax, sgn) > 0)
{
set_value_range_to_varying (vr);
return;
@@ -2638,27 +2637,27 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
{
wide_int min0, max0, min1, max1;
wide_int prod0, prod1, prod2, prod3;
- wide_int sizem1 = wide_int::max_value (prec, UNSIGNED, prec2);
+ wide_int sizem1 = wi::mask (prec, false, prec2);
wide_int size = sizem1 + 1;
/* Extend the values using the sign of the result to PREC2.
	     From here on out, everything is just signed math no matter
what the input types were. */
- min0 = wide_int (vr0.min).force_to_size (prec2, sign);
- max0 = wide_int (vr0.max).force_to_size (prec2, sign);
- min1 = wide_int (vr1.min).force_to_size (prec2, sign);
- max1 = wide_int (vr1.max).force_to_size (prec2, sign);
+ min0 = wide_int::from (vr0.min, prec2, sign);
+ max0 = wide_int::from (vr0.max, prec2, sign);
+ min1 = wide_int::from (vr1.min, prec2, sign);
+ max1 = wide_int::from (vr1.max, prec2, sign);
/* Canonicalize the intervals. */
if (sign == UNSIGNED)
{
- if (size.ltu_p (min0 + max0))
+ if (wi::ltu_p (size, min0 + max0))
{
min0 -= size;
max0 -= size;
}
- if (size.ltu_p (min1 + max1))
+ if (wi::ltu_p (size, min1 + max1))
{
min1 -= size;
max1 -= size;
@@ -2673,7 +2672,7 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
/* Sort the 4 products so that min is in prod0 and max is in
prod3. */
/* min0min1 > max0max1 */
- if (prod0.gts_p (prod3))
+ if (wi::gts_p (prod0, prod3))
{
wide_int tmp = prod3;
prod3 = prod0;
@@ -2681,21 +2680,21 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
}
/* min0max1 > max0min1 */
- if (prod1.gts_p (prod2))
+ if (wi::gts_p (prod1, prod2))
{
wide_int tmp = prod2;
prod2 = prod1;
prod1 = tmp;
}
- if (prod0.gts_p (prod1))
+ if (wi::gts_p (prod0, prod1))
{
wide_int tmp = prod1;
prod1 = prod0;
prod0 = tmp;
}
- if (prod2.gts_p (prod3))
+ if (wi::gts_p (prod2, prod3))
{
wide_int tmp = prod3;
prod3 = prod2;
@@ -2704,7 +2703,7 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
/* diff = max - min. */
prod2 = prod3 - prod0;
- if (prod2.geu_p (sizem1))
+ if (wi::geu_p (prod2, sizem1))
{
/* the range covers all values. */
set_value_range_to_varying (vr);
@@ -2761,10 +2760,10 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
bool saved_flag_wrapv;
value_range_t vr1p = VR_INITIALIZER;
vr1p.type = VR_RANGE;
- vr1p.min
- = wide_int_to_tree (expr_type,
- wide_int::set_bit_in_zero (tree_to_shwi (vr1.min),
- TYPE_PRECISION (expr_type)));
+ vr1p.min = (wide_int_to_tree
+ (expr_type,
+ wi::set_bit_in_zero (tree_to_shwi (vr1.min),
+ TYPE_PRECISION (expr_type))));
vr1p.max = vr1p.min;
/* We have to use a wrapping multiply though as signed overflow
on lshifts is implementation defined in C89. */
@@ -2794,21 +2793,21 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
zero, which means vr1 is a singleton range of zero, which
means it should be handled by the previous LSHIFT_EXPR
if-clause. */
- bound = wide_int::set_bit_in_zero (bound_shift, prec);
+ bound = wi::set_bit_in_zero (bound_shift, prec);
complement = ~(bound - 1);
if (uns)
{
low_bound = bound;
high_bound = complement;
- if (wide_int::ltu_p (vr0.max, low_bound))
+ if (wi::ltu_p (vr0.max, low_bound))
{
/* [5, 6] << [1, 2] == [10, 24]. */
/* We're shifting out only zeroes, the value increases
monotonically. */
in_bounds = true;
}
- else if (high_bound.ltu_p (vr0.min))
+ else if (wi::ltu_p (high_bound, vr0.min))
{
/* [0xffffff00, 0xffffffff] << [1, 2]
== [0xfffffc00, 0xfffffffe]. */
@@ -2822,8 +2821,8 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
/* [-1, 1] << [1, 2] == [-4, 4]. */
low_bound = complement;
high_bound = bound;
- if (wide_int::lts_p (vr0.max, high_bound)
- && low_bound.lts_p (wide_int (vr0.min)))
+ if (wi::lts_p (vr0.max, high_bound)
+ && wi::lts_p (low_bound, vr0.min))
{
/* For non-negative numbers, we're shifting out only
zeroes, the value increases monotonically.
@@ -2981,16 +2980,16 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
&& tree_int_cst_sgn (vr0.max) < 0
&& tree_int_cst_sgn (vr1.max) < 0)
{
- wmax = wmax.min (vr0.max, TYPE_SIGN (expr_type));
- wmax = wmax.min (vr1.max, TYPE_SIGN (expr_type));
+ wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type));
+ wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type));
}
/* If either input range contains only non-negative values
we can truncate the result range maximum to the respective
maximum of the input range. */
if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
- wmax = wmax.min (vr0.max, TYPE_SIGN (expr_type));
+ wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type));
if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
- wmax = wmax.min (vr1.max, TYPE_SIGN (expr_type));
+ wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type));
max = wide_int_to_tree (expr_type, wmax);
}
else if (code == BIT_IOR_EXPR)
@@ -3006,16 +3005,16 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
&& tree_int_cst_sgn (vr0.min) >= 0
&& tree_int_cst_sgn (vr1.min) >= 0)
{
- wmin = wmin.max (vr0.min, TYPE_SIGN (expr_type));
- wmin = wmin.max (vr1.min, TYPE_SIGN (expr_type));
+ wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type));
+ wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type));
}
/* If either input range contains only negative values
we can truncate the minimum of the result range to the
respective minimum range. */
if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0)
- wmin = wmin.max (vr0.min, TYPE_SIGN (expr_type));
+ wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type));
if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0)
- wmin = wmin.max (vr1.min, TYPE_SIGN (expr_type));
+ wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type));
min = wide_int_to_tree (expr_type, wmin);
}
else if (code == BIT_XOR_EXPR)
@@ -3242,15 +3241,19 @@ extract_range_from_unary_expr_1 (value_range_t *vr,
new_min = negative_overflow_infinity (outer_type);
else
new_min = force_fit_type (outer_type,
- wide_int (vr0.min).force_to_size (TYPE_PRECISION (outer_type),
- TYPE_SIGN (TREE_TYPE (vr0.min))),
+ wide_int::from
+ (vr0.min,
+ TYPE_PRECISION (outer_type),
+ TYPE_SIGN (TREE_TYPE (vr0.min))),
0, false);
if (is_overflow_infinity (vr0.max))
new_max = positive_overflow_infinity (outer_type);
else
new_max = force_fit_type (outer_type,
- wide_int (vr0.max).force_to_size (TYPE_PRECISION (outer_type),
- TYPE_SIGN (TREE_TYPE (vr0.max))),
+ wide_int::from
+ (vr0.max,
+ TYPE_PRECISION (outer_type),
+ TYPE_SIGN (TREE_TYPE (vr0.max))),
0, false);
set_and_canonicalize_value_range (vr, vr0.type,
new_min, new_max, NULL);
@@ -3835,16 +3838,16 @@ adjust_range_with_scev (value_range_t *vr, struct loop *loop,
signop sgn = TYPE_SIGN (TREE_TYPE (step));
bool overflow;
- wtmp = max_wide_int (step).mul (nit, sgn, &overflow);
+ wtmp = wi::mul (step, nit, sgn, &overflow);
/* If the multiplication overflowed we can't do a meaningful
adjustment. Likewise if the result doesn't fit in the type
of the induction variable. For a signed type we have to
check whether the result has the expected signedness which
is that of the step as number of iterations is unsigned. */
if (!overflow
- && wtmp.fits_to_tree_p (TREE_TYPE (init))
+ && wi::fits_to_tree_p (wtmp, TREE_TYPE (init))
&& (sgn == UNSIGNED
- || (wtmp.gts_p (0) == wide_int::gts_p (step, 0))))
+ || wi::gts_p (wtmp, 0) == wi::gts_p (step, 0)))
{
tem = wide_int_to_tree (TREE_TYPE (init), wtmp);
extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
@@ -4724,19 +4727,19 @@ static wide_int
masked_increment (wide_int val, wide_int mask, wide_int sgnbit,
unsigned int prec)
{
- wide_int bit = wide_int::one (prec), res;
+ wide_int bit = wi::one (prec), res;
unsigned int i;
val ^= sgnbit;
for (i = 0; i < prec; i++, bit += bit)
{
res = mask;
- if ((res & bit).zero_p ())
+ if ((res & bit) == 0)
continue;
res = bit - 1;
res = (val + bit).and_not (res);
res &= mask;
- if (res.gtu_p (val))
+ if (wi::gtu_p (res, val))
return res ^ sgnbit;
}
return val ^ sgnbit;
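/* A hedged sketch of the contract the callers below rely on:
   masked_increment searches for the least X greater than VAL whose set
   bits all lie within MASK (after XOR-biasing by SGNBIT so signed
   ranges can reuse the unsigned search), and it signals failure by
   returning VAL unchanged:

     wide_int next = masked_increment (valv, cst2v, sgnbit, nprec);
     if (next == valv)
       ... no tighter bound exists at this precision ...  */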
@@ -4911,8 +4914,8 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
gimple def_stmt = SSA_NAME_DEF_STMT (name);
tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
tree val2 = NULL_TREE;
- wide_int mask = 0;
unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
+ wide_int mask = wi::zero (prec);
unsigned int nprec = prec;
enum tree_code rhs_code = ERROR_MARK;
@@ -4985,7 +4988,7 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
&& live_on_edge (e, name2)
&& !has_single_use (name2))
{
- mask = wide_int::mask (tree_to_uhwi (cst2), false, prec);
+ mask = wi::mask (tree_to_uhwi (cst2), false, prec);
val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
}
}
@@ -5014,7 +5017,7 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
{
wide_int minval
- = wide_int::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
+ = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
new_val = val2;
if (minval == wide_int (new_val))
new_val = NULL_TREE;
@@ -5022,7 +5025,7 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
else
{
wide_int maxval
- = wide_int::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
+ = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
mask |= wide_int (val2);
if (mask == maxval)
new_val = NULL_TREE;
@@ -5101,17 +5104,17 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
}
if (names[0] || names[1])
{
- wide_int minv, maxv = 0, valv, cst2v;
+ wide_int minv, maxv, valv, cst2v;
wide_int tem, sgnbit;
bool valid_p = false, valn = false, cst2n = false;
enum tree_code ccode = comp_code;
- valv = wide_int (val).zforce_to_size (nprec);
- cst2v = wide_int (cst2).zforce_to_size (nprec);
+ valv = wide_int::from (val, nprec, UNSIGNED);
+ cst2v = wide_int::from (cst2, nprec, UNSIGNED);
if (TYPE_SIGN (TREE_TYPE (val)) == SIGNED)
{
- valn = valv.sext (nprec).neg_p ();
- cst2n = cst2v.sext (nprec).neg_p ();
+ valn = wi::neg_p (wi::sext (valv, nprec));
+ cst2n = wi::neg_p (wi::sext (cst2v, nprec));
}
	  /* If CST2 doesn't have the most significant bit set,
	     but VAL is negative, we have a comparison like
@@ -5119,9 +5122,9 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
if (!cst2n && valn)
ccode = ERROR_MARK;
if (cst2n)
- sgnbit = wide_int::set_bit_in_zero (nprec - 1, nprec);
+ sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
else
- sgnbit = wide_int::zero (nprec);
+ sgnbit = wi::zero (nprec);
minv = valv & cst2v;
switch (ccode)
{
@@ -5131,18 +5134,18 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
have folded the comparison into false) and
maximum unsigned value is VAL | ~CST2. */
maxv = valv | ~cst2v;
- maxv = maxv.zext (nprec);
+ maxv = wi::zext (maxv, nprec);
valid_p = true;
break;
case NE_EXPR:
tem = valv | ~cst2v;
- tem = tem.zext (nprec);
+ tem = wi::zext (tem, nprec);
/* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
- if (valv.zero_p ())
+ if (valv == 0)
{
cst2n = false;
- sgnbit = wide_int::zero (nprec);
+ sgnbit = wi::zero (nprec);
goto gt_expr;
}
/* If (VAL | ~CST2) is all ones, handle it as
@@ -5151,12 +5154,12 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
{
cst2n = false;
valn = false;
- sgnbit = wide_int::zero (nprec);
+ sgnbit = wi::zero (nprec);
goto lt_expr;
}
- if (!cst2n && cst2v.sext (nprec).neg_p ())
- sgnbit = wide_int::set_bit_in_zero (nprec - 1, nprec);
- if (!sgnbit.zero_p ())
+ if (!cst2n && wi::neg_p (wi::sext (cst2v, nprec)))
+ sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
+ if (sgnbit != 0)
{
if (valv == sgnbit)
{
@@ -5164,13 +5167,13 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
valn = true;
goto gt_expr;
}
- if (tem == wide_int::mask (nprec - 1, false, nprec))
+ if (tem == wi::mask (nprec - 1, false, nprec))
{
cst2n = true;
goto lt_expr;
}
if (!cst2n)
- sgnbit = 0;
+ sgnbit = wi::zero (nprec);
}
break;
@@ -5188,7 +5191,7 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
if (minv == valv)
break;
}
- maxv = wide_int::mask (nprec - (cst2n ? 1 : 0), false, nprec);
+ maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
valid_p = true;
break;
@@ -5200,7 +5203,7 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
minv = masked_increment (valv, cst2v, sgnbit, nprec);
if (minv == valv)
break;
- maxv = wide_int::mask (nprec - (cst2n ? 1 : 0), false, nprec);
+ maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
valid_p = true;
break;
@@ -5223,7 +5226,7 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
maxv -= 1;
}
maxv |= ~cst2v;
- maxv = maxv.zext (nprec);
+ maxv = wi::zext (maxv, nprec);
minv = sgnbit;
valid_p = true;
break;
@@ -5252,7 +5255,7 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
}
maxv -= 1;
maxv |= ~cst2v;
- maxv = maxv.zext (nprec);
+ maxv = wi::zext (maxv, nprec);
minv = sgnbit;
valid_p = true;
break;
@@ -5261,7 +5264,7 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
break;
}
if (valid_p
- && (maxv - minv).zext (nprec) != wide_int::minus_one (nprec))
+ && wi::zext (maxv - minv, nprec) != wi::minus_one (nprec))
{
tree tmp, new_val, type;
int i;
@@ -5277,7 +5280,7 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
type = build_nonstandard_integer_type (nprec, 1);
tmp = build1 (NOP_EXPR, type, names[i]);
}
- if (!minv.zero_p ())
+ if (minv != 0)
{
tmp = build2 (PLUS_EXPR, type, tmp,
wide_int_to_tree (type, -minv));
@@ -6238,8 +6241,8 @@ search_for_addr_array (tree t, location_t location)
return;
idx = mem_ref_offset (t);
- idx = idx.sdiv_trunc (addr_wide_int (el_sz));
- if (idx.lts_p (0))
+ idx = wi::sdiv_trunc (idx, el_sz);
+ if (wi::lts_p (idx, 0))
{
if (dump_file && (dump_flags & TDF_DETAILS))
{
@@ -6251,9 +6254,7 @@ search_for_addr_array (tree t, location_t location)
"array subscript is below array bounds");
TREE_NO_WARNING (t) = 1;
}
- else if (idx.gts_p (addr_wide_int (up_bound)
- - low_bound
- + 1))
+ else if (wi::gts_p (idx, addr_wide_int (up_bound) - low_bound + 1))
{
if (dump_file && (dump_flags & TDF_DETAILS))
{
@@ -8546,13 +8547,13 @@ simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
{
case BIT_AND_EXPR:
mask = may_be_nonzero0.and_not (must_be_nonzero1);
- if (mask.zero_p ())
+ if (mask == 0)
{
op = op0;
break;
}
mask = may_be_nonzero1.and_not (must_be_nonzero0);
- if (mask.zero_p ())
+ if (mask == 0)
{
op = op1;
break;
@@ -8560,13 +8561,13 @@ simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
break;
case BIT_IOR_EXPR:
mask = may_be_nonzero0.and_not (must_be_nonzero1);
- if (mask.zero_p ())
+ if (mask == 0)
{
op = op1;
break;
}
mask = may_be_nonzero1.and_not (must_be_nonzero0);
- if (mask.zero_p ())
+ if (mask == 0)
{
op = op0;
break;
@@ -8685,16 +8686,16 @@ range_fits_type_p (value_range_t *vr, unsigned dest_precision, signop dest_sgn)
a signed wide_int, while a negative value cannot be represented
by an unsigned wide_int. */
if (src_sgn != dest_sgn
- && (max_wide_int (vr->min).lts_p (0) || max_wide_int (vr->max).lts_p (0)))
+ && (wi::lts_p (vr->min, 0) || wi::lts_p (vr->max, 0)))
return false;
/* Then we can perform the conversion on both ends and compare
the result for equality. */
- tem = max_wide_int (vr->min).ext (dest_precision, dest_sgn);
- if (max_wide_int (vr->min) != tem)
+ tem = wi::ext (vr->min, dest_precision, dest_sgn);
+ if (tem != vr->min)
return false;
- tem = max_wide_int (vr->max).ext (dest_precision, dest_sgn);
- if (max_wide_int (vr->max) != tem)
+ tem = wi::ext (vr->max, dest_precision, dest_sgn);
+ if (tem != vr->max)
return false;
return true;
@@ -8989,34 +8990,35 @@ simplify_conversion_using_ranges (gimple stmt)
/* If the first conversion is not injective, the second must not
be widening. */
- if ((innermax - innermin).gtu_p (max_wide_int::mask (middle_prec, false))
+ if (wi::gtu_p (innermax - innermin,
+ wi::mask <max_wide_int> (middle_prec, false))
&& middle_prec < final_prec)
return false;
/* We also want a medium value so that we can track the effect that
narrowing conversions with sign change have. */
inner_sgn = TYPE_SIGN (TREE_TYPE (innerop));
if (inner_sgn == UNSIGNED)
- innermed = max_wide_int::shifted_mask (1, inner_prec - 1, false);
+ innermed = wi::shifted_mask <max_wide_int> (1, inner_prec - 1, false);
else
innermed = 0;
- if (innermin.cmp (innermed, inner_sgn) >= 0
- || innermed.cmp (innermax, inner_sgn) >= 0)
+ if (wi::cmp (innermin, innermed, inner_sgn) >= 0
+ || wi::cmp (innermed, innermax, inner_sgn) >= 0)
innermed = innermin;
middle_sgn = TYPE_SIGN (TREE_TYPE (middleop));
- middlemin = innermin.ext (middle_prec, middle_sgn);
- middlemed = innermed.ext (middle_prec, middle_sgn);
- middlemax = innermax.ext (middle_prec, middle_sgn);
+ middlemin = wi::ext (innermin, middle_prec, middle_sgn);
+ middlemed = wi::ext (innermed, middle_prec, middle_sgn);
+ middlemax = wi::ext (innermax, middle_prec, middle_sgn);
/* Require that the final conversion applied to both the original
and the intermediate range produces the same result. */
final_sgn = TYPE_SIGN (finaltype);
- if (middlemin.ext (final_prec, final_sgn)
- != innermin.ext (final_prec, final_sgn)
- || middlemed.ext (final_prec, final_sgn)
- != innermed.ext (final_prec, final_sgn)
- || middlemax.ext (final_prec, final_sgn)
- != innermax.ext (final_prec, final_sgn))
+ if (wi::ext (middlemin, final_prec, final_sgn)
+ != wi::ext (innermin, final_prec, final_sgn)
+ || wi::ext (middlemed, final_prec, final_sgn)
+ != wi::ext (innermed, final_prec, final_sgn)
+ || wi::ext (middlemax, final_prec, final_sgn)
+ != wi::ext (innermax, final_prec, final_sgn))
return false;
gimple_assign_set_rhs1 (stmt, innerop);
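The test above generalizes: the middle conversion can be dropped iff converting a value through the intermediate type agrees with converting it directly. A hedged sketch in the new wi:: style (conversion_steps_agree_p is a hypothetical helper, not part of the patch):

    static bool
    conversion_steps_agree_p (const max_wide_int &x,
                              unsigned int middle_prec, signop middle_sgn,
                              unsigned int final_prec, signop final_sgn)
    {
      /* Convert X via the intermediate precision and directly to the
         final precision; the results must agree for X to survive the
         shortcut.  */
      max_wide_int via_middle = wi::ext (x, middle_prec, middle_sgn);
      return (wi::ext (via_middle, final_prec, final_sgn)
              == wi::ext (x, final_prec, final_sgn));
    }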
diff --git a/gcc/tree.c b/gcc/tree.c
index 7807e041d46..787cdafbdc0 100644
--- a/gcc/tree.c
+++ b/gcc/tree.c
@@ -1056,13 +1056,13 @@ build_int_cst (tree type, HOST_WIDE_INT low)
if (!type)
type = integer_type_node;
- return wide_int_to_tree (type, low);
+ return wide_int_to_tree (type, wi::hwi (low, type));
}
-/* static inline */ tree
+tree
build_int_cstu (tree type, unsigned HOST_WIDE_INT cst)
{
- return wide_int_to_tree (type, cst);
+ return wide_int_to_tree (type, wi::hwi (cst, type));
}
/* Create an INT_CST node with a LOW value sign extended to TYPE. */
@@ -1071,8 +1071,7 @@ tree
build_int_cst_type (tree type, HOST_WIDE_INT low)
{
gcc_assert (type);
-
- return wide_int_to_tree (type, low);
+ return wide_int_to_tree (type, wi::hwi (low, type));
}
/* Constructs tree in type TYPE with the value given by CST. Signedness
@@ -1081,12 +1080,7 @@ build_int_cst_type (tree type, HOST_WIDE_INT low)
tree
double_int_to_tree (tree type, double_int cst)
{
- bool sign_extended_type = !TYPE_UNSIGNED (type);
-
- cst = cst.ext (TYPE_PRECISION (type), !sign_extended_type);
-
- return wide_int_to_tree (type, wide_int::from_array ((HOST_WIDE_INT*)&cst.low,
- 2, TYPE_PRECISION (type)));
+ return wide_int_to_tree (type, max_wide_int::from (cst, TYPE_SIGN (type)));
}
/* We force the wide_int CST to the range of the type TYPE by sign or
@@ -1105,20 +1099,20 @@ double_int_to_tree (tree type, double_int cst)
tree
-force_fit_type (tree type, const wide_int &cst,
+force_fit_type (tree type, const wide_int_ref &cst,
int overflowable, bool overflowed)
{
signop sign = TYPE_SIGN (type);
/* If we need to set overflow flags, return a new unshared node. */
- if (overflowed || !cst.fits_to_tree_p (type))
+ if (overflowed || !wi::fits_to_tree_p (cst, type))
{
if (overflowed
|| overflowable < 0
|| (overflowable > 0 && sign == SIGNED))
{
- wide_int tmp = cst.force_to_size (TYPE_PRECISION (type),
- sign);
+ wide_int tmp = wide_int::from (cst, TYPE_PRECISION (type), sign);
+ wi::clear_undef (tmp, sign);
int l = tmp.get_len ();
tree t = make_int_cst (l);
if (l > 1)
@@ -1189,7 +1183,7 @@ int_cst_hash_eq (const void *x, const void *y)
upon the underlying HOST_WIDE_INTs works without masking. */
tree
-wide_int_to_tree (tree type, const wide_int_ro &pcst)
+wide_int_to_tree (tree type, const wide_int_ref &pcst)
{
tree t;
int ix = -1;
@@ -1210,23 +1204,23 @@ wide_int_to_tree (tree type, const wide_int_ro &pcst)
gcc_assert (pcst.elt (l - 2) >= 0);
}
- wide_int cst = pcst.force_to_size (prec, sgn);
+ wide_int cst = wide_int::from (pcst, prec, sgn);
  /* The following call makes sure that all tree-cst's are canonical,
     i.e. it really does sign- or zero-extend the top block of the
value if the precision of the type is not an even multiple of the
size of an HWI. */
- cst.clear_undef (sgn);
+ wi::clear_undef (cst, sgn);
switch (TREE_CODE (type))
{
case NULLPTR_TYPE:
- gcc_assert (cst.zero_p ());
+ gcc_assert (cst == 0);
/* Fallthru. */
case POINTER_TYPE:
case REFERENCE_TYPE:
/* Cache NULL pointer. */
- if (cst.zero_p ())
+ if (cst == 0)
{
limit = 1;
ix = 0;
@@ -1236,7 +1230,7 @@ wide_int_to_tree (tree type, const wide_int_ro &pcst)
case BOOLEAN_TYPE:
/* Cache false or true. */
limit = 2;
- if (cst.leu_p (1))
+ if (wi::leu_p (cst, 1))
ix = cst.to_uhwi ();
break;
@@ -1255,7 +1249,7 @@ wide_int_to_tree (tree type, const wide_int_ro &pcst)
if (cst.to_uhwi () < (unsigned HOST_WIDE_INT) INTEGER_SHARE_LIMIT)
ix = cst.to_uhwi ();
}
- else if (cst.ltu_p (INTEGER_SHARE_LIMIT))
+ else if (wi::ltu_p (cst, INTEGER_SHARE_LIMIT))
ix = cst.to_uhwi ();
}
else
@@ -1263,16 +1257,16 @@ wide_int_to_tree (tree type, const wide_int_ro &pcst)
/* Cache -1..N */
limit = INTEGER_SHARE_LIMIT + 1;
- if (cst.minus_one_p ())
+ if (cst == -1)
ix = 0;
- else if (!cst.neg_p ())
+ else if (!wi::neg_p (cst))
{
if (prec < HOST_BITS_PER_WIDE_INT)
{
if (cst.to_shwi () < INTEGER_SHARE_LIMIT)
ix = cst.to_shwi () + 1;
}
- else if (cst.lts_p (INTEGER_SHARE_LIMIT))
+ else if (wi::lts_p (cst, INTEGER_SHARE_LIMIT))
ix = cst.to_shwi () + 1;
}
}
@@ -1299,7 +1293,7 @@ wide_int_to_tree (tree type, const wide_int_ro &pcst)
{
/* Make sure no one is clobbering the shared constant. */
gcc_assert (TREE_TYPE (t) == type);
- gcc_assert (TREE_INT_CST_NUNITS (t) == cst.get_len ());
+ gcc_assert (TREE_INT_CST_NUNITS (t) == int (cst.get_len ()));
for (i = 0; i < TREE_INT_CST_NUNITS (t); i++)
gcc_assert (TREE_INT_CST_ELT (t, i) == cst.elt (i));
}
@@ -1343,7 +1337,7 @@ wide_int_to_tree (tree type, const wide_int_ro &pcst)
to worry about. */
void **slot;
tree nt = make_int_cst (cst.get_len ());
- for (i = 0; i < cst.get_len (); i++)
+ for (unsigned int i = 0; i < cst.get_len (); i++)
TREE_INT_CST_ELT (nt, i) = cst.elt (i);
TREE_TYPE (nt) = type;
@@ -1389,7 +1383,7 @@ cache_integer_cst (tree t)
case BOOLEAN_TYPE:
/* Cache false or true. */
limit = 2;
- if (wide_int::ltu_p (t, 2))
+ if (wi::ltu_p (t, 2))
ix = TREE_INT_CST_ELT (t, 0);
break;
@@ -1408,7 +1402,7 @@ cache_integer_cst (tree t)
if (tree_to_uhwi (t) < (unsigned HOST_WIDE_INT) INTEGER_SHARE_LIMIT)
ix = tree_to_uhwi (t);
}
- else if (wide_int::ltu_p (t, INTEGER_SHARE_LIMIT))
+ else if (wi::ltu_p (t, INTEGER_SHARE_LIMIT))
ix = tree_to_uhwi (t);
}
else
@@ -1418,14 +1412,14 @@ cache_integer_cst (tree t)
if (integer_minus_onep (t))
ix = 0;
- else if (!wide_int (t).neg_p ())
+ else if (!wi::neg_p (t))
{
if (prec < HOST_BITS_PER_WIDE_INT)
{
if (tree_to_shwi (t) < INTEGER_SHARE_LIMIT)
ix = tree_to_shwi (t) + 1;
}
- else if (wide_int::ltu_p (t, INTEGER_SHARE_LIMIT))
+ else if (wi::ltu_p (t, INTEGER_SHARE_LIMIT))
ix = tree_to_shwi (t) + 1;
}
}
@@ -1459,7 +1453,7 @@ cache_integer_cst (tree t)
/* If there is already an entry for the number verify it's the
same. */
if (*slot)
- gcc_assert (wide_int::eq_p (((tree)*slot), t));
+ gcc_assert (wi::eq_p (tree (*slot), t));
else
/* Otherwise insert this one into the hash table. */
*slot = t;
@@ -1475,8 +1469,8 @@ build_low_bits_mask (tree type, unsigned bits)
{
gcc_assert (bits <= TYPE_PRECISION (type));
- return wide_int_to_tree (type, wide_int::mask (bits, false,
- TYPE_PRECISION (type)));
+ return wide_int_to_tree (type, wi::mask (bits, false,
+ TYPE_PRECISION (type)));
}
/* Build a newly constructed TREE_VEC node of length LEN. */
@@ -2006,7 +2000,7 @@ integer_zerop (const_tree expr)
switch (TREE_CODE (expr))
{
case INTEGER_CST:
- return wide_int (expr).zero_p ();
+ return wi::eq_p (expr, 0);
case COMPLEX_CST:
return (integer_zerop (TREE_REALPART (expr))
&& integer_zerop (TREE_IMAGPART (expr)));
@@ -2034,7 +2028,7 @@ integer_onep (const_tree expr)
switch (TREE_CODE (expr))
{
case INTEGER_CST:
- return wide_int (expr).one_p ();
+ return wi::eq_p (expr, 1);
case COMPLEX_CST:
return (integer_onep (TREE_REALPART (expr))
&& integer_zerop (TREE_IMAGPART (expr)));
@@ -2076,7 +2070,7 @@ integer_all_onesp (const_tree expr)
else if (TREE_CODE (expr) != INTEGER_CST)
return 0;
- return wide_int::max_value (TYPE_PRECISION (TREE_TYPE (expr)), UNSIGNED) == expr;
+ return wi::max_value (TYPE_PRECISION (TREE_TYPE (expr)), UNSIGNED) == expr;
}
/* Return 1 if EXPR is the integer constant minus one. */
@@ -2109,7 +2103,7 @@ integer_pow2p (const_tree expr)
if (TREE_CODE (expr) != INTEGER_CST)
return 0;
- return wide_int (expr).popcount () == 1;
+ return wi::popcount (expr) == 1;
}
/* Return 1 if EXPR is an integer constant other than zero or a
@@ -2121,7 +2115,7 @@ integer_nonzerop (const_tree expr)
STRIP_NOPS (expr);
return ((TREE_CODE (expr) == INTEGER_CST
- && (!wide_int (expr).zero_p ()))
+ && !wi::eq_p (expr, 0))
|| (TREE_CODE (expr) == COMPLEX_CST
&& (integer_nonzerop (TREE_REALPART (expr))
|| integer_nonzerop (TREE_IMAGPART (expr)))));
@@ -2147,7 +2141,7 @@ tree_log2 (const_tree expr)
if (TREE_CODE (expr) == COMPLEX_CST)
return tree_log2 (TREE_REALPART (expr));
- return wide_int (expr).exact_log2 ().to_shwi ();
+ return wi::exact_log2 (expr);
}
/* Similar, but return the largest integer Y such that 2 ** Y is less
@@ -2161,7 +2155,7 @@ tree_floor_log2 (const_tree expr)
if (TREE_CODE (expr) == COMPLEX_CST)
return tree_log2 (TREE_REALPART (expr));
- return wide_int (expr).floor_log2 ().to_shwi ();
+ return wi::floor_log2 (expr);
}
/* Return 1 if EXPR is the real constant zero. Trailing zeroes matter for
@@ -4249,7 +4243,7 @@ addr_wide_int
mem_ref_offset (const_tree t)
{
tree toff = TREE_OPERAND (t, 1);
- return addr_wide_int (toff).sext (TYPE_PRECISION (TREE_TYPE (toff)));
+ return wi::sext (addr_wide_int (toff), TYPE_PRECISION (TREE_TYPE (toff)));
}
/* Return the pointer-type relevant for TBAA purposes from the
@@ -6765,11 +6759,11 @@ tree_int_cst_equal (const_tree t1, const_tree t2)
prec2 = TYPE_PRECISION (TREE_TYPE (t2));
if (prec1 == prec2)
- return wide_int::eq_p (t1, t2);
+ return wi::eq_p (t1, t2);
else if (prec1 < prec2)
- return (wide_int (t1)).force_to_size (prec2, TYPE_SIGN (TREE_TYPE (t1))) == t2;
+ return wide_int::from (t1, prec2, TYPE_SIGN (TREE_TYPE (t1))) == t2;
else
- return (wide_int (t2)).force_to_size (prec1, TYPE_SIGN (TREE_TYPE (t2))) == t1;
+ return wide_int::from (t2, prec1, TYPE_SIGN (TREE_TYPE (t2))) == t1;
return 0;
}
@@ -6835,7 +6829,7 @@ tree_int_cst_sign_bit (const_tree t)
{
unsigned bitno = TYPE_PRECISION (TREE_TYPE (t)) - 1;
- return wide_int (t).extract_to_hwi (bitno, 1);
+ return wi::extract_uhwi (t, bitno, 1);
}
/* Return an indication of the sign of the integer constant T.
@@ -6846,11 +6840,11 @@ int
tree_int_cst_sgn (const_tree t)
{
wide_int w = t;
- if (w.zero_p ())
+ if (w == 0)
return 0;
else if (TYPE_UNSIGNED (TREE_TYPE (t)))
return 1;
- else if (w.neg_p ())
+ else if (wi::neg_p (w))
return -1;
else
return 1;
@@ -6939,7 +6933,7 @@ simple_cst_equal (const_tree t1, const_tree t2)
switch (code1)
{
case INTEGER_CST:
- return max_wide_int (t1) == max_wide_int (t2);
+ return wi::eq_p (t1, t2);
case REAL_CST:
return REAL_VALUES_IDENTICAL (TREE_REAL_CST (t1), TREE_REAL_CST (t2));
@@ -8565,15 +8559,15 @@ retry:
wd = type_low_bound;
if (sgn_c != TYPE_SIGN (TREE_TYPE (type_low_bound)))
{
- int c_neg = (sgn_c == SIGNED && wc.neg_p ());
- int t_neg = (sgn_c == UNSIGNED && wd.neg_p ());
+ int c_neg = (sgn_c == SIGNED && wi::neg_p (wc));
+ int t_neg = (sgn_c == UNSIGNED && wi::neg_p (wd));
if (c_neg && !t_neg)
return false;
- if ((c_neg || !t_neg) && wc.ltu_p (wd))
+ if ((c_neg || !t_neg) && wi::ltu_p (wc, wd))
return false;
}
- else if (wc.lt_p (wd, sgn_c))
+ else if (wi::lt_p (wc, wd, sgn_c))
return false;
ok_for_low_bound = true;
}
@@ -8586,15 +8580,15 @@ retry:
wd = type_high_bound;
if (sgn_c != TYPE_SIGN (TREE_TYPE (type_high_bound)))
{
- int c_neg = (sgn_c == SIGNED && wc.neg_p ());
- int t_neg = (sgn_c == UNSIGNED && wd.neg_p ());
+ int c_neg = (sgn_c == SIGNED && wi::neg_p (wc));
+ int t_neg = (sgn_c == UNSIGNED && wi::neg_p (wd));
if (t_neg && !c_neg)
return false;
- if ((t_neg || !c_neg) && wc.gtu_p (wd))
+ if ((t_neg || !c_neg) && wi::gtu_p (wc, wd))
return false;
}
- else if (wc.gt_p (wd, sgn_c))
+ else if (wi::gt_p (wc, wd, sgn_c))
return false;
ok_for_high_bound = true;
}
@@ -8608,7 +8602,7 @@ retry:
/* Perform some generic filtering which may allow making a decision
even if the bounds are not constant. First, negative integers
     never fit in unsigned types. */
- if (TYPE_UNSIGNED (type) && sgn_c == SIGNED && wc.neg_p ())
+ if (TYPE_UNSIGNED (type) && sgn_c == SIGNED && wi::neg_p (wc))
return false;
/* Second, narrower types always fit in wider ones. */
@@ -8616,7 +8610,7 @@ retry:
return true;
/* Third, unsigned integers with top bit set never fit signed types. */
- if (!TYPE_UNSIGNED (type) && sgn_c == UNSIGNED && wc.neg_p ())
+ if (!TYPE_UNSIGNED (type) && sgn_c == UNSIGNED && wi::neg_p (wc))
return false;
  /* If we haven't been able to decide at this point, there is nothing more we
@@ -8631,7 +8625,7 @@ retry:
}
/* Or to fits_to_tree_p, if nothing else. */
- return wc.fits_to_tree_p (type);
+ return wi::fits_to_tree_p (wc, type);
}
/* Stores bounds of an integer TYPE in MIN and MAX. If TYPE has non-constant
@@ -8644,25 +8638,25 @@ get_type_static_bounds (const_tree type, mpz_t min, mpz_t max)
{
if (!POINTER_TYPE_P (type) && TYPE_MIN_VALUE (type)
&& TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST)
- wide_int (TYPE_MIN_VALUE (type)).to_mpz (min, TYPE_SIGN (type));
+ wi::to_mpz (TYPE_MIN_VALUE (type), min, TYPE_SIGN (type));
else
{
if (TYPE_UNSIGNED (type))
mpz_set_ui (min, 0);
else
{
- wide_int mn = wide_int::min_value (TYPE_PRECISION (type), SIGNED);
- mn.to_mpz (min, SIGNED);
+ wide_int mn = wi::min_value (TYPE_PRECISION (type), SIGNED);
+ wi::to_mpz (mn, min, SIGNED);
}
}
if (!POINTER_TYPE_P (type) && TYPE_MAX_VALUE (type)
&& TREE_CODE (TYPE_MAX_VALUE (type)) == INTEGER_CST)
- wide_int (TYPE_MAX_VALUE (type)).to_mpz (max, TYPE_SIGN (type));
+ wi::to_mpz (TYPE_MAX_VALUE (type), max, TYPE_SIGN (type));
else
{
- wide_int mn = wide_int::max_value (TYPE_PRECISION (type), TYPE_SIGN (type));
- mn.to_mpz (max, TYPE_SIGN (type));
+ wide_int mn = wi::max_value (TYPE_PRECISION (type), TYPE_SIGN (type));
+ wi::to_mpz (mn, max, TYPE_SIGN (type));
}
}
@@ -10681,7 +10675,7 @@ upper_bound_in_type (tree outer, tree inner)
}
return wide_int_to_tree (outer,
- wide_int::mask (prec, false, TYPE_PRECISION (outer)));
+ wi::mask (prec, false, TYPE_PRECISION (outer)));
}
/* Returns the smallest value obtainable by casting something in INNER type to
@@ -10700,7 +10694,7 @@ lower_bound_in_type (tree outer, tree inner)
contains all values of INNER type. In particular, both INNER
and OUTER types have zero in common. */
|| (oprec > iprec && TYPE_UNSIGNED (inner)))
- return wide_int_to_tree (outer, 0);
+ return build_int_cst (outer, 0);
else
{
/* If we are widening a signed type to another signed type, we
@@ -10709,8 +10703,8 @@ lower_bound_in_type (tree outer, tree inner)
-2^(oprec-1). */
unsigned prec = oprec > iprec ? iprec : oprec;
return wide_int_to_tree (outer,
- wide_int::mask (prec - 1,
- true, TYPE_PRECISION (outer)));
+ wi::mask (prec - 1, true,
+ TYPE_PRECISION (outer)));
}
}
@@ -10735,8 +10729,7 @@ operand_equal_for_phi_arg_p (const_tree arg0, const_tree arg1)
tree
num_ending_zeros (const_tree x)
{
- tree type = TREE_TYPE (x);
- return wide_int_to_tree (type, wide_int (x).ctz ());
+ return build_int_cst (TREE_TYPE (x), wi::ctz (x));
}
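A short sketch of the rewritten constant builders above, assuming the wide_int_to_tree and wi:: entry points from this patch (low_bits_cst is a hypothetical helper):

    /* Build an INTEGER_CST of TYPE with its BITS low bits set, the
       same way build_low_bits_mask does above.  */
    static tree
    low_bits_cst (tree type, unsigned int bits)
    {
      return wide_int_to_tree (type,
                               wi::mask (bits, false, TYPE_PRECISION (type)));
    }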
diff --git a/gcc/tree.h b/gcc/tree.h
index 5985d041995..851875a0cf9 100644
--- a/gcc/tree.h
+++ b/gcc/tree.h
@@ -21,6 +21,7 @@ along with GCC; see the file COPYING3. If not see
#define GCC_TREE_H
#include "tree-core.h"
+#include "wide-int.h"
/* Macros for initializing `tree_contains_struct'. */
#define MARK_TS_BASE(C) \
@@ -877,10 +878,10 @@ extern void omp_clause_range_check_failed (const_tree, const char *, int,
/* Define additional fields and accessors for nodes representing constants. */
#define INT_CST_LT(A, B) \
- (wide_int::lts_p (A, B))
+ (wi::lts_p (A, B))
#define INT_CST_LT_UNSIGNED(A, B) \
- (wide_int::ltu_p (A, B))
+ (wi::ltu_p (A, B))
#define TREE_INT_CST_NUNITS(NODE) (INTEGER_CST_CHECK (NODE)->base.u.length)
#define TREE_INT_CST_ELT(NODE, I) TREE_INT_CST_ELT_CHECK (NODE, I)
@@ -3633,8 +3634,10 @@ extern tree build_var_debug_value_stat (tree, tree MEM_STAT_DECL);
/* Constructs double_int from tree CST. */
extern tree double_int_to_tree (tree, double_int);
-class wide_int;
-extern tree force_fit_type (tree, const wide_int&, int, bool);
+
+extern addr_wide_int mem_ref_offset (const_tree);
+extern tree wide_int_to_tree (tree type, const wide_int_ref &cst);
+extern tree force_fit_type (tree, const wide_int_ref &, int, bool);
/* Create an INT_CST node with a CST value zero extended. */
@@ -5155,5 +5158,94 @@ builtin_decl_implicit_p (enum built_in_function fncode)
#endif /* NO_DOLLAR_IN_LABEL */
#endif /* NO_DOT_IN_LABEL */
+/* The tree and const_tree overload templates. */
+namespace wi
+{
+ template <>
+ struct int_traits <const_tree>
+ {
+ static const enum precision_type precision_type = FLEXIBLE_PRECISION;
+ static const bool host_dependent_precision = false;
+ static unsigned int get_precision (const_tree);
+ static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
+ const_tree);
+ };
+
+ template <>
+ struct int_traits <tree> : public int_traits <const_tree> {};
+}
+
+inline unsigned int
+wi::int_traits <const_tree>::get_precision (const_tree tcst)
+{
+ return TYPE_PRECISION (TREE_TYPE (tcst));
+}
+
+inline wi::storage_ref
+wi::int_traits <const_tree>::decompose (HOST_WIDE_INT *scratch,
+ unsigned int precision, const_tree x)
+{
+ unsigned int xprecision = get_precision (x);
+ unsigned int len = TREE_INT_CST_NUNITS (x);
+ const HOST_WIDE_INT *val = (const HOST_WIDE_INT *) &TREE_INT_CST_ELT (x, 0);
+ unsigned int max_len = ((precision + HOST_BITS_PER_WIDE_INT - 1)
+ / HOST_BITS_PER_WIDE_INT);
+ /* Truncate the constant if necessary. */
+ if (len > max_len)
+ return wi::storage_ref (val, max_len, precision);
+
+ /* Otherwise we can use the constant as-is when not extending. */
+ if (precision <= xprecision)
+ return wi::storage_ref (val, len, precision);
+
+ /* Widen the constant according to its sign. */
+ len = wi::force_to_size (scratch, val, len, xprecision, precision,
+ TYPE_SIGN (TREE_TYPE (x)));
+ return wi::storage_ref (scratch, len, precision);
+}
+
+namespace wi
+{
+ hwi_with_prec hwi (HOST_WIDE_INT, const_tree);
+
+ template <typename T>
+ bool fits_to_tree_p (const T &x, const_tree);
+
+ wide_int min_value (const_tree);
+ wide_int max_value (const_tree);
+ wide_int from_mpz (const_tree, mpz_t, bool);
+}
+
+inline wi::hwi_with_prec
+wi::hwi (HOST_WIDE_INT val, const_tree type)
+{
+ return hwi_with_prec (val, TYPE_PRECISION (type), TYPE_SIGN (type));
+}
+
+template <typename T>
+bool
+wi::fits_to_tree_p (const T &x, const_tree type)
+{
+ if (TYPE_SIGN (type) == UNSIGNED)
+ return x == zext (x, TYPE_PRECISION (type));
+ else
+ return x == sext (x, TYPE_PRECISION (type));
+}
+
+/* Produce the smallest number that is representable in TYPE. The precision
+ and sign are taken from TYPE. */
+inline wide_int
+wi::min_value (const_tree type)
+{
+ return min_value (TYPE_PRECISION (type), TYPE_SIGN (type));
+}
+
+/* Produce the largest number that is representable in TYPE. The precision
+ and sign are taken from TYPE. */
+inline wide_int
+wi::max_value (const_tree type)
+{
+ return max_value (TYPE_PRECISION (type), TYPE_SIGN (type));
+}
#endif /* GCC_TREE_H */
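A hedged sketch using the tree overloads declared above (clamp_to_type is hypothetical; it takes a max_wide_int so that the fit test is meaningful):

    /* Saturate X to the representable range of TYPE.  */
    static wide_int
    clamp_to_type (const max_wide_int &x, const_tree type)
    {
      if (wi::fits_to_tree_p (x, type))
        return wide_int::from (x, TYPE_PRECISION (type), TYPE_SIGN (type));
      return (wi::neg_p (x)
              ? wi::min_value (type) : wi::max_value (type));
    }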
diff --git a/gcc/varasm.c b/gcc/varasm.c
index 69dd7767bd2..1e5ccb481d4 100644
--- a/gcc/varasm.c
+++ b/gcc/varasm.c
@@ -4816,7 +4816,7 @@ array_size_for_constructor (tree val)
/* Multiply by the array element unit size to find number of bytes. */
i *= addr_wide_int (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (val))));
- gcc_assert (i.fits_uhwi_p ());
+ gcc_assert (wi::fits_uhwi_p (i));
return i.to_uhwi ();
}
@@ -4898,7 +4898,7 @@ output_constructor_regular_field (oc_local_state *local)
but we are using an unsigned sizetype. */
unsigned prec = TYPE_PRECISION (sizetype);
addr_wide_int idx
- = (addr_wide_int (local->index) - local->min_index).sext (prec);
+ = wi::sext (addr_wide_int (local->index) - local->min_index, prec);
fieldpos = (idx * TYPE_SIZE_UNIT (TREE_TYPE (local->val))).to_shwi ();
}
else if (local->field != NULL_TREE)
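A hedged sketch of the index computation above (signed_field_index is a hypothetical helper): constructor indices are computed in sizetype, which is unsigned, so the difference is sign-extended from sizetype's precision before being scaled by the element size.

    static addr_wide_int
    signed_field_index (const_tree index, const_tree min_index)
    {
      unsigned int prec = TYPE_PRECISION (sizetype);
      return wi::sext (addr_wide_int (index) - addr_wide_int (min_index),
                       prec);
    }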
diff --git a/gcc/wide-int-print.cc b/gcc/wide-int-print.cc
index 29eb9299f79..c83d9e44a0d 100644
--- a/gcc/wide-int-print.cc
+++ b/gcc/wide-int-print.cc
@@ -61,7 +61,7 @@ print_decs (const wide_int &wi, char *buf)
if ((wi.get_precision () <= HOST_BITS_PER_WIDE_INT)
|| (wi.get_len () == 1))
{
- if (wi.neg_p ())
+ if (wi::neg_p (wi))
sprintf (buf, "-" HOST_WIDE_INT_PRINT_UNSIGNED, -wi.to_shwi ());
else
sprintf (buf, HOST_WIDE_INT_PRINT_DEC, wi.to_shwi ());
@@ -88,7 +88,7 @@ void
print_decu (const wide_int &wi, char *buf)
{
if ((wi.get_precision () <= HOST_BITS_PER_WIDE_INT)
- || (wi.get_len () == 1 && !wi.neg_p ()))
+ || (wi.get_len () == 1 && !wi::neg_p (wi)))
sprintf (buf, HOST_WIDE_INT_PRINT_UNSIGNED, wi.to_uhwi ());
else
print_hex (wi, buf);
@@ -110,11 +110,11 @@ print_hex (const wide_int &wi, char *buf)
{
int i = wi.get_len ();
- if (wi.zero_p ())
+ if (wi == 0)
buf += sprintf (buf, "0x0");
else
{
- if (wi.neg_p ())
+ if (wi::neg_p (wi))
{
int j;
/* If the number is negative, we may need to pad value with
diff --git a/gcc/wide-int-print.h b/gcc/wide-int-print.h
index d1c42b154fd..be93cd1ecf5 100644
--- a/gcc/wide-int-print.h
+++ b/gcc/wide-int-print.h
@@ -23,7 +23,7 @@ along with GCC; see the file COPYING3. If not see
#include <stdio.h>
#include "wide-int.h"
-#define WIDE_INT_PRINT_BUFFER_SIZE ((2 * MAX_BITSIZE_MODE_ANY_INT / BITS_PER_UNIT) + 4)
+#define WIDE_INT_PRINT_BUFFER_SIZE (MAX_BITSIZE_MODE_ANY_INT / 4 + 4)
/* Printing functions. */
extern void print_dec (const wide_int &wi, char *buf, signop sgn);
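The bound itself is unchanged on the usual BITS_PER_UNIT == 8 targets, since 2 * N / 8 + 4 == N / 4 + 4: print_hex emits at most one character per 4 bits, plus "0x" and the terminating NUL, with one byte to spare (e.g. for a leading '-' from the decimal printers). The rewrite only removes the spurious dependence of a host-side buffer on the target's byte width.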
diff --git a/gcc/wide-int.cc b/gcc/wide-int.cc
index f42c077ec00..6a2df0f5005 100644
--- a/gcc/wide-int.cc
+++ b/gcc/wide-int.cc
@@ -24,13 +24,16 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "hwint.h"
#include "wide-int.h"
-#include "rtl.h"
#include "tree.h"
#include "dumpfile.h"
/* This is the maximal size of the buffer needed for dump. */
-const int MAX_SIZE = 4 * (MAX_BITSIZE_MODE_ANY_INT / 4
- + MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT + 32);
+const unsigned int MAX_SIZE = (4 * (MAX_BITSIZE_MODE_ANY_INT / 4
+ + (MAX_BITSIZE_MODE_ANY_INT
+ / HOST_BITS_PER_WIDE_INT)
+ + 32));
+
+static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};
/*
* Internal utilities.
@@ -45,201 +48,104 @@ const int MAX_SIZE = 4 * (MAX_BITSIZE_MODE_ANY_INT / 4
(PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
#define SIGN_MASK(X) (((HOST_WIDE_INT)X) >> (HOST_BITS_PER_WIDE_INT - 1))
-/*
- * Conversion routines in and out of wide-int.
- */
-
-/* Convert OP0 into a wide int of PRECISION. */
-wide_int_ro
-wide_int_ro::from_shwi (HOST_WIDE_INT op0,
- unsigned int precision)
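+/* Return the Ith block of VAL, reading past LEN as an extension of the
+   top block: zeros after a nonnegative top block, ones after a
+   negative one.  */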
+static unsigned HOST_WIDE_INT
+safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
{
- wide_int result;
-
- result.precision = precision;
- result.val[0] = op0;
- result.len = 1;
-
-#ifdef DEBUG_WIDE_INT
- debug_wh ("wide_int::from_shwi %s " HOST_WIDE_INT_PRINT_HEX ")\n",
- result, op0);
-#endif
-
- return result;
+ return i < len ? val[i] : val[len - 1] < 0 ? (HOST_WIDE_INT) -1 : 0;
}
-/* Convert OP0 into a wide int of PRECISION. */
-wide_int_ro
-wide_int_ro::from_uhwi (unsigned HOST_WIDE_INT op0,
- unsigned int precision)
-{
- wide_int result;
-
- result.precision = precision;
- result.val[0] = op0;
-
- /* If the top bit is a 1, we need to add another word of 0s since
- that would not expand the right value since the infinite
- expansion of any unsigned number must have 0s at the top. */
- if ((HOST_WIDE_INT)op0 < 0 && precision > HOST_BITS_PER_WIDE_INT)
- {
- result.val[1] = 0;
- result.len = 2;
- }
- else
- result.len = 1;
-
-#ifdef DEBUG_WIDE_INT
- debug_wh ("wide_int::from_uhwi %s " HOST_WIDE_INT_PRINT_HEX ")\n",
- result, op0);
-#endif
+/* Convert the integer in VAL to canonical form, returning its new length.
+ LEN is the number of blocks currently in VAL and PRECISION is the number
+ of bits in the integer it represents.
- return result;
-}
-
-/* Create a wide_int from an array of host_wide_ints in OP1 of LEN.
- The result has PRECISION. */
-wide_int_ro
-wide_int_ro::from_array (const HOST_WIDE_INT *op1, unsigned int len,
- unsigned int precision, bool need_canon)
+ This function only changes the representation, not the value. */
+static unsigned int
+canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
{
- unsigned int i;
- wide_int result;
-
- result.len = len;
- result.precision = precision;
-
- for (i=0; i < len; i++)
- result.val[i] = op1[i];
-
-#ifdef DEBUG_WIDE_INT
- debug_wa ("wide_int::from_array %s = %s\n", result, op1, len, precision);
-#endif
+ unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
+ unsigned int blocks_needed = BLOCKS_NEEDED (precision);
+ HOST_WIDE_INT top;
+ int i;
- if (need_canon)
- result.canonize ();
+ if (len > blocks_needed)
+ len = blocks_needed;
- return result;
-}
+ /* Clean up the top bits for any mode that is not a multiple of a
+ HWI and is not compressed. */
+ if (len == blocks_needed && small_prec)
+ val[len - 1] = sext_hwi (val[len - 1], small_prec);
-/* Convert a double int into a wide int with precision PREC. */
-wide_int_ro
-wide_int_ro::from_double_int (double_int di, unsigned int prec)
-{
- HOST_WIDE_INT op = di.low;
- wide_int result;
+ if (len == 1)
+ return len;
- result.precision = prec;
- result.len = (prec <= HOST_BITS_PER_WIDE_INT) ? 1 : 2;
+ top = val[len - 1];
+ if (top != 0 && top != (HOST_WIDE_INT)-1)
+ return len;
- if (prec < HOST_BITS_PER_WIDE_INT)
- result.val[0] = sext_hwi (op, prec);
- else
+ /* At this point we know that the top is either 0 or -1. Find the
+ first block that is not a copy of this. */
+ for (i = len - 2; i >= 0; i--)
{
- result.val[0] = op;
- if (prec > HOST_BITS_PER_WIDE_INT)
+ HOST_WIDE_INT x = val[i];
+ if (x != top)
{
- if (prec < HOST_BITS_PER_DOUBLE_INT)
- result.val[1] = sext_hwi (di.high, prec);
- else
- result.val[1] = di.high;
+ if (SIGN_MASK (x) == top)
+ return i + 1;
+
+	  /* We need an extra block because the top bit of block I does
+	     not match the extension. */
+ return i + 2;
}
}
- if (result.len == 2)
- result.canonize ();
-
- return result;
+ /* The number is 0 or -1. */
+ return 1;
}
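/* A worked example of the canonical form computed above, assuming
   64-bit HWIs (the numbers are illustrative, not from the patch): at
   128-bit precision the value -1 compresses to a single block, since
   the top block would merely repeat the sign:

     HOST_WIDE_INT v[2] = { -1, -1 };
     unsigned int l = canonize (v, 2, 128);   (l is 1 afterwards)

   whereas { 5, -1 } keeps both blocks, because the sign extension of 5
   does not match the -1 above it.  */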
-/* Extract a constant integer from the R. The bits of the integer are
- returned. */
-wide_int_ro
-wide_int_ro::from_rtx (const rtx_mode_t r)
-{
- const_rtx x = get_rtx (r);
- enum machine_mode mode = get_mode (r);
- wide_int result;
- unsigned int prec = GET_MODE_PRECISION (mode);
-
- gcc_assert (mode != VOIDmode);
-
- result.precision = prec;
-
- switch (GET_CODE (x))
- {
- case CONST_INT:
- result.val[0] = INTVAL (x);
- result.len = 1;
-
-#if 0
- if (prec != HOST_BITS_PER_WIDE_INT)
- gcc_assert (result.val[0] == sext_hwi (result.val[0], prec));
-#endif
-
-#ifdef DEBUG_WIDE_INT
- debug_wh ("wide_int:: %s = from_rtx ("HOST_WIDE_INT_PRINT_HEX")\n",
- result, INTVAL (x));
-#endif
- break;
-
-#if TARGET_SUPPORTS_WIDE_INT
- case CONST_WIDE_INT:
- {
- int i;
- result.len = CONST_WIDE_INT_NUNITS (x);
-
- for (i = 0; i < result.len; i++)
- result.val[i] = CONST_WIDE_INT_ELT (x, i);
- }
- break;
-#else
- case CONST_DOUBLE:
- result.len = 2;
- result.val[0] = CONST_DOUBLE_LOW (x);
- result.val[1] = CONST_DOUBLE_HIGH (x);
-
-#ifdef DEBUG_WIDE_INT
- debug_whh ("wide_int:: %s = from_rtx ("HOST_WIDE_INT_PRINT_HEX" "HOST_WIDE_INT_PRINT_HEX")\n",
- result, CONST_DOUBLE_HIGH (x), CONST_DOUBLE_LOW (x));
-#endif
-
- break;
-#endif
-
- default:
- gcc_unreachable ();
- }
+/*
+ * Conversion routines in and out of wide-int.
+ */
- return result;
+/* Copy XLEN elements from XVAL to VAL. If NEED_CANON, canonize the
+ result for an integer with precision PRECISION. Return the length
+   of VAL (after any canonization). */
+unsigned int
+wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
+ unsigned int xlen, unsigned int precision, bool need_canon)
+{
+ for (unsigned i = 0; i < xlen; i++)
+ val[i] = xval[i];
+ return need_canon ? canonize (val, xlen, precision) : xlen;
}
/* Construct a wide int from a buffer of length LEN. BUFFER will be
   read according to the byte endianness and word endianness of the target.
Only the lower LEN bytes of the result are set; the remaining high
bytes are cleared. */
-wide_int_ro
-wide_int_ro::from_buffer (const unsigned char *buffer, int len)
+wide_int
+wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
{
- wide_int result = wide_int::zero (len * BITS_PER_UNIT);
- int words = len / UNITS_PER_WORD;
+ unsigned int precision = buffer_len * BITS_PER_UNIT;
+ wide_int result = wide_int::create (precision);
+ unsigned int words = buffer_len / UNITS_PER_WORD;
/* We have to clear all the bits ourself, as we merely or in values
below. */
- result.len = BLOCKS_NEEDED (len*BITS_PER_UNIT);
- for (int i = 0; i < result.len; ++i)
- result.val[i] = 0;
+ unsigned int len = BLOCKS_NEEDED (precision);
+ HOST_WIDE_INT *val = result.write_val ();
+ for (unsigned int i = 0; i < len; ++i)
+ val[i] = 0;
- for (int byte = 0; byte < len; byte++)
+ for (unsigned int byte = 0; byte < buffer_len; byte++)
{
- int offset;
- int index;
- int bitpos = byte * BITS_PER_UNIT;
+ unsigned int offset;
+ unsigned int index;
+ unsigned int bitpos = byte * BITS_PER_UNIT;
unsigned HOST_WIDE_INT value;
- if (len > UNITS_PER_WORD)
+ if (buffer_len > UNITS_PER_WORD)
{
- int word = byte / UNITS_PER_WORD;
+ unsigned int word = byte / UNITS_PER_WORD;
if (WORDS_BIG_ENDIAN)
word = (words - 1) - word;
@@ -252,37 +158,35 @@ wide_int_ro::from_buffer (const unsigned char *buffer, int len)
offset += byte % UNITS_PER_WORD;
}
else
- offset = BYTES_BIG_ENDIAN ? (len - 1) - byte : byte;
+ offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;
value = (unsigned HOST_WIDE_INT) buffer[offset];
index = bitpos / HOST_BITS_PER_WIDE_INT;
- result.val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
+ val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
}
- result.canonize ();
+ result.set_len (canonize (val, len, precision));
return result;
}
/* Sets RESULT from X, with the sign taken according to SGN. */
void
-wide_int_ro::to_mpz (mpz_t result, signop sgn) const
+wi::to_mpz (wide_int x, mpz_t result, signop sgn)
{
bool negative = false;
- wide_int tmp;
- if ((*this).neg_p (sgn))
+ if (wi::neg_p (x, sgn))
{
negative = true;
/* We use ones complement to avoid -x80..0 edge case that -
won't work on. */
- tmp = ~(*this);
+ x = ~x;
}
- else
- tmp = *this;
- mpz_import (result, tmp.len, -1, sizeof (HOST_WIDE_INT), 0, 0, tmp.val);
+ mpz_import (result, x.get_len (), -1, sizeof (HOST_WIDE_INT), 0, 0,
+ x.get_val ());
if (negative)
mpz_com (result, result);
@@ -291,11 +195,11 @@ wide_int_ro::to_mpz (mpz_t result, signop sgn) const
/* Returns X converted to TYPE. If WRAP is true, then out-of-range
   values of X will be wrapped; otherwise, they will be set to the
appropriate minimum or maximum TYPE bound. */
-wide_int_ro
-wide_int_ro::from_mpz (const_tree type, mpz_t val, bool wrap)
+wide_int
+wi::from_mpz (const_tree type, mpz_t x, bool wrap)
{
size_t count, numb;
- wide_int res;
+ wide_int res = wide_int::create (TYPE_PRECISION (type));
unsigned int i;
if (!wrap)
@@ -306,10 +210,10 @@ wide_int_ro::from_mpz (const_tree type, mpz_t val, bool wrap)
mpz_init (max);
get_type_static_bounds (type, min, max);
- if (mpz_cmp (val, min) < 0)
- mpz_set (val, min);
- else if (mpz_cmp (val, max) > 0)
- mpz_set (val, max);
+ if (mpz_cmp (x, min) < 0)
+ mpz_set (x, min);
+ else if (mpz_cmp (x, max) > 0)
+ mpz_set (x, max);
mpz_clear (min);
mpz_clear (max);
@@ -320,21 +224,21 @@ wide_int_ro::from_mpz (const_tree type, mpz_t val, bool wrap)
extracted from the GMP manual, section "Integer Import and Export":
http://gmplib.org/manual/Integer-Import-and-Export.html */
numb = 8*sizeof(HOST_WIDE_INT);
- count = (mpz_sizeinbase (val, 2) + numb-1) / numb;
+ count = (mpz_sizeinbase (x, 2) + numb-1) / numb;
if (count < 1)
count = 1;
/* Need to initialize the number because it writes nothing for
zero. */
+ HOST_WIDE_INT *val = res.write_val ();
for (i = 0; i < count; i++)
- res.val[i] = 0;
+ val[i] = 0;
- res.len = count;
+ res.set_len (count);
- mpz_export (res.val, &count, -1, sizeof (HOST_WIDE_INT), 0, 0, val);
+ mpz_export (val, &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
- res.precision = TYPE_PRECISION (type);
- if (mpz_sgn (val) < 0)
+ if (mpz_sgn (x) < 0)
res = -res;
return res;
@@ -344,212 +248,89 @@ wide_int_ro::from_mpz (const_tree type, mpz_t val, bool wrap)
* Largest and smallest values in a mode.
*/
-/* Largest and smallest values that are represented in a TYPE_PREC.
- RESULT_PREC is the precision of the value that the answer is
- returned within. The default value of 0 says return the answer
- with TYPE_PREC precision.
+/* Return the largest SGNed number that is representable in PRECISION bits.
TODO: There is still code from the double_int era that trys to
make up for the fact that double int's could not represent the
min and max values of all types. This code should be removed
because the min and max values can always be represented in
wide-ints and int-csts. */
-wide_int_ro
-wide_int_ro::max_value (unsigned int type_prec, signop sgn,
- unsigned int result_prec)
+wide_int
+wi::max_value (unsigned int precision, signop sgn)
{
- unsigned int prec = result_prec ? result_prec : type_prec;
-
- if (type_prec == 0)
- return wide_int::zero (result_prec
- ? result_prec
- : TYPE_PRECISION (integer_type_node));
-
- if (sgn == UNSIGNED)
- {
- if (prec <= type_prec)
- /* The unsigned max is just all ones, for which the
- compressed rep is just a single HWI. */
- return wide_int::minus_one (prec);
- else
- return wide_int::mask (type_prec, false, prec);
- }
+ if (precision == 0)
+ return shwi (0, precision);
+ else if (sgn == UNSIGNED)
+ /* The unsigned max is just all ones. */
+ return shwi (-1, precision);
else
/* The signed max is all ones except the top bit. This must be
explicitly represented. */
- return wide_int::mask (type_prec-1, false, prec);
+ return mask (precision - 1, false, precision);
}
-/* Produce the smallest SGNed number that is represented in TYPE_PREC.
- The resulting number is placed in a wide int of size RESULT_PREC.
- IF RESULT_PREC is 0, answer will have TYPE_PREC precision. */
-wide_int_ro
-wide_int_ro::min_value (unsigned int type_prec, signop sgn,
- unsigned int result_prec)
+/* Return the smallest SGNed number that is representable in PRECISION bits. */
+wide_int
+wi::min_value (unsigned int precision, signop sgn)
{
- if (result_prec == 0)
- result_prec = type_prec;
-
- if (type_prec == 0)
- return wide_int_ro::zero (result_prec
- ? result_prec
- : TYPE_PRECISION (integer_type_node));
-
- if (sgn == UNSIGNED)
- {
- /* The unsigned min is just all zeros, for which the compressed
- rep is just a single HWI. */
- wide_int result;
- result.len = 1;
- result.precision = result_prec;
- result.val[0] = 0;
- return result;
- }
+ if (precision == 0 || sgn == UNSIGNED)
+ return uhwi (0, precision);
else
- {
- /* The signed min is all zeros except the top bit. This must be
- explicitly represented. */
- return set_bit_in_zero (type_prec - 1, result_prec);
- }
+ /* The signed min is all zeros except the top bit. This must be
+ explicitly represented. */
+ return wi::set_bit_in_zero (precision - 1, precision);
}
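/* A worked example at 8-bit precision: wi::max_value (8, SIGNED) is
   127, i.e. mask (7, false, 8), and wi::min_value (8, SIGNED) is -128,
   i.e. set_bit_in_zero (7, 8); for UNSIGNED the pair is 255 (all ones)
   and 0.  */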
/*
* Public utilities.
*/
-/* Check the upper HOST_WIDE_INTs of src to see if the length can be
- shortened. An upper HOST_WIDE_INT is unnecessary if it is all ones
- or zeros and the top bit of the next lower word matches.
+/* Convert the number represented by XVAL, XLEN and XPRECISION, which has
+ signedness SGN, to an integer that has PRECISION bits. Store the blocks
+ in VAL and return the number of blocks used.
- This function may change the representation of THIS, but does not
- change the value that THIS represents. It does not sign extend in
- the case that the size of the mode is less than
- HOST_BITS_PER_WIDE_INT. */
-void
-wide_int_ro::canonize ()
+ This function can handle both extension (PRECISION > XPRECISION)
+ and truncation (PRECISION < XPRECISION). */
+unsigned int
+wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
+ unsigned int xlen, unsigned int xprecision,
+ unsigned int precision, signop sgn)
{
- int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
- int blocks_needed = BLOCKS_NEEDED (precision);
- HOST_WIDE_INT top;
- int i;
+ unsigned int blocks_needed = BLOCKS_NEEDED (precision);
+ unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
+ for (unsigned i = 0; i < len; i++)
+ val[i] = xval[i];
- if (len > blocks_needed)
- len = blocks_needed;
-
- /* Clean up the top bits for any mode that is not a multiple of a
- HWI and is not compressed. */
- if (len == blocks_needed && small_prec)
- val[len - 1] = sext_hwi (val[len - 1], small_prec);
-
- if (len == 1)
- return;
-
- top = val[len - 1];
- if (top != 0 && top != (HOST_WIDE_INT)-1)
- return;
-
- /* At this point we know that the top is either 0 or -1. Find the
- first block that is not a copy of this. */
- for (i = len - 2; i >= 0; i--)
+ if (precision > xprecision)
{
- HOST_WIDE_INT x = val[i];
- if (x != top)
- {
- if (SIGN_MASK (x) == top)
- {
- len = i + 1;
- return;
- }
-
- /* We need an extra block because the top bit block i does
- not match the extension. */
- len = i + 2;
- return;
- }
- }
-
- /* The number is 0 or -1. */
- len = 1;
-}
-
-/* Copy THIS replacing the precision with PREC. It can do any of
- truncation, extension or copying. This function is only available
- with the default wide-int form as the other forms have fixed
- precisions. */
-wide_int_ro
-wide_int_ro::force_to_size (unsigned int prec, signop sgn) const
-{
- wide_int result;
- int blocks_needed = BLOCKS_NEEDED (prec);
- int i;
-
- result.precision = prec;
- /* If this is a value that has come in from a hwi, then it does not
- have a proper precision. However, it is in canonical form, so
- just copy and zap in the precision and return. */
- if (precision == 0)
- {
- /* Some zero prec numbers take 2 hwi's. If the target prec is
- small, we may need to shorten it. */
- result.len = len;
- if (prec <= HOST_BITS_PER_WIDE_INT)
- result.len = 1;
- for (int i = 0; i < result.len; ++i)
- result.val[i] = val[i];
- return result;
- }
-
- result.len = blocks_needed < len ? blocks_needed : len;
- for (i = 0; i < result.len; i++)
- result.val[i] = val[i];
-
- if (prec == precision)
- /* Nothing much to do. */
- ;
- else if (prec > precision)
- {
- /* Expanding */
- int small_precision = precision & (HOST_BITS_PER_WIDE_INT - 1);
+ /* Expanding. */
+ unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;
if (sgn == UNSIGNED)
{
- /* The top block in the existing rep must be zero extended. */
- if (small_precision
- /* We need to ensure we only extend the last block of
- the original number, if the number has not been
- compressed. If the number has been compressed, then
- all the bits are significant. */
- && len == BLOCKS_NEEDED (precision))
- result.val[len-1] = zext_hwi (result.val[len-1], small_precision);
- else if (len < blocks_needed
- && small_precision == 0
- && result.val[result.len - 1] < 0)
+ if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
+ val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
+ else if (val[len - 1] < 0)
{
- /* We need to uncompress the original value first. */
- while (result.len < BLOCKS_NEEDED (precision))
- result.val[result.len++] = (HOST_WIDE_INT)-1;
- /* We need to put the 0 block on top to keep the value
- from being sign extended. */
- if (result.len < blocks_needed)
- result.val[result.len++] = 0;
+ while (len < BLOCKS_NEEDED (xprecision))
+ val[len++] = -1;
+ if (small_xprecision)
+ val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
+ else
+ val[len++] = 0;
}
}
   /* We have to do this because we cannot guarantee that there is
      no trash in the top block of an uncompressed value.  For a
      compressed value, all the bits are significant.  */
- else if (small_precision
- && len == BLOCKS_NEEDED (precision))
- result.val[len-1] = sext_hwi (result.val[len-1], small_precision);
+ else if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
+ val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
}
- else
- result.canonize ();
+ else if (precision < xprecision)
+ /* Contracting. */
+ len = canonize (val, len, precision);
-#ifdef DEBUG_WIDE_INT
- debug_wwvs ("wide_int:: %s = force_to_size (%s, prec = %d %s)\n",
- result, *this, prec, sgn==UNSIGNED ? "U" : "S");
-#endif
-
- return result;
+ return len;
}
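
A minimal sketch of the new block-level contract, assuming a 64-bit
HOST_WIDE_INT and the WIDE_INT_MAX_ELTS bound used elsewhere in this
file (illustrative only, not part of the patch):

  /* Widen the canonized 32-bit all-ones value to 128 bits, unsigned.  */
  HOST_WIDE_INT narrow[1] = { -1 };
  HOST_WIDE_INT wide[WIDE_INT_MAX_ELTS];
  unsigned int len = wi::force_to_size (wide, narrow, 1, 32, 128, UNSIGNED);
  /* wide[0] is now 0xffffffff and len is 1: the zero-extended value
     in compressed form.  */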
/* This function hides the fact that we cannot rely on the bits beyond
@@ -602,9 +383,9 @@ top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
/* Return true if OP0 == OP1. */
bool
-wide_int_ro::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
- unsigned int prec,
- const HOST_WIDE_INT *op1, unsigned int op1len)
+wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
+ const HOST_WIDE_INT *op1, unsigned int op1len,
+ unsigned int prec)
{
int l0 = op0len - 1;
unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
@@ -632,10 +413,10 @@ wide_int_ro::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
/* Return true if OP0 < OP1 using signed comparisons. */
bool
-wide_int_ro::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
- unsigned int p0,
- const HOST_WIDE_INT *op1, unsigned int op1len,
- unsigned int p1)
+wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
+ unsigned int p0,
+ const HOST_WIDE_INT *op1, unsigned int op1len,
+ unsigned int p1)
{
HOST_WIDE_INT s0, s1;
unsigned HOST_WIDE_INT u0, u1;
@@ -673,10 +454,10 @@ wide_int_ro::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
signed compares. */
int
-wide_int_ro::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
- unsigned int p0,
- const HOST_WIDE_INT *op1, unsigned int op1len,
- unsigned int p1)
+wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
+ unsigned int p0,
+ const HOST_WIDE_INT *op1, unsigned int op1len,
+ unsigned int p1)
{
HOST_WIDE_INT s0, s1;
unsigned HOST_WIDE_INT u0, u1;
@@ -713,8 +494,8 @@ wide_int_ro::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
/* Return true if OP0 < OP1 using unsigned comparisons. */
bool
-wide_int_ro::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len, unsigned int p0,
- const HOST_WIDE_INT *op1, unsigned int op1len, unsigned int p1)
+wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len, unsigned int p0,
+ const HOST_WIDE_INT *op1, unsigned int op1len, unsigned int p1)
{
unsigned HOST_WIDE_INT x0;
unsigned HOST_WIDE_INT x1;
@@ -741,8 +522,8 @@ wide_int_ro::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len, unsigne
/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
unsigned compares. */
int
-wide_int_ro::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len, unsigned int p0,
- const HOST_WIDE_INT *op1, unsigned int op1len, unsigned int p1)
+wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len, unsigned int p0,
+ const HOST_WIDE_INT *op1, unsigned int op1len, unsigned int p1)
{
unsigned HOST_WIDE_INT x0;
unsigned HOST_WIDE_INT x1;
@@ -766,526 +547,292 @@ wide_int_ro::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len, unsigned
return 0;
}
-/* Return true if THIS has the sign bit set to 1 and all other bits are
- zero. */
-bool
-wide_int_ro::only_sign_bit_p (unsigned int prec) const
-{
- int i;
- HOST_WIDE_INT x;
- int small_prec;
- bool result;
-
- if (BLOCKS_NEEDED (prec) != len)
- {
- result = false;
- goto ex;
- }
-
- for (i=0; i < len - 1; i++)
- if (val[i] != 0)
- {
- result = false;
- goto ex;
- }
-
- x = val[len - 1];
- small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
- if (small_prec)
- x = x << (HOST_BITS_PER_WIDE_INT - small_prec);
-
- result = x == ((HOST_WIDE_INT)1) << (HOST_BITS_PER_WIDE_INT - 1);
-
- ex:
-
-#ifdef DEBUG_WIDE_INT
- debug_vw ("wide_int:: %d = only_sign_bit_p (%s)\n", result, *this);
-#endif
- return result;
-}
-
-/* Returns true if THIS fits into range of TYPE. Signedness of OP0 is
- assumed to be the same as the signedness of TYPE. */
-bool
-wide_int_ro::fits_to_tree_p (const_tree type) const
-{
- unsigned int type_prec = TYPE_PRECISION (type);
-
- if (precision <= type_prec)
- return true;
-
- if (TYPE_SIGN (type) == UNSIGNED)
- return *this == zext (type_prec);
- else
- {
- /* For signed, we can do a couple of quick tests since the
- compressed rep looks like it was just sign extended. */
- if (len < BLOCKS_NEEDED (type_prec))
- return true;
-
- if (len > BLOCKS_NEEDED (type_prec))
- return false;
-
- return *this == sext (type_prec);
- }
-}
-
/*
* Extension.
*/
-/* Sign extend THIS starting at OFFSET. The precision of the result
- are the same as THIS. */
-wide_int_ro
-wide_int_ro::sext (unsigned int offset) const
-{
- wide_int result;
- int off;
-
- gcc_assert (precision >= offset);
-
- if (precision <= HOST_BITS_PER_WIDE_INT)
- {
- result.precision = precision;
- if (offset < precision)
- result.val[0] = sext_hwi (val[0], offset);
- else
- /* If offset is greater or equal to precision there is nothing
- to do since the internal rep is already sign extended. */
- result.val[0] = val[0];
-
- result.len = 1;
- }
- else if (precision == offset)
- result = *this;
+/* Sign-extend the number represented by XVAL and XLEN into VAL,
+ starting at OFFSET. Return the number of blocks in VAL. Both XVAL
+ and VAL have PRECISION bits. */
+unsigned int
+wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
+ unsigned int xlen, unsigned int precision, unsigned int offset)
+{
+ unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
+  /* Extending beyond the precision is a no-op.  If we have only stored
+     OFFSET bits or fewer, the remaining blocks are already correct sign
+     extensions of the value.  */
+ if (offset >= precision || len >= xlen)
+ {
+ for (unsigned i = 0; i < xlen; ++i)
+ val[i] = xval[i];
+ return xlen;
+ }
+ unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
+ for (unsigned int i = 0; i < len; i++)
+ val[i] = xval[i];
+ if (suboffset > 0)
+ {
+ val[len] = sext_hwi (xval[len], suboffset);
+ len += 1;
+ }
+ return canonize (val, len, precision);
+}
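
For example, a hypothetical caller (64-bit HOST_WIDE_INT assumed):

  /* Treat the low 8 bits of X as signed and extend to 128 bits.  */
  HOST_WIDE_INT x[1] = { 0x80 };
  HOST_WIDE_INT r[WIDE_INT_MAX_ELTS];
  unsigned int len = wi::sext_large (r, x, 1, 128, 8);
  /* r[0] == -128 and len == 1: the compressed representation of -128.  */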
+
+/* Zero-extend the number represented by XVAL and XLEN into VAL,
+ starting at OFFSET. Return the number of blocks in VAL. Both XVAL
+ and VAL have PRECISION bits. */
+unsigned int
+wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
+ unsigned int xlen, unsigned int precision, unsigned int offset)
+{
+ unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
+ /* Extending beyond the precision is a no-op. If we have only stored
+ OFFSET bits or fewer, and the upper stored bit is zero, then there
+ is nothing to do. */
+ if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
+ {
+ for (unsigned i = 0; i < xlen; ++i)
+ val[i] = xval[i];
+ return xlen;
+ }
+ unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
+ for (unsigned int i = 0; i < len; i++)
+ val[i] = i < xlen ? xval[i] : -1;
+ if (suboffset > 0)
+ val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
else
- {
- result = decompress (offset, precision);
-
- /* Now we can do the real sign extension. */
- off = offset & (HOST_BITS_PER_WIDE_INT - 1);
- if (off)
- {
- int block = BLOCK_OF (offset);
- result.val[block] = sext_hwi (val[block], off);
- result.len = block + 1;
- }
- /* We never need an extra element for sign extended values but
- we may need to compress. */
- result.canonize ();
- }
-
-#ifdef DEBUG_WIDE_INT
- debug_wwv ("wide_int:: %s = (%s sext %d)\n", result, *this, offset);
-#endif
-
- return result;
-}
-
-/* Zero extend THIS starting at OFFSET. The precision of the result
- are the same as THIS. */
-wide_int_ro
-wide_int_ro::zext (unsigned int offset) const
-{
- wide_int result;
- int off;
- int block;
-
- gcc_assert (precision >= offset);
-
- if (precision <= HOST_BITS_PER_WIDE_INT)
- {
- result.precision = precision;
- if (offset < precision)
- result.val[0] = zext_hwi (val[0], offset);
- else if (offset == precision)
- result.val[0] = val[0];
- /* If offset was greater than the precision we need to zero
- extend from the old precision since the internal rep was
- equivalent to sign extended. */
- else
- result.val[0] = zext_hwi (val[0], precision);
-
- result.len = 1;
- }
- else if (precision == offset)
- result = *this;
- else
- {
- result = decompress (offset, precision);
-
- /* Now we can do the real zero extension. */
- off = offset & (HOST_BITS_PER_WIDE_INT - 1);
- block = BLOCK_OF (offset);
- if (off)
- {
- result.val[block] = zext_hwi (val[block], off);
- result.len = block + 1;
- }
- else
- /* See if we need an extra zero element to satisfy the
- compression rule. */
- if (result.val[block - 1] < 0 && offset < precision)
- {
- result.val[block] = 0;
- result.len += 1;
- }
- result.canonize ();
- }
-#ifdef DEBUG_WIDE_INT
- debug_wwv ("wide_int:: %s = (%s zext %d)\n", result, *this, offset);
-#endif
- return result;
+ val[len] = 0;
+ return canonize (val, len + 1, precision);
}
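
The unsigned counterpart, again hypothetical (64-bit HOST_WIDE_INT):

  /* Zero-extend the low 8 bits of an all-ones value to 128 bits.  */
  HOST_WIDE_INT x[1] = { -1 };
  HOST_WIDE_INT r[WIDE_INT_MAX_ELTS];
  unsigned int len = wi::zext_large (r, x, 1, 128, 8);
  /* r[0] == 0xff and len == 1: everything above bit 7 is now zero.  */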
/*
* Masking, inserting, shifting, rotating.
*/
-/* Return a value with a one bit inserted in THIS at BITPOS. */
-wide_int_ro
-wide_int_ro::set_bit (unsigned int bitpos) const
-{
- wide_int result;
- int i, j;
-
- if (bitpos >= precision)
- result = *this;
- else
- {
- result = decompress (bitpos, precision);
- j = bitpos / HOST_BITS_PER_WIDE_INT;
- i = bitpos & (HOST_BITS_PER_WIDE_INT - 1);
- result.val[j] |= ((HOST_WIDE_INT)1) << i;
- }
-
-#ifdef DEBUG_WIDE_INT
- debug_wwv ("wide_int_ro:: %s = (%s set_bit %d)\n", result, *this, bitpos);
-#endif
- return result;
-}
-
-/* Insert a 1 bit into 0 at BITPOS producing an number with PREC. */
-wide_int_ro
-wide_int_ro::set_bit_in_zero (unsigned int bitpos, unsigned int prec)
-{
- wide_int result;
- int extra_bit = 0;
- /* We need one extra bit of 0 above the set bit for the compression
- of the bits above the set bit when the bit that is set is the top
- bit of a compressed number. When setting the actual top bit
- (non-compressed) we can just set it as there are no bits above
- it. */
- if (bitpos % HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_WIDE_INT-1
- && bitpos+1 != prec)
- extra_bit = 1;
- int blocks_needed = BLOCKS_NEEDED (bitpos + 1 + extra_bit);
- int i, j;
-
- result.precision = prec;
- if (bitpos >= prec)
- {
- result.len = 1;
- result.val[0] = 0;
- }
- else
- {
- result.len = blocks_needed;
- for (i = 0; i < blocks_needed; i++)
- result.val[i] = 0;
-
- j = bitpos / HOST_BITS_PER_WIDE_INT;
- i = bitpos & (HOST_BITS_PER_WIDE_INT - 1);
- result.val[j] |= ((HOST_WIDE_INT)1) << i;
- }
-
-#ifdef DEBUG_WIDE_INT
- debug_wv ("wide_int_ro:: %s = set_bit_in_zero (%d)\n", result, bitpos);
-#endif
-
- return result;
-}
-
-/* Insert WIDTH bits from OP0 into THIS starting at START. */
-wide_int_ro
-wide_int_ro::insert (const wide_int_ro &op0, unsigned int start,
- unsigned int width) const
+/* Insert WIDTH bits from Y into X starting at START. */
+wide_int
+wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
+ unsigned int width)
{
wide_int result;
wide_int mask;
wide_int tmp;
+ unsigned int precision = x.get_precision ();
if (start >= precision)
- return *this;
+ return x;
- gcc_checking_assert (op0.precision >= width);
+ gcc_checking_assert (precision >= width);
if (start + width >= precision)
width = precision - start;
- mask = shifted_mask (start, width, false, precision);
- tmp = op0.lshift_widen (start, precision);
+ mask = wi::shifted_mask (start, width, false, precision);
+ tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
result = tmp & mask;
- tmp = and_not (mask);
+ tmp = wi::bit_and_not (x, mask);
result = result | tmp;
-#ifdef DEBUG_WIDE_INT
- debug_wwwvv ("wide_int_ro:: %s = (%s insert %s start = %d width = %d)\n",
- result, *this, op0, start, width);
-#endif
-
return result;
}
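
A sketch of this entry point in use; it assumes the wi::uhwi
constructor exported by wide-int.h (illustrative only):

  /* Splice an 8-bit field into bits 16..23 of a 64-bit zero.  */
  wide_int x = wi::uhwi (0, 64);
  wide_int y = wi::uhwi (0xab, 8);
  wide_int r = wi::insert (x, y, 16, 8);
  /* Bits 16..23 of R are now 0xab; all other bits are unchanged.  */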
+/* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
+ Return the number of blocks in VAL. Both XVAL and VAL have PRECISION
+ bits. */
+unsigned int
+wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
+ unsigned int xlen, unsigned int precision, unsigned int bit)
+{
+ unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
+ unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;
+
+ if (block + 1 >= xlen)
+ {
+ /* The operation either affects the last current block or needs
+ a new block. */
+ unsigned int len = block + 1;
+ for (unsigned int i = 0; i < len; i++)
+ val[i] = safe_uhwi (xval, xlen, i);
+ val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
+
+ /* If the bit we just set is at the msb of the block, make sure
+ that any higher bits are zeros. */
+      if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
+ val[len++] = 0;
+ return len;
+ }
+ else
+ {
+ for (unsigned int i = 0; i < xlen; i++)
+ val[i] = xval[i];
+ val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
+ return canonize (val, xlen, precision);
+ }
+}
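
The zero-block rule matters when the set bit lands on the msb of a
block; a sketch (64-bit HOST_WIDE_INT assumed):

  /* Set bit 63 of a one-block value under a 128-bit precision.  */
  HOST_WIDE_INT x[1] = { 1 };
  HOST_WIDE_INT r[WIDE_INT_MAX_ELTS];
  unsigned int len = wi::set_bit_large (r, x, 1, 128, 63);
  /* len == 2: a zero block was appended so that the set msb of r[0]
     is not read back as a sign extension.  */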
+
/* bswap THIS. */
-wide_int_ro
-wide_int_ro::bswap () const
+wide_int
+wide_int_storage::bswap () const
{
- wide_int result;
- int i, s;
- int end;
- int len = BLOCKS_NEEDED (precision);
+ wide_int result = wide_int::create (precision);
+ unsigned int i, s;
+ unsigned int len = BLOCKS_NEEDED (precision);
+ unsigned int xlen = get_len ();
+ const HOST_WIDE_INT *xval = get_val ();
+ HOST_WIDE_INT *val = result.write_val ();
   /* This is not a well-defined operation if the precision is not a
multiple of 8. */
gcc_assert ((precision & 0x7) == 0);
- result.precision = precision;
- result.len = len;
-
for (i = 0; i < len; i++)
- result.val[i] = 0;
+ val[i] = 0;
/* Only swap the bytes that are not the padding. */
- if ((precision & (HOST_BITS_PER_WIDE_INT - 1))
- && (this->len == len))
- end = precision;
- else
- end = this->len * HOST_BITS_PER_WIDE_INT;
-
- for (s = 0; s < end; s += 8)
+ for (s = 0; s < precision; s += 8)
{
unsigned int d = precision - s - 8;
unsigned HOST_WIDE_INT byte;
- int block = s / HOST_BITS_PER_WIDE_INT;
- int offset = s & (HOST_BITS_PER_WIDE_INT - 1);
+ unsigned int block = s / HOST_BITS_PER_WIDE_INT;
+ unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);
- byte = (val[block] >> offset) & 0xff;
+ byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;
block = d / HOST_BITS_PER_WIDE_INT;
offset = d & (HOST_BITS_PER_WIDE_INT - 1);
- result.val[block] |= byte << offset;
+ val[block] |= byte << offset;
}
- result.canonize ();
-
-#ifdef DEBUG_WIDE_INT
- debug_ww ("wide_int_ro:: %s = bswap (%s)\n", result, *this);
-#endif
+ result.set_len (canonize (val, len, precision));
return result;
}
-/* Return a result mask where the lower WIDTH bits are ones and the
- bits above that up to the precision are zeros. The result is
- inverted if NEGATE is true. The result is made with PREC. */
-wide_int_ro
-wide_int_ro::mask (unsigned int width, bool negate, unsigned int prec)
+/* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
+ above that up to PREC are zeros. The result is inverted if NEGATE
+ is true. Return the number of blocks in VAL. */
+unsigned int
+wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
+ unsigned int prec)
{
- wide_int result;
- unsigned int i = 0;
- int shift;
-
gcc_assert (width < 4 * MAX_BITSIZE_MODE_ANY_INT);
gcc_assert (prec <= 4 * MAX_BITSIZE_MODE_ANY_INT);
if (width == prec)
{
- if (negate)
- result = wide_int::zero (prec);
- else
- result = wide_int::minus_one (prec);
+ val[0] = negate ? 0 : -1;
+ return 1;
}
else if (width == 0)
{
- if (negate)
- result = wide_int::minus_one (prec);
- else
- result = wide_int::zero (prec);
+ val[0] = negate ? -1 : 0;
+ return 1;
}
- else
- {
- result.precision = prec;
- while (i < width / HOST_BITS_PER_WIDE_INT)
- result.val[i++] = negate ? 0 : (HOST_WIDE_INT)-1;
+ unsigned int i = 0;
+ while (i < width / HOST_BITS_PER_WIDE_INT)
+ val[i++] = negate ? 0 : -1;
- shift = width & (HOST_BITS_PER_WIDE_INT - 1);
- if (shift != 0)
- {
- HOST_WIDE_INT last = (((HOST_WIDE_INT)1) << shift) - 1;
- result.val[i++] = negate ? ~last : last;
- }
- else
- result.val[i++] = negate ? (HOST_WIDE_INT)-1 : 0;
- result.len = i;
+ unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
+ if (shift != 0)
+ {
+ HOST_WIDE_INT last = (((unsigned HOST_WIDE_INT) 1) << shift) - 1;
+ val[i++] = negate ? ~last : last;
}
+ else
+ val[i++] = negate ? -1 : 0;
-#ifdef DEBUG_WIDE_INT
- debug_wvv ("wide_int_ro:: %s = mask (%d, negate = %d)\n", result, width, negate);
-#endif
- return result;
+ return i;
}
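
For instance (hypothetical caller):

  /* Build the low-8-bit mask at 32-bit precision.  */
  HOST_WIDE_INT m[WIDE_INT_MAX_ELTS];
  unsigned int len = wi::mask (m, 8, false, 32);
  /* len == 1 and m[0] == 0xff; with NEGATE true the single block would
     instead be ~0xff, the compressed form of the inverted mask.  */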
-/* Return a result mask of WIDTH ones starting at START and the
- bits above that up to the precision are zeros. The result is
- inverted if NEGATE is true. */
-wide_int_ro
-wide_int_ro::shifted_mask (unsigned int start, unsigned int width,
- bool negate, unsigned int prec)
+/* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
+ bits are ones, and the bits above that up to PREC are zeros. The result
+ is inverted if NEGATE is true. Return the number of blocks in VAL. */
+unsigned int
+wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
+ bool negate, unsigned int prec)
{
- wide_int result;
- unsigned int i = 0;
- unsigned int shift;
- unsigned int end = start + width;
- HOST_WIDE_INT block;
-
gcc_assert (start < 4 * MAX_BITSIZE_MODE_ANY_INT);
if (start + width > prec)
width = prec - start;
+ unsigned int end = start + width;
if (width == 0)
{
- if (negate)
- result = wide_int::minus_one (prec);
- else
- result = wide_int::zero (prec);
-#ifdef DEBUG_WIDE_INT
- debug_wvvv
- ("wide_int:: %s = shifted_mask (start = %d width = %d negate = %d)\n",
- result, start, width, negate);
-#endif
- return result;
+ val[0] = negate ? -1 : 0;
+ return 1;
}
- result.precision = prec;
-
+ unsigned int i = 0;
while (i < start / HOST_BITS_PER_WIDE_INT)
- result.val[i++] = negate ? (HOST_WIDE_INT)-1 : 0;
+ val[i++] = negate ? -1 : 0;
- shift = start & (HOST_BITS_PER_WIDE_INT - 1);
+ unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
if (shift)
{
- block = (((HOST_WIDE_INT)1) << shift) - 1;
- shift = (end) & (HOST_BITS_PER_WIDE_INT - 1);
+ HOST_WIDE_INT block = (((unsigned HOST_WIDE_INT) 1) << shift) - 1;
+ shift = end & (HOST_BITS_PER_WIDE_INT - 1);
if (shift)
{
/* case 000111000 */
- block = (((HOST_WIDE_INT)1) << shift) - block - 1;
- result.val[i++] = negate ? ~block : block;
- result.len = i;
-
-#ifdef DEBUG_WIDE_INT
- debug_wvvv
- ("wide_int_ro:: %s = shifted_mask (start = %d width = %d negate = %d)\n",
- result, start, width, negate);
-#endif
- return result;
+ block = (((unsigned HOST_WIDE_INT) 1) << shift) - block - 1;
+ val[i++] = negate ? ~block : block;
+ return i;
}
else
/* ...111000 */
- result.val[i++] = negate ? block : ~block;
+ val[i++] = negate ? block : ~block;
}
while (i < end / HOST_BITS_PER_WIDE_INT)
/* 1111111 */
- result.val[i++] = negate ? 0 : (HOST_WIDE_INT)-1;
+ val[i++] = negate ? 0 : -1;
shift = end & (HOST_BITS_PER_WIDE_INT - 1);
if (shift != 0)
{
/* 000011111 */
- block = (((HOST_WIDE_INT)1) << shift) - 1;
- result.val[i++] = negate ? ~block : block;
+ HOST_WIDE_INT block = (((unsigned HOST_WIDE_INT) 1) << shift) - 1;
+ val[i++] = negate ? ~block : block;
}
else if (end < prec)
- result.val[i++] = negate ? (HOST_WIDE_INT)-1 : 0;
-
- result.len = i;
+ val[i++] = negate ? -1 : 0;
-#ifdef DEBUG_WIDE_INT
- debug_wvvv
- ("wide_int_ro:: %s = shifted_mask (start = %d width = %d negate = %d)\n",
- result, start, width, negate);
-#endif
-
- return result;
+ return i;
}
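
One concrete case (hypothetical caller):

  /* Eight ones starting at bit 4, at 32-bit precision.  */
  HOST_WIDE_INT m[WIDE_INT_MAX_ELTS];
  unsigned int len = wi::shifted_mask (m, 4, 8, false, 32);
  /* len == 1 and m[0] == 0xff0: the "case 000111000" branch above.  */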
-/* Ensure there are no undefined bits returned by elt (). This is
- useful for when we might hash the value returned by elt and want to
- ensure the top undefined bit are in fact, defined. If sgn is
- UNSIGNED, the bits are zeroed, if sgn is SIGNED, then the bits are
- copies of the top bit (aka sign bit) as determined by
- precision. */
-void
-wide_int_ro::clear_undef (signop sgn)
-{
- int small_prec = precision % HOST_BITS_PER_WIDE_INT;
- if (small_prec)
- {
- if (len == (precision + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
- {
- if (sgn == UNSIGNED)
- val[len-1] &= ((unsigned HOST_WIDE_INT)1 << small_prec) - 1;
- else
- {
- int cnt = HOST_BITS_PER_WIDE_INT - small_prec;
- val[len-1] = (val[len-1] << cnt) >> cnt;
- }
- }
- }
- /* Do we have a int:0 inside a struct? */
- else if (precision == 0)
- val[0] = 0;
-}
-
-
/*
* logical operations.
*/
-/* Return THIS & OP1. */
-wide_int_ro
-wide_int_ro::and_large (const HOST_WIDE_INT *op0, unsigned int op0len,
- unsigned int prec,
- const HOST_WIDE_INT *op1, unsigned int op1len)
+/* Set VAL to OP0 & OP1. Return the number of blocks used. */
+unsigned int
+wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
+ unsigned int op0len, const HOST_WIDE_INT *op1,
+ unsigned int op1len, unsigned int prec)
{
- wide_int result;
int l0 = op0len - 1;
int l1 = op1len - 1;
bool need_canon = true;
- result.len = MAX (op0len, op1len);
- result.precision = prec;
-
+ unsigned int len = MAX (op0len, op1len);
if (l0 > l1)
{
HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
if (op1mask == 0)
{
l0 = l1;
- result.len = l1 + 1;
+ len = l1 + 1;
}
else
{
need_canon = false;
while (l0 > l1)
{
- result.val[l0] = op0[l0];
+ val[l0] = op0[l0];
l0--;
}
}
@@ -1294,13 +841,13 @@ wide_int_ro::and_large (const HOST_WIDE_INT *op0, unsigned int op0len,
{
HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
if (op0mask == 0)
- result.len = l0 + 1;
+ len = l0 + 1;
else
{
need_canon = false;
while (l1 > l0)
{
- result.val[l1] = op1[l1];
+ val[l1] = op1[l1];
l1--;
}
}
@@ -1308,44 +855,42 @@ wide_int_ro::and_large (const HOST_WIDE_INT *op0, unsigned int op0len,
while (l0 >= 0)
{
- result.val[l0] = op0[l0] & op1[l0];
+ val[l0] = op0[l0] & op1[l0];
l0--;
}
if (need_canon)
- result.canonize ();
+ len = canonize (val, len, prec);
- return result;
+ return len;
}
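
The top_bit_of masks implement the implicit sign extension of the
shorter operand; a sketch with hypothetical values (64-bit
HOST_WIDE_INT):

  /* AND a two-block value with the one-block value -1, whose implicit
     mask covers the missing high block.  */
  HOST_WIDE_INT a[2] = { 0x1234, 0x5678 };
  HOST_WIDE_INT b[1] = { -1 };
  HOST_WIDE_INT r[WIDE_INT_MAX_ELTS];
  unsigned int len = wi::and_large (r, a, 2, b, 1, 128);
  /* B behaves as ...ffff, so R is simply a copy of A and len == 2.  */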
-/* Return THIS & ~OP1. */
-wide_int_ro
-wide_int_ro::and_not_large (const HOST_WIDE_INT *op0, unsigned int op0len,
- unsigned int prec,
- const HOST_WIDE_INT *op1, unsigned int op1len)
+/* Set VAL to OP0 & ~OP1. Return the number of blocks used. */
+unsigned int
+wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
+ unsigned int op0len, const HOST_WIDE_INT *op1,
+ unsigned int op1len, unsigned int prec)
{
-  wide_int result;
int l0 = op0len - 1;
int l1 = op1len - 1;
bool need_canon = true;
- result.len = MAX (op0len, op1len);
- result.precision = prec;
-
+ unsigned int len = MAX (op0len, op1len);
if (l0 > l1)
{
HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
if (op1mask != 0)
{
l0 = l1;
- result.len = l1 + 1;
+ len = l1 + 1;
}
else
{
need_canon = false;
while (l0 > l1)
{
- result.val[l0] = op0[l0];
+ val[l0] = op0[l0];
l0--;
}
}
@@ -1354,13 +899,13 @@ wide_int_ro::and_not_large (const HOST_WIDE_INT *op0, unsigned int op0len,
{
HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
if (op0mask == 0)
- result.len = l0 + 1;
+ len = l0 + 1;
else
{
need_canon = false;
while (l1 > l0)
{
- result.val[l1] = ~op1[l1];
+ val[l1] = ~op1[l1];
l1--;
}
}
@@ -1368,44 +913,42 @@ wide_int_ro::and_not_large (const HOST_WIDE_INT *op0, unsigned int op0len,
while (l0 >= 0)
{
- result.val[l0] = op0[l0] & ~op1[l0];
+ val[l0] = op0[l0] & ~op1[l0];
l0--;
}
if (need_canon)
- result.canonize ();
+ len = canonize (val, len, prec);
- return result;
+ return len;
}
-/* Return THIS | OP1. */
-wide_int_ro
-wide_int_ro::or_large (const HOST_WIDE_INT *op0, unsigned int op0len,
- unsigned int prec,
- const HOST_WIDE_INT *op1, unsigned int op1len)
+/* Set VAL to OP0 | OP1. Return the number of blocks used. */
+unsigned int
+wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
+ unsigned int op0len, const HOST_WIDE_INT *op1,
+ unsigned int op1len, unsigned int prec)
{
-  wide_int result;
int l0 = op0len - 1;
int l1 = op1len - 1;
bool need_canon = true;
- result.len = MAX (op0len, op1len);
- result.precision = prec;
-
+ unsigned int len = MAX (op0len, op1len);
if (l0 > l1)
{
HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
if (op1mask != 0)
{
l0 = l1;
- result.len = l1 + 1;
+ len = l1 + 1;
}
else
{
need_canon = false;
while (l0 > l1)
{
- result.val[l0] = op0[l0];
+ val[l0] = op0[l0];
l0--;
}
}
@@ -1414,13 +957,13 @@ wide_int_ro::or_large (const HOST_WIDE_INT *op0, unsigned int op0len,
{
HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
if (op0mask != 0)
- result.len = l0 + 1;
+ len = l0 + 1;
else
{
need_canon = false;
while (l1 > l0)
{
- result.val[l1] = op1[l1];
+ val[l1] = op1[l1];
l1--;
}
}
@@ -1428,44 +971,42 @@ wide_int_ro::or_large (const HOST_WIDE_INT *op0, unsigned int op0len,
while (l0 >= 0)
{
- result.val[l0] = op0[l0] | op1[l0];
+ val[l0] = op0[l0] | op1[l0];
l0--;
}
if (need_canon)
- result.canonize ();
+ len = canonize (val, len, prec);
- return result;
+ return len;
}
-/* Return THIS | ~OP1. */
-wide_int_ro
-wide_int_ro::or_not_large (const HOST_WIDE_INT *op0, unsigned int op0len,
- unsigned int prec,
- const HOST_WIDE_INT *op1, unsigned int op1len)
+/* Set VAL to OP0 | ~OP1. Return the number of blocks used. */
+unsigned int
+wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
+ unsigned int op0len, const HOST_WIDE_INT *op1,
+ unsigned int op1len, unsigned int prec)
{
-  wide_int result;
int l0 = op0len - 1;
int l1 = op1len - 1;
bool need_canon = true;
- result.len = MAX (op0len, op1len);
- result.precision = prec;
-
+ unsigned int len = MAX (op0len, op1len);
if (l0 > l1)
{
HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
if (op1mask == 0)
{
l0 = l1;
- result.len = l1 + 1;
+ len = l1 + 1;
}
else
{
need_canon = false;
while (l0 > l1)
{
- result.val[l0] = op0[l0];
+ val[l0] = op0[l0];
l0--;
}
}
@@ -1474,13 +1015,13 @@ wide_int_ro::or_not_large (const HOST_WIDE_INT *op0, unsigned int op0len,
{
HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
if (op0mask != 0)
- result.len = l0 + 1;
+ len = l0 + 1;
else
{
need_canon = false;
while (l1 > l0)
{
- result.val[l1] = ~op1[l1];
+ val[l1] = ~op1[l1];
l1--;
}
}
@@ -1488,35 +1029,33 @@ wide_int_ro::or_not_large (const HOST_WIDE_INT *op0, unsigned int op0len,
while (l0 >= 0)
{
- result.val[l0] = op0[l0] | ~op1[l0];
+ val[l0] = op0[l0] | ~op1[l0];
l0--;
}
if (need_canon)
- result.canonize ();
+ len = canonize (val, len, prec);
- return result;
+ return len;
}
-/* Return the exclusive ior (xor) of THIS and OP1. */
-wide_int_ro
-wide_int_ro::xor_large (const HOST_WIDE_INT *op0, unsigned int op0len,
- unsigned int prec,
- const HOST_WIDE_INT *op1, unsigned int op1len)
+/* Set VAL to OP0 ^ OP1. Return the number of blocks used. */
+unsigned int
+wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
+ unsigned int op0len, const HOST_WIDE_INT *op1,
+ unsigned int op1len, unsigned int prec)
{
-  wide_int result;
int l0 = op0len - 1;
int l1 = op1len - 1;
- result.len = MAX (op0len, op1len);
- result.precision = prec;
-
+ unsigned int len = MAX (op0len, op1len);
if (l0 > l1)
{
HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
while (l0 > l1)
{
- result.val[l0] = op0[l0] ^ op1mask;
+ val[l0] = op0[l0] ^ op1mask;
l0--;
}
}
@@ -1526,56 +1065,33 @@ wide_int_ro::xor_large (const HOST_WIDE_INT *op0, unsigned int op0len,
HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
while (l1 > l0)
{
- result.val[l1] = op0mask ^ op1[l1];
+ val[l1] = op0mask ^ op1[l1];
l1--;
}
}
while (l0 >= 0)
{
- result.val[l0] = op0[l0] ^ op1[l0];
+ val[l0] = op0[l0] ^ op1[l0];
l0--;
}
- result.canonize ();
-
-#ifdef DEBUG_WIDE_INT
- debug_waa ("wide_int_ro:: %s = (%s ^ %s)\n",
- result, op0, op0len, prec, op1, op1len, prec);
-#endif
- return result;
+ return canonize (val, len, prec);
}
/*
* math
*/
-/* Absolute value of THIS. */
-wide_int_ro
-wide_int_ro::abs () const
+/* Set VAL to OP0 + OP1. If OVERFLOW is nonnull, record in *OVERFLOW
+ whether the result overflows when OP0 and OP1 are treated as having
+ signedness SGN. Return the number of blocks in VAL. */
+unsigned int
+wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
+ unsigned int op0len, const HOST_WIDE_INT *op1,
+ unsigned int op1len, unsigned int prec,
+ signop sgn, bool *overflow)
{
- wide_int result;
- gcc_checking_assert (precision);
-
- if (sign_mask ())
- result = -*this;
- else
- result = *this;
-
-#ifdef DEBUG_WIDE_INT
- debug_ww ("wide_int_ro:: %s = abs (%s)\n", result, *this);
-#endif
- return result;
-}
-
-/* Add of THIS and OP1. No overflow is detected. */
-wide_int_ro
-wide_int_ro::add_large (const HOST_WIDE_INT *op0, unsigned int op0len,
- unsigned int prec,
- const HOST_WIDE_INT *op1, unsigned int op1len,
- signop sgn, bool *overflow)
-{
- wide_int result;
unsigned HOST_WIDE_INT o0 = 0;
unsigned HOST_WIDE_INT o1 = 0;
unsigned HOST_WIDE_INT x = 0;
@@ -1584,26 +1100,25 @@ wide_int_ro::add_large (const HOST_WIDE_INT *op0, unsigned int op0len,
unsigned HOST_WIDE_INT mask0, mask1;
unsigned int i, small_prec;
- result.precision = prec;
- result.len = MAX (op0len, op1len);
+ unsigned int len = MAX (op0len, op1len);
mask0 = -top_bit_of (op0, op0len, prec);
mask1 = -top_bit_of (op1, op1len, prec);
/* Add all of the explicitly defined elements. */
- for (i = 0; i < result.len; i++)
+ for (i = 0; i < len; i++)
{
o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
x = o0 + o1 + carry;
- result.val[i] = x;
+ val[i] = x;
old_carry = carry;
carry = carry == 0 ? x < o0 : x <= o0;
}
- if (result.len * HOST_BITS_PER_WIDE_INT < prec)
+ if (len * HOST_BITS_PER_WIDE_INT < prec)
{
- result.val[result.len] = mask0 + mask1 + carry;
- result.len++;
+ val[len] = mask0 + mask1 + carry;
+ len++;
if (overflow)
*overflow = false;
}
@@ -1611,212 +1126,71 @@ wide_int_ro::add_large (const HOST_WIDE_INT *op0, unsigned int op0len,
{
if (sgn == SIGNED)
{
- int p = (result.len == BLOCKS_NEEDED (prec)
- ? HOST_BITS_PER_WIDE_INT
- : prec & (HOST_BITS_PER_WIDE_INT - 1) ) - 1;
- HOST_WIDE_INT x = (result.val[result.len - 1] ^ o0)
- & (result.val[result.len - 1] ^ o1);
+ unsigned int p = (len == BLOCKS_NEEDED (prec)
+ ? HOST_BITS_PER_WIDE_INT
+ : prec & (HOST_BITS_PER_WIDE_INT - 1) ) - 1;
+ HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
x = (x >> p) & 1;
*overflow = (x != 0);
}
else
{
if (old_carry)
- *overflow = ((unsigned HOST_WIDE_INT)result.val[result.len - 1] <= o0);
+ *overflow = ((unsigned HOST_WIDE_INT) val[len - 1] <= o0);
else
- *overflow = ((unsigned HOST_WIDE_INT)result.val[result.len - 1] < o0);
+ *overflow = ((unsigned HOST_WIDE_INT) val[len - 1] < o0);
}
}
small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
- if (small_prec != 0 && BLOCKS_NEEDED (prec) == result.len)
+ if (small_prec != 0 && BLOCKS_NEEDED (prec) == len)
{
/* Modes with weird precisions. */
- i = result.len - 1;
- result.val[i] = sext_hwi (result.val[i], small_prec);
+ i = len - 1;
+ val[i] = sext_hwi (val[i], small_prec);
}
- result.canonize ();
-
- return result;
+ return canonize (val, len, prec);
}
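
A sketch of the carry-based overflow check (hypothetical caller,
64-bit HOST_WIDE_INT assumed):

  /* Unsigned 64-bit add that wraps.  */
  HOST_WIDE_INT a[1] = { -1 };            /* all ones, i.e. UINT64_MAX */
  HOST_WIDE_INT b[1] = { 1 };
  HOST_WIDE_INT r[WIDE_INT_MAX_ELTS];
  bool ovf;
  unsigned int len = wi::add_large (r, a, 1, b, 1, 64, UNSIGNED, &ovf);
  /* r[0] == 0, len == 1 and ovf is true: the lost carry shows up as
     the result block comparing below O0.  */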
-
-/* Count leading zeros of THIS but only looking at the bits in the
- smallest HWI of size mode. */
-wide_int_ro
-wide_int_ro::clz () const
+/* This is bogus. We should always return the precision and leave the
+ caller to handle target dependencies. */
+static int
+clz_zero (unsigned int precision)
{
- int i;
- int start;
- int count;
- HOST_WIDE_INT v;
- int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
-
- gcc_checking_assert (precision);
-
- if (zero_p ())
- {
- enum machine_mode mode = mode_for_size (precision, MODE_INT, 0);
- if (mode == BLKmode)
- mode_for_size (precision, MODE_PARTIAL_INT, 0);
-
- /* Even if the value at zero is undefined, we have to come up
- with some replacement. Seems good enough. */
- if (mode == BLKmode)
- count = precision;
- else if (!CLZ_DEFINED_VALUE_AT_ZERO (mode, count))
- count = precision;
- }
- else if (neg_p ())
- count = 0;
- else
- {
- /* The high order block is special if it is the last block and the
- precision is not an even multiple of HOST_BITS_PER_WIDE_INT. We
- have to clear out any ones above the precision before doing clz
- on this block. */
- if (BLOCKS_NEEDED (precision) == len && small_prec)
- {
- v = zext_hwi (val[len - 1], small_prec);
- count = clz_hwi (v) - (HOST_BITS_PER_WIDE_INT - small_prec);
- start = len - 2;
- if (v != 0)
- {
-#ifdef DEBUG_WIDE_INT
- debug_vw ("wide_int:: %d = clz (%s)\n", count, *this);
-#endif
- return from_shwi (count, precision);
- }
- }
- else
- {
- count = HOST_BITS_PER_WIDE_INT * (BLOCKS_NEEDED (precision) - len);
- start = len - 1;
- }
+ unsigned int count;
- for (i = start; i >= 0; i--)
- {
- v = elt (i);
- count += clz_hwi (v);
- if (v != 0)
- break;
- }
+ enum machine_mode mode = mode_for_size (precision, MODE_INT, 0);
+ if (mode == BLKmode)
+ mode_for_size (precision, MODE_PARTIAL_INT, 0);
- }
-
-#ifdef DEBUG_WIDE_INT
- debug_vw ("wide_int_ro:: %d = clz (%s)\n", count, *this);
-#endif
- return from_shwi (count, precision);
+ /* Even if the value at zero is undefined, we have to come up
+ with some replacement. Seems good enough. */
+ if (mode == BLKmode)
+ count = precision;
+ else if (!CLZ_DEFINED_VALUE_AT_ZERO (mode, count))
+ count = precision;
+ return count;
}
-/* Count the number of redundant leading bits of THIS. Return result
- as a HOST_WIDE_INT. */
-wide_int_ro
-wide_int_ro::clrsb () const
+/* This is bogus. We should always return the precision and leave the
+ caller to handle target dependencies. */
+static int
+ctz_zero (unsigned int precision)
{
- gcc_checking_assert (precision);
+ unsigned int count;
- if (neg_p ())
- return operator ~ ().clz () - 1;
+ enum machine_mode mode = mode_for_size (precision, MODE_INT, 0);
+ if (mode == BLKmode)
+ mode_for_size (precision, MODE_PARTIAL_INT, 0);
- return clz () - 1;
-}
-
-/* Count zeros of THIS. */
-wide_int_ro
-wide_int_ro::ctz () const
-{
- int i;
- unsigned int count = 0;
- HOST_WIDE_INT v;
- int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
- int end;
- bool more_to_do;
-
- gcc_checking_assert (precision);
-
- if (zero_p ())
- {
- enum machine_mode mode = mode_for_size (precision, MODE_INT, 0);
- if (mode == BLKmode)
- mode_for_size (precision, MODE_PARTIAL_INT, 0);
-
- /* Even if the value at zero is undefined, we have to come up
- with some replacement. Seems good enough. */
- if (mode == BLKmode)
- count = precision;
- else if (!CTZ_DEFINED_VALUE_AT_ZERO (mode, count))
- count = precision;
- }
- else
- {
- /* The high order block is special if it is the last block and the
- precision is not an even multiple of HOST_BITS_PER_WIDE_INT. We
- have to clear out any ones above the precision before doing clz
- on this block. */
- if (BLOCKS_NEEDED (precision) == len && small_prec)
- {
- end = len - 1;
- more_to_do = true;
- }
- else
- {
- end = len;
- more_to_do = false;
- }
-
- for (i = 0; i < end; i++)
- {
- v = val[i];
- count += ctz_hwi (v);
- if (v != 0)
- {
-#ifdef DEBUG_WIDE_INT
- debug_vw ("wide_int_ro:: %d = ctz (%s)\n", count, *this);
-#endif
- return wide_int_ro::from_shwi (count, precision);
- }
- }
-
- if (more_to_do)
- {
- v = zext_hwi (val[len - 1], small_prec);
- count = ctz_hwi (v);
- /* The top word was all zeros so we have to cut it back to prec,
- because we are counting some of the zeros above the
- interesting part. */
- if (count > precision)
- count = precision;
- }
- else
- /* Skip over the blocks that are not represented. They must be
- all zeros at this point. */
- count = precision;
- }
-
-#ifdef DEBUG_WIDE_INT
- debug_vw ("wide_int_ro:: %d = ctz (%s)\n", count, *this);
-#endif
- return wide_int_ro::from_shwi (count, precision);
-}
-
-/* ffs of THIS. */
-wide_int_ro
-wide_int_ro::ffs () const
-{
- HOST_WIDE_INT count = ctz ().to_shwi ();
-
- if (count == precision)
- count = 0;
- else
- count += 1;
-
-#ifdef DEBUG_WIDE_INT
- debug_vw ("wide_int_ro:: %d = ffs (%s)\n", count, *this);
-#endif
- return wide_int_ro::from_shwi (count, precision);
+ /* Even if the value at zero is undefined, we have to come up
+ with some replacement. Seems good enough. */
+ if (mode == BLKmode)
+ count = precision;
+ else if (!CTZ_DEFINED_VALUE_AT_ZERO (mode, count))
+ count = precision;
+ return count;
}
/* Subroutines of the multiplication and division operations. Unpack
@@ -1826,12 +1200,13 @@ wide_int_ro::ffs () const
static void
wi_unpack (unsigned HOST_HALF_WIDE_INT *result,
const unsigned HOST_WIDE_INT *input,
- int in_len, int out_len, unsigned int prec, signop sgn)
+ unsigned int in_len, unsigned int out_len,
+ unsigned int prec, signop sgn)
{
- int i;
- int j = 0;
- int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
- int blocks_needed = BLOCKS_NEEDED (prec);
+ unsigned int i;
+ unsigned int j = 0;
+ unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
+ unsigned int blocks_needed = BLOCKS_NEEDED (prec);
HOST_WIDE_INT mask;
if (sgn == SIGNED)
@@ -1866,12 +1241,12 @@ wi_unpack (unsigned HOST_HALF_WIDE_INT *result,
static void
wi_pack (unsigned HOST_WIDE_INT *result,
const unsigned HOST_HALF_WIDE_INT *input,
- int in_len)
+ unsigned int in_len)
{
- int i = 0;
- int j = 0;
+ unsigned int i = 0;
+ unsigned int j = 0;
- while (i < in_len - 2)
+ while (i + 2 < in_len)
{
result[j++] = (unsigned HOST_WIDE_INT)input[i]
| ((unsigned HOST_WIDE_INT)input[i + 1]
@@ -1887,65 +1262,6 @@ wi_pack (unsigned HOST_WIDE_INT *result,
| ((unsigned HOST_WIDE_INT)input[i + 1] << HOST_BITS_PER_HALF_WIDE_INT);
}
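
Since these helpers are static, the following is purely illustrative
of the digit layout (assuming a 64-bit HOST_WIDE_INT and a 32-bit
HOST_HALF_WIDE_INT):

  unsigned HOST_WIDE_INT full[1] = { 0x123456789abcdef0ULL };
  unsigned HOST_HALF_WIDE_INT halves[2];
  wi_unpack (halves, full, 1, 2, 64, UNSIGNED);
  /* halves[0] == 0x9abcdef0 and halves[1] == 0x12345678: little-endian
     "digits" of half a HWI each.  */
  wi_pack (full, halves, 2);              /* exact round trip */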
-/* Return an integer that is the exact log2 of THIS. */
-wide_int_ro
-wide_int_ro::exact_log2 () const
-{
- int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
- wide_int count;
- wide_int result;
-
- gcc_checking_assert (precision);
- if (precision <= HOST_BITS_PER_WIDE_INT)
- {
- HOST_WIDE_INT v;
- if (small_prec)
- v = zext_hwi (val[0], small_prec);
- else
- v = val[0];
- result = wide_int_ro::from_shwi (::exact_log2 (v), precision);
- }
- else
- {
- count = ctz ();
- if (clz () + count + 1 == precision)
- result = count;
- else
- result = wide_int_ro::from_shwi (-1, precision);
- }
-
-#ifdef DEBUG_WIDE_INT
- debug_ww ("wide_int_ro:: %s = exact_log2 (%s)\n", result, *this);
-#endif
- return result;
-}
-
-/* Return an integer that is the floor log2 of THIS. */
-wide_int_ro
-wide_int_ro::floor_log2 () const
-{
- int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
- wide_int result;
-
- gcc_checking_assert (precision);
- if (precision <= HOST_BITS_PER_WIDE_INT)
- {
- HOST_WIDE_INT v;
- if (small_prec)
- v = zext_hwi (val[0], small_prec);
- else
- v = val[0];
- result = wide_int_ro::from_shwi (::floor_log2 (v), precision);
- }
- else
- result = wide_int_ro::from_shwi (precision, precision) - 1 - clz ();
-
-#ifdef DEBUG_WIDE_INT
- debug_ww ("wide_int_ro:: %s = floor_log2 (%s)\n", result, *this);
-#endif
- return result;
-}
-
/* Multiply Op1 by Op2. If HIGH is set, only the upper half of the
result is returned. If FULL is set, the entire result is returned
in a mode that is twice the width of the inputs. However, that
@@ -1957,15 +1273,12 @@ wide_int_ro::floor_log2 () const
way to check for overflow than to do this. OVERFLOW is assumed to
be sticky so it should be initialized. SGN controls the signedness
and is used to check overflow or if HIGH or FULL is set. */
-wide_int_ro
-wide_int_ro::mul_internal (bool high, bool full,
- const HOST_WIDE_INT *op1, unsigned int op1len,
- unsigned int prec,
- const HOST_WIDE_INT *op2, unsigned int op2len,
- signop sgn, bool *overflow,
- bool needs_overflow)
+unsigned int
+wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1,
+ unsigned int op1len, const HOST_WIDE_INT *op2,
+ unsigned int op2len, unsigned int prec, signop sgn,
+ bool *overflow, bool high, bool full)
{
- wide_int result;
unsigned HOST_WIDE_INT o0, o1, k, t;
unsigned int i;
unsigned int j;
@@ -1987,9 +1300,9 @@ wide_int_ro::mul_internal (bool high, bool full,
/* If the top level routine did not really pass in an overflow, then
just make sure that we never attempt to set it. */
- if (overflow == 0)
- needs_overflow = false;
- result.precision = prec;
+ bool needs_overflow = (overflow != 0);
+ if (needs_overflow)
+ *overflow = false;
/* If we need to check for overflow, we can only do half wide
multiplies quickly because we need to look at the top bits to
@@ -1998,7 +1311,6 @@ wide_int_ro::mul_internal (bool high, bool full,
&& (prec <= HOST_BITS_PER_HALF_WIDE_INT))
{
HOST_WIDE_INT r;
- result.len = 1;
if (sgn == SIGNED)
{
@@ -2032,20 +1344,12 @@ wide_int_ro::mul_internal (bool high, bool full,
*overflow = true;
}
if (full)
- {
- result.val[0] = sext_hwi (r, prec * 2);
- result.precision = prec * 2;
- }
+ val[0] = sext_hwi (r, prec * 2);
else if (high)
- result.val[0] = r >> prec;
+ val[0] = r >> prec;
else
- result.val[0] = sext_hwi (r, prec);
-#ifdef DEBUG_WIDE_INT
- debug_wvasa ("wide_int_ro:: %s O=%d = (%s *%s %s)\n",
- result, overflow ? *overflow : 0, op1, op1len, prec,
- sgn==UNSIGNED ? "U" : "S", op2, op2len, prec);
-#endif
- return result;
+ val[0] = sext_hwi (r, prec);
+ return 1;
}
/* We do unsigned mul and then correct it. */
@@ -2124,102 +1428,66 @@ wide_int_ro::mul_internal (bool high, bool full,
if (full)
{
/* compute [2prec] <- [prec] * [prec] */
- wi_pack ((unsigned HOST_WIDE_INT*)result.val, r, 2 * half_blocks_needed);
- result.len = blocks_needed * 2;
- result.precision = prec * 2;
+ wi_pack ((unsigned HOST_WIDE_INT *) val, r, 2 * half_blocks_needed);
+ return canonize (val, blocks_needed * 2, prec * 2);
}
else if (high)
{
/* compute [prec] <- ([prec] * [prec]) >> [prec] */
- wi_pack ((unsigned HOST_WIDE_INT*)&result.val [blocks_needed >> 1],
- r, half_blocks_needed);
- result.len = blocks_needed;
+ wi_pack ((unsigned HOST_WIDE_INT *) val,
+ &r[half_blocks_needed], half_blocks_needed);
+ return canonize (val, blocks_needed, prec);
}
else
{
/* compute [prec] <- ([prec] * [prec]) && ((1 << [prec]) - 1) */
- wi_pack ((unsigned HOST_WIDE_INT*)result.val, r, half_blocks_needed);
- result.len = blocks_needed;
+ wi_pack ((unsigned HOST_WIDE_INT *) val, r, half_blocks_needed);
+ return canonize (val, blocks_needed, prec);
}
-
- result.canonize ();
-
-#ifdef DEBUG_WIDE_INT
- debug_wvasa ("wide_int_ro:: %s O=%d = (%s *%s %s)\n",
- result, overflow ? *overflow : 0, op1, op1len, prec,
- sgn==UNSIGNED ? "U" : "S", op2, op2len, prec);
-#endif
- return result;
}
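
A sketch of the overflow-checking fast path (hypothetical direct
call, 64-bit HOST_WIDE_INT; real callers use the wi:: multiply
wrappers):

  HOST_WIDE_INT a[1] = { 1 << 20 }, b[1] = { 1 << 20 };
  HOST_WIDE_INT r[WIDE_INT_MAX_ELTS];
  bool ovf;
  /* Low PREC bits of the product, with signed overflow detection.  */
  unsigned int len = wi::mul_internal (r, a, 1, b, 1, 32, SIGNED,
				       &ovf, false, false);
  /* 2^40 does not fit in 32 bits, so ovf is true and r[0] keeps only
     the (here zero) low bits.  */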
-/* Compute the parity of THIS. */
-wide_int_ro
-wide_int_ro::parity () const
-{
- wide_int count = popcount ();
- return count & 1;
-}
-
-/* Compute the population count of THIS. */
-wide_int_ro
-wide_int_ro::popcount () const
+/* Compute the population count of X. */
+int
+wi::popcount (const wide_int_ref &x)
{
- int i;
- int start;
+ unsigned int i;
int count;
- HOST_WIDE_INT v;
- int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
- int blocks_needed = BLOCKS_NEEDED (precision);
- gcc_checking_assert (precision);
+ if (x.precision == 0)
+ return 0;
/* The high order block is special if it is the last block and the
precision is not an even multiple of HOST_BITS_PER_WIDE_INT. We
have to clear out any ones above the precision before doing
popcount on this block. */
- if (small_prec)
+ count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
+ unsigned int stop = x.len;
+ if (count < 0)
{
- v = zext_hwi (elt (blocks_needed - 1), small_prec);
- count = popcount_hwi (v);
-
- if (len == blocks_needed)
- start = len - 2;
- else
- {
- start = len - 1;
- blocks_needed--;
- }
+ count = popcount_hwi (x.uhigh () << -count);
+ stop -= 1;
}
else
{
- start = len - 1;
- count = 0;
+ if (x.sign_mask () >= 0)
+ count = 0;
}
- if (sign_mask ())
- count += HOST_BITS_PER_WIDE_INT * (blocks_needed - len);
-
- for (i = start; i >= 0; i--)
- {
- v = val[i];
- count += popcount_hwi (v);
- }
+ for (i = 0; i < stop; ++i)
+ count += popcount_hwi (x.val[i]);
-#ifdef DEBUG_WIDE_INT
- debug_vw ("wide_int_ro:: %d = popcount (%s)\n", count, *this);
-#endif
- return wide_int_ro::from_shwi (count, precision);
+ return count;
}
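
For example (hypothetical; wi::uhwi assumed from wide-int.h):

  wide_int x = wi::uhwi (0xff, 128);
  int bits = wi::popcount (x);            /* bits == 8 */
  /* The implicit high blocks of the compressed form contribute either
     nothing (non-negative values) or their full width (negative ones).  */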
-/* Subtract of THIS and OP1. If the pointer to OVERFLOW is not 0, set
- OVERFLOW if the value overflows. */
-wide_int_ro
-wide_int_ro::sub_large (const HOST_WIDE_INT *op0, unsigned int op0len,
- unsigned int prec,
- const HOST_WIDE_INT *op1, unsigned int op1len,
- signop sgn, bool *overflow)
+/* Set VAL to OP0 - OP1. If OVERFLOW is nonnull, record in *OVERFLOW
+ whether the result overflows when OP0 and OP1 are treated as having
+ signedness SGN. Return the number of blocks in VAL. */
+unsigned int
+wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
+ unsigned int op0len, const HOST_WIDE_INT *op1,
+ unsigned int op1len, unsigned int prec,
+ signop sgn, bool *overflow)
{
- wide_int result;
unsigned HOST_WIDE_INT o0 = 0;
unsigned HOST_WIDE_INT o1 = 0;
unsigned HOST_WIDE_INT x = 0;
@@ -2232,26 +1500,25 @@ wide_int_ro::sub_large (const HOST_WIDE_INT *op0, unsigned int op0len,
unsigned HOST_WIDE_INT mask0, mask1;
unsigned int i, small_prec;
- result.precision = prec;
- result.len = MAX (op0len, op1len);
+ unsigned int len = MAX (op0len, op1len);
mask0 = -top_bit_of (op0, op0len, prec);
mask1 = -top_bit_of (op1, op1len, prec);
/* Subtract all of the explicitly defined elements. */
- for (i = 0; i < result.len; i++)
+ for (i = 0; i < len; i++)
{
o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
x = o0 - o1 - borrow;
- result.val[i] = x;
+ val[i] = x;
old_borrow = borrow;
borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
}
- if (result.len * HOST_BITS_PER_WIDE_INT < prec)
+ if (len * HOST_BITS_PER_WIDE_INT < prec)
{
- result.val[result.len] = mask0 - mask1 - borrow;
- result.len++;
+ val[len] = mask0 - mask1 - borrow;
+ len++;
if (overflow)
*overflow = false;
}
@@ -2259,34 +1526,31 @@ wide_int_ro::sub_large (const HOST_WIDE_INT *op0, unsigned int op0len,
{
if (sgn == SIGNED)
{
- int p = (result.len == BLOCKS_NEEDED (prec)
- ? HOST_BITS_PER_WIDE_INT
- : prec & (HOST_BITS_PER_WIDE_INT - 1) ) - 1;
- HOST_WIDE_INT x
- = (((o0 ^ o1) & (result.val[result.len - 1] ^ o0)) >> p) & 1;
+ unsigned int p = (len == BLOCKS_NEEDED (prec)
+ ? HOST_BITS_PER_WIDE_INT
+ : prec & (HOST_BITS_PER_WIDE_INT - 1) ) - 1;
+ HOST_WIDE_INT x = (((o0 ^ o1) & (val[len - 1] ^ o0)) >> p) & 1;
*overflow = (x != 0);
}
else
{
if (old_borrow)
- *overflow = ((unsigned HOST_WIDE_INT)result.val[result.len - 1] >= o0);
+ *overflow = ((unsigned HOST_WIDE_INT) val[len - 1] >= o0);
else
- *overflow = ((unsigned HOST_WIDE_INT)result.val[result.len - 1] > o0);
+ *overflow = ((unsigned HOST_WIDE_INT) val[len - 1] > o0);
}
}
small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
- if (small_prec != 0 && BLOCKS_NEEDED (prec) == result.len)
+ if (small_prec != 0 && BLOCKS_NEEDED (prec) == len)
{
/* Modes with weird precisions. */
- i = result.len - 1;
- result.val[i] = sext_hwi (result.val[i], small_prec);
+ i = len - 1;
+ val[i] = sext_hwi (val[i], small_prec);
}
- result.canonize ();
-
- return result;
+ return canonize (val, len, prec);
}
@@ -2300,12 +1564,12 @@ wide_int_ro::sub_large (const HOST_WIDE_INT *op0, unsigned int op0len,
   algorithm.  M is the number of significant elements of U; however,
   there needs to be at least one extra element of B_DIVIDEND
   allocated.  N is the number of elements of B_DIVISOR.  */
-void
-wide_int_ro::divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
- unsigned HOST_HALF_WIDE_INT *b_remainder,
- unsigned HOST_HALF_WIDE_INT *b_dividend,
- unsigned HOST_HALF_WIDE_INT *b_divisor,
- int m, int n)
+static void
+divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
+ unsigned HOST_HALF_WIDE_INT *b_remainder,
+ unsigned HOST_HALF_WIDE_INT *b_dividend,
+ unsigned HOST_HALF_WIDE_INT *b_divisor,
+ unsigned int m, unsigned int n)
{
/* The "digits" are a HOST_HALF_WIDE_INT which the size of half of a
HOST_WIDE_INT and stored in the lower bits of each word. This
@@ -2403,28 +1667,23 @@ wide_int_ro::divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
}
-/* Do a truncating divide DIVISOR into DIVIDEND. The result is the
- same size as the operands. SIGN is either SIGNED or UNSIGNED. If
- COMPUTE_QUOTIENT is set, the quotient is computed and returned. If
- it is not set, the result is undefined. If COMPUTE_REMAINDER is
- set, the remaineder is returned in remainder. If it is not set,
- the remainder is undefined. If OFLOW is not null, it is set to the
- overflow value. */
-wide_int_ro
-wide_int_ro::divmod_internal (bool compute_quotient,
- const HOST_WIDE_INT *dividend,
- unsigned int dividend_len,
- unsigned int dividend_prec,
- const HOST_WIDE_INT *divisor,
- unsigned int divisor_len,
- unsigned int divisor_prec,
- signop sgn, wide_int_ro *remainder,
- bool compute_remainder,
- bool *oflow)
-{
- wide_int quotient, u0, u1;
- int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
- int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
+/* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
+ the result. If QUOTIENT is nonnull, store the value of the quotient
+ there and return the number of blocks in it. The return value is
+ not defined otherwise. If REMAINDER is nonnull, store the value
+ of the remainder there and store the number of blocks in
+ *REMAINDER_LEN. If OFLOW is not null, store in *OFLOW whether
+ the division overflowed. */
+unsigned int
+wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
+ HOST_WIDE_INT *remainder, const HOST_WIDE_INT *dividend,
+ unsigned int dividend_len, unsigned int dividend_prec,
+ const HOST_WIDE_INT *divisor, unsigned int divisor_len,
+ unsigned int divisor_prec, signop sgn,
+ bool *oflow)
+{
+ unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
+ unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
unsigned HOST_HALF_WIDE_INT
b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
unsigned HOST_HALF_WIDE_INT
@@ -2433,7 +1692,9 @@ wide_int_ro::divmod_internal (bool compute_quotient,
b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
unsigned HOST_HALF_WIDE_INT
b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
- int m, n;
+ unsigned int m, n;
+ HOST_WIDE_INT u0[WIDE_INT_MAX_ELTS];
+ HOST_WIDE_INT u1[WIDE_INT_MAX_ELTS];
bool dividend_neg = false;
bool divisor_neg = false;
bool overflow = false;
@@ -2461,7 +1722,7 @@ wide_int_ro::divmod_internal (bool compute_quotient,
zero. */
unsigned int i;
bool all_zero = true;
- for (i = 0; i < dividend_len - 1; i++)
+ for (i = 0; i + 1 < dividend_len; i++)
if (dividend[i] != 0)
{
all_zero = false;
@@ -2472,58 +1733,57 @@ wide_int_ro::divmod_internal (bool compute_quotient,
}
}
- quotient.precision = dividend_prec;
- remainder->precision = dividend_prec;
-
- /* Initialize the incoming overflow if it has been provided. */
- if (oflow)
- *oflow = false;
-
/* If overflow is set, just get out. There will only be grief by
continuing. */
if (overflow)
{
- if (compute_remainder)
+ if (remainder)
{
- remainder->len = 1;
- remainder->val[0] = 0;
+ *remainder_len = 1;
+ remainder[0] = 0;
}
if (oflow != 0)
*oflow = true;
- return wide_int::zero (dividend_prec);
+ if (quotient)
+ quotient[0] = 0;
+ return 1;
}
+ if (oflow)
+ *oflow = false;
+
/* Do it on the host if you can. */
if (dividend_prec <= HOST_BITS_PER_WIDE_INT
&& divisor_prec <= HOST_BITS_PER_WIDE_INT)
{
- quotient.len = 1;
- remainder->len = 1;
if (sgn == SIGNED)
{
HOST_WIDE_INT o0 = sext_hwi (dividend[0], dividend_prec);
HOST_WIDE_INT o1 = sext_hwi (divisor[0], divisor_prec);
- quotient.val[0] = sext_hwi (o0 / o1, dividend_prec);
- remainder->val[0] = sext_hwi (o0 % o1, dividend_prec);
+ if (quotient)
+ quotient[0] = sext_hwi (o0 / o1, dividend_prec);
+ if (remainder)
+ {
+ remainder[0] = sext_hwi (o0 % o1, dividend_prec);
+ *remainder_len = 1;
+ }
}
else
{
unsigned HOST_WIDE_INT o0 = zext_hwi (dividend[0], dividend_prec);
unsigned HOST_WIDE_INT o1 = zext_hwi (divisor[0], divisor_prec);
- quotient.val[0] = zext_hwi (o0 / o1, dividend_prec);
- remainder->val[0] = zext_hwi (o0 % o1, dividend_prec);
+ if (quotient)
+ quotient[0] = zext_hwi (o0 / o1, dividend_prec);
+ if (remainder)
+ {
+ remainder[0] = zext_hwi (o0 % o1, dividend_prec);
+ *remainder_len = 1;
+ }
}
-#ifdef DEBUG_WIDE_INT
- debug_wwasa ("wide_int_ro:: (q = %s) (r = %s) = (%s /%s %s)\n",
- quotient, *remainder,
- dividend, dividend_len, dividend_prec,
- sgn == SIGNED ? "S" : "U",
- divisor, divisor_len, divisor_prec);
-#endif
- return quotient;
+ return 1;
}
/* Make the divisor and dividend positive and remember what we
@@ -2532,18 +1792,16 @@ wide_int_ro::divmod_internal (bool compute_quotient,
{
if (top_bit_of (dividend, dividend_len, dividend_prec))
{
- u0 = sub_large (wide_int (0).val, 1,
- dividend_prec, dividend, dividend_len, UNSIGNED);
- dividend = u0.val;
- dividend_len = u0.len;
+ dividend_len = wi::sub_large (u0, zeros, 1, dividend, dividend_len,
+ dividend_prec, UNSIGNED, 0);
+ dividend = u0;
dividend_neg = true;
}
if (top_bit_of (divisor, divisor_len, divisor_prec))
{
- u1 = sub_large (wide_int (0).val, 1,
- divisor_prec, divisor, divisor_len, UNSIGNED);
- divisor = u1.val;
- divisor_len = u1.len;
+ divisor_len = wi::sub_large (u1, zeros, 1, divisor, divisor_len,
+ divisor_prec, UNSIGNED, 0);
+ divisor = u1;
divisor_neg = true;
}
}
@@ -2574,251 +1832,298 @@ wide_int_ro::divmod_internal (bool compute_quotient,
divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);
- if (compute_quotient)
+ unsigned int quotient_len = 0;
+ if (quotient)
{
- wi_pack ((unsigned HOST_WIDE_INT*)quotient.val, b_quotient, m);
- quotient.len = m / 2;
- quotient.canonize ();
+ wi_pack ((unsigned HOST_WIDE_INT *) quotient, b_quotient, m);
+ quotient_len = canonize (quotient, (m + 1) / 2, dividend_prec);
/* The quotient is neg if exactly one of the divisor or dividend is
neg. */
if (dividend_neg != divisor_neg)
- quotient = -quotient;
+ quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
+ quotient_len, dividend_prec,
+ UNSIGNED, 0);
}
- else
- quotient = wide_int::zero (dividend_prec);
- if (compute_remainder)
+ if (remainder)
{
- wi_pack ((unsigned HOST_WIDE_INT*)remainder->val, b_remainder, n);
- if (n & 1)
- n++;
- remainder->len = n / 2;
- (*remainder).canonize ();
+ wi_pack ((unsigned HOST_WIDE_INT *) remainder, b_remainder, n);
+ *remainder_len = canonize (remainder, (n + 1) / 2, dividend_prec);
/* The remainder is always the same sign as the dividend. */
if (dividend_neg)
- *remainder = -(*remainder);
+ *remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
+ *remainder_len, dividend_prec,
+ UNSIGNED, 0);
}
- else
- *remainder = wide_int::zero (dividend_prec);
-#ifdef DEBUG_WIDE_INT
- debug_wwasa ("wide_int_ro:: (q = %s) (r = %s) = (%s /%s %s)\n",
- quotient, *remainder,
- dividend, dividend_len, dividend_prec,
- sgn == SIGNED ? "S" : "U",
- divisor, divisor_len, divisor_prec);
-#endif
- return quotient;
+ return quotient_len;
}
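
A sketch of this low-level entry point; most code would reach it
through the wi:: division wrappers instead (64-bit HOST_WIDE_INT
assumed):

  HOST_WIDE_INT n[1] = { -7 }, d[1] = { 2 };
  HOST_WIDE_INT q[WIDE_INT_MAX_ELTS], r[WIDE_INT_MAX_ELTS];
  unsigned int rlen;
  bool ovf;
  unsigned int qlen = wi::divmod_internal (q, &rlen, r, n, 1, 64,
                                           d, 1, 64, SIGNED, &ovf);
  /* q[0] == -3 and r[0] == -1: truncation towards zero, with the
     remainder taking the sign of the dividend.  */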
+/*
+ * Shifting, rotating and extraction.
+ */
-/* Return TRUE iff PRODUCT is an integral multiple of FACTOR, and return
- the multiple in *MULTIPLE. Otherwise return FALSE and leave *MULTIPLE
- unchanged. */
-bool
-wide_int_ro::multiple_of_p (const wide_int_ro &factor,
- signop sgn, wide_int_ro *multiple) const
-{
- wide_int remainder;
- wide_int quotient = divmod_trunc (factor, &remainder, sgn);
- if (remainder.zero_p ())
+/* Left shift XVAL by SHIFT and store the result in VAL. Return the
+ number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */
+unsigned int
+wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
+ unsigned int xlen, unsigned int precision,
+ unsigned int shift)
+{
+ /* Split the shift into a whole-block shift and a subblock shift. */
+ unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
+ unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
+
+ /* The whole-block shift fills with zeros. */
+ unsigned int len = BLOCKS_NEEDED (precision);
+ for (unsigned int i = 0; i < skip; ++i)
+ val[i] = 0;
+
+ /* It's easier to handle the simple block case specially. */
+ if (small_shift == 0)
+ for (unsigned int i = skip; i < len; ++i)
+ val[i] = safe_uhwi (xval, xlen, i - skip);
+ else
{
- *multiple = quotient;
- return true;
+ /* The first unfilled output block is a left shift of the first
+ block in XVAL. The other output blocks contain bits from two
+ consecutive input blocks. */
+ unsigned HOST_WIDE_INT carry = 0;
+ for (unsigned int i = skip; i < len; ++i)
+ {
+ unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
+ val[i] = (x << small_shift) | carry;
+ carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
+ }
}
-
- return false;
+ return canonize (val, len, precision);
}
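
The same whole-block/subblock split can be written as a self-contained sketch over plain uint64_t blocks (lshift_blocks is illustrative only; it assumes LEN output blocks and omits the canonization step):

#include <cstdint>

/* Multi-word left shift over 64-bit blocks, following the split used
   by wi::lshift_large above.  */
static void
lshift_blocks (uint64_t *val, const uint64_t *xval,
	       unsigned int len, unsigned int shift)
{
  unsigned int skip = shift / 64;
  unsigned int small = shift % 64;
  for (unsigned int i = 0; i < skip && i < len; ++i)
    val[i] = 0;			/* The whole-block shift fills with zeros.  */
  if (small == 0)
    for (unsigned int i = skip; i < len; ++i)
      val[i] = xval[i - skip];
  else
    {
      uint64_t carry = 0;
      for (unsigned int i = skip; i < len; ++i)
	{
	  uint64_t x = xval[i - skip];
	  val[i] = (x << small) | carry;
	  carry = x >> (64 - small);	/* Bits that move up a block.  */
	}
    }
}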
-/*
- * Shifting, rotating and extraction.
- */
-
-/* Extract WIDTH bits from THIS starting at OFFSET. The result is
- assumed to fit in a HOST_WIDE_INT. This function is safe in that
- it can properly access elements that may not be explicitly
- represented. */
-HOST_WIDE_INT
-wide_int_ro::extract_to_hwi (int offset, int width) const
+/* Right shift XVAL by SHIFT and store the result in VAL. Return the
+ number of blocks in VAL. The input has XPRECISION bits and the
+ output has XPRECISION - SHIFT bits. */
+static unsigned int
+rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
+ unsigned int xlen, unsigned int xprecision,
+ unsigned int shift)
{
- int start_elt, end_elt, shift;
- HOST_WIDE_INT x;
+ /* Split the shift into a whole-block shift and a subblock shift. */
+ unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
+ unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
- /* Get rid of the easy cases first. */
- if (offset >= len * HOST_BITS_PER_WIDE_INT)
- return sign_mask ();
- if (offset + width <= 0)
- return 0;
+ /* Work out how many blocks are needed to store the significant bits
+ (excluding the upper zeros or signs). */
+ unsigned int len = BLOCKS_NEEDED (xprecision - shift);
- shift = offset & (HOST_BITS_PER_WIDE_INT - 1);
- if (offset < 0)
- {
- start_elt = -1;
- end_elt = 0;
- x = 0;
- }
+ /* It's easier to handle the simple block case specially. */
+ if (small_shift == 0)
+ for (unsigned int i = 0; i < len; ++i)
+ val[i] = safe_uhwi (xval, xlen, i + skip);
else
{
- start_elt = offset / HOST_BITS_PER_WIDE_INT;
- end_elt = (offset + width - 1) / HOST_BITS_PER_WIDE_INT;
- x = start_elt >= len
- ? sign_mask ()
- : (unsigned HOST_WIDE_INT)val[start_elt] >> shift;
+ /* Each output block but the last is a combination of two input blocks.
+ The last block is a right shift of the last block in XVAL. */
+ unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
+ for (unsigned int i = 0; i < len; ++i)
+ {
+ val[i] = curr >> small_shift;
+ curr = safe_uhwi (xval, xlen, i + skip + 1);
+ val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
+ }
}
+ return len;
+}
- if (start_elt != end_elt)
- {
- HOST_WIDE_INT y = end_elt == len
- ? sign_mask () : val[end_elt];
+/* Logically right shift XVAL by SHIFT and store the result in VAL.
+ Return the number of blocks in VAL. XVAL has XPRECISION bits and
+ VAL has PRECISION bits. */
+unsigned int
+wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
+ unsigned int xlen, unsigned int xprecision,
+ unsigned int precision, unsigned int shift)
+{
+ unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
- x |= y << (HOST_BITS_PER_WIDE_INT - shift);
+ /* The value we just created has precision XPRECISION - SHIFT.
+ Zero-extend it to wider precisions. */
+ if (precision > xprecision - shift)
+ {
+ unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
+ if (small_prec)
+ val[len - 1] = zext_hwi (val[len - 1], small_prec);
+ else if (val[len - 1] < 0)
+ {
+ /* Add a new block with a zero. */
+ val[len++] = 0;
+ return len;
+ }
}
-
- if (width != HOST_BITS_PER_WIDE_INT)
- x &= ((HOST_WIDE_INT)1 << width) - 1;
-
- return x;
+ return canonize (val, len, precision);
}
-
-/* Left shift THIS by CNT. See the definition of Op.TRUNC for how to
- set Z. Since this is used internally, it has the ability to
- specify the BISIZE and PRECISION independently. This is useful
- when inserting a small value into a larger one. */
-wide_int_ro
-wide_int_ro::lshift_large (unsigned int cnt, unsigned int res_prec) const
+/* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
+ Return the number of blocks in VAL. XVAL has XPRECISION bits and
+ VAL has PRECISION bits. */
+unsigned int
+wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
+ unsigned int xlen, unsigned int xprecision,
+ unsigned int precision, unsigned int shift)
{
- wide_int result;
- unsigned int i;
-
- result.precision = res_prec;
+ unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
- if (cnt >= res_prec)
+ /* The value we just created has precision XPRECISION - SHIFT.
+     Sign-extend it to wider precisions.  */
+ if (precision > xprecision - shift)
{
- result.val[0] = 0;
- result.len = 1;
- return result;
+ unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
+ if (small_prec)
+ val[len - 1] = sext_hwi (val[len - 1], small_prec);
}
+ return canonize (val, len, precision);
+}
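
The zext_hwi/sext_hwi fixups on the partial top block reduce to standard single-word bit tricks; a sketch for 64-bit blocks (hypothetical helpers, assuming 1 <= prec <= 63 and an arithmetic right shift for signed types):

#include <cstdint>

/* Zero-extend VAL from its low PREC bits: mask everything above.  */
static inline uint64_t
zext_block (uint64_t val, unsigned int prec)
{
  return val & (((uint64_t) 1 << prec) - 1);
}

/* Sign-extend VAL from its low PREC bits: shift the sign bit to the
   top, then shift back arithmetically.  */
static inline int64_t
sext_block (int64_t val, unsigned int prec)
{
  int shift = 64 - prec;
  return (int64_t) ((uint64_t) val << shift) >> shift;
}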
- for (i = 0; i < res_prec; i += HOST_BITS_PER_WIDE_INT)
- result.val[i / HOST_BITS_PER_WIDE_INT]
- = extract_to_hwi (i - cnt, HOST_BITS_PER_WIDE_INT);
+/* Return the number of leading (upper) zeros in X. */
+int
+wi::clz (const wide_int_ref &x)
+{
+  /* Calculate how many bits there are above the highest represented block.  */
+ int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
+
+ unsigned HOST_WIDE_INT high = x.uhigh ();
+ if (count < 0)
+ /* The upper -COUNT bits of HIGH are not part of the value.
+ Clear them out. */
+ high = (high << -count) >> -count;
+ else if (x.sign_mask () < 0)
+ /* The upper bit is set, so there are no leading zeros. */
+ return 0;
- result.len = BLOCKS_NEEDED (res_prec);
- result.canonize ();
+ /* Check whether the value is zero. */
+ if (high == 0 && x.len == 1)
+ return clz_zero (x.precision);
- return result;
+ /* We don't need to look below HIGH. Either HIGH is nonzero,
+ or the top bit of the block below is nonzero; clz_hwi is
+ HOST_BITS_PER_WIDE_INT in the latter case. */
+ return count + clz_hwi (high);
}
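
Without the compressed representation, the same count is a scan from the most significant block down; a simplified sketch assuming full 64-bit blocks (PREC == LEN * 64):

#include <cstdint>

/* Leading-zero count over LEN full 64-bit blocks.  */
static int
clz_blocks (const uint64_t *val, unsigned int len)
{
  for (unsigned int i = len; i-- > 0;)
    if (val[i] != 0)
      return (int) ((len - 1 - i) * 64) + __builtin_clzll (val[i]);
  return (int) (len * 64);	/* Value is zero: every bit is a leading zero.  */
}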
-/* Unsigned right shift THIS by CNT. */
-wide_int_ro
-wide_int_ro::rshiftu_large (unsigned int cnt) const
+/* Return the number of redundant sign bits in X. (That is, the number
+ of bits immediately below the sign bit that have the same value as
+ the sign bit.) */
+int
+wi::clrsb (const wide_int_ref &x)
{
- wide_int result;
- int i;
- int small_prec = (precision - cnt) & (HOST_BITS_PER_WIDE_INT - 1);
-
- if (cnt == 0)
- return *this;
+  /* Calculate how many bits there are above the highest represented block.  */
+ int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
- result.precision = precision;
-
- if (cnt >= precision)
+ unsigned HOST_WIDE_INT high = x.uhigh ();
+ unsigned HOST_WIDE_INT mask = -1;
+ if (count < 0)
{
- result.val[0] = 0;
- result.len = 1;
- return result;
+ /* The upper -COUNT bits of HIGH are not part of the value.
+ Clear them from both MASK and HIGH. */
+ mask >>= -count;
+ high &= mask;
}
- result.len = BLOCKS_NEEDED (precision - cnt);
-
- for (i = 0; i < result.len; i++)
- result.val[i]
- = extract_to_hwi ((i * HOST_BITS_PER_WIDE_INT) + cnt,
- HOST_BITS_PER_WIDE_INT);
+ /* If the top bit is 1, count the number of leading 1s. If the top
+ bit is zero, count the number of leading zeros. */
+ if (high > mask / 2)
+ high ^= mask;
- /* Extract_to_hwi sign extends. So we need to fix that up. */
- if (small_prec)
- result.val [result.len - 1]
- = zext_hwi (result.val [result.len - 1], small_prec);
- else if (result.val[result.len - 1] < 0)
- {
- /* Add a new block with a zero. */
- result.val[result.len++] = 0;
- return result;
- }
+ /* There are no sign bits below the top block, so we don't need to look
+ beyond HIGH. Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
+ HIGH is 0. */
+ return count + clz_hwi (high) - 1;
+}
- result.canonize ();
+/* Return the number of trailing (lower) zeros in X. */
+int
+wi::ctz (const wide_int_ref &x)
+{
+ if (x.len == 1 && x.ulow () == 0)
+ return ctz_zero (x.precision);
- return result;
+ /* Having dealt with the zero case, there must be a block with a
+ nonzero bit. We don't care about the bits above the first 1. */
+ unsigned int i = 0;
+ while (x.val[i] == 0)
+ ++i;
+ return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
}
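
A matching sketch of the trailing-zero scan over plain blocks (assumes the value is nonzero, as the zero check above guarantees):

#include <cstdint>

/* Trailing-zero count: skip zero blocks from the bottom, then count
   within the first nonzero one.  */
static int
ctz_blocks (const uint64_t *val, unsigned int len)
{
  unsigned int i = 0;
  while (i < len - 1 && val[i] == 0)
    ++i;
  return (int) (i * 64) + __builtin_ctzll (val[i]);
}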
-/* Signed right shift THIS by CNT. */
-wide_int_ro
-wide_int_ro::rshifts_large (unsigned int cnt) const
+/* If X is an exact power of 2, return the base-2 logarithm, otherwise
+ return -1. */
+int
+wi::exact_log2 (const wide_int_ref &x)
{
- wide_int result;
- int i;
-
- if (cnt == 0)
- return *this;
+ /* 0-precision values can only hold 0. */
+ if (x.precision == 0)
+ return -1;
- result.precision = precision;
+ /* Reject cases where there are implicit -1 blocks above HIGH. */
+ if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
+ return -1;
- if (cnt >= precision)
- {
- HOST_WIDE_INT m = sign_mask ();
- result.val[0] = m;
- result.len = 1;
- return result;
- }
+ /* Set CRUX to the index of the entry that should be nonzero.
+ If the top block is zero then the next lowest block (if any)
+ must have the high bit set. */
+ unsigned int crux = x.len - 1;
+ if (crux > 0 && x.val[crux] == 0)
+ crux -= 1;
+
+ /* Check that all lower blocks are zero. */
+ for (unsigned int i = 0; i < crux; ++i)
+ if (x.val[i] != 0)
+ return -1;
+
+ /* Get a zero-extended form of block CRUX. */
+ unsigned HOST_WIDE_INT hwi = x.val[crux];
+  if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
+ hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);
+
+ /* Now it's down to whether HWI is a power of 2. */
+ int res = ::exact_log2 (hwi);
+ if (res >= 0)
+ res += crux * HOST_BITS_PER_WIDE_INT;
+ return res;
+}
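
The underlying idea, as a sketch over full 64-bit blocks (exact_log2_blocks is illustrative; the GCC version above must also handle the compressed top block): a power of 2 has exactly one set bit, so at most one block may be nonzero, and that block must itself be a power of 2.

#include <cstdint>

static int
exact_log2_blocks (const uint64_t *val, unsigned int len)
{
  int pos = -1;
  for (unsigned int i = 0; i < len; ++i)
    if (val[i] != 0)
      {
	if (pos >= 0 || (val[i] & (val[i] - 1)) != 0)
	  return -1;		/* Two nonzero blocks, or not a power of 2.  */
	pos = (int) (i * 64) + __builtin_ctzll (val[i]);
      }
  return pos;
}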
- result.len = BLOCKS_NEEDED (precision - cnt);
+/* Return the base-2 logarithm of X, rounding down. Return -1 if X is 0. */
+int
+wi::floor_log2 (const wide_int_ref &x)
+{
+ return x.precision - 1 - clz (x);
+}
- for (i = 0; i < result.len; i++)
- result.val[i]
- = extract_to_hwi ((i * HOST_BITS_PER_WIDE_INT) + cnt,
- HOST_BITS_PER_WIDE_INT);
+/* Return the index of the first (lowest) set bit in X, counting from 1.
+ Return 0 if X is 0. */
+int
+wi::ffs (const wide_int_ref &x)
+{
+ return eq_p (x, 0) ? 0 : ctz (x) + 1;
+}
- result.canonize ();
+/* Return true if sign-extending X to have precision PRECISION would give
+ the minimum signed value at that precision. */
+bool
+wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
+{
+ return ctz (x) + 1 == int (precision);
+}
- return result;
+/* Return true if X represents the minimum signed value. */
+bool
+wi::only_sign_bit_p (const wide_int_ref &x)
+{
+ return only_sign_bit_p (x, x.precision);
}
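
These last four routines are thin wrappers over clz/ctz; their identities can be sanity-checked on single 64-bit words (a test sketch, not GCC code):

#include <cassert>
#include <cstdint>

static void
log2_identities ()
{
  uint64_t x = 0x00f0;				/* Bits 4..7 set.  */
  assert (63 - __builtin_clzll (x) == 7);	/* floor_log2.  */
  assert (__builtin_ctzll (x) + 1 == 5);	/* ffs: lowest set bit, 1-based.  */
  uint64_t min_signed = (uint64_t) 1 << 63;
  assert (__builtin_ctzll (min_signed) + 1 == 64); /* only_sign_bit_p at 64 bits.  */
}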
/*
* Private utilities.
*/
-/* Decompress THIS for at least TARGET bits into a result with
- precision PREC. */
-wide_int_ro
-wide_int_ro::decompress (unsigned int target, unsigned int prec) const
-{
- wide_int result;
- int blocks_needed = BLOCKS_NEEDED (target);
- HOST_WIDE_INT mask;
- int len, i;
-
- result.precision = prec;
- result.len = blocks_needed;
-
- for (i = 0; i < this->len; i++)
- result.val[i] = val[i];
-
- len = this->len;
-
- if (target > result.precision)
- return result;
-
- /* The extension that we are doing here is not sign extension, it is
- decompression. */
- mask = sign_mask ();
- while (len < blocks_needed)
- result.val[len++] = mask;
-
- return result;
-}
-
void gt_ggc_mx(max_wide_int*) { }
void gt_pch_nx(max_wide_int*,void (*)(void*, void*), void*) { }
void gt_pch_nx(max_wide_int*) { }
@@ -2833,7 +2138,7 @@ static char *
dumpa (const HOST_WIDE_INT *val, unsigned int len, unsigned int prec, char *buf)
{
int i;
- int l;
+ unsigned int l;
const char * sep = "";
l = sprintf (buf, "[%d (", prec);
@@ -2854,13 +2159,14 @@ dumpa (const HOST_WIDE_INT *val, unsigned int len, unsigned int prec, char *buf)
}
#endif
+#if 0
/* The debugging routines print results of wide operations into the
dump files of the respective passes in which they were called. */
char *
wide_int_ro::dump (char* buf) const
{
int i;
- int l;
+ unsigned int l;
const char * sep = "";
l = sprintf (buf, "[%d (", precision);
@@ -2877,293 +2183,11 @@ wide_int_ro::dump (char* buf) const
gcc_assert (l < MAX_SIZE);
return buf;
}
-
-#ifdef DEBUG_WIDE_INT
-
-#if 0
-#define wide_int_dump_file (dump_file ? dump_file : stdout)
-#else
-#define wide_int_dump_file (dump_file)
#endif
-void
-wide_int_ro::debug_vaa (const char* fmt, int r,
- const HOST_WIDE_INT *o0, unsigned int l0, unsigned int p0,
- const HOST_WIDE_INT *o1, unsigned int l1, unsigned int p1)
-{
- char buf0[MAX_SIZE];
- char buf1[MAX_SIZE];
- if (wide_int_dump_file)
- fprintf (wide_int_dump_file, fmt, r,
- dumpa (o0, l0, p0, buf0),
- dumpa (o1, l1, p1, buf1));
-}
-
-void
-wide_int_ro::debug_vw (const char* fmt, int r, const wide_int_ro& o0)
-{
- char buf0[MAX_SIZE];
- if (wide_int_dump_file)
- fprintf (wide_int_dump_file, fmt, r, o0.dump (buf0));
-}
-
-void
-wide_int_ro::debug_vwa (const char* fmt, int r, const wide_int_ro &o0,
- const HOST_WIDE_INT *o1, unsigned int l1, unsigned int p1)
-{
- char buf0[MAX_SIZE];
- char buf1[MAX_SIZE];
- if (wide_int_dump_file)
- fprintf (wide_int_dump_file, fmt, r, o0.dump (buf0), dumpa (o1, l1, p1, buf1));
-}
-
-void
-wide_int_ro::debug_vwh (const char* fmt, int r, const wide_int_ro &o0,
- HOST_WIDE_INT o1)
-{
- char buf0[MAX_SIZE];
- if (wide_int_dump_file)
- fprintf (wide_int_dump_file, fmt, r, o0.dump (buf0), o1);
-}
-
-void
-wide_int_ro::debug_vww (const char* fmt, int r, const wide_int_ro &o0,
- const wide_int_ro &o1)
-{
- char buf0[MAX_SIZE];
- char buf1[MAX_SIZE];
- if (wide_int_dump_file)
- fprintf (wide_int_dump_file, fmt, r, o0.dump (buf0), o1.dump (buf1));
-}
-
-void
-wide_int_ro::debug_wa (const char* fmt, const wide_int_ro &r,
- const HOST_WIDE_INT *o0, unsigned int l0, unsigned int p0)
-{
- char buf0[MAX_SIZE];
- char buf1[MAX_SIZE];
- if (wide_int_dump_file)
- fprintf (wide_int_dump_file, fmt, r.dump (buf0), dumpa (o0, l0, p0, buf1));
-}
-
-void
-wide_int_ro::debug_waa (const char* fmt, const wide_int_ro &r,
- const HOST_WIDE_INT *o0, unsigned int l0, unsigned int p0,
- const HOST_WIDE_INT *o1, unsigned int l1, unsigned int p1)
-{
- char buf0[MAX_SIZE];
- char buf1[MAX_SIZE];
- char buf2[MAX_SIZE];
- if (wide_int_dump_file)
- fprintf (wide_int_dump_file, fmt, r.dump (buf0), dumpa (o0, l0, p0, buf1),
- dumpa (o1, l1, p1, buf2));
-}
-
-void
-wide_int_ro::debug_waav (const char* fmt, const wide_int_ro &r,
- const HOST_WIDE_INT *o0, unsigned int l0, unsigned int p0,
- const HOST_WIDE_INT *o1, unsigned int l1, unsigned int p1,
- int s)
-{
- char buf0[MAX_SIZE];
- char buf1[MAX_SIZE];
- char buf2[MAX_SIZE];
- if (wide_int_dump_file)
- fprintf (wide_int_dump_file, fmt, r.dump (buf0), dumpa (o0, l0, p0, buf1),
- dumpa (o1, l1, p1, buf2), s);
-}
-
-void
-wide_int_ro::debug_wh (const char* fmt, const wide_int_ro &r,
- HOST_WIDE_INT o1)
-{
- char buf0[MAX_SIZE];
- if (wide_int_dump_file)
- fprintf (wide_int_dump_file, fmt, r.dump (buf0), o1);
-}
-
-void
-wide_int_ro::debug_whh (const char* fmt, const wide_int_ro &r,
- HOST_WIDE_INT o1, HOST_WIDE_INT o2)
+HOST_WIDE_INT foo (tree x)
{
- char buf0[MAX_SIZE];
- if (wide_int_dump_file)
- fprintf (wide_int_dump_file, fmt, r.dump (buf0), o1, o2);
+ addr_wide_int y = x;
+ addr_wide_int z = y;
+ return z.to_shwi ();
}
-
-void
-wide_int_ro::debug_wv (const char* fmt, const wide_int_ro &r, int v0)
-{
- char buf0[MAX_SIZE];
- if (wide_int_dump_file)
- fprintf (wide_int_dump_file, fmt, r.dump (buf0), v0);
-}
-
-void
-wide_int_ro::debug_wvv (const char* fmt, const wide_int_ro &r,
- int v0, int v1)
-{
- char buf0[MAX_SIZE];
- if (wide_int_dump_file)
- fprintf (wide_int_dump_file, fmt, r.dump (buf0), v0, v1);
-}
-
-void
-wide_int_ro::debug_wvvv (const char* fmt, const wide_int_ro &r,
- int v0, int v1, int v2)
-{
- char buf0[MAX_SIZE];
- if (wide_int_dump_file)
- fprintf (wide_int_dump_file, fmt, r.dump (buf0), v0, v1, v2);
-}
-
-void
-wide_int_ro::debug_wvwa (const char* fmt, const wide_int_ro &r, int v0,
- const wide_int_ro &o0,
- const HOST_WIDE_INT *o1, unsigned int l1, unsigned int p1)
-{
- char buf0[MAX_SIZE];
- char buf1[MAX_SIZE];
- char buf2[MAX_SIZE];
- if (wide_int_dump_file)
- fprintf (wide_int_dump_file, fmt, r.dump (buf0), v0,
- o0.dump (buf1), dumpa (o1, l1, p1, buf2));
-}
-
-void
-wide_int_ro::debug_wvasa (const char* fmt, const wide_int_ro &r, int v0,
- const HOST_WIDE_INT *o0, unsigned int l0, unsigned int p0,
- const char* s,
- const HOST_WIDE_INT *o1, unsigned int l1, unsigned int p1)
-{
- char buf0[MAX_SIZE];
- char buf1[MAX_SIZE];
- char buf2[MAX_SIZE];
- if (wide_int_dump_file)
- fprintf (wide_int_dump_file, fmt, r.dump (buf0), v0,
- dumpa (o0, l0, p0, buf1), s, dumpa (o1, l1, p1, buf2));
-}
-
-void
-wide_int_ro::debug_wvww (const char* fmt, const wide_int_ro &r, int v0,
- const wide_int_ro &o0, const wide_int_ro &o1)
-{
- char buf0[MAX_SIZE];
- char buf1[MAX_SIZE];
- char buf2[MAX_SIZE];
- if (wide_int_dump_file)
- fprintf (wide_int_dump_file, fmt, r.dump (buf0), v0,
- o0.dump (buf1), o1.dump (buf2));
-}
-
-void
-wide_int_ro::debug_ww (const char* fmt, const wide_int_ro &r,
- const wide_int_ro &o0)
-{
- char buf0[MAX_SIZE];
- char buf1[MAX_SIZE];
- if (wide_int_dump_file)
- fprintf (wide_int_dump_file, fmt, r.dump (buf0), o0.dump (buf1));
-}
-
-void
-wide_int_ro::debug_wwa (const char* fmt, const wide_int_ro &r,
- const wide_int_ro &o0,
- const HOST_WIDE_INT *o1, unsigned int l1, unsigned int p1)
-{
- char buf0[MAX_SIZE];
- char buf1[MAX_SIZE];
- char buf2[MAX_SIZE];
- if (wide_int_dump_file)
- fprintf (wide_int_dump_file, fmt, r.dump (buf0), o0.dump (buf1),
- dumpa (o1, l1, p1, buf2));
-}
-
-void
-wide_int_ro::debug_wwv (const char* fmt, const wide_int_ro &r,
- const wide_int_ro &o0, int v0)
-{
- char buf0[MAX_SIZE];
- char buf1[MAX_SIZE];
- if (wide_int_dump_file)
- fprintf (wide_int_dump_file, fmt, r.dump (buf0), o0.dump (buf1), v0);
-}
-
-void
-wide_int_ro::debug_wwvs (const char* fmt, const wide_int_ro &r,
- const wide_int_ro &o0, int v0,
- const char *s)
-{
- char buf0[MAX_SIZE];
- char buf1[MAX_SIZE];
- if (wide_int_dump_file)
- fprintf (wide_int_dump_file, fmt, r.dump (buf0), o0.dump (buf1), v0, s);
-}
-
-void
-wide_int_ro::debug_wwvvs (const char* fmt, const wide_int_ro &r,
- const wide_int_ro &o0, int v0, int v1,
- const char *s)
-{
- char buf0[MAX_SIZE];
- char buf1[MAX_SIZE];
- if (wide_int_dump_file)
- fprintf (wide_int_dump_file, fmt, r.dump (buf0), o0.dump (buf1), v0, v1, s);
-}
-
-void
-wide_int_ro::debug_wwwvv (const char* fmt, const wide_int_ro &r,
- const wide_int_ro &o0, const wide_int_ro &o1,
- int v0, int v1)
-{
- char buf0[MAX_SIZE];
- char buf1[MAX_SIZE];
- char buf2[MAX_SIZE];
- if (wide_int_dump_file)
- fprintf (wide_int_dump_file, fmt, r.dump (buf0),
- o0.dump (buf1), o1.dump (buf2), v0, v1);
-}
-
-void
-wide_int_ro::debug_www (const char* fmt, const wide_int_ro &r,
- const wide_int_ro &o0, const wide_int_ro &o1)
-{
- char buf0[MAX_SIZE];
- char buf1[MAX_SIZE];
- char buf2[MAX_SIZE];
- if (wide_int_dump_file)
- fprintf (wide_int_dump_file, fmt, r.dump (buf0),
- o0.dump (buf1), o1.dump (buf2));
-}
-
-void
-wide_int_ro::debug_wwasa (const char* fmt, const wide_int_ro &r, const wide_int_ro &o0,
- const HOST_WIDE_INT *o1, unsigned int l1, unsigned int p1,
- const char* s,
- const HOST_WIDE_INT *o2, unsigned int l2, unsigned int p2)
-{
- char buf0[MAX_SIZE];
- char buf1[MAX_SIZE];
- char buf2[MAX_SIZE];
- char buf3[MAX_SIZE];
- if (wide_int_dump_file)
- fprintf (wide_int_dump_file, fmt, r.dump (buf0),
- o0.dump (buf1), dumpa (o1, l1, p1, buf2), s, dumpa (o2, l2, p2, buf3));
-}
-
-void
-wide_int_ro::debug_wwww (const char* fmt, const wide_int_ro &r,
- const wide_int_ro &o0, const wide_int_ro &o1,
- const wide_int_ro &o2)
-{
- char buf0[MAX_SIZE];
- char buf1[MAX_SIZE];
- char buf2[MAX_SIZE];
- char buf3[MAX_SIZE];
- if (wide_int_dump_file)
- fprintf (wide_int_dump_file, fmt, r.dump (buf0),
- o0.dump (buf1), o1.dump (buf2), o2.dump (buf3));
-}
-
-#endif
-
diff --git a/gcc/wide-int.h b/gcc/wide-int.h
index 83c2170c18e..90472212b8c 100644
--- a/gcc/wide-int.h
+++ b/gcc/wide-int.h
@@ -211,20 +211,11 @@ along with GCC; see the file COPYING3. If not see
all of the values may not be the same precision. */
-#ifndef GENERATOR_FILE
#include <utility>
-#include "tree.h"
#include "system.h"
#include "hwint.h"
-#include "options.h"
-#include "tm.h"
-#include "insn-modes.h"
-#include "machmode.h"
-#include "double-int.h"
-#include <gmp.h>
-#include "dumpfile.h"
-#include "real.h"
#include "signop.h"
+#include "insn-modes.h"
#if 0
#define DEBUG_WIDE_INT
@@ -259,2176 +250,1954 @@ along with GCC; see the file COPYING3. If not see
#define ADDR_MAX_PRECISION \
((ADDR_MAX_BITSIZE + 4 + HOST_BITS_PER_WIDE_INT - 1) & ~(HOST_BITS_PER_WIDE_INT - 1))
-/* This is used to bundle an rtx and a mode together so that the pair
- can be used as the second operand of a wide int expression. If we
- ever put modes into rtx integer constants, this should go away and
- then just pass an rtx in. */
-typedef std::pair <rtx, enum machine_mode> rtx_mode_t;
-
-template <typename T>
-inline bool
-signedp (T)
+namespace wi
{
- return ~(T) 0 < (T) 0;
-}
-
-template <>
-inline bool
-signedp <unsigned int> (unsigned int)
-{
- return false;
-}
-
-template <>
-inline bool
-signedp <unsigned long> (unsigned long)
-{
- return false;
-}
-
-class wide_int;
-
-class GTY(()) wide_int_ro
-{
- template <int bitsize>
- friend class fixed_wide_int;
- friend class wide_int;
- /* Internal representation. */
-
-protected:
- HOST_WIDE_INT val[WIDE_INT_MAX_ELTS];
- unsigned short len;
- unsigned int precision;
-
- const HOST_WIDE_INT *get_val () const;
- wide_int_ro &operator = (const wide_int_ro &);
-
-public:
- wide_int_ro ();
- wide_int_ro (const_tree);
- wide_int_ro (HOST_WIDE_INT);
- wide_int_ro (int);
- wide_int_ro (unsigned HOST_WIDE_INT);
- wide_int_ro (unsigned int);
- wide_int_ro (const rtx_mode_t &);
-
- /* Conversions. */
- static wide_int_ro from_shwi (HOST_WIDE_INT, unsigned int = 0);
- static wide_int_ro from_uhwi (unsigned HOST_WIDE_INT, unsigned int = 0);
- static wide_int_ro from_hwi (HOST_WIDE_INT, const_tree);
- static wide_int_ro from_shwi (HOST_WIDE_INT, enum machine_mode);
- static wide_int_ro from_uhwi (unsigned HOST_WIDE_INT, enum machine_mode);
- static wide_int_ro from_array (const HOST_WIDE_INT *, unsigned int,
- unsigned int, bool = true);
- static wide_int_ro from_double_int (double_int, unsigned int);
- static wide_int_ro from_buffer (const unsigned char *, int);
- void to_mpz (mpz_t, signop) const;
- static wide_int_ro from_mpz (const_tree, mpz_t, bool);
- HOST_WIDE_INT to_shwi (unsigned int = 0) const;
-
- unsigned HOST_WIDE_INT to_uhwi (unsigned int = 0) const;
- HOST_WIDE_INT to_short_addr () const;
-
- static wide_int_ro max_value (unsigned int, signop, unsigned int = 0);
- static wide_int_ro max_value (const_tree);
- static wide_int_ro max_value (enum machine_mode, signop);
- static wide_int_ro min_value (unsigned int, signop, unsigned int = 0);
- static wide_int_ro min_value (const_tree);
- static wide_int_ro min_value (enum machine_mode, signop);
-
- /* Small constants. These are generally only needed in the places
- where the precision must be provided. For instance in binary
- operations where the other operand has a precision, or for use
- with max_wide_int or addr_wide_int, these are never needed. */
- static wide_int_ro minus_one (unsigned int);
- static wide_int_ro zero (unsigned int);
- static wide_int_ro one (unsigned int);
- static wide_int_ro two (unsigned int);
-
- /* Public accessors for the interior of a wide int. */
- unsigned short get_len () const;
- unsigned int get_precision () const;
- HOST_WIDE_INT elt (unsigned int) const;
-
- /* Comparative functions. */
- bool minus_one_p () const;
- bool zero_p () const;
- bool one_p () const;
- bool neg_p (signop sgn = SIGNED) const;
- bool multiple_of_p (const wide_int_ro &, signop, wide_int_ro *) const;
-
- /* Comparisons, note that only equality is an operator. The other
- comparisons cannot be operators since they are inherently signed or
- unsigned and C++ has no such operators. */
- template <typename T>
- bool operator == (const T &) const;
+ /* Classifies an integer based on its precision. */
+ enum precision_type {
+ /* The integer has both a precision and defined signedness. This allows
+ the integer to be converted to any width, since we know whether to fill
+ any extra bits with zeros or signs. */
+ FLEXIBLE_PRECISION,
- template <typename T1, typename T2>
- static bool eq_p (const T1 &, const T2 &);
+ /* The integer has a variable precision but no defined signedness. */
+ VAR_PRECISION,
- template <typename T>
- bool operator != (const T &) const;
+ /* The integer has a constant precision (known at GCC compile time)
+ but no defined signedness. */
+ CONST_PRECISION
+ };
- template <typename T>
- bool lt_p (const T &, signop) const;
+ /* This class, which has no default implementation, is expected to
+ provide the following members:
- template <typename T1, typename T2>
- static bool lt_p (const T1 &, const T2 &, signop);
+ static const enum precision_type precision_type;
+ Classifies the type of T.
- template <typename T>
- bool lts_p (const T &) const;
+ static const unsigned int precision;
+ Only defined if precision_type == CONST_PRECISION. Specifies the
+ precision of all integers of type T.
- template <typename T1, typename T2>
- static bool lts_p (const T1 &, const T2 &);
+ static const bool host_dependent_precision;
+ True if the precision of T depends (or can depend) on the host.
- template <typename T>
- bool ltu_p (const T &) const;
+ static unsigned int get_precision (const T &x)
+ Return the number of bits in X.
- template <typename T1, typename T2>
- static bool ltu_p (const T1 &, const T2 &);
+     static wi::storage_ref decompose (HOST_WIDE_INT *scratch,
+ unsigned int precision, const T &x)
+ Decompose X as a PRECISION-bit integer, returning the associated
+ wi::storage_ref. SCRATCH is available as scratch space if needed.
+ The routine should assert that PRECISION is acceptable. */
+ template <typename T> struct int_traits;
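
For concreteness, a specialization for a hypothetical fixed-width 32-bit wrapper type might look like the sketch below (sketch_int32 and its traits are invented for illustration; the real specializations are provided elsewhere by this patch, and storage_ref is defined later in this header):

/* A hypothetical always-32-bit wrapper type.  */
struct sketch_int32 { int v; };

namespace wi
{
  template <>
  struct int_traits <sketch_int32>
  {
    static const enum precision_type precision_type = FLEXIBLE_PRECISION;
    static const bool host_dependent_precision = false;
    static unsigned int get_precision (const sketch_int32 &) { return 32; }
    static storage_ref decompose (HOST_WIDE_INT *scratch,
				  unsigned int precision,
				  const sketch_int32 &x)
    {
      gcc_checking_assert (precision == 32);
      scratch[0] = x.v;		/* Sign-extends into the HWI block.  */
      return storage_ref (scratch, 1, precision);
    }
  };
}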
- template <typename T>
- bool le_p (const T &, signop) const;
-
- template <typename T1, typename T2>
- static bool le_p (const T1 &, const T2 &, signop);
+ /* This class provides a single type, result_type, which specifies the
+ type of integer produced by a binary operation whose inputs have
+ types T1 and T2. The definition should be symmetric. */
+ template <typename T1, typename T2,
+ enum precision_type P1 = int_traits <T1>::precision_type,
+ enum precision_type P2 = int_traits <T2>::precision_type>
+ struct binary_traits;
+ /* The result of a unary operation on T is the same as the result of
+ a binary operation on two values of type T. */
template <typename T>
- bool les_p (const T &) const;
+ struct unary_traits : public binary_traits <T, T> {};
+}
- template <typename T1, typename T2>
- static bool les_p (const T1 &, const T2 &);
+/* The type of result produced by a binary operation on types T1 and T2.
+ Defined purely for brevity. */
+#define WI_BINARY_RESULT(T1, T2) \
+ typename wi::binary_traits <T1, T2>::result_type
- template <typename T>
- bool leu_p (const T &) const;
-
- template <typename T1, typename T2>
- static bool leu_p (const T1 &, const T2 &);
+/* The type of result produced by a unary operation on type T. */
+#define WI_UNARY_RESULT(T) \
+ typename wi::unary_traits <T>::result_type
- template <typename T>
- bool gt_p (const T &, signop) const;
+/* Define a variable RESULT to hold the result of a binary operation on
+   X and Y, which have types T1 and T2 respectively.  Define VAL to
+   point to the blocks of RESULT.  Once the user of the macro has
+   filled in VAL, it should call RESULT.set_len to set the number
+ of initialized blocks. */
+#define WI_BINARY_RESULT_VAR(RESULT, VAL, T1, X, T2, Y) \
+ WI_BINARY_RESULT (T1, T2) RESULT = \
+ wi::int_traits <WI_BINARY_RESULT (T1, T2)>::get_binary_result (X, Y); \
+ HOST_WIDE_INT *VAL = RESULT.write_val ()
- template <typename T1, typename T2>
- static bool gt_p (const T1 &, const T2 &, signop);
+/* Similar for the result of a unary operation on X, which has type T. */
+#define WI_UNARY_RESULT_VAR(RESULT, VAL, T, X) \
+ WI_UNARY_RESULT (T) RESULT = \
+ wi::int_traits <WI_UNARY_RESULT (T)>::get_binary_result (X, X); \
+ HOST_WIDE_INT *VAL = RESULT.write_val ()
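
A sketch of the intended pattern (bit_and_sketch is hypothetical; the real operations declared below follow this shape and would also re-canonicalize the result):

template <typename T1, typename T2>
WI_BINARY_RESULT (T1, T2)
bit_and_sketch (const T1 &x, const T2 &y)
{
  WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  wide_int_ref xi (x);
  wide_int_ref yi (y);
  unsigned int len = xi.len > yi.len ? xi.len : yi.len;
  for (unsigned int i = 0; i < len; ++i)
    val[i] = xi.elt (i) & yi.elt (i);	/* elt () supplies implicit blocks.  */
  result.set_len (len);
  return result;
}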
- template <typename T>
- bool gts_p (const T &) const;
+template <typename T> struct generic_wide_int;
- template <typename T1, typename T2>
- static bool gts_p (const T1 &, const T2 &);
+struct wide_int_storage;
+typedef generic_wide_int <wide_int_storage> wide_int;
- template <typename T>
- bool gtu_p (const T &) const;
-
- template <typename T1, typename T2>
- static bool gtu_p (const T1 &, const T2 &);
-
- template <typename T>
- bool ge_p (const T &, signop) const;
-
- template <typename T1, typename T2>
- static bool ge_p (const T1 &, const T2 &, signop);
+struct wide_int_ref_storage;
+typedef generic_wide_int <wide_int_ref_storage> wide_int_ref;
+/* Public functions for querying and operating on integers. */
+namespace wi
+{
template <typename T>
- bool ges_p (const T &) const;
+ unsigned int get_precision (const T &);
template <typename T1, typename T2>
- static bool ges_p (const T1 &, const T2 &);
+ unsigned int get_binary_precision (const T1 &, const T2 &);
+ /* FIXME: should disappear. */
template <typename T>
- bool geu_p (const T &) const;
+ void clear_undef (T &, signop);
- template <typename T1, typename T2>
- static bool geu_p (const T1 &, const T2 &);
+ bool fits_shwi_p (const wide_int_ref &);
+ bool fits_uhwi_p (const wide_int_ref &);
+ bool neg_p (const wide_int_ref &, signop = SIGNED);
+ bool only_sign_bit_p (const wide_int_ref &, unsigned int);
+ bool only_sign_bit_p (const wide_int_ref &);
+ HOST_WIDE_INT sign_mask (const wide_int_ref &);
- template <typename T>
- int cmp (const T &, signop) const;
template <typename T1, typename T2>
- static int cmp (const T1 &, const T2 &, signop);
+ bool eq_p (const T1 &, const T2 &);
- template <typename T>
- int cmps (const T &) const;
template <typename T1, typename T2>
- static int cmps (const T1 &, const T2 &);
+ bool ne_p (const T1 &, const T2 &);
+
+ bool lt_p (const wide_int_ref &, const wide_int_ref &, signop);
+ bool lts_p (const wide_int_ref &, const wide_int_ref &);
+ bool ltu_p (const wide_int_ref &, const wide_int_ref &);
+ bool le_p (const wide_int_ref &, const wide_int_ref &, signop);
+ bool les_p (const wide_int_ref &, const wide_int_ref &);
+ bool leu_p (const wide_int_ref &, const wide_int_ref &);
+ bool gt_p (const wide_int_ref &, const wide_int_ref &, signop);
+ bool gts_p (const wide_int_ref &, const wide_int_ref &);
+ bool gtu_p (const wide_int_ref &, const wide_int_ref &);
+ bool ge_p (const wide_int_ref &, const wide_int_ref &, signop);
+ bool ges_p (const wide_int_ref &, const wide_int_ref &);
+ bool geu_p (const wide_int_ref &, const wide_int_ref &);
+ int cmp (const wide_int_ref &, const wide_int_ref &, signop);
+ int cmps (const wide_int_ref &, const wide_int_ref &);
+ int cmpu (const wide_int_ref &, const wide_int_ref &);
+
+#define UNARY_FUNCTION \
+ template <typename T> WI_UNARY_RESULT (T)
+#define BINARY_FUNCTION \
+ template <typename T1, typename T2> WI_BINARY_RESULT (T1, T2)
+#define SHIFT_FUNCTION \
+ template <typename T> WI_UNARY_RESULT (T)
+
+ UNARY_FUNCTION bit_not (const T &);
+ UNARY_FUNCTION neg (const T &);
+ UNARY_FUNCTION neg (const T &, bool *);
+ UNARY_FUNCTION abs (const T &);
+ UNARY_FUNCTION ext (const T &, unsigned int, signop);
+ UNARY_FUNCTION sext (const T &, unsigned int);
+ UNARY_FUNCTION zext (const T &, unsigned int);
+ UNARY_FUNCTION set_bit (const T &, unsigned int);
+
+ BINARY_FUNCTION min (const T1 &, const T2 &, signop);
+ BINARY_FUNCTION smin (const T1 &, const T2 &);
+ BINARY_FUNCTION umin (const T1 &, const T2 &);
+ BINARY_FUNCTION max (const T1 &, const T2 &, signop);
+ BINARY_FUNCTION smax (const T1 &, const T2 &);
+ BINARY_FUNCTION umax (const T1 &, const T2 &);
+
+ BINARY_FUNCTION bit_and (const T1 &, const T2 &);
+ BINARY_FUNCTION bit_and_not (const T1 &, const T2 &);
+ BINARY_FUNCTION bit_or (const T1 &, const T2 &);
+ BINARY_FUNCTION bit_or_not (const T1 &, const T2 &);
+ BINARY_FUNCTION bit_xor (const T1 &, const T2 &);
+ BINARY_FUNCTION add (const T1 &, const T2 &);
+ BINARY_FUNCTION add (const T1 &, const T2 &, signop, bool *);
+ BINARY_FUNCTION sub (const T1 &, const T2 &);
+ BINARY_FUNCTION sub (const T1 &, const T2 &, signop, bool *);
+ BINARY_FUNCTION mul (const T1 &, const T2 &);
+ BINARY_FUNCTION mul (const T1 &, const T2 &, signop, bool *);
+ BINARY_FUNCTION smul (const T1 &, const T2 &, bool *);
+ BINARY_FUNCTION umul (const T1 &, const T2 &, bool *);
+ BINARY_FUNCTION mul_high (const T1 &, const T2 &, signop);
+ BINARY_FUNCTION div_trunc (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION sdiv_trunc (const T1 &, const T2 &);
+ BINARY_FUNCTION udiv_trunc (const T1 &, const T2 &);
+ BINARY_FUNCTION div_floor (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION udiv_floor (const T1 &, const T2 &);
+ BINARY_FUNCTION sdiv_floor (const T1 &, const T2 &);
+ BINARY_FUNCTION div_ceil (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION div_round (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION divmod_trunc (const T1 &, const T2 &, signop,
+ WI_BINARY_RESULT (T1, T2) *);
+ BINARY_FUNCTION mod_trunc (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION smod_trunc (const T1 &, const T2 &);
+ BINARY_FUNCTION umod_trunc (const T1 &, const T2 &);
+ BINARY_FUNCTION mod_floor (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION umod_floor (const T1 &, const T2 &);
+ BINARY_FUNCTION mod_ceil (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION mod_round (const T1 &, const T2 &, signop, bool * = 0);
- template <typename T>
- int cmpu (const T &) const;
template <typename T1, typename T2>
- static int cmpu (const T1 &, const T2 &);
-
- bool only_sign_bit_p (unsigned int) const;
- bool only_sign_bit_p () const;
- bool fits_shwi_p () const;
- bool fits_uhwi_p () const;
- bool fits_to_tree_p (const_tree) const;
-
- /* Min and max. */
-
- template <typename T>
- wide_int_ro min (const T &, signop) const;
- wide_int_ro min (const wide_int_ro &, signop) const;
-
- template <typename T>
- wide_int_ro max (const T &, signop) const;
- wide_int_ro max (const wide_int_ro &, signop) const;
-
- template <typename T>
- wide_int_ro smin (const T &) const;
- wide_int_ro smin (const wide_int_ro &) const;
-
- template <typename T>
- wide_int_ro smax (const T &) const;
- wide_int_ro smax (const wide_int_ro &) const;
-
- template <typename T>
- wide_int_ro umin (const T &) const;
- wide_int_ro umin (const wide_int_ro &) const;
-
- template <typename T>
- wide_int_ro umax (const T &) const;
- wide_int_ro umax (const wide_int_ro &) const;
-
- /* Extensions. These do not change the precision. */
+ bool multiple_of_p (const T1 &, const T2 &, signop,
+ WI_BINARY_RESULT (T1, T2) *);
- wide_int_ro ext (unsigned int, signop) const;
- wide_int_ro sext (unsigned int) const;
- wide_int_ro zext (unsigned int) const;
+ unsigned int trunc_shift (const wide_int_ref &, unsigned int, unsigned int);
- /* Size changing. These change the underlying precision and are not
- available for max_wide_int or addr_wide_int. */
+ SHIFT_FUNCTION lshift (const T &, const wide_int_ref &, unsigned int = 0);
+ SHIFT_FUNCTION lrshift (const T &, const wide_int_ref &, unsigned int = 0);
+ SHIFT_FUNCTION arshift (const T &, const wide_int_ref &, unsigned int = 0);
+ SHIFT_FUNCTION rshift (const T &, const wide_int_ref &, signop sgn,
+ unsigned int = 0);
+ SHIFT_FUNCTION lrotate (const T &, const wide_int_ref &, unsigned int = 0);
+ SHIFT_FUNCTION rrotate (const T &, const wide_int_ref &, unsigned int = 0);
- wide_int_ro force_to_size (unsigned int, signop) const;
- wide_int_ro sforce_to_size (unsigned int) const;
- wide_int_ro zforce_to_size (unsigned int) const;
+#undef SHIFT_FUNCTION
+#undef BINARY_FUNCTION
+#undef UNARY_FUNCTION
- /* Masking, and Insertion. */
-
- wide_int_ro set_bit (unsigned int) const;
- static wide_int_ro set_bit_in_zero (unsigned int, unsigned int);
- wide_int_ro insert (const wide_int_ro &, unsigned int, unsigned int) const;
-
- wide_int_ro bswap () const;
-
- static wide_int_ro mask (unsigned int, bool, unsigned int);
- static wide_int_ro shifted_mask (unsigned int, unsigned int, bool,
- unsigned int);
-
- HOST_WIDE_INT sign_mask () const;
-
- void clear_undef (signop);
-
- /* Logicals. */
-
- template <typename T>
- wide_int_ro operator & (const T &) const;
-
- template <typename T>
- wide_int_ro and_not (const T &) const;
-
- wide_int_ro operator ~ () const;
+ int clz (const wide_int_ref &);
+ int clrsb (const wide_int_ref &);
+ int ctz (const wide_int_ref &);
+ int exact_log2 (const wide_int_ref &);
+ int floor_log2 (const wide_int_ref &);
+ int ffs (const wide_int_ref &);
+ int popcount (const wide_int_ref &);
+ int parity (const wide_int_ref &);
template <typename T>
- wide_int_ro operator | (const T &) const;
-
- template <typename T>
- wide_int_ro or_not (const T &) const;
-
- template <typename T>
- wide_int_ro operator ^ (const T &) const;
-
- /* Arithmetic operation functions, alpha sorted (except divmod).. */
-
- wide_int_ro abs () const;
-
- template <typename T>
- wide_int_ro operator + (const T &) const;
-
- template <typename T>
- wide_int_ro add (const T &, signop, bool *) const;
-
- wide_int_ro clz () const;
- wide_int_ro clrsb () const;
- wide_int_ro ctz () const;
- wide_int_ro exact_log2 () const;
- wide_int_ro floor_log2 () const;
- wide_int_ro ffs () const;
-
- template <typename T>
- wide_int_ro operator * (const T &) const;
-
- template <typename T>
- wide_int_ro mul (const T &, signop, bool *) const;
-
- template <typename T>
- wide_int_ro smul (const T &, bool *) const;
+ unsigned HOST_WIDE_INT extract_uhwi (const T &, unsigned int, unsigned int);
+}
- template <typename T>
- wide_int_ro umul (const T &, bool *) const;
+namespace wi
+{
+ /* Contains the components of a decomposed integer for easy, direct
+ access. */
+ struct storage_ref
+ {
+ storage_ref (const HOST_WIDE_INT *, unsigned int, unsigned int);
- template <typename T>
- wide_int_ro mul_full (const T &, signop) const;
+ const HOST_WIDE_INT *val;
+ unsigned int len;
+ unsigned int precision;
- template <typename T>
- wide_int_ro smul_full (const T &) const;
+ /* Provide enough trappings for this class to act as storage for
+ generic_wide_int. */
+ unsigned int get_len () const;
+ unsigned int get_precision () const;
+ const HOST_WIDE_INT *get_val () const;
+ };
+}
- template <typename T>
- wide_int_ro umul_full (const T &) const;
+inline wi::storage_ref::storage_ref (const HOST_WIDE_INT *val_in,
+ unsigned int len_in,
+ unsigned int precision_in)
+ : val (val_in), len (len_in), precision (precision_in)
+{
+}
- template <typename T>
- wide_int_ro mul_high (const T &, signop) const;
+inline unsigned int
+wi::storage_ref::get_len () const
+{
+ return len;
+}
- wide_int_ro operator - () const;
- wide_int_ro neg (bool *) const;
- wide_int_ro parity () const;
- wide_int_ro popcount () const;
+inline unsigned int
+wi::storage_ref::get_precision () const
+{
+ return precision;
+}
- template <typename T>
- wide_int_ro operator - (const T &) const;
+inline const HOST_WIDE_INT *
+wi::storage_ref::get_val () const
+{
+ return val;
+}
- template <typename T>
- wide_int_ro sub (const T &, signop, bool *) const;
+namespace wi
+{
+ template <>
+ struct int_traits <wi::storage_ref>
+ {
+ static const enum precision_type precision_type = VAR_PRECISION;
+ /* wi::storage_ref can be a reference to a primitive type,
+ so this is the conservatively-correct setting. */
+ static const bool host_dependent_precision = true;
+ };
+}
- /* Division and modulus. These are the ones that are actually used in
- the compiler. More can be added where they are needed. */
+/* This class defines an integer type using the storage provided by the
+ template argument. The storage class must provide the following
+ functions:
- template <typename T>
- wide_int_ro div_trunc (const T &, signop, bool * = 0) const;
+ unsigned int get_precision () const
+ Return the number of bits in the integer.
- template <typename T>
- wide_int_ro sdiv_trunc (const T &) const;
+ HOST_WIDE_INT *get_val () const
+ Return a pointer to the array of blocks that encodes the integer.
- template <typename T>
- wide_int_ro udiv_trunc (const T &) const;
+ unsigned int get_len () const
+ Return the number of blocks in get_val (). If this is smaller
+ than the number of blocks implied by get_precision (), the
+ remaining blocks are sign extensions of block get_len () - 1.
- template <typename T>
- wide_int_ro div_floor (const T &, signop, bool * = 0) const;
+ Although not required by generic_wide_int itself, writable storage
+ classes can also provide the following functions:
- template <typename T>
- wide_int_ro udiv_floor (const T &) const;
+ HOST_WIDE_INT *write_val ()
+ Get a modifiable version of get_val ()
- template <typename T>
- wide_int_ro sdiv_floor (const T &) const;
+ unsigned int set_len (unsigned int len)
+ Set the value returned by get_len () to LEN. */
+template <typename storage>
+class GTY(()) generic_wide_int : public storage
+{
+public:
+ generic_wide_int ();
template <typename T>
- wide_int_ro div_ceil (const T &, signop, bool * = 0) const;
+ generic_wide_int (const T &);
template <typename T>
- wide_int_ro div_round (const T &, signop, bool * = 0) const;
+ generic_wide_int (const T &, unsigned int);
- template <typename T>
- wide_int_ro divmod_trunc (const T &, wide_int_ro *, signop) const;
+ /* Conversions. */
+ HOST_WIDE_INT to_shwi (unsigned int = 0) const;
+ unsigned HOST_WIDE_INT to_uhwi (unsigned int = 0) const;
+ HOST_WIDE_INT to_short_addr () const;
- template <typename T>
- wide_int_ro sdivmod_trunc (const T &, wide_int_ro *) const;
+ /* Public accessors for the interior of a wide int. */
+ HOST_WIDE_INT sign_mask () const;
+ HOST_WIDE_INT elt (unsigned int) const;
+ unsigned HOST_WIDE_INT ulow () const;
+ unsigned HOST_WIDE_INT uhigh () const;
+
+#define BINARY_PREDICATE(OP, F) \
+ template <typename T> \
+ bool OP (const T &c) const { return wi::F (*this, c); }
+
+#define UNARY_OPERATOR(OP, F) \
+ generic_wide_int OP () const { return wi::F (*this); }
+
+#define BINARY_OPERATOR(OP, F) \
+ template <typename T> \
+ generic_wide_int OP (const T &c) const { return wi::F (*this, c); }
+
+#define ASSIGNMENT_OPERATOR(OP, F) \
+ template <typename T> \
+ generic_wide_int &OP (const T &c) { return (*this = wi::F (*this, c)); }
+
+#define INCDEC_OPERATOR(OP, DELTA) \
+ generic_wide_int &OP () { *this += DELTA; return *this; }
+
+ UNARY_OPERATOR (operator ~, bit_not) \
+ UNARY_OPERATOR (operator -, neg) \
+ BINARY_PREDICATE (operator ==, eq_p) \
+ BINARY_PREDICATE (operator !=, ne_p) \
+ BINARY_OPERATOR (operator &, bit_and) \
+ BINARY_OPERATOR (and_not, bit_and_not) \
+ BINARY_OPERATOR (operator |, bit_or) \
+ BINARY_OPERATOR (or_not, bit_or_not) \
+ BINARY_OPERATOR (operator ^, bit_xor) \
+ BINARY_OPERATOR (operator +, add) \
+ BINARY_OPERATOR (operator -, sub) \
+ BINARY_OPERATOR (operator *, mul) \
+ ASSIGNMENT_OPERATOR (operator &=, bit_and) \
+ ASSIGNMENT_OPERATOR (operator |=, bit_or) \
+ ASSIGNMENT_OPERATOR (operator ^=, bit_xor) \
+ ASSIGNMENT_OPERATOR (operator +=, add) \
+ ASSIGNMENT_OPERATOR (operator -=, sub) \
+ ASSIGNMENT_OPERATOR (operator *=, mul) \
+ INCDEC_OPERATOR (operator ++, 1) \
+ INCDEC_OPERATOR (operator --, -1)
+
+#undef BINARY_PREDICATE
+#undef UNARY_OPERATOR
+#undef BINARY_OPERATOR
+#undef ASSIGNMENT_OPERATOR
+#undef INCDEC_OPERATOR
- template <typename T>
- wide_int_ro udivmod_trunc (const T &, wide_int_ro *) const;
+ char *dump (char *) const;
+};
- template <typename T>
- wide_int_ro divmod_floor (const T &, wide_int_ro *, signop) const;
+template <typename storage>
+inline generic_wide_int <storage>::generic_wide_int () {}
- template <typename T>
- wide_int_ro sdivmod_floor (const T &, wide_int_ro *) const;
+template <typename storage>
+template <typename T>
+inline generic_wide_int <storage>::generic_wide_int (const T &x)
+ : storage (x)
+{
+}
- template <typename T>
- wide_int_ro mod_trunc (const T &, signop, bool * = 0) const;
+template <typename storage>
+template <typename T>
+inline generic_wide_int <storage>::generic_wide_int (const T &x,
+ unsigned int precision)
+ : storage (x, precision)
+{
+}
- template <typename T>
- wide_int_ro smod_trunc (const T &) const;
+/* Return THIS as a signed HOST_WIDE_INT, sign-extending from PRECISION.
+ If THIS does not fit in PRECISION, the information is lost. */
+template <typename storage>
+inline HOST_WIDE_INT
+generic_wide_int <storage>::to_shwi (unsigned int precision) const
+{
+ if (precision == 0)
+ precision = this->get_precision ();
+ if (precision < HOST_BITS_PER_WIDE_INT)
+ return sext_hwi (this->get_val ()[0], precision);
+ else
+ return this->get_val ()[0];
+}
- template <typename T>
- wide_int_ro umod_trunc (const T &) const;
+/* Return THIS as an unsigned HOST_WIDE_INT, zero-extending from
+ PRECISION. If THIS does not fit in PRECISION, the information
+ is lost. */
+template <typename storage>
+inline unsigned HOST_WIDE_INT
+generic_wide_int <storage>::to_uhwi (unsigned int precision) const
+{
+ if (precision == 0)
+ precision = this->get_precision ();
+ if (precision < HOST_BITS_PER_WIDE_INT)
+ return zext_hwi (this->get_val ()[0], precision);
+ else
+ return this->get_val ()[0];
+}
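
For example, with a wide_int encoding 0xff at precision 8, the two conversions differ only in the extension (conversion_example is illustrative, not a GCC function):

void
conversion_example (const wide_int &w)
{
  /* Suppose W encodes 0xff with precision 8.  */
  HOST_WIDE_INT s = w.to_shwi ();		/* sext_hwi: -1.  */
  unsigned HOST_WIDE_INT u = w.to_uhwi ();	/* zext_hwi: 255.  */
  (void) s; (void) u;
}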
- template <typename T>
- wide_int_ro mod_floor (const T &, signop, bool * = 0) const;
+/* TODO: The compiler is half converted from using HOST_WIDE_INT to
+ represent addresses to using addr_wide_int to represent addresses.
+ We use to_short_addr at the interface from new code to old,
+ unconverted code. */
+template <typename storage>
+inline HOST_WIDE_INT
+generic_wide_int <storage>::to_short_addr () const
+{
+ return this->get_val ()[0];
+}
- template <typename T>
- wide_int_ro umod_floor (const T &) const;
+/* Return the implicit value of blocks above get_len (). */
+template <typename storage>
+inline HOST_WIDE_INT
+generic_wide_int <storage>::sign_mask () const
+{
+ return this->get_val ()[this->get_len () - 1] < 0 ? -1 : 0;
+}
- template <typename T>
- wide_int_ro mod_ceil (const T &, signop, bool * = 0) const;
+/* Return the value of the least-significant explicitly-encoded block. */
+template <typename storage>
+inline unsigned HOST_WIDE_INT
+generic_wide_int <storage>::ulow () const
+{
+ return this->get_val ()[0];
+}
- template <typename T>
- wide_int_ro mod_round (const T &, signop, bool * = 0) const;
+/* Return the value of the most-significant explicitly-encoded block. */
+template <typename storage>
+inline unsigned HOST_WIDE_INT
+generic_wide_int <storage>::uhigh () const
+{
+ return this->get_val ()[this->get_len () - 1];
+}
- HOST_WIDE_INT extract_to_hwi (int, int) const;
+/* Return block I, which might be implicitly or explicitly encoded.  */
+template <typename storage>
+inline HOST_WIDE_INT
+generic_wide_int <storage>::elt (unsigned int i) const
+{
+ if (i >= this->get_len ())
+ return sign_mask ();
+ else
+ return this->get_val ()[i];
+}
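
The compressed encoding in action (encoding_example is illustrative; it assumes a 64-bit HOST_WIDE_INT, that 128 bits fit in WIDE_INT_MAX_ELTS, and uses wide_int::from, defined later in this header): a 128-bit -1 is stored as a single all-ones block, and elt () reproduces the implicit upper block via sign_mask ().

void
encoding_example ()
{
  wide_int w = wide_int::from (-1, 128, SIGNED);
  gcc_assert (w.get_len () == 1);	/* One explicit all-ones block.  */
  gcc_assert (w.elt (0) == -1);
  gcc_assert (w.elt (1) == -1);		/* Implicit block via sign_mask ().  */
}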
- template <typename T>
- wide_int_ro lshift (const T &, unsigned int = 0) const;
+namespace wi
+{
+ template <>
+ template <typename storage>
+ struct int_traits < generic_wide_int <storage> >
+ : public wi::int_traits <storage>
+ {
+ static unsigned int get_precision (const generic_wide_int <storage> &);
+ static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
+ const generic_wide_int <storage> &);
+ };
+}
- template <typename T>
- wide_int_ro lshift_widen (const T &, unsigned int) const;
+template <typename storage>
+inline unsigned int
+wi::int_traits < generic_wide_int <storage> >::
+get_precision (const generic_wide_int <storage> &x)
+{
+ return x.get_precision ();
+}
- template <typename T>
- wide_int_ro lrotate (const T &, unsigned int = 0) const;
+template <typename storage>
+inline wi::storage_ref
+wi::int_traits < generic_wide_int <storage> >::
+decompose (HOST_WIDE_INT *, unsigned int precision,
+ const generic_wide_int <storage> &x)
+{
+ gcc_checking_assert (precision == x.get_precision ());
+ return wi::storage_ref (x.get_val (), x.get_len (), precision);
+}
- wide_int_ro lrotate (unsigned HOST_WIDE_INT, unsigned int = 0) const;
+/* Provide the storage for a wide_int_ref. This acts like a read-only
+ wide_int, with the optimization that VAL is normally a pointer to another
+ integer's storage, so that no array copy is needed. */
+struct wide_int_ref_storage : public wi::storage_ref
+{
+private:
+ /* Scratch space that can be used when decomposing the original integer.
+ It must live as long as this object. */
+ HOST_WIDE_INT scratch[WIDE_INT_MAX_ELTS];
+public:
template <typename T>
- wide_int_ro rshift (const T &, signop, unsigned int = 0) const;
+ wide_int_ref_storage (const T &);
template <typename T>
- wide_int_ro rshiftu (const T &, unsigned int = 0) const;
+ wide_int_ref_storage (const T &, unsigned int);
+};
- template <typename T>
- wide_int_ro rshifts (const T &, unsigned int = 0) const;
+/* Create a reference to integer X in its natural precision. Note that
+ the natural precision is host-dependent for primitive types. */
+template <typename T>
+inline wide_int_ref_storage::wide_int_ref_storage (const T &x)
+ : storage_ref (wi::int_traits <T>::decompose (scratch,
+ wi::get_precision (x), x))
+{
+}
- template <typename T>
- wide_int_ro rrotate (const T &, unsigned int = 0) const;
+/* Create a reference to integer X in precision PRECISION. */
+template <typename T>
+inline wide_int_ref_storage::wide_int_ref_storage (const T &x,
+ unsigned int precision)
+ : storage_ref (wi::int_traits <T>::decompose (scratch, precision, x))
+{
+}
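
The scratch buffer is what lets a primitive integer, which has no block array of its own, be viewed through the same interface (low_block_of is a hypothetical helper):

HOST_WIDE_INT
low_block_of (int a)
{
  wide_int_ref ref (a);		/* ref.val points into ref's own scratch[].  */
  return ref.elt (0);
}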
- wide_int_ro rrotate (unsigned HOST_WIDE_INT, unsigned int = 0) const;
+namespace wi
+{
+ template <>
+ struct int_traits <wide_int_ref_storage>
+ : public int_traits <wi::storage_ref>
+ {
+ };
+}
- char *dump (char *) const;
+namespace wi
+{
+ unsigned int force_to_size (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, unsigned int, unsigned int,
+ signop sgn);
+ unsigned int from_array (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, unsigned int, bool = true);
+}
+/* The storage used by wide_int. */
+class GTY(()) wide_int_storage
+{
private:
- /* Internal versions that do the work if the values do not fit in a HWI. */
-
- /* Comparisons */
- static bool eq_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
- const HOST_WIDE_INT *, unsigned int);
- static bool lts_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
- const HOST_WIDE_INT *, unsigned int, unsigned int);
- static int cmps_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
- const HOST_WIDE_INT *, unsigned int, unsigned int);
- static bool ltu_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
- const HOST_WIDE_INT *, unsigned int, unsigned int);
- static int cmpu_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
- const HOST_WIDE_INT *, unsigned int, unsigned int);
- static void check_precision (unsigned int *, unsigned int *, bool, bool);
-
-
- /* Logicals. */
- static wide_int_ro and_large (const HOST_WIDE_INT *, unsigned int,
- unsigned int, const HOST_WIDE_INT *,
- unsigned int);
- static wide_int_ro and_not_large (const HOST_WIDE_INT *, unsigned int,
- unsigned int, const HOST_WIDE_INT *,
- unsigned int);
- static wide_int_ro or_large (const HOST_WIDE_INT *, unsigned int,
- unsigned int, const HOST_WIDE_INT *,
- unsigned int);
- static wide_int_ro or_not_large (const HOST_WIDE_INT *, unsigned int,
- unsigned int, const HOST_WIDE_INT *,
- unsigned int);
- static wide_int_ro xor_large (const HOST_WIDE_INT *, unsigned int,
- unsigned int, const HOST_WIDE_INT *,
- unsigned int);
-
- /* Arithmetic */
- static wide_int_ro add_large (const HOST_WIDE_INT *, unsigned int,
- unsigned int, const HOST_WIDE_INT *,
- unsigned int, signop, bool * = 0);
- static wide_int_ro sub_large (const HOST_WIDE_INT *, unsigned int,
- unsigned int, const HOST_WIDE_INT *,
- unsigned int, signop, bool * = 0);
-
- wide_int_ro lshift_large (unsigned int, unsigned int) const;
- wide_int_ro rshiftu_large (unsigned int) const;
- wide_int_ro rshifts_large (unsigned int) const;
-
- static wide_int_ro mul_internal (bool, bool, const HOST_WIDE_INT *,
- unsigned int, unsigned int,
- const HOST_WIDE_INT *, unsigned int,
- signop, bool *, bool);
- static void divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *,
- unsigned HOST_HALF_WIDE_INT *,
- unsigned HOST_HALF_WIDE_INT *,
- unsigned HOST_HALF_WIDE_INT *, int, int);
- static wide_int_ro divmod_internal (bool, const HOST_WIDE_INT *,
- unsigned int, unsigned int,
- const HOST_WIDE_INT *,
- unsigned int, unsigned int,
- signop, wide_int_ro *, bool, bool *);
-
- /* Private utility routines. */
- int trunc_shift (const HOST_WIDE_INT *cnt, unsigned int bitsize) const;
- wide_int_ro decompress (unsigned int, unsigned int) const;
- void canonize ();
- static wide_int_ro from_rtx (const rtx_mode_t);
-
- template <typename T>
- static bool top_bit_set (T);
-
- template <typename T>
- static const HOST_WIDE_INT *to_shwi1 (HOST_WIDE_INT *, unsigned int *,
- unsigned int *, const T &);
-
- template <typename T>
- static const HOST_WIDE_INT *to_shwi2 (HOST_WIDE_INT *, unsigned int *,
- const T &);
-
-#ifdef DEBUG_WIDE_INT
- /* Debugging routines. */
- static void debug_wa (const char *, const wide_int_ro &,
- const HOST_WIDE_INT *, unsigned int, unsigned int);
- static void debug_waa (const char *, const wide_int_ro &,
- const HOST_WIDE_INT *, unsigned int,
- unsigned int, const HOST_WIDE_INT *,
- unsigned int, unsigned int);
- static void debug_waav (const char *, const wide_int_ro &,
- const HOST_WIDE_INT *, unsigned int,
- unsigned int, const HOST_WIDE_INT *,
- unsigned int, unsigned int, int);
- static void debug_vw (const char *, int, const wide_int_ro &);
- static void debug_vwh (const char *, int, const wide_int_ro &,
- HOST_WIDE_INT);
- static void debug_vaa (const char *, int, const HOST_WIDE_INT *,
- unsigned int, unsigned int, const HOST_WIDE_INT *,
- unsigned int, unsigned int);
- static void debug_vwa (const char *, int, const wide_int_ro &,
- const HOST_WIDE_INT *, unsigned int, unsigned int);
- static void debug_vww (const char *, int, const wide_int_ro &,
- const wide_int_ro &);
- static void debug_wh (const char *, const wide_int_ro &, HOST_WIDE_INT);
- static void debug_whh (const char *, const wide_int_ro &,
- HOST_WIDE_INT, HOST_WIDE_INT);
- static void debug_wv (const char *, const wide_int_ro &, int);
- static void debug_wvv (const char *, const wide_int_ro &, int, int);
- static void debug_wvvv (const char *, const wide_int_ro &, int, int, int);
- static void debug_wvwa (const char *, const wide_int_ro &, int,
- const wide_int_ro &, const HOST_WIDE_INT *,
- unsigned int, unsigned int);
- static void debug_wvasa (const char *, const wide_int_ro &, int,
- const HOST_WIDE_INT *, unsigned int, unsigned int,
- const char *, const HOST_WIDE_INT *,
- unsigned int, unsigned int);
- static void debug_wvww (const char *, const wide_int_ro &, int,
- const wide_int_ro &, const wide_int_ro &);
- static void debug_wwa (const char *, const wide_int_ro &,
- const wide_int_ro &, const HOST_WIDE_INT *,
- unsigned int, unsigned int);
- static void debug_wwv (const char *, const wide_int_ro &,
- const wide_int_ro &, int);
- static void debug_wwvs (const char *, const wide_int_ro &,
- const wide_int_ro &, int, const char *);
- static void debug_wwvvs (const char *, const wide_int_ro &,
- const wide_int_ro &, int, int, const char *);
- static void debug_wwwvv (const char *, const wide_int_ro &,
- const wide_int_ro &, const wide_int_ro &, int, int);
- static void debug_ww (const char *, const wide_int_ro &,
- const wide_int_ro &);
- static void debug_www (const char *, const wide_int_ro &,
- const wide_int_ro &, const wide_int_ro &);
- static void debug_wwasa (const char *, const wide_int_ro &,
- const wide_int_ro &, const HOST_WIDE_INT *,
- unsigned int, unsigned int, const char *,
- const HOST_WIDE_INT *, unsigned int, unsigned int);
- static void debug_wwwa (const char *, const wide_int_ro &,
- const wide_int_ro &, const wide_int_ro &,
- const HOST_WIDE_INT *, unsigned int, unsigned int);
- static void debug_wwww (const char *, const wide_int_ro &,
- const wide_int_ro &, const wide_int_ro &,
- const wide_int_ro &);
-#endif
-};
-
-class GTY(()) wide_int : public wide_int_ro {
- public:
- wide_int ();
- wide_int (const wide_int_ro &);
- wide_int (const_tree);
- wide_int (HOST_WIDE_INT);
- wide_int (int);
- wide_int (unsigned HOST_WIDE_INT);
- wide_int (unsigned int);
- wide_int (const rtx_mode_t &);
-
- wide_int &operator = (const wide_int_ro &);
- wide_int &operator = (const_tree);
- wide_int &operator = (HOST_WIDE_INT);
- wide_int &operator = (int);
- wide_int &operator = (unsigned HOST_WIDE_INT);
- wide_int &operator = (unsigned int);
- wide_int &operator = (const rtx_mode_t &);
-
- wide_int &operator ++ ();
- wide_int& operator -- ();
-
- template <typename T>
- wide_int &operator &= (const T &);
-
- template <typename T>
- wide_int &operator |= (const T &);
+ HOST_WIDE_INT val[WIDE_INT_MAX_ELTS];
+ unsigned int len;
+ unsigned int precision;
+public:
+ wide_int_storage ();
template <typename T>
- wide_int &operator ^= (const T &);
+ wide_int_storage (const T &);
- template <typename T>
- wide_int &operator += (const T &);
+ /* The standard generic_wide_int storage methods. */
+ unsigned int get_precision () const;
+ const HOST_WIDE_INT *get_val () const;
+ unsigned int get_len () const;
+ HOST_WIDE_INT *write_val ();
+ void set_len (unsigned int);
- template <typename T>
- wide_int &operator -= (const T &);
+ static wide_int from (const wide_int_ref &, unsigned int, signop);
+ static wide_int from_array (const HOST_WIDE_INT *, unsigned int,
+ unsigned int, bool = true);
+ static wide_int create (unsigned int);
- template <typename T>
- wide_int &operator *= (const T &);
+ /* FIXME: target-dependent, so should disappear. */
+ wide_int bswap () const;
};
-inline const HOST_WIDE_INT *
-wide_int_ro::get_val () const
-{
- return val;
-}
+inline wide_int_storage::wide_int_storage () {}
-inline wide_int_ro &
-wide_int_ro::operator = (const wide_int_ro &r)
+/* Initialize the storage from integer X, in its natural precision.
+ Note that we do not allow integers with host-dependent precision
+ to become wide_ints; wide_ints must always be logically independent
+ of the host. */
+template <typename T>
+inline wide_int_storage::wide_int_storage (const T &x)
{
- for (unsigned int i = 0; i < r.get_len (); ++i)
- val[i] = r.get_val () [i];
- len = r.get_len ();
- precision = r.get_precision ();
- return *this;
+ STATIC_ASSERT (!wi::int_traits<T>::host_dependent_precision);
+ wide_int_ref xi (x);
+ precision = xi.precision;
+ len = xi.len;
+ for (unsigned int i = 0; i < len; ++i)
+ val[i] = xi.val[i];
}
-inline wide_int_ro::wide_int_ro () : len (0) {}
-
-/* Convert an INTEGER_CST into a wide int. */
-inline wide_int_ro::wide_int_ro (const_tree tcst)
+inline unsigned int
+wide_int_storage::get_precision () const
{
- *this = from_array (&TREE_INT_CST_ELT (tcst, 0),
- TREE_INT_CST_NUNITS (tcst),
- TYPE_PRECISION (TREE_TYPE (tcst)), false);
+ return precision;
}
-inline wide_int_ro::wide_int_ro (HOST_WIDE_INT op0)
+inline const HOST_WIDE_INT *
+wide_int_storage::get_val () const
{
- precision = 0;
- val[0] = op0;
- len = 1;
+ return val;
}
-inline wide_int_ro::wide_int_ro (int op0)
+inline unsigned int
+wide_int_storage::get_len () const
{
- precision = 0;
- val[0] = op0;
- len = 1;
+ return len;
}
-inline wide_int_ro::wide_int_ro (unsigned HOST_WIDE_INT op0)
+inline HOST_WIDE_INT *
+wide_int_storage::write_val ()
{
- *this = from_uhwi (op0);
+ return val;
}
-inline wide_int_ro::wide_int_ro (unsigned int op0)
+inline void
+wide_int_storage::set_len (unsigned int l)
{
- *this = from_uhwi (op0);
+ len = l;
}
-inline wide_int_ro::wide_int_ro (const rtx_mode_t &op0)
+/* Treat X as having signedness SGN and convert it to a PRECISION-bit
+ number. */
+inline wide_int
+wide_int_storage::from (const wide_int_ref &x, unsigned int precision,
+ signop sgn)
{
- *this = from_rtx (op0);
+ wide_int result = wide_int::create (precision);
+ result.set_len (wi::force_to_size (result.write_val (), x.val, x.len,
+ x.precision, precision, sgn));
+ return result;
}
-/* Convert signed OP0 into a wide_int_ro with the precision taken from
- MODE. */
-inline wide_int_ro
-wide_int_ro::from_shwi (HOST_WIDE_INT op0, enum machine_mode mode)
+/* Create a wide_int from the explicit block encoding given by VAL and LEN.
+ PRECISION is the precision of the integer. NEED_CANON_P is true if the
+ encoding may have redundant trailing blocks. */
+inline wide_int
+wide_int_storage::from_array (const HOST_WIDE_INT *val, unsigned int len,
+ unsigned int precision, bool need_canon_p)
{
- unsigned int prec = GET_MODE_PRECISION (mode);
- return from_shwi (op0, prec);
+ wide_int result = wide_int::create (precision);
+ result.set_len (wi::from_array (result.write_val (), val, len, precision,
+ need_canon_p));
+ return result;
}
-/* Convert unsigned OP0 into a wide_int_ro with the precision taken from
- MODE. */
-inline wide_int_ro
-wide_int_ro::from_uhwi (unsigned HOST_WIDE_INT op0, enum machine_mode mode)
+/* Return an uninitialized wide_int with precision PRECISION. */
+inline wide_int
+wide_int_storage::create (unsigned int precision)
{
- unsigned int prec = GET_MODE_PRECISION (mode);
- return from_uhwi (op0, prec);
+ wide_int x;
+ x.precision = precision;
+ return x;
}
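
An illustrative sketch, not part of the patch: the variable names and values below are hypothetical, and wide_int is assumed to be the generic_wide_int typedef over wide_int_storage defined elsewhere in this header (wi::shwi is declared further down).

  wide_int a = wi::shwi (-1, 32);                      /* 32-bit -1 */
  wide_int w = wide_int::from (a, 128, SIGNED);        /* sign-extended to 128 bits */
  HOST_WIDE_INT blocks[2] = { -1, 0 };
  wide_int e = wide_int::from_array (blocks, 2, 128);  /* 2^64 - 1 on a 64-bit-HWI host */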
-/* Convert OP0 into a wide_int with parameters taken from TYPE. */
-inline wide_int_ro
-wide_int_ro::from_hwi (HOST_WIDE_INT op0, const_tree type)
+namespace wi
{
- unsigned int prec = TYPE_PRECISION (type);
+ template <>
+ struct int_traits <wide_int_storage>
+ {
+ static const enum precision_type precision_type = VAR_PRECISION;
+ /* Guaranteed by a static assert in the wide_int_storage constructor. */
+ static const bool host_dependent_precision = false;
+ template <typename T1, typename T2>
+ static wide_int get_binary_result (const T1 &, const T2 &);
+ };
+}
- if (TYPE_UNSIGNED (type))
- return wide_int_ro::from_uhwi (op0, prec);
+template <typename T1, typename T2>
+inline wide_int
+wi::int_traits <wide_int_storage>::get_binary_result (const T1 &x, const T2 &y)
+{
+ /* This shouldn't be used for two flexible-precision inputs. */
+ STATIC_ASSERT (wi::int_traits <T1>::precision_type != FLEXIBLE_PRECISION
+ || wi::int_traits <T2>::precision_type != FLEXIBLE_PRECISION);
+ if (wi::int_traits <T1>::precision_type == FLEXIBLE_PRECISION)
+ return wide_int::create (wi::get_precision (y));
else
- return wide_int_ro::from_shwi (op0, prec);
+ return wide_int::create (wi::get_precision (x));
}
-/* Return THIS as a signed HOST_WIDE_INT. If THIS does not fit in PREC,
- the information is lost. */
-inline HOST_WIDE_INT
-wide_int_ro::to_shwi (unsigned int prec) const
-{
- HOST_WIDE_INT result;
+/* An N-bit integer. Until we can use template aliases, use this instead. */
+#define FIXED_WIDE_INT(N) \
+ generic_wide_int < fixed_wide_int_storage <N> >
- if (prec == 0)
- prec = precision;
+/* The storage used by FIXED_WIDE_INT (N). */
+template <int N>
+class GTY(()) fixed_wide_int_storage
+{
+private:
+ HOST_WIDE_INT val[(N + HOST_BITS_PER_WIDE_INT + 1) / HOST_BITS_PER_WIDE_INT];
+ unsigned int len;
- if (prec < HOST_BITS_PER_WIDE_INT)
- result = sext_hwi (val[0], prec);
- else
- result = val[0];
+public:
+ fixed_wide_int_storage ();
+ template <typename T>
+ fixed_wide_int_storage (const T &);
- return result;
-}
+ /* The standard generic_wide_int storage methods. */
+ unsigned int get_precision () const;
+ const HOST_WIDE_INT *get_val () const;
+ unsigned int get_len () const;
+ HOST_WIDE_INT *write_val ();
+ void set_len (unsigned int);
-/* Return THIS as an unsigned HOST_WIDE_INT. If THIS does not fit in PREC,
- the information is lost. */
-inline unsigned HOST_WIDE_INT
-wide_int_ro::to_uhwi (unsigned int prec) const
-{
- HOST_WIDE_INT result;
+ static FIXED_WIDE_INT (N) from (const wide_int_ref &, signop);
+ static FIXED_WIDE_INT (N) from_array (const HOST_WIDE_INT *, unsigned int,
+ bool = true);
+};
- if (prec == 0)
- prec = precision;
+typedef FIXED_WIDE_INT (ADDR_MAX_PRECISION) addr_wide_int;
+typedef FIXED_WIDE_INT (MAX_BITSIZE_MODE_ANY_INT) max_wide_int;
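
An illustrative sketch, not part of the patch (values hypothetical): because a fixed-width type carries its precision, the from method here takes no precision argument.

  max_wide_int m = max_wide_int::from (-1, SIGNED);     /* -1 at the maximum precision */
  addr_wide_int t = addr_wide_int::from (m, UNSIGNED);  /* forced to ADDR_MAX_PRECISION */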
- if (prec < HOST_BITS_PER_WIDE_INT)
- result = zext_hwi (val[0], prec);
- else
- result = val[0];
+template <int N>
+inline fixed_wide_int_storage <N>::fixed_wide_int_storage () {}
- return result;
+/* Initialize the storage from integer X, in precision N. */
+template <int N>
+template <typename T>
+inline fixed_wide_int_storage <N>::fixed_wide_int_storage (const T &x)
+{
+ /* Check for type compatibility. We don't want to initialize a
+ fixed-width integer from something like a wide_int. */
+ WI_BINARY_RESULT (T, FIXED_WIDE_INT (N)) *assertion ATTRIBUTE_UNUSED;
+ wide_int_ref xi (x, N);
+ len = xi.len;
+ for (unsigned int i = 0; i < len; ++i)
+ val[i] = xi.val[i];
}
-/* TODO: The compiler is half converted from using HOST_WIDE_INT to
- represent addresses to using wide_int_ro to represent addresses.
- We use to_short_addr at the interface from new code to old,
- unconverted code. */
-inline HOST_WIDE_INT
-wide_int_ro::to_short_addr () const
+template <int N>
+inline unsigned int
+fixed_wide_int_storage <N>::get_precision () const
{
- return val[0];
+ return N;
}
-/* Produce the largest number that is represented in TYPE. The precision
- and sign are taken from TYPE. */
-inline wide_int_ro
-wide_int_ro::max_value (const_tree type)
+template <int N>
+inline const HOST_WIDE_INT *
+fixed_wide_int_storage <N>::get_val () const
{
- unsigned int prec = TYPE_PRECISION (type);
- return max_value (prec, TYPE_SIGN (type), prec);
+ return val;
}
-/* Produce the largest number that is represented in MODE. The precision
- is taken from MODE and the sign from SGN. */
-inline wide_int_ro
-wide_int_ro::max_value (enum machine_mode mode, signop sgn)
+template <int N>
+inline unsigned int
+fixed_wide_int_storage <N>::get_len () const
{
- unsigned int prec = GET_MODE_PRECISION (mode);
- return max_value (prec, sgn, prec);
+ return len;
}
-/* Produce the smallest number that is represented in TYPE. The precision
- and sign are taken from TYPE. */
-inline wide_int_ro
-wide_int_ro::min_value (const_tree type)
+template <int N>
+inline HOST_WIDE_INT *
+fixed_wide_int_storage <N>::write_val ()
{
- unsigned int prec = TYPE_PRECISION (type);
- return min_value (prec, TYPE_SIGN (type), prec);
+ return val;
}
-/* Produce the smallest number that is represented in MODE. The precision
- is taken from MODE and the sign from SGN. */
-inline wide_int_ro
-wide_int_ro::min_value (enum machine_mode mode, signop sgn)
+template <int N>
+inline void
+fixed_wide_int_storage <N>::set_len (unsigned int l)
{
- unsigned int prec = GET_MODE_PRECISION (mode);
- return min_value (prec, sgn, prec);
+ len = l;
}
-/* Return a wide int of -1 with precision PREC. */
-inline wide_int_ro
-wide_int_ro::minus_one (unsigned int prec)
+/* Treat X as having signedness SGN and convert it to an N-bit number. */
+template <int N>
+inline FIXED_WIDE_INT (N)
+fixed_wide_int_storage <N>::from (const wide_int_ref &x, signop sgn)
{
- return from_shwi (-1, prec);
+ FIXED_WIDE_INT (N) result;
+ result.set_len (wi::force_to_size (result.write_val (), x.val, x.len,
+ x.precision, N, sgn));
+ return result;
}
-/* Return a wide int of 0 with precision PREC. */
-inline wide_int_ro
-wide_int_ro::zero (unsigned int prec)
+/* Create a FIXED_WIDE_INT (N) from the explicit block encoding given by
+ VAL and LEN. NEED_CANON_P is true if the encoding may have redundant
+ trailing blocks. */
+template <int N>
+inline FIXED_WIDE_INT (N)
+fixed_wide_int_storage <N>::from_array (const HOST_WIDE_INT *val,
+ unsigned int len,
+ bool need_canon_p)
{
- return from_shwi (0, prec);
+ FIXED_WIDE_INT (N) result;
+ result.set_len (wi::from_array (result.write_val (), val, len,
+ N, need_canon_p));
+ return result;
}
-/* Return a wide int of 1 with precision PREC. */
-inline wide_int_ro
-wide_int_ro::one (unsigned int prec)
+namespace wi
{
- return from_shwi (1, prec);
+ template <>
+ template <int N>
+ struct int_traits < fixed_wide_int_storage <N> >
+ {
+ static const enum precision_type precision_type = CONST_PRECISION;
+ static const bool host_dependent_precision = false;
+ static const unsigned int precision = N;
+ template <typename T1, typename T2>
+ static FIXED_WIDE_INT (N) get_binary_result (const T1 &, const T2 &);
+ };
}
-/* Return a wide int of 2 with precision PREC. */
-inline wide_int_ro
-wide_int_ro::two (unsigned int prec)
+template <int N>
+template <typename T1, typename T2>
+inline FIXED_WIDE_INT (N)
+wi::int_traits < fixed_wide_int_storage <N> >::
+get_binary_result (const T1 &, const T2 &)
{
- return wide_int_ro::from_shwi (2, prec);
+ return FIXED_WIDE_INT (N) ();
}
-/* Get the number of HOST_WIDE_INTs actually represented within the
- wide int. */
-inline unsigned short
-wide_int_ro::get_len () const
+/* Specify the result type for each supported combination of binary inputs.
+ Note that CONST_PRECISION and VAR_PRECISION cannot be mixed, in order to
+ give stronger type checking. When both inputs are CONST_PRECISION,
+ they must have the same precision. */
+namespace wi
{
- return len;
+ template <>
+ template <typename T1, typename T2>
+ struct binary_traits <T1, T2, FLEXIBLE_PRECISION, FLEXIBLE_PRECISION>
+ {
+ typedef max_wide_int result_type;
+ };
+
+ template <>
+ template <typename T1, typename T2>
+ struct binary_traits <T1, T2, FLEXIBLE_PRECISION, VAR_PRECISION>
+ {
+ typedef wide_int result_type;
+ };
+
+ template <>
+ template <typename T1, typename T2>
+ struct binary_traits <T1, T2, FLEXIBLE_PRECISION, CONST_PRECISION>
+ {
+ /* Spelled out explicitly (rather than through FIXED_WIDE_INT)
+ so as not to confuse gengtype. */
+ typedef generic_wide_int < fixed_wide_int_storage
+ <int_traits <T2>::precision> > result_type;
+ };
+
+ template <>
+ template <typename T1, typename T2>
+ struct binary_traits <T1, T2, VAR_PRECISION, FLEXIBLE_PRECISION>
+ {
+ typedef wide_int result_type;
+ };
+
+ template <>
+ template <typename T1, typename T2>
+ struct binary_traits <T1, T2, CONST_PRECISION, FLEXIBLE_PRECISION>
+ {
+ /* Spelled out explicitly (rather than through FIXED_WIDE_INT)
+ so as not to confuse gengtype. */
+ typedef generic_wide_int < fixed_wide_int_storage
+ <int_traits <T1>::precision> > result_type;
+ };
+
+ template <>
+ template <typename T1, typename T2>
+ struct binary_traits <T1, T2, CONST_PRECISION, CONST_PRECISION>
+ {
+ /* Spelled out explicitly (rather than through FIXED_WIDE_INT)
+ so as not to confuse gengtype. */
+ STATIC_ASSERT (int_traits <T1>::precision == int_traits <T2>::precision);
+ typedef generic_wide_int < fixed_wide_int_storage
+ <int_traits <T1>::precision> > result_type;
+ };
+
+ template <>
+ template <typename T1, typename T2>
+ struct binary_traits <T1, T2, VAR_PRECISION, VAR_PRECISION>
+ {
+ typedef wide_int result_type;
+ };
}
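
An illustrative sketch of these deduction rules, not part of the patch (wi::add and wi::shwi are declared later in this header; names and values are hypothetical):

  wide_int v = wi::shwi (1, 32);  /* VAR_PRECISION */
  max_wide_int f = 2;             /* CONST_PRECISION */
  wi::add (v, 3);                 /* VAR + FLEXIBLE: result is wide_int */
  wi::add (f, 3);                 /* CONST + FLEXIBLE: result is max_wide_int */
  wi::add (3, 4);                 /* FLEXIBLE + FLEXIBLE: result is max_wide_int */
  /* wi::add (v, f) is rejected at compile time: VAR and CONST do not mix.  */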
-/* Get the precision of the value represented within the wide int. */
-inline unsigned int
-wide_int_ro::get_precision () const
+namespace wi
{
- return precision;
+ /* Implementation of int_traits for primitive integer types like "int". */
+ template <typename T, bool signed_p>
+ struct primitive_int_traits
+ {
+ static const enum precision_type precision_type = FLEXIBLE_PRECISION;
+ static const bool host_dependent_precision = true;
+ static unsigned int get_precision (T);
+ static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int, T);
+ };
}
-/* Get a particular element of the wide int. */
-inline HOST_WIDE_INT
-wide_int_ro::elt (unsigned int i) const
+template <typename T, bool signed_p>
+inline unsigned int
+wi::primitive_int_traits <T, signed_p>::get_precision (T)
{
- return i >= len ? sign_mask () : val[i];
+ return sizeof (T) * CHAR_BIT;
}
-/* Return true if THIS is -1. This is correct even if precision is 0. */
-inline bool
-wide_int_ro::minus_one_p () const
+template <typename T, bool signed_p>
+inline wi::storage_ref
+wi::primitive_int_traits <T, signed_p>::decompose (HOST_WIDE_INT *scratch,
+ unsigned int precision, T x)
{
- HOST_WIDE_INT x;
-
- if (precision && precision < HOST_BITS_PER_WIDE_INT)
- x = sext_hwi (val[0], precision);
- else
- x = val[0];
-
- return len == 1 && x == (HOST_WIDE_INT)-1;
+ scratch[0] = x;
+ if (signed_p || scratch[0] >= 0 || precision <= HOST_BITS_PER_WIDE_INT)
+ return wi::storage_ref (scratch, 1, precision);
+ scratch[1] = 0;
+ return wi::storage_ref (scratch, 2, precision);
}
-/* Return true if THIS is 0. This is correct even if precision is 0. */
-inline bool
-wide_int_ro::zero_p () const
+/* Allow primitive C types to be used in wi:: routines. */
+namespace wi
{
- HOST_WIDE_INT x;
+ template <>
+ struct int_traits <int>
+ : public primitive_int_traits <int, true> {};
- if (precision && precision < HOST_BITS_PER_WIDE_INT)
- x = sext_hwi (val[0], precision);
- else
- x = val[0];
+ template <>
+ struct int_traits <unsigned int>
+ : public primitive_int_traits <unsigned int, false> {};
+
+#if HOST_BITS_PER_INT != HOST_BITS_PER_WIDE_INT
+ template <>
+ struct int_traits <HOST_WIDE_INT>
+ : public primitive_int_traits <HOST_WIDE_INT, true> {};
- return len == 1 && x == 0;
+ template <>
+ struct int_traits <unsigned HOST_WIDE_INT>
+ : public primitive_int_traits <unsigned HOST_WIDE_INT, false> {};
+#endif
}
-/* Return true if THIS is 1. This is correct even if precision is 0. */
-inline bool
-wide_int_ro::one_p () const
+namespace wi
{
- HOST_WIDE_INT x;
+ /* Stores HWI-sized integer VAL, treating it as having signedness SGN
+ and precision PRECISION. */
+ struct hwi_with_prec
+ {
+ hwi_with_prec (HOST_WIDE_INT, unsigned int, signop);
+ HOST_WIDE_INT val;
+ unsigned int precision;
+ signop sgn;
+ };
- if (precision && precision < HOST_BITS_PER_WIDE_INT)
- x = zext_hwi (val[0], precision);
- else
- x = val[0];
+ hwi_with_prec shwi (HOST_WIDE_INT, unsigned int);
+ hwi_with_prec uhwi (unsigned HOST_WIDE_INT, unsigned int);
- return len == 1 && x == 1;
+ hwi_with_prec minus_one (unsigned int);
+ hwi_with_prec zero (unsigned int);
+ hwi_with_prec one (unsigned int);
+ hwi_with_prec two (unsigned int);
}
-/* Return true if THIS is negative based on the interpretation of SGN.
- For UNSIGNED, this is always false. This is correct even if
- precision is 0. */
-inline bool
-wide_int_ro::neg_p (signop sgn) const
+inline wi::hwi_with_prec::hwi_with_prec (HOST_WIDE_INT v, unsigned int p,
+ signop s)
+ : val (v), precision (p), sgn (s)
{
- if (sgn == UNSIGNED)
- return false;
-
- if (precision == 0)
- return (len == 1 && val[0] < 0);
-
- return sign_mask () != 0;
}
-/* Return true if THIS == C. If both operands have nonzero precisions,
- the precisions must be the same. */
-template <typename T>
-inline bool
-wide_int_ro::operator == (const T &c) const
+/* Return a signed integer that has value VAL and precision PRECISION. */
+inline wi::hwi_with_prec
+wi::shwi (HOST_WIDE_INT val, unsigned int precision)
{
- return wide_int_ro::eq_p (*this, c);
+ return hwi_with_prec (val, precision, SIGNED);
}
-/* Return true if C1 == C2. If both parameters have nonzero precisions,
- then those precisions must be equal. */
-template <typename T1, typename T2>
-inline bool
-wide_int_ro::eq_p (const T1 &c1, const T2 &c2)
-{
- bool result;
- HOST_WIDE_INT ws1[WIDE_INT_MAX_ELTS];
- HOST_WIDE_INT ws2[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s1, *s2; /* Returned data */
- unsigned int cl1, cl2; /* array lengths */
- unsigned int p1, p2; /* precisions */
-
- s1 = to_shwi1 (ws1, &cl1, &p1, c1);
- s2 = to_shwi1 (ws2, &cl2, &p2, c2);
- check_precision (&p1, &p2, true, false);
-
- if (p1 == 0)
- /* There are prec 0 types and we need to do this to check their
- min and max values. */
- result = (cl1 == cl2) && (s1[0] == s2[0]);
- else if (p1 < HOST_BITS_PER_WIDE_INT)
- {
- unsigned HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << p1) - 1;
- result = (s1[0] & mask) == (s2[0] & mask);
- }
- else if (p1 == HOST_BITS_PER_WIDE_INT)
- result = s1[0] == s2[0];
- else
- result = eq_p_large (s1, cl1, p1, s2, cl2);
-
- return result;
+/* Return an unsigned integer that has value VAL and precision PRECISION. */
+inline wi::hwi_with_prec
+wi::uhwi (unsigned HOST_WIDE_INT val, unsigned int precision)
+{
+ return hwi_with_prec (val, precision, UNSIGNED);
}
-/* Return true if THIS != C. If both parameters have nonzero precisions,
- then those precisions must be equal. */
-template <typename T>
-inline bool
-wide_int_ro::operator != (const T &c) const
+/* Return a wide int of -1 with the given PRECISION. */
+inline wi::hwi_with_prec
+wi::minus_one (unsigned int precision)
{
- return !(*this == c);
+ return wi::shwi (-1, precision);
}
-/* Return true if THIS < C using signed comparisons. */
-template <typename T>
-inline bool
-wide_int_ro::lts_p (const T &c) const
+/* Return a wide int of 0 with the given PRECISION. */
+inline wi::hwi_with_prec
+wi::zero (unsigned int precision)
{
- return wide_int_ro::lts_p (*this, c);
+ return wi::shwi (0, precision);
}
-/* Return true if C1 < C2 using signed comparisons. */
-template <typename T1, typename T2>
-inline bool
-wide_int_ro::lts_p (const T1 &c1, const T2 &c2)
+/* Return a wide int of 1 with the given PRECISION. */
+inline wi::hwi_with_prec
+wi::one (unsigned int precision)
{
- bool result;
- HOST_WIDE_INT ws1[WIDE_INT_MAX_ELTS];
- HOST_WIDE_INT ws2[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s1, *s2; /* Returned data */
- unsigned int cl1, cl2; /* array lengths */
- unsigned int p1, p2; /* precisions */
-
- s1 = to_shwi1 (ws1, &cl1, &p1, c1);
- s2 = to_shwi1 (ws2, &cl2, &p2, c2);
- check_precision (&p1, &p2, false, true);
-
- if (p1 <= HOST_BITS_PER_WIDE_INT
- && p2 <= HOST_BITS_PER_WIDE_INT)
- {
- HOST_WIDE_INT x0 = sext_hwi (s1[0], p1);
- HOST_WIDE_INT x1 = sext_hwi (s2[0], p2);
- result = x0 < x1;
- }
- else
- result = lts_p_large (s1, cl1, p1, s2, cl2, p2);
-
-#ifdef DEBUG_WIDE_INT
- debug_vaa ("wide_int_ro:: %d = (%s lts_p %s\n", result, s1, cl1, p1, s2, cl2, p2);
-#endif
- return result;
+ return wi::shwi (1, precision);
}
-/* Return true if THIS < C using unsigned comparisons. */
-template <typename T>
-inline bool
-wide_int_ro::ltu_p (const T &c) const
+/* Return a wide int of 2 with the given PRECISION. */
+inline wi::hwi_with_prec
+wi::two (unsigned int precision)
{
- return wide_int_ro::ltu_p (*this, c);
+ return wi::shwi (2, precision);
}
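
An illustrative sketch, not part of the patch: an hwi_with_prec attaches an explicit precision to a host integer, so these helpers mix safely with wide_ints of the same precision.

  wide_int x = wi::shwi (-1, 16);      /* 16-bit -1 */
  wide_int y = wi::uhwi (0xffff, 16);  /* 16-bit 65535 */
  /* wi::eq_p (x, y) is true: both have the same 16-bit pattern.  */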
-/* Return true if C1 < C2 using unsigned comparisons. */
-template <typename T1, typename T2>
-inline bool
-wide_int_ro::ltu_p (const T1 &c1, const T2 &c2)
+namespace wi
{
- bool result;
- HOST_WIDE_INT ws1[WIDE_INT_MAX_ELTS];
- HOST_WIDE_INT ws2[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s1, *s2; /* Returned data */
- unsigned int cl1, cl2; /* array lengths */
- unsigned int p1, p2; /* precisions */
-
- s1 = to_shwi1 (ws1, &cl1, &p1, c1);
- s2 = to_shwi1 (ws2, &cl2, &p2, c2);
- check_precision (&p1, &p2, false, true);
-
- if (p1 <= HOST_BITS_PER_WIDE_INT
- && p2 <= HOST_BITS_PER_WIDE_INT)
- {
- unsigned HOST_WIDE_INT x0 = zext_hwi (s1[0], p1);
- unsigned HOST_WIDE_INT x1 = zext_hwi (s2[0], p2);
- result = x0 < x1;
- }
- else
- result = ltu_p_large (s1, cl1, p1, s2, cl2, p2);
-#ifdef DEBUG_WIDE_INT
- debug_vaa ("wide_int_ro:: %d = (%s ltu_p %s)\n", result, s1, cl1, p1, s2, cl2, p2);
-#endif
- return result;
+ template <>
+ struct int_traits <wi::hwi_with_prec>
+ {
+ /* Since we have a sign, we can extend or truncate the integer to
+ other precisions where necessary. */
+ static const enum precision_type precision_type = FLEXIBLE_PRECISION;
+ /* hwi_with_prec has an explicitly-given precision, rather than the
+ precision of HOST_WIDE_INT. */
+ static const bool host_dependent_precision = false;
+ static unsigned int get_precision (const wi::hwi_with_prec &);
+ static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
+ const wi::hwi_with_prec &);
+ };
}
-/* Return true if THIS < C. Signedness is indicated by SGN. */
+inline unsigned int
+wi::int_traits <wi::hwi_with_prec>::get_precision (const wi::hwi_with_prec &x)
+{
+ return x.precision;
+}
+
+inline wi::storage_ref
+wi::int_traits <wi::hwi_with_prec>::
+decompose (HOST_WIDE_INT *scratch, unsigned int precision,
+ const wi::hwi_with_prec &x)
+{
+ scratch[0] = x.val;
+ if (x.sgn == SIGNED || x.val >= 0 || precision <= HOST_BITS_PER_WIDE_INT)
+ return wi::storage_ref (scratch, 1, precision);
+ scratch[1] = 0;
+ return wi::storage_ref (scratch, 2, precision);
+}
+
+/* Private functions for handling large cases out of line. They take
+ individual length and array parameters because that is cheaper for
+ the inline caller than constructing an object on the stack and
+ passing a reference to it. (Although many callers use wide_int_refs,
+ we generally want those to be removed by SRA.) */
+namespace wi
+{
+ bool eq_p_large (const HOST_WIDE_INT *, unsigned int,
+ const HOST_WIDE_INT *, unsigned int, unsigned int);
+ bool lts_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
+ const HOST_WIDE_INT *, unsigned int, unsigned int);
+ bool ltu_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
+ const HOST_WIDE_INT *, unsigned int, unsigned int);
+ int cmps_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
+ const HOST_WIDE_INT *, unsigned int, unsigned int);
+ int cmpu_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
+ const HOST_WIDE_INT *, unsigned int, unsigned int);
+ unsigned int sext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
+ unsigned int, unsigned int);
+ unsigned int zext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
+ unsigned int, unsigned int);
+ unsigned int set_bit_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, unsigned int, unsigned int);
+ unsigned int lshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, unsigned int, unsigned int);
+ unsigned int lrshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, unsigned int, unsigned int,
+ unsigned int);
+ unsigned int arshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, unsigned int, unsigned int,
+ unsigned int);
+ unsigned int and_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
+ const HOST_WIDE_INT *, unsigned int, unsigned int);
+ unsigned int and_not_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, const HOST_WIDE_INT *,
+ unsigned int, unsigned int);
+ unsigned int or_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
+ const HOST_WIDE_INT *, unsigned int, unsigned int);
+ unsigned int or_not_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, const HOST_WIDE_INT *,
+ unsigned int, unsigned int);
+ unsigned int xor_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
+ const HOST_WIDE_INT *, unsigned int, unsigned int);
+ unsigned int add_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
+ const HOST_WIDE_INT *, unsigned int, unsigned int,
+ signop, bool *);
+ unsigned int sub_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
+ const HOST_WIDE_INT *, unsigned int, unsigned int,
+ signop, bool *);
+ unsigned int mul_internal (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, const HOST_WIDE_INT *,
+ unsigned int, unsigned int, signop, bool *,
+ bool, bool);
+ unsigned int divmod_internal (HOST_WIDE_INT *, unsigned int *,
+ HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, unsigned int,
+ const HOST_WIDE_INT *,
+ unsigned int, unsigned int,
+ signop, bool *);
+}
+
+/* Return the precision of integer X, i.e. the number of bits it holds. */
template <typename T>
-inline bool
-wide_int_ro::lt_p (const T &c, signop sgn) const
+inline unsigned int
+wi::get_precision (const T &x)
{
- if (sgn == SIGNED)
- return lts_p (c);
- else
- return ltu_p (c);
+ return wi::int_traits <T>::get_precision (x);
}
-/* Return true if C1 < C2. Signedness is indicated by SGN. */
+/* Return the number of bits that the result of a binary operation can hold
+ when the input operands are X and Y. */
template <typename T1, typename T2>
-inline bool
-wide_int_ro::lt_p (const T1 &c1, const T2 &c2, signop sgn)
+inline unsigned int
+wi::get_binary_precision (const T1 &x, const T2 &y)
{
- if (sgn == SIGNED)
- return lts_p (c1, c2);
- else
- return ltu_p (c1, c2);
+ return get_precision (wi::int_traits <WI_BINARY_RESULT (T1, T2)>::
+ get_binary_result (x, y));
}
-/* Return true if THIS <= C using signed comparisons. */
+/* Extend undefined bits in X according to SGN. */
template <typename T>
+inline void
+wi::clear_undef (T &x, signop sgn)
+{
+ HOST_WIDE_INT *val = x.write_val ();
+ unsigned int precision = x.get_precision ();
+ unsigned int len = x.get_len ();
+ unsigned int small_prec = precision % HOST_BITS_PER_WIDE_INT;
+ if (small_prec
+ && len == ((precision + HOST_BITS_PER_WIDE_INT - 1)
+ / HOST_BITS_PER_WIDE_INT))
+ {
+ if (sgn == UNSIGNED)
+ val[len - 1] = zext_hwi (val[len - 1], small_prec);
+ else
+ val[len - 1] = sext_hwi (val[len - 1], small_prec);
+ }
+}
+
+/* Return true if X fits in a HOST_WIDE_INT with no loss of precision. */
inline bool
-wide_int_ro::les_p (const T &c) const
+wi::fits_shwi_p (const wide_int_ref &x)
{
- return !gts_p (c);
+ return x.len == 1;
}
-/* Return true if C1 <= C2 using signed comparisons. */
-template <typename T1, typename T2>
+/* Return true if X fits in an unsigned HOST_WIDE_INT with no loss of
+ precision. */
inline bool
-wide_int_ro::les_p (const T1 &c1, const T2 &c2)
+wi::fits_uhwi_p (const wide_int_ref &x)
{
- return !gts_p (c1, c2);
+ if (x.precision <= HOST_BITS_PER_WIDE_INT)
+ return true;
+ if (x.len == 1)
+ return x.sign_mask () == 0;
+ if (x.precision < 2 * HOST_BITS_PER_WIDE_INT)
+ return zext_hwi (x.uhigh (), x.precision % HOST_BITS_PER_WIDE_INT) == 0;
+ return x.len == 2 && x.uhigh () == 0;
}
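
An illustrative sketch, not part of the patch, assuming a host with a 64-bit HOST_WIDE_INT:

  wide_int w = wi::uhwi ((unsigned HOST_WIDE_INT) -1, 128);
  /* wi::fits_uhwi_p (w) is true: len == 2 with a zero high block.
     wi::fits_shwi_p (w) is false: len != 1.  */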
-/* Return true if THIS <= C using unsigned comparisons. */
-template <typename T>
+/* Return true if X is negative based on the interpretation of SGN.
+ For UNSIGNED, this is always false. */
inline bool
-wide_int_ro::leu_p (const T &c) const
+wi::neg_p (const wide_int_ref &x, signop sgn)
{
- return !gtu_p (c);
+ if (sgn == UNSIGNED)
+ return false;
+ if (x.precision == 0)
+ return false;
+ if (x.len * HOST_BITS_PER_WIDE_INT > x.precision)
+ return (x.uhigh () >> (x.precision % HOST_BITS_PER_WIDE_INT - 1)) & 1;
+ return x.sign_mask () < 0;
}
-/* Return true if C1 <= C2 using unsigned comparisons. */
-template <typename T1, typename T2>
-inline bool
-wide_int_ro::leu_p (const T1 &c1, const T2 &c2)
+/* Return -1 if the top bit of X is set and 0 if the top bit is clear. */
+inline HOST_WIDE_INT
+wi::sign_mask (const wide_int_ref &x)
{
- return !gtu_p (c1, c2);
+ return x.sign_mask ();
}
-/* Return true if THIS <= C. Signedness is indicated by SGN. */
-template <typename T>
+/* Return true if X == Y. X and Y must be binary-compatible. */
+template <typename T1, typename T2>
inline bool
-wide_int_ro::le_p (const T &c, signop sgn) const
+wi::eq_p (const T1 &x, const T2 &y)
{
- if (sgn == SIGNED)
- return les_p (c);
- else
- return leu_p (c);
+ unsigned int precision = get_binary_precision (x, y);
+ if (precision == 0)
+ return true;
+ wide_int_ref xi (x, precision);
+ wide_int_ref yi (y, precision);
+ if (precision <= HOST_BITS_PER_WIDE_INT)
+ {
+ unsigned HOST_WIDE_INT diff = xi.ulow () ^ yi.ulow ();
+ return (diff << (HOST_BITS_PER_WIDE_INT - precision)) == 0;
+ }
+ return eq_p_large (xi.val, xi.len, yi.val, yi.len, precision);
}
-/* Return true if C1 <= C2. Signedness is indicated by SGN. */
+/* Return true if X != Y. X and Y must be binary-compatible. */
template <typename T1, typename T2>
inline bool
-wide_int_ro::le_p (const T1 &c1, const T2 &c2, signop sgn)
+wi::ne_p (const T1 &x, const T2 &y)
{
- if (sgn == SIGNED)
- return les_p (c1, c2);
- else
- return leu_p (c1, c2);
+ return !eq_p (x, y);
}
-/* Return true if THIS > C using signed comparisons. */
-template <typename T>
+/* Return true if X < Y when both are treated as signed values. */
inline bool
-wide_int_ro::gts_p (const T &c) const
+wi::lts_p (const wide_int_ref &x, const wide_int_ref &y)
{
- return lts_p (c, *this);
+ if (x.precision <= HOST_BITS_PER_WIDE_INT
+ && y.precision <= HOST_BITS_PER_WIDE_INT)
+ {
+ HOST_WIDE_INT xl = sext_hwi (x.ulow (), x.precision);
+ HOST_WIDE_INT yl = sext_hwi (y.ulow (), y.precision);
+ return xl < yl;
+ }
+ return lts_p_large (x.val, x.len, x.precision, y.val, y.len,
+ y.precision);
}
-/* Return true if C1 > C2 using signed comparisons. */
-template <typename T1, typename T2>
+/* Return true if X < Y when both are treated as unsigned values. */
inline bool
-wide_int_ro::gts_p (const T1 &c1, const T2 &c2)
+wi::ltu_p (const wide_int_ref &x, const wide_int_ref &y)
{
- return lts_p (c2, c1);
+ if (x.precision <= HOST_BITS_PER_WIDE_INT
+ && y.precision <= HOST_BITS_PER_WIDE_INT)
+ {
+ unsigned HOST_WIDE_INT xl = zext_hwi (x.ulow (), x.precision);
+ unsigned HOST_WIDE_INT yl = zext_hwi (y.ulow (), y.precision);
+ return xl < yl;
+ }
+ return ltu_p_large (x.val, x.len, x.precision, y.val, y.len, y.precision);
}
-/* Return true if THIS > C using unsigned comparisons. */
-template <typename T>
+/* Return true if X < Y. Signedness of X and Y is indicated by SGN. */
inline bool
-wide_int_ro::gtu_p (const T &c) const
+wi::lt_p (const wide_int_ref &x, const wide_int_ref &y, signop sgn)
{
- return ltu_p (c, *this);
+ if (sgn == SIGNED)
+ return lts_p (x, y);
+ else
+ return ltu_p (x, y);
}
-/* Return true if C1 > C2 using unsigned comparisons. */
-template <typename T1, typename T2>
+/* Return true if X <= Y when both are treated as signed values. */
inline bool
-wide_int_ro::gtu_p (const T1 &c1, const T2 &c2)
+wi::les_p (const wide_int_ref &x, const wide_int_ref &y)
{
- return ltu_p (c2, c1);
+ return !lts_p (y, x);
}
-/* Return true if THIS > C. Signedness is indicated by SGN. */
-template <typename T>
+/* Return true if X <= Y when both are treated as unsigned values. */
inline bool
-wide_int_ro::gt_p (const T &c, signop sgn) const
+wi::leu_p (const wide_int_ref &x, const wide_int_ref &y)
{
- if (sgn == SIGNED)
- return gts_p (c);
- else
- return gtu_p (c);
+ return !ltu_p (y, x);
}
-/* Return true if C1 > C2. Signedness is indicated by SGN. */
-template <typename T1, typename T2>
+/* Return true if X <= Y. Signedness of X and Y is indicated by SGN. */
inline bool
-wide_int_ro::gt_p (const T1 &c1, const T2 &c2, signop sgn)
+wi::le_p (const wide_int_ref &x, const wide_int_ref &y, signop sgn)
{
if (sgn == SIGNED)
- return gts_p (c1, c2);
+ return les_p (x, y);
else
- return gtu_p (c1, c2);
+ return leu_p (x, y);
}
-/* Return true if THIS >= C using signed comparisons. */
-template <typename T>
+/* Return true if X > Y when both are treated as signed values. */
inline bool
-wide_int_ro::ges_p (const T &c) const
+wi::gts_p (const wide_int_ref &x, const wide_int_ref &y)
{
- return !lts_p (c);
+ return lts_p (y, x);
}
-/* Return true if C1 >= C2 using signed comparisons. */
-template <typename T1, typename T2>
+/* Return true if X > Y when both are treated as unsigned values. */
inline bool
-wide_int_ro::ges_p (const T1 &c1, const T2 &c2)
+wi::gtu_p (const wide_int_ref &x, const wide_int_ref &y)
{
- return !lts_p (c1, c2);
+ return ltu_p (y, x);
}
-/* Return true if THIS >= C using unsigned comparisons. */
-template <typename T>
+/* Return true if X > Y. Signedness of X and Y is indicated by SGN. */
inline bool
-wide_int_ro::geu_p (const T &c) const
+wi::gt_p (const wide_int_ref &x, const wide_int_ref &y, signop sgn)
{
- return !ltu_p (c);
+ if (sgn == SIGNED)
+ return gts_p (x, y);
+ else
+ return gtu_p (x, y);
}
-/* Return true if C1 >= C2 using unsigned comparisons. */
-template <typename T1, typename T2>
+/* Return true if X >= Y when both are treated as signed values. */
inline bool
-wide_int_ro::geu_p (const T1 &c1, const T2 &c2)
+wi::ges_p (const wide_int_ref &x, const wide_int_ref &y)
{
- return !ltu_p (c1, c2);
+ return !lts_p (x, y);
}
-/* Return true if THIS >= C. Signedness is indicated by SGN. */
-template <typename T>
+/* Return true if X >= Y when both are treated as unsigned values. */
inline bool
-wide_int_ro::ge_p (const T &c, signop sgn) const
+wi::geu_p (const wide_int_ref &x, const wide_int_ref &y)
{
- if (sgn == SIGNED)
- return ges_p (c);
- else
- return geu_p (c);
+ return !ltu_p (x, y);
}
-/* Return true if C1 >= C2. Signedness is indicated by SGN. */
-template <typename T1, typename T2>
+/* Return true if X >= Y. Signedness of X and Y is indicated by SGN. */
inline bool
-wide_int_ro::ge_p (const T1 &c1, const T2 &c2, signop sgn)
+wi::ge_p (const wide_int_ref &x, const wide_int_ref &y, signop sgn)
{
if (sgn == SIGNED)
- return ges_p (c1, c2);
+ return ges_p (x, y);
else
- return geu_p (c1, c2);
-}
-
-/* Returns -1 if THIS < C, 0 if THIS == C and 1 if THIS > C using
- signed compares. */
-template <typename T>
-inline int
-wide_int_ro::cmps (const T &c) const
-{
- return wide_int_ro::cmps (*this, c);
+ return geu_p (x, y);
}
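
An illustrative sketch, not part of the patch: the same 8-bit pattern orders differently under the two interpretations.

  wide_int a = wi::shwi (-1, 8);       /* bit pattern 0xff */
  wide_int b = wi::one (8);
  bool s = wi::lts_p (a, b);           /* true: -1 < 1 signed */
  bool u = wi::ltu_p (a, b);           /* false: 255 > 1 unsigned */
  bool v = wi::lt_p (a, b, UNSIGNED);  /* same as ltu_p */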
-/* Returns -1 if C1 < C2, 0 if C1 == C2 and 1 if C1 > C2 using
- signed compares. */
-template <typename T1, typename T2>
+/* Return -1 if X < Y, 0 if X == Y and 1 if X > Y. Treat both X and Y
+ as signed values. */
inline int
-wide_int_ro::cmps (const T1 &c1, const T2 &c2)
+wi::cmps (const wide_int_ref &x, const wide_int_ref &y)
{
- int result;
- HOST_WIDE_INT ws1[WIDE_INT_MAX_ELTS];
- HOST_WIDE_INT ws2[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s1, *s2; /* Returned data */
- unsigned int cl1, cl2; /* array lengths */
- unsigned int p1, p2; /* precisions */
-
- s1 = to_shwi1 (ws1, &cl1, &p1, c1);
- s2 = to_shwi1 (ws2, &cl2, &p2, c2);
- check_precision (&p1, &p2, false, true);
-
- if (p1 <= HOST_BITS_PER_WIDE_INT
- && p2 <= HOST_BITS_PER_WIDE_INT)
+ if (x.precision <= HOST_BITS_PER_WIDE_INT
+ && y.precision <= HOST_BITS_PER_WIDE_INT)
{
- HOST_WIDE_INT x0 = sext_hwi (s1[0], p1);
- HOST_WIDE_INT x1 = sext_hwi (s2[0], p2);
-
- if (x0 < x1)
- result = -1;
- else if (x0 > x1)
- result = 1;
+ HOST_WIDE_INT xl = sext_hwi (x.ulow (), x.precision);
+ HOST_WIDE_INT yl = sext_hwi (y.ulow (), y.precision);
+ if (xl < yl)
+ return -1;
+ else if (xl > yl)
+ return 1;
else
- result = 0;
+ return 0;
}
- else
- result = cmps_large (s1, cl1, p1, s2, cl2, p2);
-
-#ifdef DEBUG_WIDE_INT
- debug_vaa ("wide_int_ro:: %d = (%s cmps %s)\n", result, s1, cl1, p1, s2, cl2, p2);
-#endif
- return result;
+ return cmps_large (x.val, x.len, x.precision, y.val, y.len,
+ y.precision);
}
-/* Returns -1 if THIS < C, 0 if THIS == C and 1 if THIS > C using
- unsigned compares. */
-template <typename T>
-int
-wide_int_ro::cmpu (const T &c) const
+/* Return -1 if X < Y, 0 if X == Y and 1 if X > Y. Treat both X and Y
+ as unsigned values. */
+inline int
+wi::cmpu (const wide_int_ref &x, const wide_int_ref &y)
{
- return wide_int_ro::cmpu (*this, c);
-}
-
-/* Returns -1 if C1 < C2, 0 if C1 == C2 and 1 if C1 > C2 using
- unsigned compares. */
-template <typename T1, typename T2>
-int
-wide_int_ro::cmpu (const T1 &c1, const T2 &c2)
-{
- int result;
- HOST_WIDE_INT ws1[WIDE_INT_MAX_ELTS];
- HOST_WIDE_INT ws2[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s1, *s2; /* Returned data */
- unsigned int cl1, cl2; /* array lengths */
- unsigned int p1, p2; /* precisions */
-
- s1 = to_shwi1 (ws1, &cl1, &p1, c1);
- s2 = to_shwi1 (ws2, &cl2, &p2, c2);
- check_precision (&p1, &p2, false, true);
-
- if (p1 <= HOST_BITS_PER_WIDE_INT
- && p2 <= HOST_BITS_PER_WIDE_INT)
+ if (x.precision <= HOST_BITS_PER_WIDE_INT
+ && y.precision <= HOST_BITS_PER_WIDE_INT)
{
- unsigned HOST_WIDE_INT x0 = zext_hwi (s1[0], p1);
- unsigned HOST_WIDE_INT x1 = zext_hwi (s2[0], p2);
-
- if (x0 < x1)
- result = -1;
- else if (x0 == x1)
- result = 0;
+ unsigned HOST_WIDE_INT xl = zext_hwi (x.ulow (), x.precision);
+ unsigned HOST_WIDE_INT yl = zext_hwi (y.ulow (), y.precision);
+ if (xl < yl)
+ return -1;
+ else if (xl == yl)
+ return 0;
else
- result = 1;
+ return 1;
}
- else
- result = cmpu_large (s1, cl1, p1, s2, cl2, p2);
-
-#ifdef DEBUG_WIDE_INT
- debug_vaa ("wide_int_ro:: %d = (%s cmpu %s)\n", result, s1, cl1, p1, s2, cl2, p2);
-#endif
-
- return result;
-}
-
-/* Return -1, 0 or 1 depending on how THIS compares with C.
- Signedness is indicated by SGN. */
-template <typename T>
-inline int
-wide_int_ro::cmp (const T &c, signop sgn) const
-{
- if (sgn == SIGNED)
- return cmps (c);
- else
- return cmpu (c);
+ return cmpu_large (x.val, x.len, x.precision, y.val, y.len,
+ y.precision);
}
-/* Return -1, 0 or 1 depending on how C1 compares with C2.
- Signedness is indicated by SGN. */
-template <typename T1, typename T2>
+/* Return -1 if X < Y, 0 if X == Y and 1 if X > Y. Signedness of
+   X and Y is indicated by SGN. */
inline int
-wide_int_ro::cmp (const T1 &c1, const T2 &c2, signop sgn)
+wi::cmp (const wide_int_ref &x, const wide_int_ref &y, signop sgn)
{
if (sgn == SIGNED)
- return wide_int_ro::cmps (c1, c2);
+ return cmps (x, y);
else
- return wide_int_ro::cmpu (c1, c2);
-}
-
-/* Return true if THIS has the sign bit set to 1 and all other bits
- are zero. */
-inline bool
-wide_int_ro::only_sign_bit_p () const
-{
- return only_sign_bit_p (precision);
-}
-
-/* Return true if THIS fits in a HOST_WIDE_INT with no loss of
- precision. */
-inline bool
-wide_int_ro::fits_shwi_p () const
-{
- return len == 1;
+ return cmpu (x, y);
}
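
An illustrative sketch, not part of the patch, of the three-way forms:

  wide_int a = wi::shwi (-1, 8), b = wi::one (8);
  int s = wi::cmps (a, b);  /* -1: signed -1 < 1 */
  int u = wi::cmpu (a, b);  /* 1: unsigned 255 > 1 */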
-/* Return true if THIS fits in an unsigned HOST_WIDE_INT with no
- loss of precision. */
-inline bool
-wide_int_ro::fits_uhwi_p () const
+/* Return ~X. */
+template <typename T>
+inline WI_UNARY_RESULT (T)
+wi::bit_not (const T &x)
{
- return (precision <= HOST_BITS_PER_WIDE_INT)
- || (len == 1 && val[0] >= 0)
- || (len == 2 && (precision >= 2 * HOST_BITS_PER_WIDE_INT) && (val[1] == 0))
- || (len == 2 && (sext_hwi (val[1], precision & (HOST_BITS_PER_WIDE_INT - 1)) == 0));
+ WI_UNARY_RESULT_VAR (result, val, T, x);
+ wide_int_ref xi (x, get_precision (result));
+ for (unsigned int i = 0; i < xi.len; ++i)
+ val[i] = ~xi.val[i];
+ result.set_len (xi.len);
+ return result;
}
-/* Return the signed or unsigned min of THIS and C. */
+/* Return -X. */
template <typename T>
-inline wide_int_ro
-wide_int_ro::min (const T &c, signop sgn) const
+inline WI_UNARY_RESULT (T)
+wi::neg (const T &x)
{
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- unsigned int p1, p2;
-
- p1 = precision;
-
- s = to_shwi1 (ws, &cl, &p2, c);
- check_precision (&p1, &p2, true, true);
-
- if (sgn == SIGNED)
- return lts_p (c) ? (*this) : wide_int_ro::from_array (s, cl, p1, false);
- else
- return ltu_p (c) ? (*this) : wide_int_ro::from_array (s, cl, p1, false);
+ return sub (0, x);
}
-/* Return the signed or unsigned min of THIS and OP1. */
-inline wide_int_ro
-wide_int_ro::min (const wide_int_ro &op1, signop sgn) const
+/* Return -X. Indicate in *OVERFLOW if X is the minimum signed value. */
+template <typename T>
+inline WI_UNARY_RESULT (T)
+wi::neg (const T &x, bool *overflow)
{
- if (sgn == SIGNED)
- return lts_p (op1) ? (*this) : op1;
- else
- return ltu_p (op1) ? (*this) : op1;
+ *overflow = only_sign_bit_p (x);
+ return sub (0, x);
}
-/* Return the signed or unsigned max of THIS and C. */
+/* Return the absolute value of X. */
template <typename T>
-inline wide_int_ro
-wide_int_ro::max (const T &c, signop sgn) const
+inline WI_UNARY_RESULT (T)
+wi::abs (const T &x)
{
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- unsigned int p1, p2;
-
- p1 = precision;
-
- s = to_shwi1 (ws, &cl, &p2, c);
- check_precision (&p1, &p2, true, true);
- if (sgn == SIGNED)
- return gts_p (c) ? (*this) : wide_int_ro::from_array (s, cl, p1, false);
- else
- return gtu_p (c) ? (*this) : wide_int_ro::from_array (s, cl, p1, false);
+ if (neg_p (x))
+ return neg (x);
+ return x;
}
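
An illustrative sketch, not part of the patch: negating the most negative value wraps and reports overflow.

  bool ovf;
  wide_int m = wi::shwi (-128, 8);  /* most negative 8-bit value */
  wide_int n = wi::neg (m, &ovf);   /* n is -128 again; ovf is true */
  wide_int q = wi::shwi (-5, 8);
  wide_int p = wi::abs (q);         /* 5 */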
-/* Return the signed or unsigned max of THIS and OP1. */
-inline wide_int_ro
-wide_int_ro::max (const wide_int_ro &op1, signop sgn) const
+/* Return the result of sign-extending the low OFFSET bits of X. */
+template <typename T>
+inline WI_UNARY_RESULT (T)
+wi::sext (const T &x, unsigned int offset)
{
- if (sgn == SIGNED)
- return gts_p (op1) ? (*this) : op1;
+ WI_UNARY_RESULT_VAR (result, val, T, x);
+ unsigned int precision = get_precision (result);
+ wide_int_ref xi (x, precision);
+ if (offset <= HOST_BITS_PER_WIDE_INT)
+ {
+ val[0] = sext_hwi (xi.ulow (), offset);
+ result.set_len (1);
+ }
else
- return gtu_p (op1) ? (*this) : op1;
+ result.set_len (sext_large (val, xi.val, xi.len, precision, offset));
+ return result;
}
-/* Return the signed min of THIS and C. */
+/* Return the result of zero-extending the low OFFSET bits of X. */
template <typename T>
-inline wide_int_ro
-wide_int_ro::smin (const T &c) const
+inline WI_UNARY_RESULT (T)
+wi::zext (const T &x, unsigned int offset)
{
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- unsigned int p1, p2;
-
- p1 = precision;
-
- s = to_shwi1 (ws, &cl, &p2, c);
- check_precision (&p1, &p2, true, true);
-
- return lts_p (c) ? (*this) : wide_int_ro::from_array (s, cl, p1, false);
-}
-
-/* Return the signed min of THIS and OP1. */
-inline wide_int_ro
-wide_int_ro::smin (const wide_int_ro &op1) const
-{
- return lts_p (op1) ? (*this) : op1;
+ WI_UNARY_RESULT_VAR (result, val, T, x);
+ unsigned int precision = get_precision (result);
+ wide_int_ref xi (x, precision);
+ if (offset < HOST_BITS_PER_WIDE_INT)
+ {
+ val[0] = zext_hwi (xi.ulow (), offset);
+ result.set_len (1);
+ }
+ else
+ result.set_len (zext_large (val, xi.val, xi.len, precision, offset));
+ return result;
}
-/* Return the signed max of THIS and C. */
+/* Return the result of extending the low OFFSET bits of X according to
+ signedness SGN. */
template <typename T>
-inline wide_int_ro
-wide_int_ro::smax (const T &c) const
-{
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- unsigned int p1, p2;
-
- p1 = precision;
-
- s = to_shwi1 (ws, &cl, &p2, c);
- check_precision (&p1, &p2, true, true);
-
- return gts_p (c) ? (*this) : wide_int_ro::from_array (s, cl, p1, false);
-}
-
-/* Return the signed max of THIS and OP1. */
-inline wide_int_ro
-wide_int_ro::smax (const wide_int_ro &op1) const
+inline WI_UNARY_RESULT (T)
+wi::ext (const T &x, unsigned int offset, signop sgn)
{
- return gts_p (op1) ? (*this) : op1;
+ return sgn == SIGNED ? sext (x, offset) : zext (x, offset);
}
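
An illustrative sketch, not part of the patch: extension acts on the low OFFSET bits within X's own precision.

  wide_int x = wi::uhwi (0xff, 32);
  wide_int s = wi::sext (x, 8);           /* 0xffffffff: low 8 bits sign-extended */
  wide_int z = wi::zext (x, 8);           /* 0x000000ff */
  wide_int e = wi::ext (x, 8, UNSIGNED);  /* equivalent to zext */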
-/* Return the unsigned min of THIS and C. */
+/* Return an integer that represents X | (1 << BIT). */
template <typename T>
-inline wide_int_ro
-wide_int_ro::umin (const T &c) const
+inline WI_UNARY_RESULT (T)
+wi::set_bit (const T &x, unsigned int bit)
{
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- unsigned int p1, p2;
-
- p1 = precision;
-
- s = to_shwi1 (ws, &cl, &p2, c);
- return ltu_p (c) ? (*this) : wide_int_ro::from_array (s, cl, p1, false);
+ WI_UNARY_RESULT_VAR (result, val, T, x);
+ unsigned int precision = get_precision (result);
+ wide_int_ref xi (x, precision);
+ if (precision <= HOST_BITS_PER_WIDE_INT)
+ {
+ val[0] = xi.ulow () | ((unsigned HOST_WIDE_INT) 1 << bit);
+ result.set_len (1);
+ }
+ else
+ result.set_len (set_bit_large (val, xi.val, xi.len, precision, bit));
+ return result;
}
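
An illustrative sketch, not part of the patch:

  wide_int z = wi::zero (32);
  wide_int m = wi::set_bit (z, 31);  /* 32-bit value with only bit 31 set */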
-/* Return the unsigned min of THIS and OP1. */
-inline wide_int_ro
-wide_int_ro::umin (const wide_int_ro &op1) const
+/* Return the minimum of X and Y, treating them both as having
+ signedness SGN. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::min (const T1 &x, const T2 &y, signop sgn)
{
- return ltu_p (op1) ? (*this) : op1;
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ if (wi::le_p (x, y, sgn))
+ {
+ wide_int_ref xi (x, precision);
+ for (unsigned int i = 0; i < xi.len; ++i)
+ val[i] = xi.val[i];
+ result.set_len (xi.len);
+ }
+ else
+ {
+ wide_int_ref yi (y, precision);
+ for (unsigned int i = 0; i < yi.len; ++i)
+ val[i] = yi.val[i];
+ result.set_len (yi.len);
+ }
+ return result;
}
-/* Return the unsigned max of THIS and C. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::umax (const T &c) const
+/* Return the minimum of X and Y, treating both as signed values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::smin (const T1 &x, const T2 &y)
{
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- unsigned int p1, p2;
-
- p1 = precision;
-
- s = to_shwi1 (ws, &cl, &p2, c);
- check_precision (&p1, &p2, true, true);
-
- return gtu_p (c) ? (*this) : wide_int_ro::from_array (s, cl, p1, false);
+ return min (x, y, SIGNED);
}
-/* Return the unsigned max of THIS and OP1. */
-inline wide_int_ro
-wide_int_ro::umax (const wide_int_ro &op1) const
+/* Return the minimum of X and Y, treating both as unsigned values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::umin (const T1 &x, const T2 &y)
{
- return gtu_p (op1) ? (*this) : op1;
+ return min (x, y, UNSIGNED);
}
-/* Return THIS extended to PREC. The signedness of the extension is
- specified by SGN. */
-inline wide_int_ro
-wide_int_ro::ext (unsigned int prec, signop sgn) const
+/* Return the maximum of X and Y, treating them both as having
+ signedness SGN. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::max (const T1 &x, const T2 &y, signop sgn)
{
- if (sgn == UNSIGNED)
- return zext (prec);
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ if (wi::ge_p (x, y, sgn))
+ {
+ wide_int_ref xi (x, precision);
+ for (unsigned int i = 0; i < xi.len; ++i)
+ val[i] = xi.val[i];
+ result.set_len (xi.len);
+ }
else
- return sext (prec);
+ {
+ wide_int_ref yi (y, precision);
+ for (unsigned int i = 0; i < yi.len; ++i)
+ val[i] = yi.val[i];
+ result.set_len (yi.len);
+ }
+ return result;
}
-/* Return THIS forced to the size PREC. This is sign extended if
- needed. */
-inline wide_int_ro
-wide_int_ro::sforce_to_size (unsigned int prec) const
+/* Return the maximum of X and Y, treating both as signed values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::smax (const T1 &x, const T2 &y)
{
- return force_to_size (prec, SIGNED);
+ return max (x, y, SIGNED);
}
-/* Return THIS forced to the size PREC. This is zero extended if
- needed. */
-inline wide_int_ro
-wide_int_ro::zforce_to_size (unsigned int prec) const
+/* Return the maximum of X and Y, treating both as unsigned values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::umax (const T1 &x, const T2 &y)
{
- return force_to_size (prec, UNSIGNED);
+ return max (x, y, UNSIGNED);
}
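
An illustrative sketch, not part of the patch: min/max follow the comparison rules above.

  wide_int a = wi::shwi (-1, 8), b = wi::one (8);
  wide_int lo = wi::smin (a, b);  /* -1 */
  wide_int hi = wi::umax (a, b);  /* 0xff, i.e. 255 under the unsigned view */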
-/* Produce 0 or -1 that is the smear of the sign bit. */
-inline HOST_WIDE_INT
-wide_int_ro::sign_mask () const
-{
- if (precision < HOST_BITS_PER_WIDE_INT)
+/* Return X & Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::bit_and (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ wide_int_ref xi (x, precision);
+ wide_int_ref yi (y, precision);
+ if (xi.len + yi.len == 2)
{
- /* We don't allow a int:0 inside a struct to get this far,
- nor a value of indefinite precision. */
- gcc_assert (precision != 0);
- return ((val[0] << (HOST_BITS_PER_WIDE_INT - precision))
- >> (HOST_BITS_PER_WIDE_INT - 1));
+ val[0] = xi.ulow () & yi.ulow ();
+ result.set_len (1);
}
-
- /* TREE_VRP is not able to see that it is not possible for len to be
- 0. So without this test, it warns about this which causes
- bootstrap failures. */
- if (len < 1)
- gcc_unreachable ();
else
- return val[len - 1] >> (HOST_BITS_PER_WIDE_INT - 1);
+ result.set_len (and_large (val, xi.val, xi.len, yi.val, yi.len,
+ precision));
+ return result;
}
-/* Return THIS & C. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::operator & (const T &c) const
-{
- wide_int_ro result;
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- unsigned int p1, p2;
-
- p1 = precision;
- s = to_shwi1 (ws, &cl, &p2, c);
- check_precision (&p1, &p2, true, true);
-
- if (p1 <= HOST_BITS_PER_WIDE_INT)
+/* Return X & ~Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::bit_and_not (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ wide_int_ref xi (x, precision);
+ wide_int_ref yi (y, precision);
+ if (xi.len + yi.len == 2)
{
- result.len = 1;
- result.precision = p1;
- result.val[0] = val[0] & s[0];
+ val[0] = xi.ulow () & ~yi.ulow ();
+ result.set_len (1);
}
else
- result = and_large (val, len, p1, s, cl);
-
-#ifdef DEBUG_WIDE_INT
- debug_wwa ("wide_int_ro:: %s = (%s & %s)\n", result, *this, s, cl, p2);
-#endif
+ result.set_len (and_not_large (val, xi.val, xi.len, yi.val, yi.len,
+ precision));
return result;
}
-/* Return THIS & ~C. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::and_not (const T &c) const
-{
- wide_int_ro result;
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- unsigned int p1, p2;
-
- p1 = precision;
- s = to_shwi1 (ws, &cl, &p2, c);
- check_precision (&p1, &p2, true, true);
-
- if (p1 <= HOST_BITS_PER_WIDE_INT)
+/* Return X | Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::bit_or (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ wide_int_ref xi (x, precision);
+ wide_int_ref yi (y, precision);
+ if (precision <= HOST_BITS_PER_WIDE_INT)
{
- result.len = 1;
- result.precision = p1;
- result.val[0] = val[0] & ~s[0];
+ val[0] = xi.ulow () | yi.ulow ();
+ result.set_len (1);
}
else
- result = and_not_large (val, len, p1, s, cl);
-
-#ifdef DEBUG_WIDE_INT
- debug_wwa ("wide_int_ro:: %s = (%s &~ %s)\n", result, *this, s, cl, p2);
-#endif
+ result.set_len (or_large (val, xi.val, xi.len, yi.val, yi.len, precision));
return result;
}
-/* Return the logical negation (bitwise complement) of THIS. */
-inline wide_int_ro
-wide_int_ro::operator ~ () const
-{
- wide_int_ro result;
- int l0 = len - 1;
-
- result.len = len;
- result.precision = precision;
-
- while (l0 >= 0)
+/* Return X | ~Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::bit_or_not (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ wide_int_ref xi (x, precision);
+ wide_int_ref yi (y, precision);
+ if (xi.len + yi.len == 2)
{
- result.val[l0] = ~val[l0];
- l0--;
+ val[0] = xi.ulow () | ~yi.ulow ();
+ result.set_len (1);
}
-
-#ifdef DEBUG_WIDE_INT
- debug_ww ("wide_int_ro:: %s = (~ %s)\n", result, *this);
-#endif
+ else
+ result.set_len (or_not_large (val, xi.val, xi.len, yi.val, yi.len,
+ precision));
return result;
}
-/* Return THIS | C. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::operator | (const T &c) const
-{
- wide_int_ro result;
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- unsigned int p1, p2;
-
- p1 = precision;
- s = to_shwi1 (ws, &cl, &p2, c);
- check_precision (&p1, &p2, true, true);
-
- if (p1 <= HOST_BITS_PER_WIDE_INT)
+/* Return X ^ Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::bit_xor (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ wide_int_ref xi (x, precision);
+ wide_int_ref yi (y, precision);
+ if (xi.len + yi.len == 2)
{
- result.len = 1;
- result.precision = p1;
- result.val[0] = val[0] | s[0];
+ val[0] = xi.ulow () ^ yi.ulow ();
+ result.set_len (1);
}
else
- result = or_large (val, len, p1, s, cl);
-
-#ifdef DEBUG_WIDE_INT
- debug_wwa ("wide_int_ro:: %s = (%s | %s)\n", result, *this, s, cl, p2);
-#endif
+ result.set_len (xor_large (val, xi.val, xi.len, yi.val, yi.len, precision));
return result;
}
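
An illustrative sketch, not part of the patch (values hypothetical):

  wide_int a = wi::uhwi (0xf0, 8), b = wi::uhwi (0x3c, 8);
  wide_int c1 = wi::bit_and (a, b);      /* 0x30 */
  wide_int c2 = wi::bit_or (a, b);       /* 0xfc */
  wide_int c3 = wi::bit_xor (a, b);      /* 0xcc */
  wide_int c4 = wi::bit_and_not (a, b);  /* 0xc0 */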
-/* Return THIS | ~C. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::or_not (const T &c) const
-{
- wide_int_ro result;
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- unsigned int p1, p2;
-
- p1 = precision;
- s = to_shwi1 (ws, &cl, &p2, c);
- check_precision (&p1, &p2, true, true);
-
- if (p1 <= HOST_BITS_PER_WIDE_INT)
+/* Return X + Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::add (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ wide_int_ref xi (x, precision);
+ wide_int_ref yi (y, precision);
+ if (precision <= HOST_BITS_PER_WIDE_INT)
{
- result.len = 1;
- result.precision = p1;
- result.val[0] = val[0] | ~s[0];
+ val[0] = xi.ulow () + yi.ulow ();
+ result.set_len (1);
}
else
- result = or_not_large (val, len, p1, s, cl);
-
-#ifdef DEBUG_WIDE_INT
- debug_wwa ("wide_int_ro:: %s = (%s |~ %s)\n", result, *this, s, cl, p2);
-#endif
+ result.set_len (add_large (val, xi.val, xi.len, yi.val, yi.len, precision,
+ UNSIGNED, 0));
return result;
}
-/* Return THIS ^ C. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::operator ^ (const T &c) const
-{
- wide_int_ro result;
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- unsigned int p1, p2;
-
- p1 = precision;
- s = to_shwi1 (ws, &cl, &p2, c);
- check_precision (&p1, &p2, true, true);
-
- if (p1 <= HOST_BITS_PER_WIDE_INT)
+/* Return X + Y.  Treat X and Y as having the signedness given by SGN
+ and indicate in *OVERFLOW whether the operation overflowed. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::add (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ wide_int_ref xi (x, precision);
+ wide_int_ref yi (y, precision);
+ if (precision <= HOST_BITS_PER_WIDE_INT)
{
- result.len = 1;
- result.precision = p1;
- result.val[0] = val[0] ^ s[0];
+ unsigned HOST_WIDE_INT xl = xi.ulow ();
+ unsigned HOST_WIDE_INT yl = yi.ulow ();
+ unsigned HOST_WIDE_INT resultl = xl + yl;
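+      /* For the signed case, overflow has occurred iff the sign of the
+         result differs from the sign of both operands.  For the
+         unsigned case, shift the bits above PRECISION out of both
+         values and compare: wraparound leaves RESULTL below XL.  */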
+ if (precision == 0)
+ *overflow = false;
+ else if (sgn == SIGNED)
+ *overflow = (((resultl ^ xl) & (resultl ^ yl)) >> (precision - 1)) & 1;
+ else
+ *overflow = ((resultl << (HOST_BITS_PER_WIDE_INT - precision))
+ < (xl << (HOST_BITS_PER_WIDE_INT - precision)));
+ val[0] = resultl;
+ result.set_len (1);
}
else
- result = xor_large (val, len, p1, s, cl);
-
-#ifdef DEBUG_WIDE_INT
- debug_wwa ("wide_int_ro:: %s = (%s ^ %s)\n", result, *this, s, cl, p2);
-#endif
+ result.set_len (add_large (val, xi.val, xi.len, yi.val, yi.len, precision,
+ sgn, overflow));
return result;
}
-/* Return THIS + C. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::operator + (const T &c) const
-{
- wide_int_ro result;
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- unsigned int p1, p2;
-
- p1 = precision;
- s = to_shwi1 (ws, &cl, &p2, c);
- check_precision (&p1, &p2, true, true);
-
- if (p1 <= HOST_BITS_PER_WIDE_INT)
+/* Return X - Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::sub (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ wide_int_ref xi (x, precision);
+ wide_int_ref yi (y, precision);
+ if (precision <= HOST_BITS_PER_WIDE_INT)
{
- result.len = 1;
- result.precision = p1;
- result.val[0] = val[0] + s[0];
- if (precision < HOST_BITS_PER_WIDE_INT)
- result.val[0] = sext_hwi (result.val[0], p1);
+ val[0] = xi.ulow () - yi.ulow ();
+ result.set_len (1);
}
else
- result = add_large (val, len, p1, s, cl, UNSIGNED, 0);
-
-#ifdef DEBUG_WIDE_INT
- debug_wwa ("wide_int_ro:: %s = (%s + %s)\n", result, *this, s, cl, p2);
-#endif
+ result.set_len (sub_large (val, xi.val, xi.len, yi.val, yi.len, precision,
+ UNSIGNED, 0));
return result;
}
-/* Return THIS + C. OVERFLOW is set based on the sign of the
- operation that is specified in SGN. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::add (const T &c, signop sgn, bool *overflow) const
-{
- wide_int_ro result;
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- unsigned int p1, p2;
-
- p1 = precision;
- s = to_shwi1 (ws, &cl, &p2, c);
- check_precision (&p1, &p2, true, true);
-
- if (p1 <= HOST_BITS_PER_WIDE_INT)
+/* Return X - Y.  Treat X and Y as having the signedness given by SGN
+ and indicate in *OVERFLOW whether the operation overflowed. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::sub (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ wide_int_ref xi (x, precision);
+ wide_int_ref yi (y, precision);
+ if (precision <= HOST_BITS_PER_WIDE_INT)
{
- result.len = 1;
- result.precision = p1;
- result.val[0] = val[0] + s[0];
- if (p1 < HOST_BITS_PER_WIDE_INT)
- result.val[0] = sext_hwi (result.val[0], p1);
- if (sgn == SIGNED)
- {
- HOST_WIDE_INT x
- = (((result.val[0] ^ val[0]) & (result.val[0] ^ s[0]))
- >> (p1 - 1)) & 1;
- *overflow = (x != 0);
- }
+ unsigned HOST_WIDE_INT xl = xi.ulow ();
+ unsigned HOST_WIDE_INT yl = yi.ulow ();
+ unsigned HOST_WIDE_INT resultl = xl - yl;
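+      /* For the signed case, overflow has occurred iff the operands
+         have different signs and the result's sign differs from XL's.
+         For the unsigned case, a borrow leaves the truncated result
+         above the truncated XL.  */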
+ if (precision == 0)
+ *overflow = false;
+ else if (sgn == SIGNED)
+ *overflow = (((xl ^ yl) & (resultl ^ xl)) >> (precision - 1)) & 1;
else
- *overflow = ((unsigned HOST_WIDE_INT) result.val[0]
- < (unsigned HOST_WIDE_INT) val[0]);
- }
+ *overflow = ((resultl << (HOST_BITS_PER_WIDE_INT - precision))
+ > (xl << (HOST_BITS_PER_WIDE_INT - precision)));
+ val[0] = resultl;
+ result.set_len (1);
+ }
else
- result = add_large (val, len, p1, s, cl, sgn, overflow);
-
-#ifdef DEBUG_WIDE_INT
- debug_waav ("wide_int_ro:: %s = (%s + %s) O=%d\n",
- result, val, len, p1, s, cl, p1, *overflow);
-#endif
+ result.set_len (sub_large (val, xi.val, xi.len, yi.val, yi.len, precision,
+ sgn, overflow));
return result;
}
-/* Multiply THIS and C. The result is the same precision as the operands,
- so there is no reason for signed or unsigned versions. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::operator * (const T &c) const
-{
- wide_int_ro result;
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- bool overflow = false;
- unsigned int p1, p2;
-
- p1 = precision;
- s = to_shwi1 (ws, &cl, &p2, c);
- check_precision (&p1, &p2, true, true);
-
- if (p1 <= HOST_BITS_PER_WIDE_INT)
+/* Return X * Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::mul (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ wide_int_ref xi (x, precision);
+ wide_int_ref yi (y, precision);
+ if (precision <= HOST_BITS_PER_WIDE_INT)
{
- result.len = 1;
- result.precision = p1;
- result.val[0] = val[0] * s[0];
- if (precision < HOST_BITS_PER_WIDE_INT)
- result.val[0] = sext_hwi (result.val[0], precision);
+ val[0] = xi.ulow () * yi.ulow ();
+ result.set_len (1);
}
else
- result = mul_internal (false, false,
- val, len, p1,
- s, cl, UNSIGNED, &overflow, false);
-#ifdef DEBUG_WIDE_INT
- debug_wwa ("wide_int_ro:: %s = (%s * %s)\n", result, *this, s, cl, p2);
-#endif
+ result.set_len (mul_internal (val, xi.val, xi.len, yi.val, yi.len,
+ precision, UNSIGNED, 0, false, false));
return result;
}
-/* Multiply THIS and C. The signedness is specified with SGN.
- OVERFLOW is set true if the result overflows, false otherwise. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::mul (const T &c, signop sgn, bool *overflow) const
-{
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- unsigned int p1, p2;
-
- if (overflow)
- *overflow = false;
- p1 = precision;
- s = to_shwi1 (ws, &cl, &p2, c);
- check_precision (&p1, &p2, true, true);
-
- return mul_internal (false, false,
- val, len, p1,
- s, cl, sgn, overflow, true);
-}
-
-/* Signed multiply THIS and C. The result is the same precision
- as the operands. OVERFLOW is set true if the result overflows,
- false otherwise. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::smul (const T &c, bool *overflow) const
-{
- return mul (c, SIGNED, overflow);
-}
-
-/* Unsigned multiply THIS and C. The result is the same precision
- as the operands. OVERFLOW is set true if the result overflows,
- false otherwise. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::umul (const T &c, bool *overflow) const
-{
- return mul (c, UNSIGNED, overflow);
-}
-
-/* Multiply THIS and C. The signedness is specified with SGN.
- The result is twice the precision of the operands. The signedness
- is specified with SGN. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::mul_full (const T &c, signop sgn) const
-{
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- unsigned int p1, p2;
-
- p1 = precision;
- s = to_shwi1 (ws, &cl, &p2, c);
- check_precision (&p1, &p2, true, true);
-
- return mul_internal (false, true,
- val, len, p1,
- s, cl, sgn, 0, false);
-}
-
-/* Signed multiply THIS and C. The result is twice the precision
- of the operands. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::smul_full (const T &c) const
-{
- return mul_full (c, SIGNED);
-}
-
-/* Unsigned multiply THIS and C. The result is twice the precision
- of the operands. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::umul_full (const T &c) const
-{
- return mul_full (c, UNSIGNED);
-}
-
-/* Multiply THIS and C and return the high part of that result.
- The signedness is specified with SGN. The result is the same
- precision as the operands. The mode is the same mode as the
- operands. The signedness is specified with SGN. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::mul_high (const T &c, signop sgn) const
-{
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- unsigned int p1, p2;
-
- p1 = precision;
- s = to_shwi1 (ws, &cl, &p2, c);
- check_precision (&p1, &p2, true, true);
-
- return mul_internal (true, false,
- val, len, p1,
- s, cl, sgn, 0, false);
+/* Return X * Y.  Treat X and Y as having the signedness given by SGN
+ and indicate in *OVERFLOW whether the operation overflowed. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::mul (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ wide_int_ref xi (x, precision);
+ wide_int_ref yi (y, precision);
+ result.set_len (mul_internal (val, xi.val, xi.len, yi.val, yi.len, precision,
+ sgn, overflow, false, false));
+ return result;
}
-/* Negate THIS. */
-inline wide_int_ro
-wide_int_ro::operator - () const
+/* Return X * Y, treating both X and Y as signed values. Indicate in
+ *OVERFLOW whether the operation overflowed. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::smul (const T1 &x, const T2 &y, bool *overflow)
{
- wide_int_ro r;
- r = wide_int_ro (0) - *this;
- return r;
+ return mul (x, y, SIGNED, overflow);
}
-/* Negate THIS. OVERFLOW is set true if the value cannot be negated,
- false otherwise. */
-inline wide_int_ro
-wide_int_ro::neg (bool *overflow) const
+/* Return X * Y, treating both X and Y as unsigned values. Indicate in
+ *OVERFLOW whether the operation overflowed. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::umul (const T1 &x, const T2 &y, bool *overflow)
{
- gcc_checking_assert (precision);
-
- *overflow = only_sign_bit_p ();
-
- return wide_int_ro (0) - *this;
+ return mul (x, y, UNSIGNED, overflow);
}
-/* Return THIS - C. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::operator - (const T &c) const
-{
- wide_int_ro result;
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- unsigned int p1, p2;
-
- p1 = precision;
- s = to_shwi1 (ws, &cl, &p2, c);
- check_precision (&p1, &p2, true, true);
-
- if (p1 <= HOST_BITS_PER_WIDE_INT)
- {
- result.len = 1;
- result.precision = p1;
- result.val[0] = val[0] - s[0];
- if (p1 < HOST_BITS_PER_WIDE_INT)
- result.val[0] = sext_hwi (result.val[0], p1);
- }
- else
- result = sub_large (val, len, p1, s, cl, UNSIGNED, 0);
-
-#ifdef DEBUG_WIDE_INT
- debug_wwa ("wide_int_ro:: %s = (%s - %s)\n", result, *this, s, cl, p2);
-#endif
- return result;
-}
-
-/* Return THIS - C. OVERFLOW is set based on the sign of the
- operation that is specified in SGN. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::sub (const T &c, signop sgn, bool *overflow) const
-{
- wide_int_ro result;
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- unsigned int p1, p2;
-
- p1 = precision;
- s = to_shwi1 (ws, &cl, &p2, c);
- check_precision (&p1, &p2, true, true);
-
- if (p1 <= HOST_BITS_PER_WIDE_INT)
- {
- result.len = 1;
- result.precision = p1;
- result.val[0] = val[0] - s[0];
- if (p1 < HOST_BITS_PER_WIDE_INT)
- result.val[0] = sext_hwi (result.val[0], p1);
- if (sgn == SIGNED)
- {
- HOST_WIDE_INT x
- = (((val[0] ^ s[0]) & (result.val[0] ^ val[0]))
- >> (p1 - 1)) & 1;
- *overflow = (x != 0);
- }
- else
- *overflow = ((unsigned HOST_WIDE_INT) result.val[0]
- > (unsigned HOST_WIDE_INT) val[0]);
- }
- else
- result = sub_large (val, len, p1, s, cl, sgn, overflow);
-
-#ifdef DEBUG_WIDE_INT
- debug_waav ("wide_int_ro:: %s = (%s - %s) O=%d\n",
- result, val, len, p1, s, cl, p1, *overflow);
-#endif
+/* Perform a widening multiplication of X and Y, extending the values
+ according to SGN, and return the high part of the result. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::mul_high (const T1 &x, const T2 &y, signop sgn)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ wide_int_ref xi (x, precision);
+ wide_int_ref yi (y, precision);
+ result.set_len (mul_internal (val, xi.val, xi.len, yi.val, yi.len, precision,
+ sgn, 0, true, false));
return result;
}
-/* Divide DIVISOR into THIS. The result is the same size as the
- operands. The sign is specified in SGN. The output is truncated.
- If the pointer to OVERFLOW is not 0, OVERFLOW is set to true if
- the result overflows, false otherwise. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::div_trunc (const T &c, signop sgn, bool *overflow) const
-{
- wide_int_ro remainder;
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- unsigned int p1, p2;
-
- p1 = precision;
- s = to_shwi1 (ws, &cl, &p2, c);
- check_precision (&p1, &p2, false, true);
-
- return divmod_internal (true, val, len, p1, s, cl, p2, sgn,
- &remainder, false, overflow);
+/* Return X / Y, rounding towards 0.  Treat X and Y as having the
+ signedness given by SGN. Indicate in *OVERFLOW if the result
+ overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::div_trunc (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ wide_int_ref xi (x, precision);
+ wide_int_ref yi (y);
+
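+  /* The divisor keeps its natural precision; divmod_internal is given
+     the precision of each operand separately.  */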
+ quotient.set_len (divmod_internal (quotient_val, 0, 0, xi.val, xi.len,
+ precision,
+ yi.val, yi.len, yi.precision,
+ sgn, overflow));
+ return quotient;
}
-/* Signed divide with truncation of result. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::sdiv_trunc (const T &c) const
+/* Return X / Y, rounding towards 0.  Treat X and Y as signed values.  */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::sdiv_trunc (const T1 &x, const T2 &y)
{
- return div_trunc (c, SIGNED);
+ return div_trunc (x, y, SIGNED);
}
-/* Unsigned divide with truncation of result. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::udiv_trunc (const T &c) const
+/* Return X / Y, rounding towards 0.  Treat X and Y as unsigned values.  */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::udiv_trunc (const T1 &x, const T2 &y)
{
- return div_trunc (c, UNSIGNED);
+ return div_trunc (x, y, UNSIGNED);
}
-/* Divide DIVISOR into THIS. The result is the same size as the operands.
- The sign is specified in SGN. The output is floor truncated. If the
- pointer to OVERFLOW is not 0, OVERFLOW is set to true if the result
- overflows, false otherwise. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::div_floor (const T &c, signop sgn, bool *overflow) const
-{
- wide_int_ro remainder;
- wide_int_ro quotient;
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- unsigned int p1, p2;
-
- p1 = precision;
- s = to_shwi1 (ws, &cl, &p2, c);
- check_precision (&p1, &p2, false, true);
-
- return divmod_internal (true, val, len, p1, s, cl, p2, sgn,
- &remainder, false, overflow);
+/* Return X / Y, rounding towards -inf.  Treat X and Y as having the
+ signedness given by SGN. Indicate in *OVERFLOW if the result
+ overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::div_floor (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ wide_int_ref xi (x, precision);
+ wide_int_ref yi (y);
+
+ unsigned int remainder_len;
+ quotient.set_len (divmod_internal (quotient_val, &remainder_len,
+ remainder_val, xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn,
+ overflow));
+ remainder.set_len (remainder_len);
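+  /* divmod_internal rounds towards zero, so when the division is
+     inexact and the operands' signs differ, the truncated quotient is
+     one above the floor quotient.  */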
+  if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn) && remainder != 0)
+    return quotient - 1;
+ return quotient;
}
-/* Unsigned divide with floor truncation of result. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::udiv_floor (const T &c) const
+/* Return X / Y, rounding towards -inf.  Treat X and Y as signed values.  */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::sdiv_floor (const T1 &x, const T2 &y)
{
- return div_floor (c, UNSIGNED);
+ return div_floor (x, y, SIGNED);
}
-/* Signed divide with floor truncation of result. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::sdiv_floor (const T &c) const
+/* Return X / Y, rounding towards -inf.  Treat X and Y as unsigned values.  */
+/* ??? Why do we have both this and udiv_trunc?  Aren't they the same?  */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::udiv_floor (const T1 &x, const T2 &y)
{
- return div_floor (c, SIGNED);
+ return div_floor (x, y, UNSIGNED);
}
-/* Divide DIVISOR into THIS. The result is the same size as the operands.
- The sign is specified in SGN. The output is ceil truncated. If the
- pointer to OVERFLOW is not 0, OVERFLOW is set to true if the result
- overflows, false otherwise. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::div_ceil (const T &c, signop sgn, bool *overflow) const
-{
- wide_int_ro remainder;
- wide_int_ro quotient;
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- unsigned int p1, p2;
-
- p1 = precision;
- s = to_shwi1 (ws, &cl, &p2, c);
- check_precision (&p1, &p2, false, true);
-
- quotient = divmod_internal (true, val, len, p1, s, cl, p2, sgn,
- &remainder, true, overflow);
-
- if (!quotient.neg_p (sgn) && !remainder.zero_p ())
+/* Return X / Y, rounding towards +inf.  Treat X and Y as having the
+ signedness given by SGN. Indicate in *OVERFLOW if the result
+ overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::div_ceil (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ wide_int_ref xi (x, precision);
+ wide_int_ref yi (y);
+
+ unsigned int remainder_len;
+ quotient.set_len (divmod_internal (quotient_val, &remainder_len,
+ remainder_val, xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn,
+ overflow));
+ remainder.set_len (remainder_len);
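+  /* Truncation rounds towards zero, so a positive inexact quotient
+     must be incremented to round towards +inf.  */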
+  if (wi::neg_p (x, sgn) == wi::neg_p (y, sgn) && remainder != 0)
return quotient + 1;
return quotient;
}
-/* Divide DIVISOR into THIS. The result is the same size as the operands.
- The sign is specified in SGN. The output is round truncated. If the
- pointer to OVERFLOW is not 0, OVERFLOW is set to true if the result
- overflows, false otherwise. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::div_round (const T &c, signop sgn, bool *overflow) const
-{
- wide_int_ro remainder;
- wide_int_ro quotient;
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- unsigned int p1, p2;
-
- p1 = precision;
- s = to_shwi1 (ws, &cl, &p2, c);
- check_precision (&p1, &p2, false, true);
-
- quotient = divmod_internal (true, val, len, p1, s, cl, p2, sgn,
- &remainder, true, overflow);
- if (!remainder.zero_p ())
+/* Return X / Y, rounding towards nearest with ties away from zero.
+ Treat X and Y as having the signedness given by SGN. Indicate
+ in *OVERFLOW if the result overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::div_round (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ wide_int_ref xi (x, precision);
+ wide_int_ref yi (y);
+
+ unsigned int remainder_len;
+ quotient.set_len (divmod_internal (quotient_val, &remainder_len,
+ remainder_val, xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn,
+ overflow));
+ remainder.set_len (remainder_len);
+
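+  /* Round the quotient away from zero when twice the absolute
+     remainder is at least the absolute divisor.  */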
+ if (remainder != 0)
{
- wide_int_ro divisor = wide_int_ro::from_array (s, cl, precision);
if (sgn == SIGNED)
{
- wide_int_ro p_remainder
- = remainder.neg_p () ? -remainder : remainder;
- wide_int_ro p_divisor = divisor.neg_p () ? -divisor : divisor;
- p_divisor = p_divisor.rshiftu_large (1);
-
- if (p_divisor.gts_p (p_remainder))
+	  if (wi::geu_p (wi::abs (remainder),
+			 wi::sub (wi::abs (y), wi::abs (remainder))))
{
- if (quotient.neg_p ())
+	      if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn))
return quotient - 1;
else
return quotient + 1;
@@ -2436,1782 +2205,525 @@ wide_int_ro::div_round (const T &c, signop sgn, bool *overflow) const
}
else
{
- wide_int_ro p_divisor = divisor.rshiftu_large (1);
- if (p_divisor.gtu_p (remainder))
+	  if (wi::geu_p (remainder, wi::sub (y, remainder)))
return quotient + 1;
}
}
return quotient;
}
-/* Divide DIVISOR into THIS producing both the quotient and remainder.
- The result is the same size as the operands. The sign is specified
- in SGN. The output is truncated. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::divmod_trunc (const T &c, wide_int_ro *remainder,
- signop sgn) const
-{
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- unsigned int p1, p2;
-
- p1 = precision;
- s = to_shwi1 (ws, &cl, &p2, c);
- check_precision (&p1, &p2, false, true);
-
- return divmod_internal (true, val, len, p1, s, cl, p2, sgn,
- remainder, true, 0);
-}
-
-/* Signed divide/mod with truncation of result. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::sdivmod_trunc (const T &c, wide_int_ro *mod) const
-{
- return divmod_trunc (c, mod, SIGNED);
-}
-
-/* Unsigned divide/mod with truncation of result. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::udivmod_trunc (const T &c, wide_int_ro *mod) const
-{
- return divmod_trunc (c, mod, UNSIGNED);
-}
-
-/* Divide DIVISOR into THIS. The remainder is also produced in
- REMAINDER. The result is the same size as the operands.
- The sign is specified in SGN. The outputs is floor truncated. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::divmod_floor (const T &c, wide_int_ro *remainder,
- signop sgn) const
-{
- wide_int_ro quotient;
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- unsigned int p1, p2;
-
- p1 = precision;
- s = to_shwi1 (ws, &cl, &p2, c);
- check_precision (&p1, &p2, false, true);
-
- quotient = divmod_internal (true, val, len, p1, s, cl, p2, sgn,
- remainder, true, 0);
- if (quotient.neg_p (sgn) && !(*remainder).zero_p ())
- {
- *remainder = *remainder + wide_int_ro::from_array (s, cl, precision);
- return quotient - 1;
- }
+/* Return X / Y, rounding towards 0.
+ Treat X and Y as having the signedness given by SGN. Store the
+ remainder in *REMAINDER_PTR. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::divmod_trunc (const T1 &x, const T2 &y, signop sgn,
+ WI_BINARY_RESULT (T1, T2) *remainder_ptr)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ wide_int_ref xi (x, precision);
+ wide_int_ref yi (y);
+
+ unsigned int remainder_len;
+ quotient.set_len (divmod_internal (quotient_val, &remainder_len,
+ remainder_val, xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn, 0));
+ remainder.set_len (remainder_len);
+
+ *remainder_ptr = remainder;
return quotient;
}
-/* Signed divide/mod with floor truncation of result. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::sdivmod_floor (const T &c, wide_int_ro *mod) const
-{
- return divmod_floor (c, mod, SIGNED);
-}
-
-/* Divide DIVISOR into THIS producing the remainder. The result is
- the same size as the operands. The sign is specified in SGN. The
- output is adjusted to be compatible with truncating divide. If the
- pointer to OVERFLOW is not 0, OVERFLOW is set to true if the result
- overflows, false otherwise. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::mod_trunc (const T &c, signop sgn, bool *overflow) const
+/* Compute X / Y, rounding towards 0, and return the remainder.
+ Treat X and Y as having the signedness given by SGN. Indicate
+ in *OVERFLOW if the division overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::mod_trunc (const T1 &x, const T2 &y, signop sgn, bool *overflow)
{
- wide_int_ro remainder;
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- unsigned int p1, p2;
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (remainder);
+ wide_int_ref xi (x, precision);
+ wide_int_ref yi (y);
- p1 = precision;
- s = to_shwi1 (ws, &cl, &p2, c);
- check_precision (&p1, &p2, false, true);
+ unsigned int remainder_len;
+ divmod_internal (0, &remainder_len, remainder_val, xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn, overflow);
+ remainder.set_len (remainder_len);
- divmod_internal (false, val, len, p1, s, cl, p2, sgn,
- &remainder, true, overflow);
return remainder;
}
-/* Signed mod with truncation of result. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::smod_trunc (const T &c) const
+/* Compute X / Y, rounding towards 0, and return the remainder.
+ Treat X and Y as signed values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::smod_trunc (const T1 &x, const T2 &y)
{
- return mod_trunc (c, SIGNED);
+ return mod_trunc (x, y, SIGNED);
}
-/* Unsigned mod with truncation of result. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::umod_trunc (const T &c) const
+/* Compute X / Y, rounding towards 0, and return the remainder.
+ Treat X and Y as unsigned values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::umod_trunc (const T1 &x, const T2 &y)
{
- return mod_trunc (c, UNSIGNED);
+ return mod_trunc (x, y, UNSIGNED);
}
-/* Divide DIVISOR into THIS producing the remainder. The result is
- the same size as the operands. The sign is specified in SGN. The
- output is adjusted to be compatible with floor divide. OVERFLOW is
- set to true if the result overflows, false otherwise. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::mod_floor (const T &c, signop sgn, bool *overflow) const
-{
- wide_int_ro remainder;
- wide_int_ro quotient;
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- unsigned int p1, p2;
-
- p1 = precision;
- s = to_shwi1 (ws, &cl, &p2, c);
- check_precision (&p1, &p2, false, true);
-
- quotient = divmod_internal (true, val, len, p1, s, cl, p2, sgn,
- &remainder, true, overflow);
-
- if (quotient.neg_p (sgn) && !remainder.zero_p ())
- return remainder + wide_int_ro::from_array (s, cl, precision);
+/* Compute X / Y, rounding towards -inf, and return the remainder.
+ Treat X and Y as having the signedness given by SGN. Indicate
+ in *OVERFLOW if the division overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::mod_floor (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ wide_int_ref xi (x, precision);
+ wide_int_ref yi (y);
+
+ unsigned int remainder_len;
+ quotient.set_len (divmod_internal (quotient_val, &remainder_len,
+ remainder_val, xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn,
+ overflow));
+ remainder.set_len (remainder_len);
+
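+  /* The floor quotient is one below the truncated quotient when the
+     division is inexact and the signs differ, so compensate by adding
+     the divisor back into the remainder.  */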
+  if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn) && remainder != 0)
+ return remainder + y;
return remainder;
}
-/* Unsigned mod with floor truncation of result. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::umod_floor (const T &c) const
+/* Compute X / Y, rounding towards -inf, and return the remainder.
+   Treat X and Y as unsigned values. */
+/* ??? Why do we have both this and umod_trunc?  Aren't they the same?  */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::umod_floor (const T1 &x, const T2 &y)
{
- return mod_floor (c, UNSIGNED);
+ return mod_floor (x, y, UNSIGNED);
}
-/* Divide DIVISOR into THIS producing the remainder. The result is
- the same size as the operands. The sign is specified in SGN. The
- output is adjusted to be compatible with ceil divide. If the
- pointer to OVERFLOW is not 0, OVERFLOW is set to true if the result
- overflows, false otherwise. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::mod_ceil (const T &c, signop sgn, bool *overflow) const
-{
- wide_int_ro remainder;
- wide_int_ro quotient;
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- unsigned int p1, p2;
-
- p1 = precision;
- s = to_shwi1 (ws, &cl, &p2, c);
- check_precision (&p1, &p2, false, true);
-
- quotient = divmod_internal (true, val, len, p1, s, cl, p2, sgn,
- &remainder, true, overflow);
-
- if (!quotient.neg_p (sgn) && !remainder.zero_p ())
- return remainder - wide_int_ro::from_array (s, cl, precision);
+/* Compute X / Y, rounding towards +inf, and return the remainder.
+ Treat X and Y as having the signedness given by SGN. Indicate
+ in *OVERFLOW if the division overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::mod_ceil (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ wide_int_ref xi (x, precision);
+ wide_int_ref yi (y);
+
+ unsigned int remainder_len;
+ quotient.set_len (divmod_internal (quotient_val, &remainder_len,
+ remainder_val, xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn,
+ overflow));
+ remainder.set_len (remainder_len);
+
+  if (wi::neg_p (x, sgn) == wi::neg_p (y, sgn) && remainder != 0)
+ return remainder - y;
return remainder;
}
-/* Divide DIVISOR into THIS producing the remainder. The result is
- the same size as the operands. The sign is specified in SGN. The
- output is adjusted to be compatible with rounding divide. OVERFLOW
- is set to true if the result overflows, false otherwise. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::mod_round (const T &c, signop sgn, bool *overflow) const
-{
- wide_int_ro remainder;
- wide_int_ro quotient;
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- unsigned int p1, p2;
-
- p1 = precision;
- s = to_shwi1 (ws, &cl, &p2, c);
- check_precision (&p1, &p2, false, true);
-
- quotient = divmod_internal (true, val, len, p1, s, cl, p2, sgn,
- &remainder, true, overflow);
-
- if (!remainder.zero_p ())
+/* Compute X / Y, rounding towards nearest with ties away from zero,
+ and return the remainder. Treat X and Y as having the signedness
+ given by SGN. Indicate in *OVERFLOW if the division overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::mod_round (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ wide_int_ref xi (x, precision);
+ wide_int_ref yi (y);
+
+ unsigned int remainder_len;
+ quotient.set_len (divmod_internal (quotient_val, &remainder_len,
+ remainder_val, xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn,
+ overflow));
+ remainder.set_len (remainder_len);
+
+ if (remainder != 0)
{
- wide_int_ro divisor = wide_int_ro::from_array (s, cl, precision);
if (sgn == SIGNED)
{
- wide_int_ro p_remainder = (remainder.neg_p ()
- ? -remainder : remainder);
- wide_int_ro p_divisor = divisor.neg_p () ? -divisor : divisor;
- p_divisor = p_divisor.rshiftu_large (1);
-
- if (p_divisor.gts_p (p_remainder))
+	  if (wi::geu_p (wi::abs (remainder),
+			 wi::sub (wi::abs (y), wi::abs (remainder))))
{
- if (quotient.neg_p ())
- return remainder + divisor;
+	      if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn))
+ return remainder + y;
else
- return remainder - divisor;
+ return remainder - y;
}
}
else
{
- wide_int_ro p_divisor = divisor.rshiftu_large (1);
- if (p_divisor.gtu_p (remainder))
- return remainder - divisor;
+	  if (wi::geu_p (remainder, wi::sub (y, remainder)))
+ return remainder - y;
}
}
return remainder;
}
-/* Left shift THIS by C. C must be non-negative. BITSIZE is the
- width of *THIS used for truncating the shift amount. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::lshift (const T &c, unsigned int bitsize) const
+/* Return true if X is a multiple of Y, storing X / Y in *RES if so.
+ Treat X and Y as having the signedness given by SGN. */
+template <typename T1, typename T2>
+inline bool
+wi::multiple_of_p (const T1 &x, const T2 &y, signop sgn,
+ WI_BINARY_RESULT (T1, T2) *res)
{
- wide_int_ro result;
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- HOST_WIDE_INT shift;
-
- s = to_shwi2 (ws, &cl, c);
-
- gcc_checking_assert (precision);
-
- shift = trunc_shift (s, bitsize);
- if (shift == -1)
- result = wide_int_ro::zero (precision);
- else if (shift == 0)
- result = *this;
- /* Handle the simple case quickly. */
- else if (precision <= HOST_BITS_PER_WIDE_INT)
+ WI_BINARY_RESULT (T1, T2) remainder;
+ WI_BINARY_RESULT (T1, T2) quotient = divmod_trunc (x, y, sgn, &remainder);
+ if (remainder == 0)
{
- result.precision = precision;
- result.len = 1;
- result.val[0] = val[0] << shift;
+ *res = quotient;
+ return true;
}
- else
- result = lshift_large (shift, precision);
-
-#ifdef DEBUG_WIDE_INT
- debug_wwa ("wide_int_ro:: %s = (%s << %s)\n", result, *this, s, cl, 0);
-#endif
- return result;
+ return false;
}
-/* Left shift THIS by C into an expanded value with RES_PREC precision.
- C must be non-negative. This function is only available for the default
- wide-int form. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::lshift_widen (const T &c, unsigned int res_prec) const
+/* Truncate the shift amount X so that it is within BITSIZE.
+ PRECISION is the number of bits in the value being shifted. */
+inline unsigned int
+wi::trunc_shift (const wide_int_ref &x, unsigned int bitsize,
+ unsigned int precision)
{
- wide_int_ro result;
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- HOST_WIDE_INT shift;
-
- s = to_shwi2 (ws, &cl, c);
-
- gcc_checking_assert (precision);
- gcc_checking_assert (res_prec);
-
- shift = s[0];
-
- gcc_checking_assert (shift >= 0);
-
- if (shift == 0 && res_prec == precision)
- result = *this;
- /* Handle the simple case quickly. */
- else if (res_prec <= HOST_BITS_PER_WIDE_INT)
+ if (bitsize == 0)
{
- result.precision = res_prec;
- result.len = 1;
- result.val[0] = val[0] << shift;
+ gcc_checking_assert (!neg_p (x));
+ if (geu_p (x, precision))
+ return precision;
}
- else
- result = lshift_large (shift, res_prec);
-
-#ifdef DEBUG_WIDE_INT
- debug_wwa ("wide_int_ro:: %s = (%s <<W %s)\n", result, *this, s, cl, 0);
-#endif
- return result;
+ /* Flush out undefined bits. */
+ unsigned int shift = x.ulow ();
+ if (x.precision < HOST_BITS_PER_WIDE_INT)
+ shift = zext_hwi (shift, x.precision);
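+  /* If BITSIZE is zero, BITSIZE - 1 wraps to an all-ones mask and
+     SHIFT is returned unchanged; otherwise BITSIZE must be a power
+     of 2.  */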
+ return shift & (bitsize - 1);
}
-/* Rotate THIS left by C within PREC. If PREC is 0, the precsion of
- THIS is used for PREC. The result is the precision of THIS. */
+/* Return X << Y. If BITSIZE is nonzero, only use the low BITSIZE bits
+ of Y. */
template <typename T>
-inline wide_int_ro
-wide_int_ro::lrotate (const T &c, unsigned int prec) const
+inline WI_UNARY_RESULT (T)
+wi::lshift (const T &x, const wide_int_ref &y, unsigned int bitsize)
{
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
-
- s = to_shwi2 (ws, &cl, c);
-
- return lrotate ((unsigned HOST_WIDE_INT) s[0], prec);
-}
-
-/* Rotate THIS left by CNT within PREC. If PREC is 0, the precsion
- of THIS is used for PREC. CNT must be non-negative. The result
- is the precision of the THIS. */
-inline wide_int_ro
-wide_int_ro::lrotate (unsigned HOST_WIDE_INT cnt, unsigned int prec) const
-{
- wide_int_ro left, right, result;
-
- gcc_checking_assert (precision);
-
- if (prec == 0)
- prec = precision;
-
- left = lshift (cnt);
- right = rshiftu (prec - cnt);
-
- if (prec != precision)
+ WI_UNARY_RESULT_VAR (result, val, T, x);
+ unsigned int precision = get_precision (result);
+ wide_int_ref xi (x, precision);
+ unsigned int shift = trunc_shift (y, bitsize, precision);
+ /* Handle the simple cases quickly. */
+ if (shift >= precision)
{
- left = left.zforce_to_size (precision);
- right = right.zforce_to_size (precision);
+ val[0] = 0;
+ result.set_len (1);
}
- result = left | right;
-
- return result;
-}
-
-/* Right shift THIS by C. BITSIZE is the width of *THIS used for
- truncating the shift amount. SGN indicates the sign. C must be
- non-negative. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::rshift (const T &c, signop sgn, unsigned int bitsize) const
-{
- if (sgn == UNSIGNED)
- return rshiftu (c, bitsize);
- else
- return rshifts (c, bitsize);
-}
-
-/* Unsigned right shift THIS by C. C must be non-negative. BITSIZE
- is width of *THIS used for truncating the shift amount. */
-template <typename T>
-inline wide_int_ro
-wide_int_ro::rshiftu (const T &c, unsigned int bitsize) const
-{
- wide_int_ro result;
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- HOST_WIDE_INT shift;
-
- s = to_shwi2 (ws, &cl, c);
- gcc_checking_assert (precision);
- shift = trunc_shift (s, bitsize);
-
- if (shift == 0)
- result = *this;
- else if (shift == -1)
- result = wide_int_ro::zero (precision);
else if (precision <= HOST_BITS_PER_WIDE_INT)
{
- /* Handle the simple case quickly. */
- unsigned HOST_WIDE_INT x = val[0];
-
- result.precision = precision;
- result.len = 1;
-
- if (precision < HOST_BITS_PER_WIDE_INT)
- x = zext_hwi (x, precision);
-
- result.val[0] = x >> shift;
+ val[0] = xi.ulow () << shift;
+ result.set_len (1);
}
else
- result = rshiftu_large (shift);
-
-#ifdef DEBUG_WIDE_INT
- debug_wwa ("wide_int_ro:: %s = (%s >>U %s)\n", result, *this, s, cl, 0);
-#endif
+ result.set_len (lshift_large (val, xi.val, xi.len, precision, shift));
return result;
}
-/* Signed right shift THIS by C. C must be non-negative, BITSIZE is
- the width of *THIS used for truncating the shift amount. */
+/* Return X >> Y, using a logical shift. If BITSIZE is nonzero, only use
+ the low BITSIZE bits of Y. */
template <typename T>
-inline wide_int_ro
-wide_int_ro::rshifts (const T &c, unsigned int bitsize) const
-{
- wide_int_ro result;
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
- HOST_WIDE_INT shift;
-
- s = to_shwi2 (ws, &cl, c);
- gcc_checking_assert (precision);
- shift = trunc_shift (s, bitsize);
-
- if (shift == 0)
- result = *this;
- else if (shift == -1)
- result = wide_int_ro::zero (precision);
- else if (precision < HOST_BITS_PER_WIDE_INT)
+inline WI_UNARY_RESULT (T)
+wi::lrshift (const T &x, const wide_int_ref &y, unsigned int bitsize)
+{
+ WI_UNARY_RESULT_VAR (result, val, T, x);
+ /* Do things in the precision of the input rather than the output,
+ since the result can be no larger than that. */
+ wide_int_ref xi (x);
+ unsigned int shift = trunc_shift (y, bitsize, xi.precision);
+ /* Handle the simple cases quickly. */
+ if (shift >= xi.precision)
{
- /* Handle the simple case quickly. */
- HOST_WIDE_INT x = val[0];
-
- result.precision = precision;
- result.len = 1;
- x = x << (HOST_BITS_PER_WIDE_INT - precision);
- result.val[0] = x >> (shift + HOST_BITS_PER_WIDE_INT - precision);
+ val[0] = 0;
+ result.set_len (1);
}
- else if (precision == HOST_BITS_PER_WIDE_INT)
+ else if (xi.precision <= HOST_BITS_PER_WIDE_INT)
{
- HOST_WIDE_INT x = val[0];
-
- result.precision = precision;
- result.len = 1;
- result.val[0] = x >> shift;
+ val[0] = zext_hwi (xi.ulow (), xi.precision) >> shift;
+ result.set_len (1);
}
else
- result = rshifts_large (shift);
-
-#ifdef DEBUG_WIDE_INT
- debug_wwa ("wide_int_ro:: %s = (%s >>S %s)\n", result, *this, s, cl, 0);
-#endif
+ result.set_len (lrshift_large (val, xi.val, xi.len, xi.precision,
+ get_precision (result), shift));
return result;
}
-/* Rotate THIS right by C within PREC. If PREC is 0, the precsion
- of THIS is used for PREC. The result has the precision of THIS. */
+/* Return X >> Y, using an arithmetic shift. If BITSIZE is nonzero, only use
+ the low BITSIZE bits of Y. */
template <typename T>
-inline wide_int_ro
-wide_int_ro::rrotate (const T &c, unsigned int prec) const
-{
- HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
- const HOST_WIDE_INT *s;
- unsigned int cl;
-
- s = to_shwi2 (ws, &cl, c);
- return rrotate ((unsigned HOST_WIDE_INT) s[0], prec);
-}
-
-/* Rotate THIS left by CNT within PREC. If PREC is 0, the precsion
- of THIS is used for PREC. The result has the precision of THIS.
- CNT must be non-negative. */
-inline wide_int_ro
-wide_int_ro::rrotate (unsigned HOST_WIDE_INT cnt, unsigned int prec) const
+inline WI_UNARY_RESULT (T)
+wi::arshift (const T &x, const wide_int_ref &y, unsigned int bitsize)
{
- wide_int_ro left, right, result;
-
- gcc_checking_assert (precision);
-
- if (prec == 0)
- prec = precision;
-
- left = lshift (prec - cnt);
- right = rshiftu (cnt);
-
- if (prec != precision)
+ WI_UNARY_RESULT_VAR (result, val, T, x);
+ /* Do things in the precision of the input rather than the output,
+ since the result can be no larger than that. */
+ wide_int_ref xi (x);
+ unsigned int shift = trunc_shift (y, bitsize, xi.precision);
+ /* Handle the simple case quickly. */
+ if (shift >= xi.precision)
{
- left = left.zforce_to_size (precision);
- right = right.zforce_to_size (precision);
+ val[0] = sign_mask (x);
+ result.set_len (1);
}
- result = left | right;
-
- return result;
-}
-
-/* Truncate the value of the shift so that the value is within the
- BITSIZE. */
-inline int
-wide_int_ro::trunc_shift (const HOST_WIDE_INT *cnt, unsigned int bitsize) const
-{
- gcc_checking_assert (cnt[0] >= 0);
-
- if (bitsize == 0)
- return cnt[0];
- else
- return cnt[0] & (bitsize - 1);
-}
-
-template <typename T>
-inline bool
-wide_int_ro::top_bit_set (T x)
-{
- return (x >> (sizeof (x)*8 - 1)) != 0;
-}
-
-/* The following template and its overrides are used for the first
- and second operand of static binary comparison functions.
- These have been implemented so that pointer copying is done
- from the rep of the operands rather than actual data copying.
- This is safe even for garbage collected objects since the value
- is immediately throw away.
-
- This template matches all integers. */
-template <typename T>
-inline const HOST_WIDE_INT *
-wide_int_ro::to_shwi1 (HOST_WIDE_INT *s, unsigned int *l, unsigned int *p,
- const T &x)
-{
- s[0] = x;
- if (signedp (x)
- || sizeof (T) < sizeof (HOST_WIDE_INT)
- || ! top_bit_set (x))
- *l = 1;
- else
+ else if (xi.precision <= HOST_BITS_PER_WIDE_INT)
{
- s[1] = 0;
- *l = 2;
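+      /* Zero extension makes the host shift logical; the outer sign
+         extension then propagates the original sign bit, giving an
+         arithmetic shift within PRECISION bits.  */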
+ val[0] = sext_hwi (zext_hwi (xi.ulow (), xi.precision) >> shift,
+ xi.precision - shift);
+ result.set_len (1);
}
- *p = 0;
- return s;
+ else
+ result.set_len (arshift_large (val, xi.val, xi.len, xi.precision,
+ get_precision (result), shift));
+ return result;
}
-/* The following template and its overrides are used for the second
- operand of binary functions. These have been implemented so that
- pointer copying is done from the rep of the second operand rather
- than actual data copying. This is safe even for garbage collected
- objects since the value is immediately throw away.
-
- The next template matches all integers. */
+/* Return X >> Y, using an arithmetic shift if SGN is SIGNED and a logical
+ shift otherwise. If BITSIZE is nonzero, only use the low BITSIZE bits
+ of Y. */
template <typename T>
-inline const HOST_WIDE_INT *
-wide_int_ro::to_shwi2 (HOST_WIDE_INT *s, unsigned int *l, const T &x)
+inline WI_UNARY_RESULT (T)
+wi::rshift (const T &x, const wide_int_ref &y, signop sgn,
+ unsigned int bitsize)
{
- s[0] = x;
- if (signedp (x)
- || sizeof (T) < sizeof (HOST_WIDE_INT)
- || ! top_bit_set (x))
- *l = 1;
+ if (sgn == UNSIGNED)
+ return lrshift (x, y, bitsize);
else
- {
- s[1] = 0;
- *l = 2;
- }
- return s;
-}
-
-inline wide_int::wide_int () {}
-
-inline wide_int::wide_int (const wide_int_ro &r)
-{
- static_cast <wide_int_ro &> (*this) = r;
-}
-
-/* Convert an INTEGER_CST into a wide int. */
-inline wide_int::wide_int (const_tree tcst)
-{
- *this = from_array (&TREE_INT_CST_ELT (tcst, 0),
- TREE_INT_CST_NUNITS (tcst),
- TYPE_PRECISION (TREE_TYPE (tcst)), false);
-}
-
-inline wide_int::wide_int (HOST_WIDE_INT op0)
-{
- precision = 0;
- val[0] = op0;
- len = 1;
-}
-
-inline wide_int::wide_int (int op0)
-{
- precision = 0;
- val[0] = op0;
- len = 1;
-}
-
-inline wide_int::wide_int (unsigned HOST_WIDE_INT op0)
-{
- *this = wide_int_ro::from_uhwi (op0);
-}
-
-inline wide_int::wide_int (unsigned int op0)
-{
- *this = wide_int_ro::from_uhwi (op0);
-}
-
-inline wide_int::wide_int (const rtx_mode_t &op0)
-{
- *this = wide_int_ro::from_rtx (op0);
-}
-
-inline wide_int &
-wide_int::operator = (const wide_int_ro &r)
-{
- static_cast <wide_int_ro &> (*this) = r;
- return *this;
-}
-
-inline wide_int &
-wide_int::operator = (const_tree tcst)
-{
- *this = from_array (&TREE_INT_CST_ELT (tcst, 0),
- TREE_INT_CST_NUNITS (tcst),
- TYPE_PRECISION (TREE_TYPE (tcst)), false);
- return *this;
-}
-
-inline wide_int &
-wide_int::operator = (HOST_WIDE_INT op0)
-{
- static_cast <wide_int_ro &> (*this) = op0;
- return *this;
-}
-
-inline wide_int &
-wide_int::operator = (int op0)
-{
- static_cast <wide_int_ro &> (*this) = op0;
- return *this;
-}
-
-inline wide_int &
-wide_int::operator = (unsigned HOST_WIDE_INT op0)
-{
- static_cast <wide_int_ro &> (*this) = wide_int_ro (op0);
- return *this;
-}
-
-inline wide_int &
-wide_int::operator = (unsigned int op0)
-{
- static_cast <wide_int_ro &> (*this) = wide_int_ro (op0);
- return *this;
-}
-
-inline wide_int &
-wide_int::operator = (const rtx_mode_t &op0)
-{
- *this = wide_int_ro::from_rtx (op0);
- return *this;
+ return arshift (x, y, bitsize);
}
-inline wide_int &
-wide_int::operator ++ ()
-{
- *this += 1;
- return *this;
-}
-
-inline wide_int &
-wide_int::operator -- ()
+/* Return the result of rotating the low WIDTH bits of X left by Y bits
+ and zero-extending the result. Use a full-width rotate if WIDTH is
+ zero. */
+template <typename T>
+inline WI_UNARY_RESULT (T)
+wi::lrotate (const T &x, const wide_int_ref &y, unsigned int width)
{
- *this -= 1;
- return *this;
+ unsigned int precision = get_binary_precision (x, x);
+ if (width == 0)
+ width = precision;
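+  /* The truncating shifts below rely on WIDTH being a power of 2.  */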
+ gcc_checking_assert ((width & -width) == width);
+ WI_UNARY_RESULT (T) left = wi::lshift (x, y, width);
+ WI_UNARY_RESULT (T) right = wi::lrshift (x, wi::sub (width, y), width);
+ if (width != precision)
+ return wi::zext (left, width) | wi::zext (right, width);
+ return left | right;
}
+/* Return the result of rotating the low WIDTH bits of X right by Y bits
+ and zero-extending the result. Use a full-width rotate if WIDTH is
+ zero. */
template <typename T>
-inline wide_int &
-wide_int::operator &= (const T &c)
+inline WI_UNARY_RESULT (T)
+wi::rrotate (const T &x, const wide_int_ref &y, unsigned int width)
{
- *this = *this & c;
- return *this;
+ unsigned int precision = get_binary_precision (x, x);
+ if (width == 0)
+ width = precision;
+ gcc_checking_assert ((width & -width) == width);
+ WI_UNARY_RESULT (T) right = wi::lrshift (x, y, width);
+ WI_UNARY_RESULT (T) left = wi::lshift (x, wi::sub (width, y), width);
+ if (width != precision)
+ return wi::zext (left, width) | wi::zext (right, width);
+ return left | right;
}
-template <typename T>
-inline wide_int &
-wide_int::operator |= (const T &c)
+/* Return 0 if the number of 1s in X is even and 1 if the number of 1s
+ is odd. */
+inline int
+wi::parity (const wide_int_ref &x)
{
- *this = *this | c;
- return *this;
+ return popcount (x) & 1;
}
+/* Extract WIDTH bits from X, starting at BITPOS. */
template <typename T>
-inline wide_int &
-wide_int::operator ^= (const T &c)
-{
- *this = *this ^ c;
- return *this;
+inline unsigned HOST_WIDE_INT
+wi::extract_uhwi (const T &x, unsigned int bitpos,
+ unsigned int width)
+{
+ unsigned precision = get_precision (x);
+ if (precision < bitpos + width)
+ precision = bitpos + width;
+ wide_int_ref xi (x, precision);
+
+ /* Handle this rare case after the above, so that we assert about
+ bogus BITPOS values. */
+ if (width == 0)
+ return 0;
+
+ unsigned int start = bitpos / HOST_BITS_PER_WIDE_INT;
+ unsigned int shift = bitpos % HOST_BITS_PER_WIDE_INT;
+ unsigned HOST_WIDE_INT res = xi.elt (start);
+ res >>= shift;
+ if (shift + width > HOST_BITS_PER_WIDE_INT)
+ {
+ unsigned HOST_WIDE_INT upper = xi.elt (start + 1);
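+      /* The extracted field straddles two elements, so SHIFT is
+         nonzero and -SHIFT % HOST_BITS_PER_WIDE_INT is simply
+         HOST_BITS_PER_WIDE_INT - SHIFT, the position at which the
+         bits from the upper element continue.  */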
+ res |= upper << (-shift % HOST_BITS_PER_WIDE_INT);
+ }
+ return zext_hwi (res, width);
}
-template <typename T>
-inline wide_int &
-wide_int::operator += (const T &c)
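+/* generic_wide_int contains no GC-managed pointers, so the garbage
+   collection and PCH walkers are no-ops.  */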
+template<typename T>
+void
+gt_ggc_mx (generic_wide_int <T> *)
{
- *this = *this + c;
- return *this;
}
-template <typename T>
-inline wide_int &
-wide_int::operator -= (const T &c)
+template<typename T>
+void
+gt_pch_nx (generic_wide_int <T> *)
{
- *this = *this - c;
- return *this;
}
-template <typename T>
-inline wide_int &
-wide_int::operator *= (const T &c)
+template<typename T>
+void
+gt_pch_nx (generic_wide_int <T> *, void (*) (void *, void *), void *)
{
- *this = *this * c;
- return *this;
}
-template <int bitsize>
-class GTY(()) fixed_wide_int : public wide_int_ro
+namespace wi
{
- friend class wide_int_ro;
-
-protected:
- fixed_wide_int &operator = (const wide_int &);
- fixed_wide_int (const wide_int_ro);
- const HOST_WIDE_INT *get_val () const;
-
- using wide_int_ro::val;
-
-public:
- using wide_int_ro::get_precision;
- using wide_int_ro::get_len;
- using wide_int_ro::to_short_addr;
- using wide_int_ro::fits_uhwi_p;
- using wide_int_ro::fits_shwi_p;
- using wide_int_ro::gtu_p;
- using wide_int_ro::gts_p;
- using wide_int_ro::geu_p;
- using wide_int_ro::ges_p;
- using wide_int_ro::to_shwi;
- using wide_int_ro::operator ==;
- using wide_int_ro::ltu_p;
- using wide_int_ro::lts_p;
- using wide_int_ro::leu_p;
- using wide_int_ro::les_p;
- using wide_int_ro::to_uhwi;
- using wide_int_ro::cmps;
- using wide_int_ro::neg_p;
- using wide_int_ro::cmpu;
- using wide_int_ro::umod_floor;
- using wide_int_ro::one_p;
- using wide_int_ro::zero_p;
- using wide_int_ro::multiple_of_p;
- using wide_int_ro::minus_one_p;
- using wide_int_ro::operator !=;
- using wide_int_ro::elt;
- using wide_int_ro::fits_to_tree_p;
- using wide_int_ro::from_uhwi;
- using wide_int_ro::ctz;
- using wide_int_ro::cmp;
- using wide_int_ro::minus_one;
-
- static fixed_wide_int from_wide_int (const wide_int &);
- static fixed_wide_int from_array (const HOST_WIDE_INT *, unsigned int,
- bool = true);
-
- fixed_wide_int ();
- fixed_wide_int (const_tree);
- fixed_wide_int (HOST_WIDE_INT);
- fixed_wide_int (int);
- fixed_wide_int (unsigned HOST_WIDE_INT);
- fixed_wide_int (unsigned int);
-
- fixed_wide_int &operator ++ ();
- fixed_wide_int &operator -- ();
-
- bool multiple_of_p (const wide_int_ro &, signop, fixed_wide_int *) const;
-
- /* Conversion to and from GMP integer representations. */
- void to_mpz (mpz_t, signop) const;
- static fixed_wide_int from_mpz (const_tree, mpz_t, bool);
- fixed_wide_int &operator = (const_tree);
- fixed_wide_int &operator = (HOST_WIDE_INT);
- fixed_wide_int &operator = (int);
- fixed_wide_int &operator = (unsigned HOST_WIDE_INT);
- fixed_wide_int &operator = (unsigned int);
-
- /* Extension, these do not change the precision. */
- fixed_wide_int ext (unsigned int, signop) const;
- fixed_wide_int sext (unsigned int) const;
- fixed_wide_int zext (unsigned int) const;
-
- /* Masking and Insertion */
- fixed_wide_int set_bit (unsigned int) const;
- static fixed_wide_int set_bit_in_zero (unsigned int);
- fixed_wide_int insert (const wide_int_ro &, unsigned int,
- unsigned int) const;
-
- static fixed_wide_int mask (unsigned int, bool);
- static fixed_wide_int shifted_mask (unsigned int, unsigned int, bool);
-
- /* Logicals */
-
- template <typename T>
- fixed_wide_int operator & (const T &) const;
- fixed_wide_int operator & (const fixed_wide_int &) const;
-
- template <typename T>
- fixed_wide_int &operator &= (const T &);
- fixed_wide_int &operator &= (const fixed_wide_int &);
-
- template <typename T>
- fixed_wide_int and_not (const T &) const;
-
- fixed_wide_int operator ~ () const;
-
- template <typename T>
- fixed_wide_int operator | (const T &) const;
- fixed_wide_int operator | (const fixed_wide_int &) const;
-
- template <typename T>
- fixed_wide_int &operator |= (const T &);
- fixed_wide_int &operator |= (const fixed_wide_int &);
-
- template <typename T>
- fixed_wide_int or_not (const T &) const;
-
- template <typename T>
- fixed_wide_int operator ^ (const T &) const;
- fixed_wide_int operator ^ (const fixed_wide_int &) const;
-
- template <typename T>
- fixed_wide_int &operator ^= (const T &);
- fixed_wide_int &operator ^= (const fixed_wide_int &);
-
- /* Arithmetic operation functions, alpha sorted. */
-
- template <typename T>
- fixed_wide_int operator + (const T &) const;
- fixed_wide_int operator + (const fixed_wide_int &c) const;
-
- template <typename T>
- fixed_wide_int &operator += (const T &);
- fixed_wide_int &operator += (const fixed_wide_int &c);
+ /* Used for overloaded functions in which the only other acceptable
+ scalar type is a pointer. It stops a plain 0 from being treated
+ as a null pointer. */
+ struct never_used1 {};
+ struct never_used2 {};
- template <typename T>
- fixed_wide_int add (const T &, signop, bool *) const;
-
- template <typename T>
- fixed_wide_int operator * (const T &) const;
+ wide_int min_value (unsigned int, signop);
+ wide_int min_value (never_used1 *);
+ wide_int min_value (never_used2 *);
+ wide_int max_value (unsigned int, signop);
+ wide_int max_value (never_used1 *);
+ wide_int max_value (never_used2 *);
- template <typename T>
- fixed_wide_int &operator *= (const T &);
-
- template <typename T>
- fixed_wide_int mul (const T &, signop, bool *) const;
-
- template <typename T>
- fixed_wide_int smul (const T &, bool *) const;
-
- template <typename T>
- fixed_wide_int umul (const T &, bool *) const;
-
- template <typename T>
- fixed_wide_int operator - (const T &) const;
-
- fixed_wide_int operator - () const;
-
- fixed_wide_int operator - (const fixed_wide_int &) const;
-
- template <typename T>
- fixed_wide_int &operator -= (const T &);
- fixed_wide_int &operator -= (const fixed_wide_int &);
-
- template <typename T>
- fixed_wide_int sub (const T &, signop, bool *) const;
+ wide_int mul_full (const wide_int_ref &, const wide_int_ref &, signop);
- /* Division and mod. These are the ones that are actually used, but
- there are a lot of them. */
-
- template <typename T>
- fixed_wide_int div_floor (const T &, signop, bool * = 0) const;
-
- template <typename T>
- fixed_wide_int udiv_floor (const T &) const;
-
- template <typename T>
- fixed_wide_int sdiv_floor (const T &) const;
-
- template <typename T>
- fixed_wide_int div_ceil (const T &, signop, bool * = 0) const;
-
- template <typename T>
- fixed_wide_int div_round (const T &, signop, bool * = 0) const;
-
- template <typename T>
- fixed_wide_int div_trunc (const T &, signop, bool * = 0) const;
-
- template <typename T>
- fixed_wide_int sdiv_trunc (const T &) const;
-
- template <typename T>
- fixed_wide_int udiv_trunc (const T &) const;
-
- template <typename T>
- fixed_wide_int divmod_floor (const T &, fixed_wide_int *, signop) const;
-
- template <typename T>
- fixed_wide_int sdivmod_floor (const T &, fixed_wide_int *) const;
+ /* FIXME: this is target dependent, so should be elsewhere.
+ It also seems to assume that CHAR_BIT == BITS_PER_UNIT. */
+ wide_int from_buffer (const unsigned char *, unsigned int);
- /* Shifting rotating and extracting. */
-
- template <typename T>
- fixed_wide_int lrotate (const T &, unsigned int) const;
- fixed_wide_int lrotate (unsigned HOST_WIDE_INT, unsigned int) const;
-
- template <typename T>
- fixed_wide_int lshift (const T &, unsigned int = 0) const;
-
- template <typename T>
- fixed_wide_int lshift_widen (const T &, unsigned int) const;
+#ifndef GENERATOR_FILE
+ void to_mpz (wide_int, mpz_t, signop);
+#endif
- template <typename T>
- fixed_wide_int rshift (const T &, signop, unsigned int = 0) const;
+ wide_int mask (unsigned int, bool, unsigned int);
+ wide_int shifted_mask (unsigned int, unsigned int, bool, unsigned int);
+ wide_int set_bit_in_zero (unsigned int, unsigned int);
+ wide_int insert (const wide_int &x, const wide_int &y, unsigned int,
+ unsigned int);
template <typename T>
- fixed_wide_int rshiftu (const T &, unsigned int = 0) const;
+ T mask (unsigned int, bool);
template <typename T>
- fixed_wide_int rshifts (const T &, unsigned int = 0) const;
+ T shifted_mask (unsigned int, unsigned int, bool);
template <typename T>
- fixed_wide_int rrotate (const T &, unsigned int) const;
- fixed_wide_int rrotate (unsigned HOST_WIDE_INT, unsigned int) const;
-};
-
-template <int bitsize>
-inline fixed_wide_int <bitsize> &
-fixed_wide_int <bitsize>::operator = (const wide_int &w)
-{
- static_cast <wide_int_ro &> (*this) = w;
-
- /* We only allow the same size in, as otherwise
- we would not know how to extend it. */
- gcc_assert (precision == bitsize);
-
- return *this;
-}
-
-template <int bitsize>
-inline fixed_wide_int <bitsize>::fixed_wide_int (const wide_int_ro w)
- : wide_int_ro (w)
-{
- /* We only allow the same size in, as otherwise
- we would not know how to extend it. */
- gcc_assert (precision == bitsize);
-}
-
-template <int bitsize>
-inline const HOST_WIDE_INT *
-fixed_wide_int <bitsize>::get_val () const
-{
- return val;
-}
-
-template <int bitsize>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::from_wide_int (const wide_int &w)
-{
- if (w.neg_p ())
- return w.sforce_to_size (bitsize);
- return w.zforce_to_size (bitsize);
-}
-
-template <int bitsize>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::from_array (const HOST_WIDE_INT* op0,
- unsigned int len,
- bool need_canon)
-{
- return wide_int_ro::from_array (op0, len, bitsize, need_canon);
-}
-
-template <int bitsize>
-inline fixed_wide_int <bitsize>::fixed_wide_int () {}
-
-template <int bitsize>
-inline fixed_wide_int <bitsize>::fixed_wide_int (const_tree t)
-{
- *this = t;
-}
-
-template <int bitsize>
-inline fixed_wide_int <bitsize>::fixed_wide_int (HOST_WIDE_INT op0)
- : wide_int_ro (op0)
-{
- precision = bitsize;
-}
-
-template <int bitsize>
-inline fixed_wide_int <bitsize>::fixed_wide_int (int op0) : wide_int_ro (op0)
-{
- precision = bitsize;
-}
-
-template <int bitsize>
-inline fixed_wide_int <bitsize>::fixed_wide_int (unsigned HOST_WIDE_INT op0)
- : wide_int_ro (op0)
-{
- precision = bitsize;
- if (neg_p ())
- static_cast <wide_int_ro &> (*this) = zext (HOST_BITS_PER_WIDE_INT);
-}
-
-template <int bitsize>
-inline fixed_wide_int <bitsize>::fixed_wide_int (unsigned int op0)
- : wide_int_ro (op0)
-{
- precision = bitsize;
- if (sizeof (int) == sizeof (HOST_WIDE_INT)
- && neg_p ())
- *this = zext (HOST_BITS_PER_WIDE_INT);
-}
-
-template <int bitsize>
-inline fixed_wide_int <bitsize> &
-fixed_wide_int <bitsize>::operator ++ ()
-{
- *this += 1;
- return *this;
-}
-
-template <int bitsize>
-inline fixed_wide_int <bitsize> &
-fixed_wide_int <bitsize>::operator -- ()
-{
- *this -= 1;
- return *this;
-}
-
-template <int bitsize>
-inline bool
-fixed_wide_int <bitsize>::multiple_of_p (const wide_int_ro &factor,
- signop sgn,
- fixed_wide_int *multiple) const
-{
- return wide_int_ro::multiple_of_p (factor, sgn, multiple);
-}
-
-template <int bitsize>
-inline void
-fixed_wide_int <bitsize>::to_mpz (mpz_t m, signop sgn) const
-{
- wide_int_ro::to_mpz (m, sgn);
-}
-
-template <int bitsize>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::from_mpz (const_tree t, mpz_t m, bool e)
-{
- return wide_int_ro::from_mpz (t, m, e).force_to_size (bitsize,
- TYPE_SIGN (t));
-}
-
-template <int bitsize>
-inline fixed_wide_int <bitsize> &
-fixed_wide_int <bitsize>::operator = (const_tree t)
-{
- tree type = TREE_TYPE (t);
-
- static_cast <wide_int_ro &> (*this)
- = wide_int_ro::from_array (&TREE_INT_CST_ELT (t, 0),
- TREE_INT_CST_NUNITS (t),
- TYPE_PRECISION (TREE_TYPE (t)), false);
-
- precision = bitsize;
-
- /* This is logically top_bit_set_p. */
- if (TYPE_SIGN (type) == UNSIGNED && neg_p ())
- static_cast <wide_int_ro &> (*this) = zext (TYPE_PRECISION (type));
-
- return *this;
-}
-
-template <int bitsize>
-inline fixed_wide_int <bitsize> &
-fixed_wide_int <bitsize>::operator = (HOST_WIDE_INT op0)
-{
- static_cast <wide_int_ro &> (*this) = op0;
- precision = bitsize;
-
- return *this;
-}
-
-template <int bitsize>
-inline fixed_wide_int <bitsize> &
-fixed_wide_int <bitsize>::operator = (int op0)
-{
- static_cast <wide_int_ro &> (*this) = op0;
- precision = bitsize;
-
- return *this;
-}
-
-template <int bitsize>
-inline fixed_wide_int <bitsize> &
-fixed_wide_int <bitsize>::operator = (unsigned HOST_WIDE_INT op0)
-{
- static_cast <wide_int_ro &> (*this) = op0;
- precision = bitsize;
-
- /* This is logically top_bit_set_p. */
- if (neg_p ())
- static_cast <wide_int_ro &> (*this) = zext (HOST_BITS_PER_WIDE_INT);
-
- return *this;
-}
+ T set_bit_in_zero (unsigned int);
-template <int bitsize>
-inline fixed_wide_int <bitsize> &
-fixed_wide_int <bitsize>::operator = (unsigned int op0)
-{
- static_cast <wide_int_ro &> (*this) = op0;
- precision = bitsize;
-
- if (sizeof (int) == sizeof (HOST_WIDE_INT)
- && neg_p ())
- *this = zext (HOST_BITS_PER_WIDE_INT);
-
- return *this;
-}
-
-template <int bitsize>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::ext (unsigned int offset, signop sgn) const
-{
- return wide_int_ro::ext (offset, sgn);
-}
-
-template <int bitsize>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::sext (unsigned int offset) const
-{
- return wide_int_ro::sext (offset);
-}
-
-template <int bitsize>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::zext (unsigned int offset) const
-{
- return wide_int_ro::zext (offset);
-}
-
-template <int bitsize>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::set_bit (unsigned int bitpos) const
-{
- return wide_int_ro::set_bit (bitpos);
-}
-
-template <int bitsize>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::set_bit_in_zero (unsigned int bitpos)
-{
- return wide_int_ro::set_bit_in_zero (bitpos, bitsize);
-}
-
-template <int bitsize>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::insert (const wide_int_ro &op0, unsigned int offset,
- unsigned int width) const
-{
- return wide_int_ro::insert (op0, offset, width);
-}
-
-template <int bitsize>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::mask (unsigned int width, bool negate)
-{
- return wide_int_ro::mask (width, negate, bitsize);
-}
-
-template <int bitsize>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::shifted_mask (unsigned int start, unsigned int width,
- bool negate)
-{
- return wide_int_ro::shifted_mask (start, width, negate, bitsize);
-}
-
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::operator & (const T &c) const
-{
- return *this & fixed_wide_int (c);
-}
-
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize> &
-fixed_wide_int <bitsize>::operator &= (const T &c)
-{
- *this &= fixed_wide_int (c);
- return *this;
-}
-
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::and_not (const T &c) const
-{
- return wide_int_ro::and_not (fixed_wide_int (c));
-}
-
-template <int bitsize>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::operator ~ () const
-{
- return ~static_cast <const wide_int_ro &> (*this);
-}
-
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::operator | (const T &c) const
-{
- return *this | fixed_wide_int (c);
+ unsigned int mask (HOST_WIDE_INT *, unsigned int, bool, unsigned int);
+ unsigned int shifted_mask (HOST_WIDE_INT *, unsigned int, unsigned int,
+ bool, unsigned int);
+ unsigned int from_array (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, unsigned int, bool);
}
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize> &
-fixed_wide_int <bitsize>::operator |= (const T &c)
+/* Perform a widening multiplication of X and Y, extending the values
+ according to SGN. */
+inline wide_int
+wi::mul_full (const wide_int_ref &x, const wide_int_ref &y, signop sgn)
{
- *this |= fixed_wide_int (c);
- return *this;
-}
-
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::or_not (const T &c) const
-{
- return wide_int_ro::or_not (fixed_wide_int (c));
-}
-
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::operator ^ (const T &c) const
-{
- return *this ^ fixed_wide_int (c);
-}
-
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize> &
-fixed_wide_int <bitsize>::operator ^= (const T &c)
-{
- *this ^= fixed_wide_int (c);
- return *this;
-}
-
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::operator + (const T &c) const
-{
- return *this + fixed_wide_int (c);
-}
-
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize> &
-fixed_wide_int <bitsize>::operator += (const T &c)
-{
- *this += fixed_wide_int (c);
- return *this;
-}
-
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::add (const T &c, signop sgn, bool *overflow) const
-{
- return wide_int_ro::add (c, sgn, overflow);
-}
-
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::operator * (const T &c) const
-{
- return static_cast <const wide_int_ro &> (*this) * fixed_wide_int (c);
-}
-
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize> &
-fixed_wide_int <bitsize>::operator *= (const T &c)
-{
- reinterpret_cast <wide_int &> (*this) *= c;
- return *this;
-}
-
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::mul (const T &c, signop sgn, bool *overflow) const
-{
- return wide_int_ro::mul (c, sgn, overflow);
-}
-
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::smul (const T &c, bool *overflow) const
-{
- return wide_int_ro::smul (c, overflow);
-}
-
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::umul (const T &c, bool *overflow) const
-{
- return wide_int_ro::umul (c, overflow);
-}
-
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::operator - (const T &c) const
-{
- return *this - fixed_wide_int (c);
-}
-
-template <int bitsize>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::operator - () const
-{
- return - static_cast <const wide_int_ro &> (*this);
-}
-
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize> &
-fixed_wide_int <bitsize>::operator -= (const T &c)
-{
- return *this -= fixed_wide_int (c);
-}
-
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::sub (const T &c, signop sgn, bool *overflow) const
-{
- return wide_int_ro::sub (c, sgn, overflow);
-}
-
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::div_floor (const T &c, signop sgn,
- bool *overflow) const
-{
- return wide_int_ro::div_floor (c, sgn, overflow);
-}
-
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::udiv_floor (const T &c) const
-{
- return wide_int_ro::udiv_floor (c);
-}
-
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::sdiv_floor (const T &c) const
-{
- return wide_int_ro::sdiv_floor (c);
-}
-
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::div_ceil (const T &c, signop sgn,
- bool *overflow) const
-{
- return wide_int_ro::div_ceil (c, sgn, overflow);
-}
-
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::div_round (const T &c, signop sgn,
- bool *overflow) const
-{
- return wide_int_ro::div_round (c, sgn, overflow);
-}
-
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::div_trunc (const T &c, signop sgn,
- bool *overflow) const
-{
- return wide_int_ro::div_trunc (c,sgn, overflow);
-}
-
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::sdiv_trunc (const T &c) const
-{
- return wide_int_ro::sdiv_trunc (c);
-}
-
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::udiv_trunc (const T &c) const
-{
- return wide_int_ro::udiv_trunc (c);
-}
-
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::divmod_floor (const T &c, fixed_wide_int *mod,
- signop sgn) const
-{
- return wide_int_ro::divmod_floor (c, mod, sgn);
-}
-
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::sdivmod_floor (const T &c, fixed_wide_int *mod) const
-{
- return wide_int_ro::sdivmod_floor (c, mod);
-}
-
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::lrotate (const T &c, unsigned int prec) const
-{
- return wide_int_ro::lrotate (c, prec);
-}
-
-template <int bitsize>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::lrotate (unsigned HOST_WIDE_INT y,
- unsigned int prec) const
-{
- return wide_int_ro::lrotate (y, prec);
+ gcc_checking_assert (x.precision == y.precision);
+ wide_int result = wide_int::create (x.precision * 2);
+ result.set_len (mul_internal (result.write_val (), x.val, x.len,
+ y.val, y.len, x.precision,
+ sgn, 0, false, true));
+ return result;
}
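+/* Usage sketch (illustrative, not part of this patch): the result has
+ twice the precision of the operands, so for 16-bit x = y = 0xffff,
+ wi::mul_full (x, y, UNSIGNED) yields the 32-bit value 0xfffe0001
+ with no overflow. */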
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::lshift (const T &c, unsigned int bit_size) const
+/* Return a PRECISION-bit integer in which the low WIDTH bits are set
+ and the other bits are clear, or the inverse if NEGATE_P. */
+inline wide_int
+wi::mask (unsigned int width, bool negate_p, unsigned int precision)
{
- return wide_int_ro::lshift (c, bit_size);
+ wide_int result = wide_int::create (precision);
+ result.set_len (mask (result.write_val (), width, negate_p, precision));
+ return result;
}
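+/* For example (illustrative only): wi::mask (4, false, 8) gives the
+ 8-bit value 0x0f, and wi::mask (4, true, 8) gives the inverse,
+ 0xf0. */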
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::lshift_widen (const T &c,
- unsigned int new_prec) const
+/* Return a PRECISION-bit integer in which the low START bits are clear,
+ the next WIDTH bits are set, and the other bits are clear,
+ or the inverse if NEGATE_P. */
+inline wide_int
+wi::shifted_mask (unsigned int start, unsigned int width, bool negate_p,
+ unsigned int precision)
{
- return wide_int_ro::lshift_widen (c, new_prec);
+ wide_int result = wide_int::create (precision);
+ result.set_len (shifted_mask (result.write_val (), start, width, negate_p,
+ precision));
+ return result;
}
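+/* For example (illustrative only): wi::shifted_mask (2, 3, false, 8)
+ sets bits 2..4 of an 8-bit value, giving 0x1c; passing true for
+ NEGATE_P gives the inverse, 0xe3. */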
-template <int bitsize>
-template <typename T>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::rshift (const T &c, signop sgn,
- unsigned int bit_size) const
+/* Return a PRECISION-bit integer in which bit BIT is set and all the
+ others are clear. */
+inline wide_int
+wi::set_bit_in_zero (unsigned int bit, unsigned int precision)
{
- return wide_int_ro::rshift (c, sgn, bit_size);
+ return shifted_mask (bit, 1, false, precision);
}
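+/* For example (illustrative only): wi::set_bit_in_zero (3, 8) is
+ equivalent to shifted_mask (3, 1, false, 8) and gives 0x08. */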
-template <int bitsize>
+/* Return an integer of type T in which the low WIDTH bits are set
+ and the other bits are clear, or the inverse if NEGATE_P. */
template <typename T>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::rshiftu (const T &c,
- unsigned int bit_size) const
+inline T
+wi::mask (unsigned int width, bool negate_p)
{
- return wide_int_ro::rshiftu (c, bit_size);
+ STATIC_ASSERT (wi::int_traits<T>::precision);
+ T result;
+ result.set_len (mask (result.write_val (), width, negate_p,
+ wi::int_traits <T>::precision));
+ return result;
}
-template <int bitsize>
+/* Return an integer of type T in which the low START bits are clear,
+ the next WIDTH bits are set, and the other bits are clear,
+ or the inverse if NEGATE_P. */
template <typename T>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::rshifts (const T &c,
- unsigned int bit_size) const
+inline T
+wi::shifted_mask (unsigned int start, unsigned int width, bool negate_p)
{
- return wide_int_ro::rshifts (c, bit_size);
+ STATIC_ASSERT (wi::int_traits<T>::precision);
+ T result;
+ result.set_len (shifted_mask (result.write_val (), start, width, negate_p,
+ wi::int_traits <T>::precision));
+ return result;
}
-template <int bitsize>
+/* Return an integer of type T in which bit BIT is set and all the
+ others are clear. */
template <typename T>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::rrotate (const T &c, unsigned int prec) const
-{
- return wide_int_ro::rrotate (c, prec);
-}
-
-template <int bitsize>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::rrotate (unsigned HOST_WIDE_INT y,
- unsigned int prec) const
+inline T
+wi::set_bit_in_zero (unsigned int bit)
{
- return wide_int_ro::lrotate (y, prec);
+ return shifted_mask <T> (bit, 1, false);
}
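+/* Usage sketch for the templated forms (FIXED_INT is a placeholder for
+ whatever fixed-precision wide-int type a caller instantiates them
+ with, not a name from this patch):
+
+ FIXED_INT low8 = wi::mask <FIXED_INT> (8, false);
+ FIXED_INT bit0 = wi::set_bit_in_zero <FIXED_INT> (0);
+
+ Both rely on wi::int_traits <FIXED_INT>::precision being a nonzero
+ compile-time constant, as the STATIC_ASSERTs above check. */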
-/* Logicals */
-template <>
-template <int bitsize>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::operator & (const fixed_wide_int <bitsize> &c) const
-{
- return static_cast <const wide_int_ro &> (*this) & c;
-}
-
-template <>
-template <int bitsize>
-inline fixed_wide_int <bitsize> &
-fixed_wide_int <bitsize>::operator &= (const fixed_wide_int <bitsize> &c)
-{
- reinterpret_cast <wide_int &> (*this) &= (const wide_int_ro &) c;
- return *this;
-}
-
-template <>
-template <int bitsize>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::operator | (const fixed_wide_int <bitsize> &c) const
-{
- return static_cast <const wide_int_ro &> (*this) | c;
-}
-
-template <>
-template <int bitsize>
-inline fixed_wide_int <bitsize> &
-fixed_wide_int <bitsize>::operator |= (const fixed_wide_int <bitsize> &c)
-{
- reinterpret_cast <wide_int &> (*this) |= (const wide_int_ro &) c;
- return *this;
-}
-
-template <>
-template <int bitsize>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::operator ^ (const fixed_wide_int <bitsize> &c) const
-{
- return static_cast <const wide_int_ro &> (*this) ^ c;
-}
-
-template <>
-template <int bitsize>
-inline fixed_wide_int <bitsize> &
-fixed_wide_int <bitsize>::operator ^= (const fixed_wide_int <bitsize> &c)
-{
- reinterpret_cast <wide_int &> (*this) ^= (const wide_int_ro &) c;
- return *this;
-}
-
-/* Math operators */
-template <>
-template <int bitsize>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::operator + (const fixed_wide_int <bitsize> &c) const
-{
- return static_cast <const wide_int_ro &> (*this) + c;
-}
-
-template <>
-template <int bitsize>
-inline fixed_wide_int <bitsize> &
-fixed_wide_int <bitsize>::operator += (const fixed_wide_int <bitsize> &c)
-{
- reinterpret_cast <wide_int &> (*this) += (const wide_int_ro &) c;
- return *this;
-}
-
-template <>
-template <int bitsize>
-inline fixed_wide_int <bitsize>
-fixed_wide_int <bitsize>::operator - (const fixed_wide_int <bitsize> &c) const
-{
- return static_cast <const wide_int_ro &> (*this) - c;
-}
-
-template <>
-template <int bitsize>
-inline fixed_wide_int <bitsize> &
-fixed_wide_int <bitsize>::operator -= (const fixed_wide_int <bitsize> &c)
-{
- reinterpret_cast <wide_int &> (*this) -= (const wide_int_ro &) c;
- return *this;
-}
-
-/* A wide_int_ro that has a large enough precision to do any address math
- on the target. */
-typedef fixed_wide_int <ADDR_MAX_PRECISION> addr_wide_int;
-/* A wide_int_ro that has a large enough precision to do any math on the
- target. */
-typedef fixed_wide_int <MAX_BITSIZE_MODE_ANY_INT> max_wide_int;
-
-extern void gt_ggc_mx(max_wide_int*);
-extern void gt_pch_nx(max_wide_int*,void (*)(void*, void*), void*);
-extern void gt_pch_nx(max_wide_int*);
-
-extern addr_wide_int mem_ref_offset (const_tree);
-
-/* The wide-int overload templates. */
-
-template <>
-inline const HOST_WIDE_INT*
-wide_int_ro::to_shwi1 (HOST_WIDE_INT *s ATTRIBUTE_UNUSED,
- unsigned int *l, unsigned int *p,
- const wide_int_ro &y)
-{
- *p = y.precision;
- *l = y.len;
- return y.val;
-}
-
-template <>
-inline const HOST_WIDE_INT*
-wide_int_ro::to_shwi1 (HOST_WIDE_INT *s ATTRIBUTE_UNUSED,
- unsigned int *l, unsigned int *p,
- const wide_int &y)
-{
- *p = y.precision;
- *l = y.len;
- return y.val;
-}
-
-
-template <>
-inline const HOST_WIDE_INT*
-wide_int_ro::to_shwi1 (HOST_WIDE_INT *s ATTRIBUTE_UNUSED,
- unsigned int *l, unsigned int *p,
- const fixed_wide_int <ADDR_MAX_PRECISION> &y)
-{
- *p = y.get_precision ();
- *l = y.get_len ();
- return y.get_val ();
-}
-
-#if ADDR_MAX_PRECISION != MAX_BITSIZE_MODE_ANY_INT
-template <>
-inline const HOST_WIDE_INT*
-wide_int_ro::to_shwi1 (HOST_WIDE_INT *s ATTRIBUTE_UNUSED,
- unsigned int *l, unsigned int *p,
- const fixed_wide_int <MAX_BITSIZE_MODE_ANY_INT> &y)
-{
- *p = y.get_precision ();
- *l = y.get_len ();
- return y.get_val ();
-}
-#endif
-
-template <>
-inline const HOST_WIDE_INT*
-wide_int_ro::to_shwi2 (HOST_WIDE_INT *s ATTRIBUTE_UNUSED,
- unsigned int *l, const wide_int &y)
-{
- *l = y.len;
- return y.val;
-}
-
-
-/* The tree and const_tree overload templates. */
-template <>
-inline const HOST_WIDE_INT*
-wide_int_ro::to_shwi1 (HOST_WIDE_INT *s ATTRIBUTE_UNUSED,
- unsigned int *l, unsigned int *p,
- const tree &tcst)
-{
- tree type = TREE_TYPE (tcst);
-
- *p = TYPE_PRECISION (type);
- *l = TREE_INT_CST_NUNITS (tcst);
- return (const HOST_WIDE_INT*)&TREE_INT_CST_ELT (tcst, 0);
-}
-
-template <>
-inline const HOST_WIDE_INT*
-wide_int_ro::to_shwi1 (HOST_WIDE_INT *s ATTRIBUTE_UNUSED,
- unsigned int *l, unsigned int *p,
- const const_tree &tcst)
-{
- tree type = TREE_TYPE (tcst);
-
- *p = TYPE_PRECISION (type);
- *l = TREE_INT_CST_NUNITS (tcst);
- return (const HOST_WIDE_INT*)&TREE_INT_CST_ELT (tcst, 0);
-}
-
-template <>
-inline const HOST_WIDE_INT*
-wide_int_ro::to_shwi2 (HOST_WIDE_INT *s ATTRIBUTE_UNUSED,
- unsigned int *l, const tree &tcst)
-{
- *l = TREE_INT_CST_NUNITS (tcst);
- return (const HOST_WIDE_INT*)&TREE_INT_CST_ELT (tcst, 0);
-}
-
-template <>
-inline const HOST_WIDE_INT*
-wide_int_ro::to_shwi2 (HOST_WIDE_INT *s ATTRIBUTE_UNUSED,
- unsigned int *l, const const_tree &tcst)
-{
- *l = TREE_INT_CST_NUNITS (tcst);
- return (const HOST_WIDE_INT*)&TREE_INT_CST_ELT (tcst, 0);
-}
-
-/* Checking for the functions that require that at least one of the
- operands have a nonzero precision. If both of them have a precision,
- then if CHECK_EQUAL is true, require that the precision be the same. */
-
-inline void
-wide_int_ro::check_precision (unsigned int *p1, unsigned int *p2,
- bool check_equal ATTRIBUTE_UNUSED,
- bool check_zero ATTRIBUTE_UNUSED)
-{
- gcc_checking_assert ((!check_zero) || *p1 != 0 || *p2 != 0);
-
- if (*p1 == 0)
- *p1 = *p2;
-
- if (*p2 == 0)
- *p2 = *p1;
-
- gcc_checking_assert ((!check_equal) || *p1 == *p2);
-}
-
-/* This is used to bundle an rtx and a mode together so that the pair
- can be used as the second operand of a wide int expression. If we
- ever put modes into rtx integer constants, this should go away and
- then just pass an rtx in. */
-typedef std::pair <rtx, enum machine_mode> rtx_mode_t;
-
-/* There should logically be an overload for rtl here, but it cannot
- be here because of circular include issues. It is in rtl.h. */
-template <>
-inline const HOST_WIDE_INT*
-wide_int_ro::to_shwi2 (HOST_WIDE_INT *s ATTRIBUTE_UNUSED,
- unsigned int *l, const rtx_mode_t &rp);
-
-/* tree related routines. */
-
-extern tree wide_int_to_tree (tree type, const wide_int_ro &cst);
-extern tree force_fit_type_wide (tree, const wide_int_ro &, int, bool);
-
-/* real related routines. */
-extern wide_int real_to_integer (const REAL_VALUE_TYPE *, bool *, int);
-extern void real_from_integer (REAL_VALUE_TYPE *, enum machine_mode,
- wide_int, signop);
-extern wide_int decimal_real_to_integer (const REAL_VALUE_TYPE *, bool *, int);
-
-
-#endif /* GENERATOR FILE */
-
#endif /* WIDE_INT_H */