summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTrevor Saunders <tbsaunde+gcc@tbsaunde.org>2015-02-08 09:22:28 -0500
committerTrevor Saunders <tbsaunde+gcc@tbsaunde.org>2015-04-09 19:57:14 -0400
commit4f8953b6684459714538e47696797e3b02dfe428 (patch)
treeebb4c8dbf1bdaa10be85e4042cd782b936f52a62
parent4aed2663603411f5bb768f5c84d1a872a5bb281c (diff)
downloadgcc-tbsaunde/bitvec.tar.gz
add bitvec and use sometbsaunde/bitvec
gcc/ 2015-04-08 Trevor Saunders <tbsaunde@tbsaunde.org> * bitvec.h: New file. * bitvec.c: New file. * Makefile.in (OBJS-libcommon): Add bitvec.o. * alias.c, cfganal.c, cfgbuild.c, cfgbuild.h, cfgexpand.c, cfghooks.c, cfghooks.h, cfgloop.c, cfgloopmanip.c, cfgloopmanip.h, cfgrtl.c, config/spu/spu.c, cp/vtable-class-hierarchy.c, cse.c, dce.c, df-core.c, dse.c, except.c, except.h, function.c, graph.c, graphite-sese-to-poly.c, ira-lives.c, loop-unroll.c, lower-subreg.c, lra-lives.c, lra.c, modulo-sched.c, recog.c, regcprop.c, reload1.c, sched-int.h, sched-rgn.c, sel-sched-ir.c, sel-sched.c, store-motion.c, tree-eh.c, tree-into-ssa.c, tree-outof-ssa.c, tree-ssa-dce.c, tree-ssa-live.c, tree-ssa-loop-im.c, tree-ssa-loop-ivcanon.c, tree-ssa-loop-manip.c, tree-ssa-loop-manip.h, tree-ssa-pre.c, tree-ssa-propagate.c, tree-ssa-reassoc.c, tree-ssa-structalias.c, tree-stdarg.c, tree-vect-slp.c, tree-vrp.c, var-tracking.c: Use bitvec instead of sbitmap.
-rw-r--r--gcc/Makefile.in2
-rw-r--r--gcc/alias.c39
-rw-r--r--gcc/bitvec.c37
-rw-r--r--gcc/bitvec.h194
-rw-r--r--gcc/cfganal.c84
-rw-r--r--gcc/cfgbuild.c6
-rw-r--r--gcc/cfgbuild.h4
-rw-r--r--gcc/cfgexpand.c7
-rw-r--r--gcc/cfghooks.c2
-rw-r--r--gcc/cfghooks.h6
-rw-r--r--gcc/cfgloop.c22
-rw-r--r--gcc/cfgloopmanip.c61
-rw-r--r--gcc/cfgloopmanip.h4
-rw-r--r--gcc/cfgrtl.c9
-rw-r--r--gcc/config/spu/spu.c14
-rw-r--r--gcc/cp/vtable-class-hierarchy.c18
-rw-r--r--gcc/cse.c24
-rw-r--r--gcc/dce.c14
-rw-r--r--gcc/df-core.c25
-rw-r--r--gcc/dse.c22
-rw-r--r--gcc/except.c17
-rw-r--r--gcc/except.h3
-rw-r--r--gcc/function.c11
-rw-r--r--gcc/graph.c12
-rw-r--r--gcc/graphite-sese-to-poly.c17
-rw-r--r--gcc/ira-lives.c45
-rw-r--r--gcc/loop-unroll.c46
-rw-r--r--gcc/lower-subreg.c62
-rw-r--r--gcc/lra-lives.c55
-rw-r--r--gcc/lra.c7
-rw-r--r--gcc/modulo-sched.c73
-rw-r--r--gcc/recog.c9
-rw-r--r--gcc/regcprop.c12
-rw-r--r--gcc/reload1.c7
-rw-r--r--gcc/sched-int.h2
-rw-r--r--gcc/sched-rgn.c103
-rw-r--r--gcc/sel-sched-ir.c23
-rw-r--r--gcc/sel-sched.c19
-rw-r--r--gcc/store-motion.c10
-rw-r--r--gcc/tree-eh.c58
-rw-r--r--gcc/tree-into-ssa.c23
-rw-r--r--gcc/tree-outof-ssa.c29
-rw-r--r--gcc/tree-ssa-dce.c46
-rw-r--r--gcc/tree-ssa-live.c22
-rw-r--r--gcc/tree-ssa-loop-im.c12
-rw-r--r--gcc/tree-ssa-loop-ivcanon.c19
-rw-r--r--gcc/tree-ssa-loop-manip.c12
-rw-r--r--gcc/tree-ssa-loop-manip.h4
-rw-r--r--gcc/tree-ssa-pre.c45
-rw-r--r--gcc/tree-ssa-propagate.c30
-rw-r--r--gcc/tree-ssa-reassoc.c172
-rw-r--r--gcc/tree-ssa-structalias.c93
-rw-r--r--gcc/tree-stdarg.c10
-rw-r--r--gcc/tree-vect-slp.c48
-rw-r--r--gcc/tree-vrp.c70
-rw-r--r--gcc/var-tracking.c16
56 files changed, 934 insertions, 902 deletions
diff --git a/gcc/Makefile.in b/gcc/Makefile.in
index 4ab7405fb79..d526a88dca0 100644
--- a/gcc/Makefile.in
+++ b/gcc/Makefile.in
@@ -1496,7 +1496,7 @@ OBJS = \
# Objects in libcommon.a, potentially used by all host binaries and with
# no target dependencies.
OBJS-libcommon = diagnostic.o diagnostic-color.o pretty-print.o intl.o \
- vec.o input.o version.o
+ bitvec.o vec.o input.o version.o
# Objects in libcommon-target.a, used by drivers and by the core
# compiler and containing target-dependent code.
diff --git a/gcc/alias.c b/gcc/alias.c
index a7160f3e956..dd61f947d6a 100644
--- a/gcc/alias.c
+++ b/gcc/alias.c
@@ -23,6 +23,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
+#include "bitvec.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
@@ -304,7 +305,7 @@ static GTY(()) vec<rtx, va_gc> *reg_known_value;
REG_EQUIV notes. One could argue that the REG_EQUIV notes are
wrong, but solving the problem in the scheduler will likely give
better code, so we do it here. */
-static sbitmap reg_known_equiv_p;
+static bitvec reg_known_equiv_p;
/* True when scanning insns from the start of the rtl to the
NOTE_INSN_FUNCTION_BEG note. */
@@ -1284,7 +1285,7 @@ find_base_value (rtx src)
/* While scanning insns to find base values, reg_seen[N] is nonzero if
register N has been set in this function. */
-static sbitmap reg_seen;
+static bitvec reg_seen;
static void
record_set (rtx dest, const_rtx set, void *data ATTRIBUTE_UNUSED)
@@ -1310,7 +1311,7 @@ record_set (rtx dest, const_rtx set, void *data ATTRIBUTE_UNUSED)
{
while (--n >= 0)
{
- bitmap_set_bit (reg_seen, regno + n);
+ reg_seen[regno + n] = true;
new_reg_base_value[regno + n] = 0;
}
return;
@@ -1331,12 +1332,12 @@ record_set (rtx dest, const_rtx set, void *data ATTRIBUTE_UNUSED)
else
{
/* There's a REG_NOALIAS note against DEST. */
- if (bitmap_bit_p (reg_seen, regno))
+ if (reg_seen[regno])
{
new_reg_base_value[regno] = 0;
return;
}
- bitmap_set_bit (reg_seen, regno);
+ reg_seen[regno] = true;
new_reg_base_value[regno] = unique_base_value (unique_id++);
return;
}
@@ -1392,10 +1393,10 @@ record_set (rtx dest, const_rtx set, void *data ATTRIBUTE_UNUSED)
}
/* If this is the first set of a register, record the value. */
else if ((regno >= FIRST_PSEUDO_REGISTER || ! fixed_regs[regno])
- && ! bitmap_bit_p (reg_seen, regno) && new_reg_base_value[regno] == 0)
+ && ! reg_seen[regno] && new_reg_base_value[regno] == 0)
new_reg_base_value[regno] = find_base_value (src);
- bitmap_set_bit (reg_seen, regno);
+ reg_seen[regno] = true;
}
/* Return REG_BASE_VALUE for REGNO. Selective scheduler uses this to avoid
@@ -1442,7 +1443,7 @@ get_reg_known_equiv_p (unsigned int regno)
{
regno -= FIRST_PSEUDO_REGISTER;
if (regno < vec_safe_length (reg_known_value))
- return bitmap_bit_p (reg_known_equiv_p, regno);
+ return reg_known_equiv_p[regno];
}
return false;
}
@@ -1454,12 +1455,7 @@ set_reg_known_equiv_p (unsigned int regno, bool val)
{
regno -= FIRST_PSEUDO_REGISTER;
if (regno < vec_safe_length (reg_known_value))
- {
- if (val)
- bitmap_set_bit (reg_known_equiv_p, regno);
- else
- bitmap_clear_bit (reg_known_equiv_p, regno);
- }
+ reg_known_equiv_p[regno] = val;
}
}
@@ -2848,8 +2844,8 @@ init_alias_analysis (void)
timevar_push (TV_ALIAS_ANALYSIS);
vec_safe_grow_cleared (reg_known_value, maxreg - FIRST_PSEUDO_REGISTER);
- reg_known_equiv_p = sbitmap_alloc (maxreg - FIRST_PSEUDO_REGISTER);
- bitmap_clear (reg_known_equiv_p);
+ reg_known_equiv_p.grow (maxreg - FIRST_PSEUDO_REGISTER);
+ reg_known_equiv_p.clear ();
/* If we have memory allocated from the previous run, use it. */
if (old_reg_base_value)
@@ -2861,7 +2857,7 @@ init_alias_analysis (void)
vec_safe_grow_cleared (reg_base_value, maxreg);
new_reg_base_value = XNEWVEC (rtx, maxreg);
- reg_seen = sbitmap_alloc (maxreg);
+ reg_seen.grow (maxreg);
/* The basic idea is that each pass through this loop will use the
"constant" information from the previous pass to propagate alias
@@ -2906,14 +2902,14 @@ init_alias_analysis (void)
memset (new_reg_base_value, 0, maxreg * sizeof (rtx));
/* Wipe the reg_seen array clean. */
- bitmap_clear (reg_seen);
+ reg_seen.clear ();
/* Initialize the alias information for this pass. */
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
if (static_reg_base_value[i])
{
new_reg_base_value[i] = static_reg_base_value[i];
- bitmap_set_bit (reg_seen, i);
+ reg_seen[i] = true;
}
/* Walk the insns adding values to the new_reg_base_value array. */
@@ -3025,8 +3021,7 @@ init_alias_analysis (void)
/* Clean up. */
free (new_reg_base_value);
new_reg_base_value = 0;
- sbitmap_free (reg_seen);
- reg_seen = 0;
+ reg_seen.clear_and_release ();
timevar_pop (TV_ALIAS_ANALYSIS);
}
@@ -3044,7 +3039,7 @@ end_alias_analysis (void)
{
old_reg_base_value = reg_base_value;
vec_free (reg_known_value);
- sbitmap_free (reg_known_equiv_p);
+ reg_known_equiv_p.clear_and_release ();
}
#include "gt-alias.h"
diff --git a/gcc/bitvec.c b/gcc/bitvec.c
new file mode 100644
index 00000000000..fa661090619
--- /dev/null
+++ b/gcc/bitvec.c
@@ -0,0 +1,37 @@
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "bitvec.h"
+
+/* Print the contents of this bitvec to FILE in the form
+   "n_bits = N, set = { i j k ... }".  Note that N is the storage
+   capacity (words * 64), not a logical bit count -- bitvec does not
+   track how many bits the caller considers live.  */
+void
+bitvec::dump (FILE *file) const
+{
+ size_t bits = m_bits.length () * bits_per_elt;
+ fprintf (file, "n_bits = %d, set = {", (int) bits);
+
+ /* POS tracks the output column; it starts at 30 to roughly account
+    for the prefix just printed, and the line is wrapped once it passes
+    column 70.  */
+ for (size_t pos = 30, i = 0; i < bits; i++)
+ if ((*this)[i])
+ {
+ if (pos > 70)
+ {
+ fprintf (file, "\n ");
+ pos = 0;
+ }
+
+ fprintf (file, "%d ", (int) i);
+ /* Advance POS by the width of the number just printed plus the
+    trailing space (indices >= 10000 are under-counted, which only
+    affects line-wrap aesthetics).  */
+ pos += 2 + (i >= 10) + (i >= 100) + (i >= 1000);
+ }
+
+ fprintf (file, "}\n");
+}
+
+/* Return the index of the lowest set bit, or -1 if no bit is set.
+   This is a linear scan over the full storage capacity (see end ()),
+   i.e. O(capacity) even for sparse or empty vectors, and it tests one
+   bit per iteration rather than one word.  */
+ssize_t
+bitvec::first_set_bit () const
+{
+ for (size_t i = begin (); i < end (); i++)
+ if ((*this)[i])
+ return i;
+
+ return -1;
+}
diff --git a/gcc/bitvec.h b/gcc/bitvec.h
new file mode 100644
index 00000000000..ff34158ef7e
--- /dev/null
+++ b/gcc/bitvec.h
@@ -0,0 +1,194 @@
+
+
+#ifndef BITVEC_H_
+#define BITVEC_H_
+
+#include "vec.h"
+
+class stack_bitvec;
+
+/* A growable bitmap backed by a vector of 64-bit words.  Intended as a
+   replacement for sbitmap: the capacity does not need to be fixed up
+   front, but accessing bit I is only valid once grow () (or the sizing
+   constructor) has made the vector at least I + 1 bits large.  */
+class bitvec
+{
+private:
+ typedef uint64_t elt_type;
+ static const uint8_t bits_per_elt = 64;
+
+public:
+
+ bitvec () {}
+ /* Allocate zero-initialized storage for at least ELTS bits.  */
+ explicit bitvec (size_t elts)
+ { m_bits.safe_grow_cleared (needed_elts (elts)); }
+
+ /* Proxy returned by the non-const operator[], so that
+    "bv[i] = true" can address a single bit within a word.  */
+ struct bit_ref
+ {
+ bit_ref (uint64_t *word, uint8_t bit) : m_word (word), m_bit (bit) {}
+
+ operator bool() const { return *m_word & (((uint64_t) 1) << m_bit); }
+ bit_ref &operator= (bool b)
+ {
+ if (b)
+ *m_word |= ((uint64_t) 1) << m_bit;
+ else
+ *m_word &= ~(((uint64_t) 1) << m_bit);
+
+ return *this;
+ }
+
+private:
+ uint64_t *m_word;
+ uint8_t m_bit;
+ };
+
+ /* Read bit ELT.  ELT must be within the current capacity.  */
+ bool
+ operator[] (size_t elt) const
+ { return m_bits[word (elt)] & bit_mask (elt); }
+ bit_ref operator[] (size_t elt)
+ { return bit_ref (&m_bits[word (elt)], elt % bits_per_elt); }
+
+ /* Set union; grows *this as needed, so OTHER may be larger.  */
+ bitvec &
+ operator|= (const bitvec &other)
+ {
+ if (other.m_bits.length () > m_bits.length ())
+ m_bits.safe_grow_cleared (other.m_bits.length ());
+
+ for (size_t i = 0; i < other.m_bits.length (); i++)
+ m_bits[i] |= other.m_bits[i];
+
+ return *this;
+ }
+
+ /* NOTE(review): equality compares word counts first, so two vectors
+    with the same set bits but different capacities compare unequal --
+    confirm callers only ever compare equally-sized vectors.  */
+ bool
+ operator== (const bitvec &other) const
+ {
+ if (m_bits.length () != other.m_bits.length ())
+ return false;
+
+ for (size_t i = 0; i < m_bits.length (); i++)
+ if (m_bits[i] != other.m_bits[i])
+ return false;
+
+ return true;
+ }
+
+ /* Half-open index range [begin (), end ()) of addressable bits.  */
+ size_t begin () const { return 0; }
+ size_t end () const { return m_bits.length () * bits_per_elt; }
+
+ /* Number of 64-bit words, not bits; see end () for the bit capacity.  */
+ size_t size () const { return m_bits.length (); }
+
+ ssize_t first_set_bit () const;
+ bool any_set_bits () const { return first_set_bit () >= 0; }
+
+ bitvec (const bitvec &other) : m_bits (other.m_bits) {}
+
+ /* Drop all elements.  NOTE(review): vec::truncate does not free the
+    backing store -- confirm that is the intended "release" semantic.  */
+ void clear_and_release () { m_bits.truncate (0); }
+
+ /* Zero every bit without changing the capacity.  */
+ void clear ()
+ {
+ memset (m_bits.address (), 0, sizeof (uint64_t) * m_bits.length ());
+ }
+
+ /* Set every bit (the whole capacity) to one.  */
+ void set ()
+ {
+ for (size_t i = 0; i < m_bits.length (); i++)
+ m_bits[i] = ~(uint64_t) 0;
+ }
+
+ /* Ensure capacity for at least BITS bits; new words are zeroed.  */
+ void
+ grow (size_t bits)
+ {
+ m_bits.safe_grow_cleared (needed_elts (bits));
+ }
+
+ void dump (FILE *) const;
+
+protected:
+ /* Index of the word containing bit ELT.  */
+ static size_t word (size_t elt) { return elt / bits_per_elt; }
+ /* Mask selecting bit ELT within its word.  The return type must be the
+    full 64-bit word type: returning size_t would truncate the mask on
+    32-bit hosts for ELT % 64 >= 32, making operator[] read the wrong
+    bit.  */
+ static uint64_t bit_mask (size_t elt) { return ((uint64_t) 1) << (elt % bits_per_elt); }
+ /* Number of words needed to hold BITS bits, rounding up.  */
+ static size_t
+ needed_elts (size_t bits)
+ {
+ return (bits + bits_per_elt - 1) / bits_per_elt;
+ }
+
+ void copy (const bitvec &other) { m_bits = other.m_bits; }
+
+ friend stack_bitvec operator| (const bitvec &, const bitvec &);
+
+ auto_vec<uint64_t, 0> m_bits;
+};
+
+/* A bitvec whose first inline_elts words (256 bits) live inline in the
+   object, avoiding heap allocation for small bitmaps -- the bitvec
+   analogue of auto_vec.  m_auto is an embedded vec header and m_data
+   supplies the remaining inline element storage, which is assumed to be
+   laid out contiguously after m_auto -- TODO confirm this matches the
+   vl_embed layout assumptions in vec.h.  */
+class stack_bitvec : public bitvec
+{
+public:
+ stack_bitvec () { init (); }
+
+ /* Size to BITS bits: use the inline storage when it is big enough,
+    otherwise let safe_grow_cleared heap-allocate as usual.  */
+ explicit stack_bitvec (size_t bits)
+ {
+ if (needed_elts (bits) <= inline_elts)
+ {
+ m_auto.embedded_init (inline_elts, 0, 1);
+ m_bits.m_vec = &m_auto;
+ }
+
+ m_bits.safe_grow_cleared (needed_elts (bits));
+ }
+
+ stack_bitvec (const stack_bitvec &other) : bitvec ()
+ {
+ init ();
+ copy (other);
+ }
+
+ stack_bitvec (const bitvec &other)
+ {
+ init ();
+ copy (other);
+ }
+
+ /* NOTE(review): clear_and_release () only truncates, and init () then
+    re-points m_bits at the inline storage; if a previous grow had
+    heap-allocated the vector, that buffer looks dropped without being
+    freed -- verify against the vec.h ownership rules.  */
+ stack_bitvec &
+ operator= (const bitvec &other)
+ {
+ clear_and_release ();
+ init ();
+ copy (other);
+ return *this;
+ }
+
+ stack_bitvec &
+ operator= (const stack_bitvec &other)
+ {
+ clear_and_release ();
+ init ();
+ copy (other);
+ return *this;
+ }
+
+private:
+ static const uint8_t inline_elts = 4;
+
+ /* Point the base-class vector at the embedded storage.  */
+ void
+ init ()
+ {
+ m_auto.embedded_init (inline_elts, 0, 1);
+ m_bits.m_vec = &m_auto;
+ }
+
+ vec<uint64_t, va_heap, vl_embed> m_auto;
+ /* inline_elts - 1 because m_auto itself embeds one element -- TODO
+    confirm against the vl_embed definition.  */
+ uint64_t m_data[inline_elts - 1];
+};
+
+/* Return the union of A and B without modifying either.  The result is
+   a stack_bitvec, so small results need no heap allocation.  Sized to
+   the larger of the two operands.  */
+inline stack_bitvec
+operator| (const bitvec &a, const bitvec &b)
+{
+ stack_bitvec both (a);
+ size_t len2 = b.m_bits.length ();
+ if (len2 > both.m_bits.length ())
+ both.m_bits.safe_grow_cleared (len2);
+
+ for (size_t i = 0; i < len2; i++)
+ both.m_bits[i] |= b.m_bits[i];
+
+ return both;
+}
+
+#endif
diff --git a/gcc/cfganal.c b/gcc/cfganal.c
index b8d67bcdb0f..a9d2b2c8bca 100644
--- a/gcc/cfganal.c
+++ b/gcc/cfganal.c
@@ -37,6 +37,7 @@ along with GCC; see the file COPYING3. If not see
#include "basic-block.h"
#include "bitmap.h"
#include "sbitmap.h"
+#include "bitvec.h"
#include "timevar.h"
/* Store the data structures necessary for depth-first search. */
@@ -79,7 +80,6 @@ mark_dfs_back_edges (void)
int sp;
int prenum = 1;
int postnum = 1;
- sbitmap visited;
bool found = false;
/* Allocate the preorder and postorder number arrays. */
@@ -91,10 +91,7 @@ mark_dfs_back_edges (void)
sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
- visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
-
- /* None of the nodes in the CFG have been visited yet. */
- bitmap_clear (visited);
+ stack_bitvec visited (last_basic_block_for_fn (cfun));
/* Push the first edge on to the stack. */
stack[sp++] = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
@@ -112,11 +109,10 @@ mark_dfs_back_edges (void)
ei_edge (ei)->flags &= ~EDGE_DFS_BACK;
/* Check if the edge destination has been visited yet. */
- if (dest != EXIT_BLOCK_PTR_FOR_FN (cfun) && ! bitmap_bit_p (visited,
- dest->index))
+ if (dest != EXIT_BLOCK_PTR_FOR_FN (cfun) && ! visited[dest->index])
{
/* Mark that we have visited the destination. */
- bitmap_set_bit (visited, dest->index);
+ visited[dest->index] = true;
pre[dest->index] = prenum++;
if (EDGE_COUNT (dest->succs) > 0)
@@ -150,7 +146,6 @@ mark_dfs_back_edges (void)
free (pre);
free (post);
free (stack);
- sbitmap_free (visited);
return found;
}
@@ -622,7 +617,6 @@ post_order_compute (int *post_order, bool include_entry_exit,
edge_iterator *stack;
int sp;
int post_order_num = 0;
- sbitmap visited;
int count;
if (include_entry_exit)
@@ -633,10 +627,7 @@ post_order_compute (int *post_order, bool include_entry_exit,
sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
- visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
-
- /* None of the nodes in the CFG have been visited yet. */
- bitmap_clear (visited);
+ stack_bitvec visited (last_basic_block_for_fn (cfun));
/* Push the first edge on to the stack. */
stack[sp++] = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
@@ -654,10 +645,10 @@ post_order_compute (int *post_order, bool include_entry_exit,
/* Check if the edge destination has been visited yet. */
if (dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
- && ! bitmap_bit_p (visited, dest->index))
+ && ! visited[dest->index])
{
/* Mark that we have visited the destination. */
- bitmap_set_bit (visited, dest->index);
+ visited[dest->index] = true;
if (EDGE_COUNT (dest->succs) > 0)
/* Since the DEST node has been visited for the first
@@ -698,7 +689,7 @@ post_order_compute (int *post_order, bool include_entry_exit,
{
next_bb = b->next_bb;
- if (!(bitmap_bit_p (visited, b->index)))
+ if (!(visited[b->index]))
delete_basic_block (b);
}
@@ -706,7 +697,6 @@ post_order_compute (int *post_order, bool include_entry_exit,
}
free (stack);
- sbitmap_free (visited);
return post_order_num;
}
@@ -782,17 +772,13 @@ inverted_post_order_compute (int *post_order)
edge_iterator *stack;
int sp;
int post_order_num = 0;
- sbitmap visited;
/* Allocate stack for back-tracking up CFG. */
stack = XNEWVEC (edge_iterator, n_basic_blocks_for_fn (cfun) + 1);
sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
- visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
-
- /* None of the nodes in the CFG have been visited yet. */
- bitmap_clear (visited);
+ stack_bitvec visited (last_basic_block_for_fn (cfun));
/* Put all blocks that have no successor into the initial work list. */
FOR_ALL_BB_FN (bb, cfun)
@@ -802,7 +788,7 @@ inverted_post_order_compute (int *post_order)
if (EDGE_COUNT (bb->preds) > 0)
{
stack[sp++] = ei_start (bb->preds);
- bitmap_set_bit (visited, bb->index);
+ visited[bb->index] = true;
}
}
@@ -822,10 +808,10 @@ inverted_post_order_compute (int *post_order)
pred = ei_edge (ei)->src;
/* Check if the predecessor has been visited yet. */
- if (! bitmap_bit_p (visited, pred->index))
+ if (! visited[pred->index])
{
/* Mark that we have visited the destination. */
- bitmap_set_bit (visited, pred->index);
+ visited[pred->index] = true;
if (EDGE_COUNT (pred->preds) > 0)
/* Since the predecessor node has been visited for the first
@@ -852,7 +838,7 @@ inverted_post_order_compute (int *post_order)
since EXIT_BLOCK is always added after the outer do-while loop. */
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
- if (!bitmap_bit_p (visited, bb->index))
+ if (!visited[bb->index])
{
has_unvisited_bb = true;
@@ -865,7 +851,7 @@ inverted_post_order_compute (int *post_order)
/* Find an already visited predecessor. */
FOR_EACH_EDGE (e, ei, bb->preds)
{
- if (bitmap_bit_p (visited, e->src->index))
+ if (visited[e->src->index])
visited_pred = e->src;
}
@@ -873,7 +859,7 @@ inverted_post_order_compute (int *post_order)
{
basic_block be = dfs_find_deadend (bb);
gcc_assert (be != NULL);
- bitmap_set_bit (visited, be->index);
+ visited[be->index] = true;
stack[sp++] = ei_start (be->preds);
break;
}
@@ -886,7 +872,7 @@ inverted_post_order_compute (int *post_order)
Find a dead-end from the ENTRY, and restart the iteration. */
basic_block be = dfs_find_deadend (ENTRY_BLOCK_PTR_FOR_FN (cfun));
gcc_assert (be != NULL);
- bitmap_set_bit (visited, be->index);
+ visited[be->index] = true;
stack[sp++] = ei_start (be->preds);
}
@@ -899,7 +885,6 @@ inverted_post_order_compute (int *post_order)
post_order[post_order_num++] = EXIT_BLOCK;
free (stack);
- sbitmap_free (visited);
return post_order_num;
}
@@ -924,7 +909,6 @@ pre_and_rev_post_order_compute_fn (struct function *fn,
int sp;
int pre_order_num = 0;
int rev_post_order_num = n_basic_blocks_for_fn (cfun) - 1;
- sbitmap visited;
/* Allocate stack for back-tracking up CFG. */
stack = XNEWVEC (edge_iterator, n_basic_blocks_for_fn (cfun) + 1);
@@ -942,10 +926,8 @@ pre_and_rev_post_order_compute_fn (struct function *fn,
rev_post_order_num -= NUM_FIXED_BLOCKS;
/* Allocate bitmap to track nodes that have been visited. */
- visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
-
- /* None of the nodes in the CFG have been visited yet. */
- bitmap_clear (visited);
+ stack_bitvec visited (last_basic_block_for_fn (cfun));
+ stack_bitvec visited2 (last_basic_block_for_fn (cfun));
/* Push the first edge on to the stack. */
stack[sp++] = ei_start (ENTRY_BLOCK_PTR_FOR_FN (fn)->succs);
@@ -962,11 +944,13 @@ pre_and_rev_post_order_compute_fn (struct function *fn,
dest = ei_edge (ei)->dest;
/* Check if the edge destination has been visited yet. */
+ gcc_assert (visited[dest->index] == visited2[dest->index]);
if (dest != EXIT_BLOCK_PTR_FOR_FN (fn)
- && ! bitmap_bit_p (visited, dest->index))
+ && ! visited[dest->index])
{
/* Mark that we have visited the destination. */
- bitmap_set_bit (visited, dest->index);
+ visited[dest->index] = true;
+ visited2[dest->index] = true;
if (pre_order)
pre_order[pre_order_num] = dest->index;
@@ -999,7 +983,6 @@ pre_and_rev_post_order_compute_fn (struct function *fn,
}
free (stack);
- sbitmap_free (visited);
if (include_entry_exit)
{
@@ -1520,44 +1503,35 @@ single_pred_before_succ_order (void)
basic_block *order = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
unsigned n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
unsigned np, i;
- sbitmap visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ stack_bitvec visited (last_basic_block_for_fn (cfun));
-#define MARK_VISITED(BB) (bitmap_set_bit (visited, (BB)->index))
-#define VISITED_P(BB) (bitmap_bit_p (visited, (BB)->index))
-
- bitmap_clear (visited);
-
- MARK_VISITED (ENTRY_BLOCK_PTR_FOR_FN (cfun));
+ visited[ENTRY_BLOCK_PTR_FOR_FN (cfun)->index] = true;
FOR_EACH_BB_FN (x, cfun)
{
- if (VISITED_P (x))
+ if (visited[x->index])
continue;
/* Walk the predecessors of x as long as they have precisely one
predecessor and add them to the list, so that they get stored
after x. */
for (y = x, np = 1;
- single_pred_p (y) && !VISITED_P (single_pred (y));
+ single_pred_p (y) && !visited[single_pred (y)->index];
y = single_pred (y))
np++;
for (y = x, i = n - np;
- single_pred_p (y) && !VISITED_P (single_pred (y));
+ single_pred_p (y) && !visited[single_pred (y)->index];
y = single_pred (y), i++)
{
order[i] = y;
- MARK_VISITED (y);
+ visited[y->index] = true;
}
order[i] = y;
- MARK_VISITED (y);
+ visited[y->index] = true;
gcc_assert (i == n - 1);
n -= np;
}
- sbitmap_free (visited);
gcc_assert (n == 0);
return order;
-
-#undef MARK_VISITED
-#undef VISITED_P
}
diff --git a/gcc/cfgbuild.c b/gcc/cfgbuild.c
index 7cbed500632..472c8fd8708 100644
--- a/gcc/cfgbuild.c
+++ b/gcc/cfgbuild.c
@@ -22,6 +22,7 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tm.h"
+#include "bitvec.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
@@ -614,13 +615,12 @@ compute_outgoing_frequencies (basic_block b)
and create edges. */
void
-find_many_sub_basic_blocks (sbitmap blocks)
+find_many_sub_basic_blocks (const bitvec &blocks)
{
basic_block bb, min, max;
FOR_EACH_BB_FN (bb, cfun)
- SET_STATE (bb,
- bitmap_bit_p (blocks, bb->index) ? BLOCK_TO_SPLIT : BLOCK_ORIGINAL);
+ SET_STATE (bb, blocks[bb->index] ? BLOCK_TO_SPLIT : BLOCK_ORIGINAL);
FOR_EACH_BB_FN (bb, cfun)
if (STATE (bb) == BLOCK_TO_SPLIT)
diff --git a/gcc/cfgbuild.h b/gcc/cfgbuild.h
index a2816db8bb5..685e0a12072 100644
--- a/gcc/cfgbuild.h
+++ b/gcc/cfgbuild.h
@@ -20,9 +20,11 @@ along with GCC; see the file COPYING3. If not see
#ifndef GCC_CFGBUILD_H
#define GCC_CFGBUILD_H
+class bitvec;
+
extern bool inside_basic_block_p (const rtx_insn *);
extern bool control_flow_insn_p (const rtx_insn *);
extern void rtl_make_eh_edge (sbitmap, basic_block, rtx);
-extern void find_many_sub_basic_blocks (sbitmap);
+extern void find_many_sub_basic_blocks (const bitvec &);
#endif /* GCC_CFGBUILD_H */
diff --git a/gcc/cfgexpand.c b/gcc/cfgexpand.c
index 97e7a2583ea..d28e5727e33 100644
--- a/gcc/cfgexpand.c
+++ b/gcc/cfgexpand.c
@@ -20,6 +20,7 @@ along with GCC; see the file COPYING3. If not see
#include "config.h"
#include "system.h"
#include "coretypes.h"
+#include "bitvec.h"
#include "tm.h"
#include "rtl.h"
#include "hard-reg-set.h"
@@ -5922,7 +5923,6 @@ unsigned int
pass_expand::execute (function *fun)
{
basic_block bb, init_block;
- sbitmap blocks;
edge_iterator ei;
edge e;
rtx_insn *var_seq, *var_ret_seq;
@@ -6229,10 +6229,9 @@ pass_expand::execute (function *fun)
}
}
- blocks = sbitmap_alloc (last_basic_block_for_fn (fun));
- bitmap_ones (blocks);
+ stack_bitvec blocks (last_basic_block_for_fn (fun));
+ blocks.set ();
find_many_sub_basic_blocks (blocks);
- sbitmap_free (blocks);
purge_all_dead_edges ();
expand_stack_alignment ();
diff --git a/gcc/cfghooks.c b/gcc/cfghooks.c
index abeab8cf1a5..8ae35158848 100644
--- a/gcc/cfghooks.c
+++ b/gcc/cfghooks.c
@@ -1220,7 +1220,7 @@ lv_flush_pending_stmts (edge e)
bool
cfg_hook_duplicate_loop_to_header_edge (struct loop *loop, edge e,
unsigned int ndupl,
- sbitmap wont_exit, edge orig,
+ const bitvec &wont_exit, edge orig,
vec<edge> *to_remove,
int flags)
{
diff --git a/gcc/cfghooks.h b/gcc/cfghooks.h
index 4a1340e392c..6d7dde8f392 100644
--- a/gcc/cfghooks.h
+++ b/gcc/cfghooks.h
@@ -21,6 +21,8 @@ along with GCC; see the file COPYING3. If not see
#ifndef GCC_CFGHOOKS_H
#define GCC_CFGHOOKS_H
+class bitvec;
+
/* Only basic-block.h includes this. */
/* Structure to gather statistic about profile consistency, per pass.
@@ -153,7 +155,7 @@ struct cfg_hooks
/* A hook for duplicating loop in CFG, currently this is used
in loop versioning. */
bool (*cfg_hook_duplicate_loop_to_header_edge) (struct loop *, edge,
- unsigned, sbitmap,
+ unsigned, const bitvec &,
edge, vec<edge> *,
int);
@@ -223,7 +225,7 @@ extern void execute_on_growing_pred (edge);
extern void execute_on_shrinking_pred (edge);
extern bool cfg_hook_duplicate_loop_to_header_edge (struct loop *loop, edge,
unsigned int ndupl,
- sbitmap wont_exit,
+ const bitvec &wont_exit,
edge orig,
vec<edge> *to_remove,
int flags);
diff --git a/gcc/cfgloop.c b/gcc/cfgloop.c
index ce56b69192a..ce3a7043dbe 100644
--- a/gcc/cfgloop.c
+++ b/gcc/cfgloop.c
@@ -22,6 +22,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
+#include "bitvec.h"
#include "hashtab.h"
#include "hash-set.h"
#include "vec.h"
@@ -1325,7 +1326,6 @@ DEBUG_FUNCTION void
verify_loop_structure (void)
{
unsigned *sizes, i, j;
- sbitmap irreds;
basic_block bb, *bbs;
struct loop *loop;
int err = 0;
@@ -1333,7 +1333,6 @@ verify_loop_structure (void)
unsigned num = number_of_loops (cfun);
struct loop_exit *exit, *mexit;
bool dom_available = dom_info_available_p (CDI_DOMINATORS);
- sbitmap visited;
if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
{
@@ -1369,8 +1368,7 @@ verify_loop_structure (void)
}
/* Check the recorded loop father and sizes of loops. */
- visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
- bitmap_clear (visited);
+ stack_bitvec visited (last_basic_block_for_fn (cfun));
bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
{
@@ -1403,9 +1401,9 @@ verify_loop_structure (void)
}
/* Ignore this block if it is in an inner loop. */
- if (bitmap_bit_p (visited, bb->index))
+ if (visited[bb->index])
continue;
- bitmap_set_bit (visited, bb->index);
+ visited[bb->index] = true;
if (bb->loop_father != loop)
{
@@ -1416,7 +1414,6 @@ verify_loop_structure (void)
}
}
free (bbs);
- sbitmap_free (visited);
/* Check headers and latches. */
FOR_EACH_LOOP (loop, 0)
@@ -1483,14 +1480,14 @@ verify_loop_structure (void)
if (loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
{
/* Record old info. */
- irreds = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ stack_bitvec irreds (last_basic_block_for_fn (cfun));
FOR_EACH_BB_FN (bb, cfun)
{
edge_iterator ei;
if (bb->flags & BB_IRREDUCIBLE_LOOP)
- bitmap_set_bit (irreds, bb->index);
+ irreds[bb->index] = true;
else
- bitmap_clear_bit (irreds, bb->index);
+ irreds[bb->index] = false;
FOR_EACH_EDGE (e, ei, bb->succs)
if (e->flags & EDGE_IRREDUCIBLE_LOOP)
e->flags |= EDGE_ALL_FLAGS + 1;
@@ -1505,13 +1502,13 @@ verify_loop_structure (void)
edge_iterator ei;
if ((bb->flags & BB_IRREDUCIBLE_LOOP)
- && !bitmap_bit_p (irreds, bb->index))
+ && !irreds[bb->index])
{
error ("basic block %d should be marked irreducible", bb->index);
err = 1;
}
else if (!(bb->flags & BB_IRREDUCIBLE_LOOP)
- && bitmap_bit_p (irreds, bb->index))
+ && irreds[bb->index])
{
error ("basic block %d should not be marked irreducible", bb->index);
err = 1;
@@ -1535,7 +1532,6 @@ verify_loop_structure (void)
e->flags &= ~(EDGE_ALL_FLAGS + 1);
}
}
- free (irreds);
}
/* Check the recorded loop exits. */
diff --git a/gcc/cfgloopmanip.c b/gcc/cfgloopmanip.c
index 45cc85da863..7959922916b 100644
--- a/gcc/cfgloopmanip.c
+++ b/gcc/cfgloopmanip.c
@@ -24,6 +24,7 @@ along with GCC; see the file COPYING3. If not see
#include "rtl.h"
#include "predict.h"
#include "vec.h"
+#include "bitvec.h"
#include "hashtab.h"
#include "hash-set.h"
#include "symtab.h"
@@ -198,7 +199,6 @@ fix_bb_placements (basic_block from,
bool *irred_invalidated,
bitmap loop_closed_ssa_invalidated)
{
- sbitmap in_queue;
basic_block *queue, *qtop, *qbeg, *qend;
struct loop *base_loop, *target_loop;
edge e;
@@ -218,11 +218,10 @@ fix_bb_placements (basic_block from,
|| from == base_loop->header)
return;
- in_queue = sbitmap_alloc (last_basic_block_for_fn (cfun));
- bitmap_clear (in_queue);
- bitmap_set_bit (in_queue, from->index);
+ stack_bitvec in_queue (last_basic_block_for_fn (cfun));
+ in_queue[from->index] = true;
/* Prevent us from going out of the base_loop. */
- bitmap_set_bit (in_queue, base_loop->header->index);
+ in_queue[base_loop->header->index] = true;
queue = XNEWVEC (basic_block, base_loop->num_nodes + 1);
qtop = queue + base_loop->num_nodes + 1;
@@ -237,7 +236,7 @@ fix_bb_placements (basic_block from,
qbeg++;
if (qbeg == qtop)
qbeg = queue;
- bitmap_clear_bit (in_queue, from->index);
+ in_queue[from->index] = false;
if (from->loop_father->header == from)
{
@@ -278,7 +277,7 @@ fix_bb_placements (basic_block from,
if (e->flags & EDGE_IRREDUCIBLE_LOOP)
*irred_invalidated = true;
- if (bitmap_bit_p (in_queue, pred->index))
+ if (in_queue[pred->index])
continue;
/* If it is subloop, then it either was not moved, or
@@ -298,7 +297,7 @@ fix_bb_placements (basic_block from,
continue;
}
- if (bitmap_bit_p (in_queue, pred->index))
+ if (in_queue[pred->index])
continue;
/* Schedule the basic block. */
@@ -306,10 +305,9 @@ fix_bb_placements (basic_block from,
qend++;
if (qend == qtop)
qend = queue;
- bitmap_set_bit (in_queue, pred->index);
+ in_queue[pred->index] = true;
}
}
- free (in_queue);
free (queue);
}
@@ -323,7 +321,6 @@ remove_path (edge e)
basic_block *rem_bbs, *bord_bbs, from, bb;
vec<basic_block> dom_bbs;
int i, nrem, n_bord_bbs;
- sbitmap seen;
bool irred_invalidated = false;
edge_iterator ei;
struct loop *l, *f;
@@ -362,16 +359,15 @@ remove_path (edge e)
n_bord_bbs = 0;
bord_bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
- seen = sbitmap_alloc (last_basic_block_for_fn (cfun));
- bitmap_clear (seen);
+ stack_bitvec seen (last_basic_block_for_fn (cfun));
/* Find "border" hexes -- i.e. those with predecessor in removed path. */
for (i = 0; i < nrem; i++)
- bitmap_set_bit (seen, rem_bbs[i]->index);
+ seen[rem_bbs[i]->index] = true;
if (!irred_invalidated)
FOR_EACH_EDGE (ae, ei, e->src->succs)
if (ae != e && ae->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
- && !bitmap_bit_p (seen, ae->dest->index)
+ && !seen[ae->dest->index]
&& ae->flags & EDGE_IRREDUCIBLE_LOOP)
{
irred_invalidated = true;
@@ -383,9 +379,9 @@ remove_path (edge e)
bb = rem_bbs[i];
FOR_EACH_EDGE (ae, ei, rem_bbs[i]->succs)
if (ae->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
- && !bitmap_bit_p (seen, ae->dest->index))
+ && !seen[ae->dest->index])
{
- bitmap_set_bit (seen, ae->dest->index);
+ seen[ae->dest->index] = true;
bord_bbs[n_bord_bbs++] = ae->dest;
if (ae->flags & EDGE_IRREDUCIBLE_LOOP)
@@ -407,15 +403,15 @@ remove_path (edge e)
free (rem_bbs);
/* Find blocks whose dominators may be affected. */
- bitmap_clear (seen);
+ seen.clear ();
for (i = 0; i < n_bord_bbs; i++)
{
basic_block ldom;
bb = get_immediate_dominator (CDI_DOMINATORS, bord_bbs[i]);
- if (bitmap_bit_p (seen, bb->index))
+ if (seen[bb->index])
continue;
- bitmap_set_bit (seen, bb->index);
+ seen[bb->index] = true;
for (ldom = first_dom_son (CDI_DOMINATORS, bb);
ldom;
@@ -424,7 +420,6 @@ remove_path (edge e)
dom_bbs.safe_push (ldom);
}
- free (seen);
/* Recount dominators. */
iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, true);
@@ -633,16 +628,14 @@ static void
update_dominators_in_loop (struct loop *loop)
{
vec<basic_block> dom_bbs = vNULL;
- sbitmap seen;
basic_block *body;
unsigned i;
- seen = sbitmap_alloc (last_basic_block_for_fn (cfun));
- bitmap_clear (seen);
+ stack_bitvec seen (last_basic_block_for_fn (cfun));
body = get_loop_body (loop);
for (i = 0; i < loop->num_nodes; i++)
- bitmap_set_bit (seen, body[i]->index);
+ seen[body[i]->index] = true;
for (i = 0; i < loop->num_nodes; i++)
{
@@ -651,16 +644,15 @@ update_dominators_in_loop (struct loop *loop)
for (ldom = first_dom_son (CDI_DOMINATORS, body[i]);
ldom;
ldom = next_dom_son (CDI_DOMINATORS, ldom))
- if (!bitmap_bit_p (seen, ldom->index))
+ if (!seen[ldom->index])
{
- bitmap_set_bit (seen, ldom->index);
+ seen[ldom->index] = true;
dom_bbs.safe_push (ldom);
}
}
iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false);
free (body);
- free (seen);
dom_bbs.release ();
}
@@ -1160,7 +1152,7 @@ set_zero_probability (edge e)
bool
duplicate_loop_to_header_edge (struct loop *loop, edge e,
- unsigned int ndupl, sbitmap wont_exit,
+ unsigned int ndupl, const bitvec &wont_exit,
edge orig, vec<edge> *to_remove,
int flags)
{
@@ -1253,7 +1245,7 @@ duplicate_loop_to_header_edge (struct loop *loop, edge e,
scale_step = XNEWVEC (int, ndupl);
for (i = 1; i <= ndupl; i++)
- scale_step[i - 1] = bitmap_bit_p (wont_exit, i)
+ scale_step[i - 1] = wont_exit[i]
? prob_pass_wont_exit
: prob_pass_thru;
@@ -1280,7 +1272,7 @@ duplicate_loop_to_header_edge (struct loop *loop, edge e,
}
else if (is_latch)
{
- prob_pass_main = bitmap_bit_p (wont_exit, 0)
+ prob_pass_main = wont_exit[0]
? prob_pass_wont_exit
: prob_pass_thru;
p = prob_pass_main;
@@ -1389,7 +1381,7 @@ duplicate_loop_to_header_edge (struct loop *loop, edge e,
}
/* Record exit edge in this copy. */
- if (orig && bitmap_bit_p (wont_exit, j + 1))
+ if (orig && wont_exit[j + 1])
{
if (to_remove)
to_remove->safe_push (new_spec_edges[SE_ORIG]);
@@ -1425,7 +1417,7 @@ duplicate_loop_to_header_edge (struct loop *loop, edge e,
free (orig_loops);
/* Record the exit edge in the original loop body, and update the frequencies. */
- if (orig && bitmap_bit_p (wont_exit, 0))
+ if (orig && wont_exit[0])
{
if (to_remove)
to_remove->safe_push (orig);
@@ -1726,8 +1718,9 @@ loop_version (struct loop *loop,
first_head = entry->dest;
/* Duplicate loop. */
+ stack_bitvec dummy;
if (!cfg_hook_duplicate_loop_to_header_edge (loop, entry, 1,
- NULL, NULL, NULL, 0))
+ dummy, NULL, NULL, 0))
{
entry->flags |= irred_flag;
return NULL;
diff --git a/gcc/cfgloopmanip.h b/gcc/cfgloopmanip.h
index 56a02669510..ba9b0130bcc 100644
--- a/gcc/cfgloopmanip.h
+++ b/gcc/cfgloopmanip.h
@@ -20,6 +20,8 @@ along with GCC; see the file COPYING3. If not see
#ifndef GCC_CFGLOOPMANIP_H
#define GCC_CFGLOOPMANIP_H
+class bitvec;
+
enum
{
CP_SIMPLE_PREHEADERS = 1,
@@ -51,7 +53,7 @@ extern struct loop * duplicate_loop (struct loop *, struct loop *);
extern void duplicate_subloops (struct loop *, struct loop *);
extern bool can_duplicate_loop_p (const struct loop *loop);
extern bool duplicate_loop_to_header_edge (struct loop *, edge,
- unsigned, sbitmap, edge,
+ unsigned, const bitvec &, edge,
vec<edge> *, int);
extern bool mfb_keep_just (edge);
basic_block create_preheader (struct loop *, int);
diff --git a/gcc/cfgrtl.c b/gcc/cfgrtl.c
index 0e27eddd5a9..5aeb8823628 100644
--- a/gcc/cfgrtl.c
+++ b/gcc/cfgrtl.c
@@ -41,6 +41,7 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tm.h"
+#include "bitvec.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
@@ -4312,18 +4313,16 @@ cfg_layout_initialize (unsigned int flags)
void
break_superblocks (void)
{
- sbitmap superblocks;
bool need = false;
basic_block bb;
- superblocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
- bitmap_clear (superblocks);
+ stack_bitvec superblocks (last_basic_block_for_fn (cfun));
FOR_EACH_BB_FN (bb, cfun)
if (bb->flags & BB_SUPERBLOCK)
{
bb->flags &= ~BB_SUPERBLOCK;
- bitmap_set_bit (superblocks, bb->index);
+ superblocks[bb->index] = true;
need = true;
}
@@ -4332,8 +4331,6 @@ break_superblocks (void)
rebuild_jump_labels (get_insns ());
find_many_sub_basic_blocks (superblocks);
}
-
- free (superblocks);
}
/* Finalize the changes: reorder insn list according to the sequence specified
diff --git a/gcc/config/spu/spu.c b/gcc/config/spu/spu.c
index e99cea37d05..039cc89111d 100644
--- a/gcc/config/spu/spu.c
+++ b/gcc/config/spu/spu.c
@@ -2127,7 +2127,7 @@ pad_bb(void)
static void
spu_emit_branch_hint (rtx_insn *before, rtx_insn *branch, rtx target,
- int distance, sbitmap blocks)
+ int distance, bitvec *blocks)
{
rtx branch_label = 0;
rtx_insn *hint;
@@ -2151,7 +2151,7 @@ spu_emit_branch_hint (rtx_insn *before, rtx_insn *branch, rtx target,
LABEL_PRESERVE_P (branch_label) = 1;
insn = emit_label_before (branch_label, branch);
branch_label = gen_rtx_LABEL_REF (VOIDmode, branch_label);
- bitmap_set_bit (blocks, BLOCK_FOR_INSN (branch)->index);
+ (*blocks)[BLOCK_FOR_INSN (branch)->index] = true;
hint = emit_insn_before (gen_hbr (branch_label, target), before);
recog_memoized (hint);
@@ -2477,7 +2477,6 @@ spu_var_tracking (void)
static void
spu_machine_dependent_reorg (void)
{
- sbitmap blocks;
basic_block bb;
rtx_insn *branch, *insn;
rtx branch_target = 0;
@@ -2497,8 +2496,7 @@ spu_machine_dependent_reorg (void)
return;
}
- blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
- bitmap_clear (blocks);
+ stack_bitvec blocks (last_basic_block_for_fn (cfun));
in_spu_reorg = 1;
compute_bb_for_insn ();
@@ -2562,7 +2560,7 @@ spu_machine_dependent_reorg (void)
INSN_UID (branch), bb->index,
INSN_UID (next));
spu_emit_branch_hint (next, branch, branch_target,
- branch_addr - next_addr, blocks);
+ branch_addr - next_addr, &blocks);
}
branch = 0;
}
@@ -2662,14 +2660,14 @@ spu_machine_dependent_reorg (void)
INSN_UID (branch), bb->index,
INSN_UID (NEXT_INSN (insn)));
spu_emit_branch_hint (NEXT_INSN (insn), branch, branch_target,
- branch_addr - next_addr, blocks);
+ branch_addr - next_addr, &blocks);
}
branch = 0;
}
}
free (spu_bb_info);
- if (!bitmap_empty_p (blocks))
+ if (blocks.any_set_bits ())
find_many_sub_basic_blocks (blocks);
/* We have to schedule to make sure alignment is ok. */
diff --git a/gcc/cp/vtable-class-hierarchy.c b/gcc/cp/vtable-class-hierarchy.c
index a138ee4e43b..6cd1f59570a 100644
--- a/gcc/cp/vtable-class-hierarchy.c
+++ b/gcc/cp/vtable-class-hierarchy.c
@@ -113,6 +113,7 @@ along with GCC; see the file COPYING3. If not see
#include "config.h"
#include "system.h"
#include "coretypes.h"
+#include "bitvec.h"
#include "cp-tree.h"
#include "output.h"
#include "hash-map.h"
@@ -333,11 +334,11 @@ init_functions (void)
static void
add_to_worklist (struct work_node **worklist, struct vtv_graph_node *node,
- sbitmap inserted)
+ bitvec *inserted)
{
struct work_node *new_work_node;
- if (bitmap_bit_p (inserted, node->class_uid))
+ if ((*inserted)[node->class_uid])
return;
new_work_node = XNEW (struct work_node);
@@ -345,7 +346,7 @@ add_to_worklist (struct work_node **worklist, struct vtv_graph_node *node,
new_work_node->node = node;
*worklist = new_work_node;
- bitmap_set_bit (inserted, node->class_uid);
+ (*inserted)[node->class_uid] = true;
}
/* This is a helper function for
@@ -392,7 +393,6 @@ void
vtv_compute_class_hierarchy_transitive_closure (void)
{
struct work_node *worklist = NULL;
- sbitmap inserted = sbitmap_alloc (num_vtable_map_nodes);
unsigned i;
unsigned j;
@@ -405,14 +405,14 @@ vtv_compute_class_hierarchy_transitive_closure (void)
/* Set-up: */
/* Find all the "leaf" nodes in the graph, and add them to the worklist. */
- bitmap_clear (inserted);
+ stack_bitvec inserted (num_vtable_map_nodes);
for (j = 0; j < num_vtable_map_nodes; ++j)
{
struct vtbl_map_node *cur = vtbl_map_nodes_vec[j];
if (cur->class_info
&& ((cur->class_info->children).length() == 0)
- && ! (bitmap_bit_p (inserted, cur->class_info->class_uid)))
- add_to_worklist (&worklist, cur->class_info, inserted);
+ && ! (inserted[cur->class_info->class_uid]))
+ add_to_worklist (&worklist, cur->class_info, &inserted);
}
/* Main work: pull next leaf node off work list, process it, add its
@@ -434,8 +434,8 @@ vtv_compute_class_hierarchy_transitive_closure (void)
{
temp_node->parents[i]->num_processed_children =
temp_node->parents[i]->num_processed_children + 1;
- if (!bitmap_bit_p (inserted, temp_node->parents[i]->class_uid))
- add_to_worklist (&worklist, temp_node->parents[i], inserted);
+ if (!inserted[temp_node->parents[i]->class_uid])
+ add_to_worklist (&worklist, temp_node->parents[i], &inserted);
}
}
}
diff --git a/gcc/cse.c b/gcc/cse.c
index 2a33827a61c..7519372b310 100644
--- a/gcc/cse.c
+++ b/gcc/cse.c
@@ -20,6 +20,7 @@ along with GCC; see the file COPYING3. If not see
#include "config.h"
#include "system.h"
#include "coretypes.h"
+#include "bitvec.h"
#include "tm.h"
#include "rtl.h"
#include "tm_p.h"
@@ -565,7 +566,7 @@ static bitmap cse_ebb_live_in, cse_ebb_live_out;
/* A simple bitmap to track which basic blocks have been visited
already as part of an already processed extended basic block. */
-static sbitmap cse_visited_basic_blocks;
+static bitvec cse_visited_basic_blocks;
static bool fixed_base_plus_p (rtx x);
static int notreg_cost (rtx, enum rtx_code, int);
@@ -6242,7 +6243,7 @@ cse_find_path (basic_block first_bb, struct cse_basic_block_data *data,
edge e;
int path_size;
- bitmap_set_bit (cse_visited_basic_blocks, first_bb->index);
+ cse_visited_basic_blocks[first_bb->index] = true;
/* See if there is a previous path. */
path_size = data->path_size;
@@ -6299,9 +6300,9 @@ cse_find_path (basic_block first_bb, struct cse_basic_block_data *data,
We still want to visit each basic block only once, so
halt the path here if we have already visited BB. */
- && !bitmap_bit_p (cse_visited_basic_blocks, bb->index))
+ && !cse_visited_basic_blocks[bb->index])
{
- bitmap_set_bit (cse_visited_basic_blocks, bb->index);
+ cse_visited_basic_blocks[bb->index] = true;
data->path[path_size++].bb = bb;
break;
}
@@ -6344,10 +6345,10 @@ cse_find_path (basic_block first_bb, struct cse_basic_block_data *data,
&& single_pred_p (e->dest)
/* Avoid visiting basic blocks twice. The large comment
above explains why this can happen. */
- && !bitmap_bit_p (cse_visited_basic_blocks, e->dest->index))
+ && !cse_visited_basic_blocks[e->dest->index])
{
basic_block bb2 = e->dest;
- bitmap_set_bit (cse_visited_basic_blocks, bb2->index);
+ cse_visited_basic_blocks[bb2->index] = true;
data->path[path_size++].bb = bb2;
bb = bb2;
}
@@ -6582,8 +6583,8 @@ cse_extended_basic_block (struct cse_basic_block_data *ebb_data)
/* If we truncate the path, we must also reset the
visited bit on the remaining blocks in the path,
or we will never visit them at all. */
- bitmap_clear_bit (cse_visited_basic_blocks,
- ebb_data->path[path_size].bb->index);
+ cse_visited_basic_blocks[ebb_data->path[path_size].bb->index]
+ = false;
ebb_data->path[path_size].bb = NULL;
}
while (path_size - 1 != path_entry);
@@ -6660,8 +6661,7 @@ cse_main (rtx_insn *f ATTRIBUTE_UNUSED, int nregs)
reg_eqv_table = XNEWVEC (struct reg_eqv_elem, nregs);
/* Set up the table of already visited basic blocks. */
- cse_visited_basic_blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
- bitmap_clear (cse_visited_basic_blocks);
+ new (&cse_visited_basic_blocks) bitvec (last_basic_block_for_fn (cfun));
/* Loop over basic blocks in reverse completion order (RPO),
excluding the ENTRY and EXIT blocks. */
@@ -6675,7 +6675,7 @@ cse_main (rtx_insn *f ATTRIBUTE_UNUSED, int nregs)
{
bb = BASIC_BLOCK_FOR_FN (cfun, rc_order[i++]);
}
- while (bitmap_bit_p (cse_visited_basic_blocks, bb->index)
+ while (cse_visited_basic_blocks[bb->index]
&& i < n_blocks);
/* Find all paths starting with BB, and process them. */
@@ -6705,7 +6705,7 @@ cse_main (rtx_insn *f ATTRIBUTE_UNUSED, int nregs)
end_alias_analysis ();
free (reg_eqv_table);
free (ebb_data.path);
- sbitmap_free (cse_visited_basic_blocks);
+ cse_visited_basic_blocks.clear_and_release ();
free (rc_order);
rtl_hooks = general_rtl_hooks;
diff --git a/gcc/dce.c b/gcc/dce.c
index 99f62715562..bcc31e8a8cb 100644
--- a/gcc/dce.c
+++ b/gcc/dce.c
@@ -20,6 +20,7 @@ along with GCC; see the file COPYING3. If not see
#include "config.h"
#include "system.h"
#include "coretypes.h"
+#include "bitvec.h"
#include "hashtab.h"
#include "tm.h"
#include "rtl.h"
@@ -70,7 +71,7 @@ static bool can_alter_cfg = false;
static vec<rtx_insn *> worklist;
/* Bitmap of instructions marked as needed indexed by INSN_UID. */
-static sbitmap marked;
+static bitvec marked;
/* Bitmap obstacks used for block processing by the fast algorithm. */
static bitmap_obstack dce_blocks_bitmap_obstack;
@@ -190,7 +191,7 @@ marked_insn_p (rtx_insn *insn)
/* Artificial defs are always needed and they do not have an insn.
We should never see them here. */
gcc_assert (insn);
- return bitmap_bit_p (marked, INSN_UID (insn));
+ return marked[INSN_UID (insn)];
}
@@ -204,7 +205,7 @@ mark_insn (rtx_insn *insn, bool fast)
{
if (!fast)
worklist.safe_push (insn);
- bitmap_set_bit (marked, INSN_UID (insn));
+ marked[INSN_UID (insn)] = true;
if (dump_file)
fprintf (dump_file, " Adding insn %d to worklist\n", INSN_UID (insn));
if (CALL_P (insn)
@@ -746,8 +747,7 @@ init_dce (bool fast)
else
can_alter_cfg = true;
- marked = sbitmap_alloc (get_max_uid () + 1);
- bitmap_clear (marked);
+ marked.grow (get_max_uid () + 1);
}
@@ -756,7 +756,7 @@ init_dce (bool fast)
static void
fini_dce (bool fast)
{
- sbitmap_free (marked);
+ marked.clear_and_release ();
if (fast)
{
@@ -1126,7 +1126,7 @@ fast_dce (bool word_level)
/* So something was deleted that requires a redo. Do it on
the cheap. */
delete_unmarked_insns ();
- bitmap_clear (marked);
+ marked.clear ();
bitmap_clear (processed);
bitmap_clear (redo_out);
diff --git a/gcc/df-core.c b/gcc/df-core.c
index 82f136436bb..d933803014a 100644
--- a/gcc/df-core.c
+++ b/gcc/df-core.c
@@ -377,6 +377,7 @@ are write-only operations.
#include "config.h"
#include "system.h"
#include "coretypes.h"
+#include "bitvec.h"
#include "tm.h"
#include "rtl.h"
#include "tm_p.h"
@@ -397,7 +398,6 @@ are write-only operations.
#include "cfg.h"
#include "cfganal.h"
#include "basic-block.h"
-#include "sbitmap.h"
#include "bitmap.h"
#include "df.h"
#include "tree-pass.h"
@@ -927,7 +927,7 @@ df_worklist_propagate_forward (struct dataflow *dataflow,
unsigned bb_index,
unsigned *bbindex_to_postorder,
bitmap pending,
- sbitmap considered,
+ const bitvec &considered,
ptrdiff_t age)
{
edge e;
@@ -940,7 +940,7 @@ df_worklist_propagate_forward (struct dataflow *dataflow,
FOR_EACH_EDGE (e, ei, bb->preds)
{
if (age <= BB_LAST_CHANGE_AGE (e->src)
- && bitmap_bit_p (considered, e->src->index))
+ && considered[e->src->index])
changed |= dataflow->problem->con_fun_n (e);
}
else if (dataflow->problem->con_fun_0)
@@ -955,7 +955,7 @@ df_worklist_propagate_forward (struct dataflow *dataflow,
{
unsigned ob_index = e->dest->index;
- if (bitmap_bit_p (considered, ob_index))
+ if (considered[ob_index])
bitmap_set_bit (pending, bbindex_to_postorder[ob_index]);
}
return true;
@@ -972,7 +972,7 @@ df_worklist_propagate_backward (struct dataflow *dataflow,
unsigned bb_index,
unsigned *bbindex_to_postorder,
bitmap pending,
- sbitmap considered,
+ const bitvec &considered,
ptrdiff_t age)
{
edge e;
@@ -985,7 +985,7 @@ df_worklist_propagate_backward (struct dataflow *dataflow,
FOR_EACH_EDGE (e, ei, bb->succs)
{
if (age <= BB_LAST_CHANGE_AGE (e->dest)
- && bitmap_bit_p (considered, e->dest->index))
+ && considered[e->dest->index])
changed |= dataflow->problem->con_fun_n (e);
}
else if (dataflow->problem->con_fun_0)
@@ -1000,7 +1000,7 @@ df_worklist_propagate_backward (struct dataflow *dataflow,
{
unsigned ob_index = e->src->index;
- if (bitmap_bit_p (considered, ob_index))
+ if (considered[ob_index])
bitmap_set_bit (pending, bbindex_to_postorder[ob_index]);
}
return true;
@@ -1030,7 +1030,7 @@ df_worklist_propagate_backward (struct dataflow *dataflow,
static void
df_worklist_dataflow_doublequeue (struct dataflow *dataflow,
bitmap pending,
- sbitmap considered,
+ const bitvec &considered,
int *blocks_in_postorder,
unsigned *bbindex_to_postorder,
int n_blocks)
@@ -1100,7 +1100,7 @@ df_worklist_dataflow_doublequeue (struct dataflow *dataflow,
dcount, dcount / (float)n_basic_blocks_for_fn (cfun));
}
-/* Worklist-based dataflow solver. It uses sbitmap as a worklist,
+/* Worklist-based dataflow solver. It uses bitvec as a worklist,
with "n"-th bit representing the n-th block in the reverse-postorder order.
The solver is a double-queue algorithm similar to the "double stack" solver
from Cooper, Harvey and Kennedy, "Iterative data-flow analysis, Revisited".
@@ -1114,7 +1114,6 @@ df_worklist_dataflow (struct dataflow *dataflow,
int n_blocks)
{
bitmap pending = BITMAP_ALLOC (&df_bitmap_obstack);
- sbitmap considered = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_iterator bi;
unsigned int *bbindex_to_postorder;
int i;
@@ -1131,11 +1130,10 @@ df_worklist_dataflow (struct dataflow *dataflow,
for (i = 0; i < last_basic_block_for_fn (cfun); i++)
bbindex_to_postorder[i] = last_basic_block_for_fn (cfun);
- /* Initialize the considered map. */
- bitmap_clear (considered);
+ stack_bitvec considered (last_basic_block_for_fn (cfun));
EXECUTE_IF_SET_IN_BITMAP (blocks_to_consider, 0, index, bi)
{
- bitmap_set_bit (considered, index);
+ considered[index] = true;
}
/* Initialize the mapping of block index to postorder. */
@@ -1155,7 +1153,6 @@ df_worklist_dataflow (struct dataflow *dataflow,
blocks_in_postorder,
bbindex_to_postorder,
n_blocks);
- sbitmap_free (considered);
free (bbindex_to_postorder);
}
diff --git a/gcc/dse.c b/gcc/dse.c
index 2bb20d74625..fb1fb70b4a9 100644
--- a/gcc/dse.c
+++ b/gcc/dse.c
@@ -25,6 +25,7 @@ along with GCC; see the file COPYING3. If not see
#include "config.h"
#include "system.h"
#include "coretypes.h"
+#include "bitvec.h"
#include "hash-table.h"
#include "tm.h"
#include "rtl.h"
@@ -3297,14 +3298,14 @@ dse_step3_exit_block_scan (bb_info_t bb_info)
have their bits set in UNREACHABLE_BLOCKS. */
static void
-mark_reachable_blocks (sbitmap unreachable_blocks, basic_block bb)
+mark_reachable_blocks (bitvec *unreachable_blocks, basic_block bb)
{
edge e;
edge_iterator ei;
- if (bitmap_bit_p (unreachable_blocks, bb->index))
+ if ((*unreachable_blocks)[bb->index])
{
- bitmap_clear_bit (unreachable_blocks, bb->index);
+ (*unreachable_blocks)[bb->index] = false;
FOR_EACH_EDGE (e, ei, bb->preds)
{
mark_reachable_blocks (unreachable_blocks, e->src);
@@ -3318,12 +3319,10 @@ static void
dse_step3 (bool for_spills)
{
basic_block bb;
- sbitmap unreachable_blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
- sbitmap_iterator sbi;
+ stack_bitvec unreachable_blocks (last_basic_block_for_fn (cfun));
bitmap all_ones = NULL;
- unsigned int i;
- bitmap_ones (unreachable_blocks);
+ unreachable_blocks.set ();
FOR_ALL_BB_FN (bb, cfun)
{
@@ -3340,7 +3339,7 @@ dse_step3 (bool for_spills)
else
dse_step3_scan (for_spills, bb);
if (EDGE_COUNT (bb->succs) == 0)
- mark_reachable_blocks (unreachable_blocks, bb);
+ mark_reachable_blocks (&unreachable_blocks, bb);
/* If this is the second time dataflow is run, delete the old
sets. */
@@ -3353,9 +3352,8 @@ dse_step3 (bool for_spills)
/* For any block in an infinite loop, we must initialize the out set
to all ones. This could be expensive, but almost never occurs in
practice. However, it is common in regression tests. */
- EXECUTE_IF_SET_IN_BITMAP (unreachable_blocks, 0, i, sbi)
- {
- if (bitmap_bit_p (all_blocks, i))
+ for (size_t i = 0; i < unreachable_blocks.end (); i++)
+ if (unreachable_blocks[i] && bitmap_bit_p (all_blocks, i))
{
bb_info_t bb_info = bb_table[i];
if (!all_ones)
@@ -3373,11 +3371,9 @@ dse_step3 (bool for_spills)
bitmap_copy (bb_info->out, all_ones);
}
}
- }
if (all_ones)
BITMAP_FREE (all_ones);
- sbitmap_free (unreachable_blocks);
}
diff --git a/gcc/except.c b/gcc/except.c
index 833ec21f329..5e80e39a44e 100644
--- a/gcc/except.c
+++ b/gcc/except.c
@@ -112,6 +112,7 @@ along with GCC; see the file COPYING3. If not see
#include "config.h"
#include "system.h"
#include "coretypes.h"
+#include "bitvec.h"
#include "tm.h"
#include "rtl.h"
#include "hash-set.h"
@@ -679,30 +680,26 @@ eh_region
eh_region_outermost (struct function *ifun, eh_region region_a,
eh_region region_b)
{
- sbitmap b_outer;
-
gcc_assert (ifun->eh->region_array);
gcc_assert (ifun->eh->region_tree);
- b_outer = sbitmap_alloc (ifun->eh->region_array->length ());
- bitmap_clear (b_outer);
+ stack_bitvec b_outer (ifun->eh->region_array->length ());
do
{
- bitmap_set_bit (b_outer, region_b->index);
+ b_outer[region_b->index] = true;
region_b = region_b->outer;
}
while (region_b);
do
{
- if (bitmap_bit_p (b_outer, region_a->index))
+ if (b_outer[region_a->index])
break;
region_a = region_a->outer;
}
while (region_a);
- sbitmap_free (b_outer);
return region_a;
}
@@ -1630,13 +1627,13 @@ remove_eh_handler (eh_region region)
preserved. */
static void
-remove_unreachable_eh_regions_worker (eh_region *pp, sbitmap r_reachable)
+remove_unreachable_eh_regions_worker (eh_region *pp, const bitvec &r_reachable)
{
while (*pp)
{
eh_region region = *pp;
remove_unreachable_eh_regions_worker (&region->inner, r_reachable);
- if (!bitmap_bit_p (r_reachable, region->index))
+ if (!r_reachable[region->index])
remove_eh_handler_splicer (pp);
else
pp = &region->next_peer;
@@ -1649,7 +1646,7 @@ remove_unreachable_eh_regions_worker (eh_region *pp, sbitmap r_reachable)
searches in the region tree. */
void
-remove_unreachable_eh_regions (sbitmap r_reachable)
+remove_unreachable_eh_regions (const bitvec &r_reachable)
{
remove_unreachable_eh_regions_worker (&cfun->eh->region_tree, r_reachable);
}
diff --git a/gcc/except.h b/gcc/except.h
index eb812033dc8..8a2ddf005d5 100644
--- a/gcc/except.h
+++ b/gcc/except.h
@@ -27,6 +27,7 @@ along with GCC; see the file COPYING3. If not see
#include "hash-map.h"
#include "hashtab.h"
+class bitvec;
struct function;
struct eh_region_d;
@@ -228,7 +229,7 @@ extern void init_eh_for_function (void);
extern void remove_eh_landing_pad (eh_landing_pad);
extern void remove_eh_handler (eh_region);
-extern void remove_unreachable_eh_regions (sbitmap);
+extern void remove_unreachable_eh_regions (const bitvec &);
extern bool current_function_has_exception_handlers (void);
extern void output_function_exception_table (const char *);
diff --git a/gcc/function.c b/gcc/function.c
index 2c3d1426c18..d03056b9bd3 100644
--- a/gcc/function.c
+++ b/gcc/function.c
@@ -36,6 +36,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "rtl-error.h"
+#include "bitvec.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
@@ -6109,17 +6110,13 @@ epilogue_done:
if (inserted)
{
- sbitmap blocks;
-
commit_edge_insertions ();
/* Look for basic blocks within the prologue insns. */
- blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
- bitmap_clear (blocks);
- bitmap_set_bit (blocks, entry_edge->dest->index);
- bitmap_set_bit (blocks, orig_entry_edge->dest->index);
+ stack_bitvec blocks (last_basic_block_for_fn (cfun));
+ blocks[entry_edge->dest->index] = true;
+ blocks[orig_entry_edge->dest->index] = true;
find_many_sub_basic_blocks (blocks);
- sbitmap_free (blocks);
/* The epilogue insns we inserted may cause the exit edge to no longer
be fallthru. */
diff --git a/gcc/graph.c b/gcc/graph.c
index 5fb0d781bc2..f33272e4cdc 100644
--- a/gcc/graph.c
+++ b/gcc/graph.c
@@ -23,7 +23,7 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "diagnostic-core.h" /* for fatal_error */
-#include "sbitmap.h"
+#include "bitvec.h"
#include "predict.h"
#include "vec.h"
#include "hashtab.h"
@@ -167,10 +167,8 @@ draw_cfg_nodes_no_loops (pretty_printer *pp, struct function *fun)
{
int *rpo = XNEWVEC (int, n_basic_blocks_for_fn (fun));
int i, n;
- sbitmap visited;
- visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
- bitmap_clear (visited);
+ stack_bitvec visited (last_basic_block_for_fn (cfun));
n = pre_and_rev_post_order_compute_fn (fun, NULL, rpo, true);
for (i = n_basic_blocks_for_fn (fun) - n;
@@ -178,7 +176,7 @@ draw_cfg_nodes_no_loops (pretty_printer *pp, struct function *fun)
{
basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
draw_cfg_node (pp, fun->funcdef_no, bb);
- bitmap_set_bit (visited, bb->index);
+ visited[bb->index] = true;
}
free (rpo);
@@ -187,11 +185,9 @@ draw_cfg_nodes_no_loops (pretty_printer *pp, struct function *fun)
/* Some blocks are unreachable. We still want to dump them. */
basic_block bb;
FOR_ALL_BB_FN (bb, fun)
- if (! bitmap_bit_p (visited, bb->index))
+ if (! visited[bb->index])
draw_cfg_node (pp, fun->funcdef_no, bb);
}
-
- sbitmap_free (visited);
}
/* Draw all the basic blocks in LOOP. Print the blocks in breath-first
diff --git a/gcc/graphite-sese-to-poly.c b/gcc/graphite-sese-to-poly.c
index 23b63ad1f5e..a38f2a1b0b7 100644
--- a/gcc/graphite-sese-to-poly.c
+++ b/gcc/graphite-sese-to-poly.c
@@ -40,6 +40,7 @@ extern "C" {
#include "system.h"
#include "coretypes.h"
+#include "bitvec.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
@@ -373,13 +374,13 @@ try_generate_gimple_bb (scop_p scop, basic_block bb)
be handled after BB. */
static bool
-all_non_dominated_preds_marked_p (basic_block bb, sbitmap map)
+all_non_dominated_preds_marked_p (basic_block bb, const bitvec *map)
{
edge e;
edge_iterator ei;
FOR_EACH_EDGE (e, ei, bb->preds)
- if (!bitmap_bit_p (map, e->src->index)
+ if (!(*map)[e->src->index]
&& !dominated_by_p (CDI_DOMINATORS, e->src, bb))
return false;
@@ -417,19 +418,19 @@ graphite_sort_dominated_info (vec<basic_block> dom)
/* Recursive helper function for build_scops_bbs. */
static void
-build_scop_bbs_1 (scop_p scop, sbitmap visited, basic_block bb)
+build_scop_bbs_1 (scop_p scop, bitvec *visited, basic_block bb)
{
sese region = SCOP_REGION (scop);
vec<basic_block> dom;
poly_bb_p pbb;
- if (bitmap_bit_p (visited, bb->index)
+ if ((*visited)[bb->index]
|| !bb_in_sese_p (bb, region))
return;
pbb = new_poly_bb (scop, try_generate_gimple_bb (scop, bb));
SCOP_BBS (scop).safe_push (pbb);
- bitmap_set_bit (visited, bb->index);
+ (*visited)[bb->index] = true;
dom = get_dominated_by (CDI_DOMINATORS, bb);
@@ -460,12 +461,10 @@ build_scop_bbs_1 (scop_p scop, sbitmap visited, basic_block bb)
static void
build_scop_bbs (scop_p scop)
{
- sbitmap visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ stack_bitvec visited (last_basic_block_for_fn (cfun));
sese region = SCOP_REGION (scop);
- bitmap_clear (visited);
- build_scop_bbs_1 (scop, visited, SESE_ENTRY_BB (region));
- sbitmap_free (visited);
+ build_scop_bbs_1 (scop, &visited, SESE_ENTRY_BB (region));
}
/* Return an ISL identifier for the polyhedral basic block PBB. */
diff --git a/gcc/ira-lives.c b/gcc/ira-lives.c
index b29f5722bfc..725b81737f3 100644
--- a/gcc/ira-lives.c
+++ b/gcc/ira-lives.c
@@ -43,6 +43,7 @@ along with GCC; see the file COPYING3. If not see
#include "params.h"
#include "df.h"
#include "sbitmap.h"
+#include "bitvec.h"
#include "sparseset.h"
#include "ira-int.h"
@@ -1418,48 +1419,40 @@ ira_rebuild_start_finish_chains (void)
static void
remove_some_program_points_and_update_live_ranges (void)
{
- unsigned i;
int n;
int *map;
ira_object_t obj;
ira_object_iterator oi;
live_range_t r, prev_r, next_r;
- sbitmap born_or_dead, born, dead;
- sbitmap_iterator sbi;
bool born_p, dead_p, prev_born_p, prev_dead_p;
- born = sbitmap_alloc (ira_max_point);
- dead = sbitmap_alloc (ira_max_point);
- bitmap_clear (born);
- bitmap_clear (dead);
+ stack_bitvec born (ira_max_point), dead (ira_max_point);
FOR_EACH_OBJECT (obj, oi)
for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
{
ira_assert (r->start <= r->finish);
- bitmap_set_bit (born, r->start);
- bitmap_set_bit (dead, r->finish);
+ born[r->start] = true;
+ dead[r->finish] = true;
}
- born_or_dead = sbitmap_alloc (ira_max_point);
- bitmap_ior (born_or_dead, born, dead);
+ stack_bitvec born_or_dead (born | dead);
map = (int *) ira_allocate (sizeof (int) * ira_max_point);
n = -1;
prev_born_p = prev_dead_p = false;
- EXECUTE_IF_SET_IN_BITMAP (born_or_dead, 0, i, sbi)
- {
- born_p = bitmap_bit_p (born, i);
- dead_p = bitmap_bit_p (dead, i);
- if ((prev_born_p && ! prev_dead_p && born_p && ! dead_p)
- || (prev_dead_p && ! prev_born_p && dead_p && ! born_p))
- map[i] = n;
- else
- map[i] = ++n;
- prev_born_p = born_p;
- prev_dead_p = dead_p;
- }
- sbitmap_free (born_or_dead);
- sbitmap_free (born);
- sbitmap_free (dead);
+ for (size_t i = born_or_dead.begin (); i < born_or_dead.end (); i++)
+ if (born_or_dead[i])
+ {
+ born_p = born[i];
+ dead_p = dead[i];
+ if ((prev_born_p && ! prev_dead_p && born_p && ! dead_p)
+ || (prev_dead_p && ! prev_born_p && dead_p && ! born_p))
+ map[i] = n;
+ else
+ map[i] = ++n;
+ prev_born_p = born_p;
+ prev_dead_p = dead_p;
+ }
+
n++;
if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
fprintf (ira_dump_file, "Compressing live ranges: from %d to %d - %d%%\n",
diff --git a/gcc/loop-unroll.c b/gcc/loop-unroll.c
index 2befb61719e..08ff5919d5b 100644
--- a/gcc/loop-unroll.c
+++ b/gcc/loop-unroll.c
@@ -20,6 +20,7 @@ along with GCC; see the file COPYING3. If not see
#include "config.h"
#include "system.h"
#include "coretypes.h"
+#include "bitvec.h"
#include "tm.h"
#include "rtl.h"
#include "hash-set.h"
@@ -493,7 +494,6 @@ unroll_loop_constant_iterations (struct loop *loop)
{
unsigned HOST_WIDE_INT niter;
unsigned exit_mod;
- sbitmap wont_exit;
unsigned i;
edge e;
unsigned max_unroll = loop->lpt_decision.times;
@@ -509,8 +509,8 @@ unroll_loop_constant_iterations (struct loop *loop)
exit_mod = niter % (max_unroll + 1);
- wont_exit = sbitmap_alloc (max_unroll + 1);
- bitmap_ones (wont_exit);
+ stack_bitvec wont_exit (max_unroll + 1);
+ wont_exit.set ();
auto_vec<edge> remove_edges;
if (flag_split_ivs_in_unroller
@@ -527,9 +527,9 @@ unroll_loop_constant_iterations (struct loop *loop)
fprintf (dump_file, ";; Condition at beginning of loop.\n");
/* Peel exit_mod iterations. */
- bitmap_clear_bit (wont_exit, 0);
+ wont_exit[0] = false;
if (desc->noloop_assumptions)
- bitmap_clear_bit (wont_exit, 1);
+ wont_exit[1] = false;
if (exit_mod)
{
@@ -557,7 +557,7 @@ unroll_loop_constant_iterations (struct loop *loop)
loop->any_estimate = false;
}
- bitmap_set_bit (wont_exit, 1);
+ wont_exit[1] = true;
}
else
{
@@ -573,9 +573,9 @@ unroll_loop_constant_iterations (struct loop *loop)
if (exit_mod != max_unroll
|| desc->noloop_assumptions)
{
- bitmap_clear_bit (wont_exit, 0);
+ wont_exit[0] = false;
if (desc->noloop_assumptions)
- bitmap_clear_bit (wont_exit, 1);
+ wont_exit[1] = false;
opt_info_start_duplication (opt_info);
ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
@@ -600,11 +600,11 @@ unroll_loop_constant_iterations (struct loop *loop)
loop->any_estimate = false;
desc->noloop_assumptions = NULL_RTX;
- bitmap_set_bit (wont_exit, 0);
- bitmap_set_bit (wont_exit, 1);
+ wont_exit[0] = true;
+ wont_exit[1] = true;
}
- bitmap_clear_bit (wont_exit, max_unroll);
+ wont_exit[max_unroll] = false;
}
/* Now unroll the loop. */
@@ -626,8 +626,6 @@ unroll_loop_constant_iterations (struct loop *loop)
free_opt_info (opt_info);
}
- free (wont_exit);
-
if (exit_at_end)
{
basic_block exit_block = get_bb_copy (desc->in_edge->src);
@@ -881,7 +879,6 @@ unroll_loop_runtime_iterations (struct loop *loop)
rtx_insn *init_code, *branch_code;
unsigned i, j, p;
basic_block preheader, *body, swtch, ezc_swtch;
- sbitmap wont_exit;
int may_exit_copy;
unsigned n_peel;
edge e;
@@ -956,16 +953,15 @@ unroll_loop_runtime_iterations (struct loop *loop)
auto_vec<edge> remove_edges;
- wont_exit = sbitmap_alloc (max_unroll + 2);
+ stack_bitvec wont_exit (max_unroll + 2);
/* Peel the first copy of loop body (almost always we must leave exit test
here; the only exception is when we have extra zero check and the number
of iterations is reliable. Also record the place of (possible) extra
zero check. */
- bitmap_clear (wont_exit);
if (extra_zero_check
&& !desc->noloop_assumptions)
- bitmap_set_bit (wont_exit, 1);
+ wont_exit[1] = true;
ezc_swtch = loop_preheader_edge (loop)->src;
ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
1, wont_exit, desc->out_edge,
@@ -979,9 +975,9 @@ unroll_loop_runtime_iterations (struct loop *loop)
for (i = 0; i < n_peel; i++)
{
/* Peel the copy. */
- bitmap_clear (wont_exit);
+ wont_exit.clear ();
if (i != n_peel - 1 || !last_may_exit)
- bitmap_set_bit (wont_exit, 1);
+ wont_exit[1] = true;
ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
1, wont_exit, desc->out_edge,
&remove_edges,
@@ -1035,8 +1031,8 @@ unroll_loop_runtime_iterations (struct loop *loop)
/* And unroll loop. */
- bitmap_ones (wont_exit);
- bitmap_clear_bit (wont_exit, may_exit_copy);
+ wont_exit.set ();
+ wont_exit[may_exit_copy] = false;
opt_info_start_duplication (opt_info);
ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
@@ -1055,8 +1051,6 @@ unroll_loop_runtime_iterations (struct loop *loop)
free_opt_info (opt_info);
}
- free (wont_exit);
-
if (exit_at_end)
{
basic_block exit_block = get_bb_copy (desc->in_edge->src);
@@ -1213,7 +1207,6 @@ decide_unroll_stupid (struct loop *loop, int flags)
static void
unroll_loop_stupid (struct loop *loop)
{
- sbitmap wont_exit;
unsigned nunroll = loop->lpt_decision.times;
struct niter_desc *desc = get_simple_loop_desc (loop);
struct opt_info *opt_info = NULL;
@@ -1224,8 +1217,7 @@ unroll_loop_stupid (struct loop *loop)
opt_info = analyze_insns_in_loop (loop);
- wont_exit = sbitmap_alloc (nunroll + 1);
- bitmap_clear (wont_exit);
+ stack_bitvec wont_exit (nunroll + 1);
opt_info_start_duplication (opt_info);
ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
@@ -1243,8 +1235,6 @@ unroll_loop_stupid (struct loop *loop)
free_opt_info (opt_info);
}
- free (wont_exit);
-
if (desc->simple_p)
{
/* We indeed may get here provided that there are nontrivial assumptions
diff --git a/gcc/lower-subreg.c b/gcc/lower-subreg.c
index 9d5fd3be31f..d6746481f17 100644
--- a/gcc/lower-subreg.c
+++ b/gcc/lower-subreg.c
@@ -24,6 +24,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "machmode.h"
#include "tm.h"
+#include "bitvec.h"
#include "hash-set.h"
#include "vec.h"
#include "double-int.h"
@@ -1543,16 +1544,12 @@ decompose_multiword_subregs (bool decompose_copies)
bitmap_and_compl_into (decomposable_context, non_decomposable_context);
if (!bitmap_empty_p (decomposable_context))
{
- sbitmap sub_blocks;
- unsigned int i;
- sbitmap_iterator sbi;
bitmap_iterator iter;
unsigned int regno;
propagate_pseudo_copies ();
- sub_blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
- bitmap_clear (sub_blocks);
+ stack_bitvec sub_blocks (last_basic_block_for_fn (cfun));
EXECUTE_IF_SET_IN_BITMAP (decomposable_context, 0, regno, iter)
decompose_register (regno);
@@ -1611,7 +1608,7 @@ decompose_multiword_subregs (bool decompose_copies)
extract_insn (insn);
if (cfi)
- bitmap_set_bit (sub_blocks, bb->index);
+ sub_blocks[bb->index] = true;
}
}
else
@@ -1654,33 +1651,32 @@ decompose_multiword_subregs (bool decompose_copies)
of a basic block, split those blocks now. Note that we only handle
the case where splitting a load has caused multiple possibly trapping
loads to appear. */
- EXECUTE_IF_SET_IN_BITMAP (sub_blocks, 0, i, sbi)
- {
- rtx_insn *insn, *end;
- edge fallthru;
-
- bb = BASIC_BLOCK_FOR_FN (cfun, i);
- insn = BB_HEAD (bb);
- end = BB_END (bb);
-
- while (insn != end)
- {
- if (control_flow_insn_p (insn))
- {
- /* Split the block after insn. There will be a fallthru
- edge, which is OK so we keep it. We have to create the
- exception edges ourselves. */
- fallthru = split_block (bb, insn);
- rtl_make_eh_edge (NULL, bb, BB_END (bb));
- bb = fallthru->dest;
- insn = BB_HEAD (bb);
- }
- else
- insn = NEXT_INSN (insn);
- }
- }
-
- sbitmap_free (sub_blocks);
+ for (size_t i = sub_blocks.begin (); i < sub_blocks.end (); i++)
+ if (sub_blocks[i])
+ {
+ rtx_insn *insn, *end;
+ edge fallthru;
+
+ bb = BASIC_BLOCK_FOR_FN (cfun, i);
+ insn = BB_HEAD (bb);
+ end = BB_END (bb);
+
+ while (insn != end)
+ {
+ if (control_flow_insn_p (insn))
+ {
+ /* Split the block after insn. There will be a fallthru
+ edge, which is OK so we keep it. We have to create the
+ exception edges ourselves. */
+ fallthru = split_block (bb, insn);
+ rtl_make_eh_edge (NULL, bb, BB_END (bb));
+ bb = fallthru->dest;
+ insn = BB_HEAD (bb);
+ }
+ else
+ insn = NEXT_INSN (insn);
+ }
+ }
}
{
diff --git a/gcc/lra-lives.c b/gcc/lra-lives.c
index 9dfffb6f287..ad13633ccd8 100644
--- a/gcc/lra-lives.c
+++ b/gcc/lra-lives.c
@@ -28,6 +28,7 @@ along with GCC; see the file COPYING3. If not see
#include "config.h"
#include "system.h"
#include "coretypes.h"
+#include "bitvec.h"
#include "tm.h"
#include "hard-reg-set.h"
#include "rtl.h"
@@ -1047,50 +1048,42 @@ remove_some_program_points_and_update_live_ranges (void)
int n, max_regno;
int *map;
lra_live_range_t r, prev_r, next_r;
- sbitmap born_or_dead, born, dead;
- sbitmap_iterator sbi;
bool born_p, dead_p, prev_born_p, prev_dead_p;
- born = sbitmap_alloc (lra_live_max_point);
- dead = sbitmap_alloc (lra_live_max_point);
- bitmap_clear (born);
- bitmap_clear (dead);
+ stack_bitvec born (lra_live_max_point), dead (lra_live_max_point);
max_regno = max_reg_num ();
for (i = FIRST_PSEUDO_REGISTER; i < (unsigned) max_regno; i++)
{
for (r = lra_reg_info[i].live_ranges; r != NULL; r = r->next)
{
lra_assert (r->start <= r->finish);
- bitmap_set_bit (born, r->start);
- bitmap_set_bit (dead, r->finish);
+ born[r->start] = true;
+ dead[r->finish] = true;
}
}
- born_or_dead = sbitmap_alloc (lra_live_max_point);
- bitmap_ior (born_or_dead, born, dead);
+ stack_bitvec born_or_dead = born | dead;
map = XCNEWVEC (int, lra_live_max_point);
n = -1;
prev_born_p = prev_dead_p = false;
- EXECUTE_IF_SET_IN_BITMAP (born_or_dead, 0, i, sbi)
- {
- born_p = bitmap_bit_p (born, i);
- dead_p = bitmap_bit_p (dead, i);
- if ((prev_born_p && ! prev_dead_p && born_p && ! dead_p)
- || (prev_dead_p && ! prev_born_p && dead_p && ! born_p))
- {
- map[i] = n;
- lra_point_freq[n] = MAX (lra_point_freq[n], lra_point_freq[i]);
- }
- else
- {
- map[i] = ++n;
- lra_point_freq[n] = lra_point_freq[i];
- }
- prev_born_p = born_p;
- prev_dead_p = dead_p;
- }
- sbitmap_free (born_or_dead);
- sbitmap_free (born);
- sbitmap_free (dead);
+ for (size_t i = born_or_dead.begin (); i < born_or_dead.end (); i++)
+ if (born_or_dead[i])
+ {
+ born_p = born[i];
+ dead_p = dead[i];
+ if ((prev_born_p && ! prev_dead_p && born_p && ! dead_p)
+ || (prev_dead_p && ! prev_born_p && dead_p && ! born_p))
+ {
+ map[i] = n;
+ lra_point_freq[n] = MAX (lra_point_freq[n], lra_point_freq[i]);
+ }
+ else
+ {
+ map[i] = ++n;
+ lra_point_freq[n] = lra_point_freq[i];
+ }
+ prev_born_p = born_p;
+ prev_dead_p = dead_p;
+ }
n++;
if (lra_dump_file != NULL)
fprintf (lra_dump_file, "Compressing live ranges: from %d to %d - %d%%\n",
diff --git a/gcc/lra.c b/gcc/lra.c
index f4d7a3c071f..3df2737709d 100644
--- a/gcc/lra.c
+++ b/gcc/lra.c
@@ -103,6 +103,7 @@ along with GCC; see the file COPYING3. If not see
#include "config.h"
#include "system.h"
#include "coretypes.h"
+#include "bitvec.h"
#include "tm.h"
#include "hard-reg-set.h"
#include "rtl.h"
@@ -2443,11 +2444,9 @@ lra (FILE *f)
/* We've possibly turned single trapping insn into multiple ones. */
if (cfun->can_throw_non_call_exceptions)
{
- sbitmap blocks;
- blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
- bitmap_ones (blocks);
+ stack_bitvec blocks (last_basic_block_for_fn (cfun));
+ blocks.set ();
find_many_sub_basic_blocks (blocks);
- sbitmap_free (blocks);
}
if (inserted_p)
diff --git a/gcc/modulo-sched.c b/gcc/modulo-sched.c
index 22cd21650f6..3eda4f43837 100644
--- a/gcc/modulo-sched.c
+++ b/gcc/modulo-sched.c
@@ -22,6 +22,7 @@ along with GCC; see the file COPYING3. If not see
#include "config.h"
#include "system.h"
#include "coretypes.h"
+#include "bitvec.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "rtl.h"
@@ -230,7 +231,7 @@ static void reset_partial_schedule (partial_schedule_ptr, int new_ii);
void print_partial_schedule (partial_schedule_ptr, FILE *);
static void verify_partial_schedule (partial_schedule_ptr, sbitmap);
static ps_insn_ptr ps_add_node_check_conflicts (partial_schedule_ptr,
- int, int, sbitmap, sbitmap);
+ int, int, sbitmap, bitvec *);
static void rotate_partial_schedule (partial_schedule_ptr, int);
void set_row_column_for_ps (partial_schedule_ptr);
static void ps_insert_empty_row (partial_schedule_ptr, int, sbitmap);
@@ -248,11 +249,11 @@ static void generate_prolog_epilog (partial_schedule_ptr, struct loop *,
rtx, rtx);
static int calculate_stage_count (partial_schedule_ptr, int);
static void calculate_must_precede_follow (ddg_node_ptr, int, int,
- int, int, sbitmap, sbitmap, sbitmap);
+ int, int, sbitmap, sbitmap, bitvec *);
static int get_sched_window (partial_schedule_ptr, ddg_node_ptr,
sbitmap, int, int *, int *, int *);
static bool try_scheduling_node_in_cycle (partial_schedule_ptr, int, int,
- sbitmap, int *, sbitmap, sbitmap);
+ sbitmap, int *, sbitmap, bitvec *);
static void remove_node_from_ps (partial_schedule_ptr, ps_insn_ptr);
#define NODE_ASAP(node) ((node)->aux.count)
@@ -577,7 +578,7 @@ set_columns_for_ps (partial_schedule_ptr ps)
Return true on success. */
static bool
schedule_reg_move (partial_schedule_ptr ps, int i_reg_move,
- sbitmap distance1_uses, sbitmap must_follow)
+ sbitmap distance1_uses, bitvec *must_follow)
{
unsigned int u;
int this_time, this_distance, this_start, this_end, this_latency;
@@ -668,8 +669,8 @@ schedule_reg_move (partial_schedule_ptr ps, int i_reg_move,
fprintf (dump_file, "%11d %11d %5s %s\n", start, end, "", "(max, min)");
}
- bitmap_clear (must_follow);
- bitmap_set_bit (must_follow, move->def);
+ must_follow->clear ();
+ (*must_follow)[move->def] = true;
start = MAX (start, end - (ii - 1));
for (c = end; c >= start; c--)
@@ -719,7 +720,6 @@ schedule_reg_moves (partial_schedule_ptr ps)
rtx prev_reg, old_reg;
int first_move;
int distances[2];
- sbitmap must_follow;
sbitmap distance1_uses;
rtx set = single_set (u->insn);
@@ -830,12 +830,11 @@ schedule_reg_moves (partial_schedule_ptr ps)
}
}
- must_follow = sbitmap_alloc (first_move + nreg_moves);
+ stack_bitvec must_follow (first_move + nreg_moves);
for (i_reg_move = 0; i_reg_move < nreg_moves; i_reg_move++)
if (!schedule_reg_move (ps, first_move + i_reg_move,
- distance1_uses, must_follow))
+ distance1_uses, &must_follow))
break;
- sbitmap_free (must_follow);
if (distance1_uses)
sbitmap_free (distance1_uses);
if (i_reg_move < nreg_moves)
@@ -934,7 +933,7 @@ permute_partial_schedule (partial_schedule_ptr ps, rtx_insn *last)
window boundaries marked by START and END cycles. STEP is the
direction of the window. */
static inline void
-set_must_precede_follow (sbitmap *tmp_follow, sbitmap must_follow,
+set_must_precede_follow (bitvec **tmp_follow, bitvec *must_follow,
sbitmap *tmp_precede, sbitmap must_precede, int c,
int start, int end, int step)
{
@@ -1022,7 +1021,8 @@ optimize_sc (partial_schedule_ptr ps, ddg_ptr g)
int branch_cycle = SCHED_TIME (g->closing_branch->cuid);
int row = SMODULO (branch_cycle, ps->ii);
int num_splits = 0;
- sbitmap must_precede, must_follow, tmp_precede, tmp_follow;
+ sbitmap must_precede, tmp_precede;
+ bitvec *tmp_follow;
int c;
if (dump_file)
@@ -1061,14 +1061,14 @@ optimize_sc (partial_schedule_ptr ps, ddg_ptr g)
}
must_precede = sbitmap_alloc (g->num_nodes);
- must_follow = sbitmap_alloc (g->num_nodes);
+ stack_bitvec must_follow (g->num_nodes);
/* Try to schedule the branch is it's new cycle. */
calculate_must_precede_follow (g->closing_branch, start, end,
step, ii, sched_nodes,
- must_precede, must_follow);
+ must_precede, &must_follow);
- set_must_precede_follow (&tmp_follow, must_follow, &tmp_precede,
+ set_must_precede_follow (&tmp_follow, &must_follow, &tmp_precede,
must_precede, c, start, end, step);
/* Find the element in the partial schedule related to the closing
@@ -1094,7 +1094,7 @@ optimize_sc (partial_schedule_ptr ps, ddg_ptr g)
/* The branch was failed to be placed in row ii - 1.
Put it back in it's original place in the partial
schedualing. */
- set_must_precede_follow (&tmp_follow, must_follow, &tmp_precede,
+ set_must_precede_follow (&tmp_follow, &must_follow, &tmp_precede,
must_precede, branch_cycle, start, end,
step);
success =
@@ -1118,7 +1118,6 @@ optimize_sc (partial_schedule_ptr ps, ddg_ptr g)
}
free (must_precede);
- free (must_follow);
}
clear:
@@ -2062,7 +2061,7 @@ get_sched_window (partial_schedule_ptr ps, ddg_node_ptr u_node,
static void
calculate_must_precede_follow (ddg_node_ptr u_node, int start, int end,
int step, int ii, sbitmap sched_nodes,
- sbitmap must_precede, sbitmap must_follow)
+ sbitmap must_precede, bitvec *must_follow)
{
ddg_edge_ptr e;
int first_cycle_in_window, last_cycle_in_window;
@@ -2080,7 +2079,7 @@ calculate_must_precede_follow (ddg_node_ptr u_node, int start, int end,
last_cycle_in_window = (step == 1) ? end - step : start;
bitmap_clear (must_precede);
- bitmap_clear (must_follow);
+ must_follow->clear ();
if (dump_file)
fprintf (dump_file, "\nmust_precede: ");
@@ -2129,7 +2128,7 @@ calculate_must_precede_follow (ddg_node_ptr u_node, int start, int end,
if (dump_file)
fprintf (dump_file, "%d ", e->dest->cuid);
- bitmap_set_bit (must_follow, e->dest->cuid);
+ (*must_follow)[e->dest->cuid] = true;
}
if (dump_file)
@@ -2150,7 +2149,7 @@ static bool
try_scheduling_node_in_cycle (partial_schedule_ptr ps,
int u, int cycle, sbitmap sched_nodes,
int *num_splits, sbitmap must_precede,
- sbitmap must_follow)
+ bitvec *must_follow)
{
ps_insn_ptr psi;
bool success = 0;
@@ -2183,7 +2182,7 @@ sms_schedule_by_order (ddg_ptr g, int mii, int maxii, int *nodes_order)
int start, end, step; /* Place together into one struct? */
sbitmap sched_nodes = sbitmap_alloc (num_nodes);
sbitmap must_precede = sbitmap_alloc (num_nodes);
- sbitmap must_follow = sbitmap_alloc (num_nodes);
+ stack_bitvec must_follow (num_nodes);
sbitmap tobe_scheduled = sbitmap_alloc (num_nodes);
partial_schedule_ptr ps = create_partial_schedule (ii, g, DFA_HISTORY);
@@ -2229,13 +2228,14 @@ sms_schedule_by_order (ddg_ptr g, int mii, int maxii, int *nodes_order)
calculate_must_precede_follow (u_node, start, end, step, ii,
sched_nodes, must_precede,
- must_follow);
+ &must_follow);
for (c = start; c != end; c += step)
{
- sbitmap tmp_precede, tmp_follow;
+ sbitmap tmp_precede;
+ bitvec *tmp_follow;
- set_must_precede_follow (&tmp_follow, must_follow,
+ set_must_precede_follow (&tmp_follow, &must_follow,
&tmp_precede, must_precede,
c, start, end, step);
success =
@@ -2297,7 +2297,6 @@ sms_schedule_by_order (ddg_ptr g, int mii, int maxii, int *nodes_order)
sbitmap_free (sched_nodes);
sbitmap_free (must_precede);
- sbitmap_free (must_follow);
sbitmap_free (tobe_scheduled);
return ps;
@@ -2509,9 +2508,7 @@ static void
check_nodes_order (int *node_order, int num_nodes)
{
int i;
- sbitmap tmp = sbitmap_alloc (num_nodes);
-
- bitmap_clear (tmp);
+ stack_bitvec tmp (num_nodes);
if (dump_file)
fprintf (dump_file, "SMS final nodes order: \n");
@@ -2522,15 +2519,13 @@ check_nodes_order (int *node_order, int num_nodes)
if (dump_file)
fprintf (dump_file, "%d ", u);
- gcc_assert (u < num_nodes && u >= 0 && !bitmap_bit_p (tmp, u));
+ gcc_assert (u < num_nodes && u >= 0 && !tmp[u]);
- bitmap_set_bit (tmp, u);
+ tmp[u] = true;
}
if (dump_file)
fprintf (dump_file, "\n");
-
- sbitmap_free (tmp);
}
/* Order the nodes of G for scheduling and pass the result in
@@ -3027,7 +3022,7 @@ remove_node_from_ps (partial_schedule_ptr ps, ps_insn_ptr ps_i)
instructions and find the first possible column to put it in. */
static bool
ps_insn_find_column (partial_schedule_ptr ps, ps_insn_ptr ps_i,
- sbitmap must_precede, sbitmap must_follow)
+ sbitmap must_precede, bitvec *must_follow)
{
ps_insn_ptr next_ps_i;
ps_insn_ptr first_must_follow = NULL;
@@ -3048,7 +3043,7 @@ ps_insn_find_column (partial_schedule_ptr ps, ps_insn_ptr ps_i,
next_ps_i = next_ps_i->next_in_row)
{
if (must_follow
- && bitmap_bit_p (must_follow, next_ps_i->id)
+ && (*must_follow)[next_ps_i->id]
&& ! first_must_follow)
first_must_follow = next_ps_i;
if (must_precede && bitmap_bit_p (must_precede, next_ps_i->id))
@@ -3119,7 +3114,7 @@ ps_insn_find_column (partial_schedule_ptr ps, ps_insn_ptr ps_i,
PS_I when scheduled in the same cycle. */
static int
ps_insn_advance_column (partial_schedule_ptr ps, ps_insn_ptr ps_i,
- sbitmap must_follow)
+ const bitvec *must_follow)
{
ps_insn_ptr prev, next;
int row;
@@ -3134,7 +3129,7 @@ ps_insn_advance_column (partial_schedule_ptr ps, ps_insn_ptr ps_i,
/* Check if next_in_row is dependent on ps_i, both having same sched
times (typically ANTI_DEP). If so, ps_i cannot skip over it. */
- if (must_follow && bitmap_bit_p (must_follow, ps_i->next_in_row->id))
+ if (must_follow && (*must_follow)[ps_i->next_in_row->id])
return false;
/* Advance PS_I over its next_in_row in the doubly linked list. */
@@ -3166,7 +3161,7 @@ ps_insn_advance_column (partial_schedule_ptr ps, ps_insn_ptr ps_i,
in the same cycle. */
static ps_insn_ptr
add_node_to_ps (partial_schedule_ptr ps, int id, int cycle,
- sbitmap must_precede, sbitmap must_follow)
+ sbitmap must_precede, bitvec *must_follow)
{
ps_insn_ptr ps_i;
int row = SMODULO (cycle, ps->ii);
@@ -3265,7 +3260,7 @@ ps_has_conflicts (partial_schedule_ptr ps, int from, int to)
ps_insn_ptr
ps_add_node_check_conflicts (partial_schedule_ptr ps, int n,
int c, sbitmap must_precede,
- sbitmap must_follow)
+ bitvec *must_follow)
{
int has_conflicts = 0;
ps_insn_ptr ps_i;
diff --git a/gcc/recog.c b/gcc/recog.c
index a9d3b1f779b..0baa8d75fe6 100644
--- a/gcc/recog.c
+++ b/gcc/recog.c
@@ -22,6 +22,7 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tm.h"
+#include "bitvec.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
@@ -2969,12 +2970,10 @@ split_insn (rtx_insn *insn)
void
split_all_insns (void)
{
- sbitmap blocks;
bool changed;
basic_block bb;
- blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
- bitmap_clear (blocks);
+ stack_bitvec blocks (last_basic_block_for_fn (cfun));
changed = false;
FOR_EACH_BB_REVERSE_FN (bb, cfun)
@@ -3010,7 +3009,7 @@ split_all_insns (void)
{
if (split_insn (insn))
{
- bitmap_set_bit (blocks, bb->index);
+ blocks[bb->index] = true;
changed = true;
}
}
@@ -3025,8 +3024,6 @@ split_all_insns (void)
#ifdef ENABLE_CHECKING
verify_flow_info ();
#endif
-
- sbitmap_free (blocks);
}
/* Same as split_all_insns, but do not expect CFG to be available.
diff --git a/gcc/regcprop.c b/gcc/regcprop.c
index c809e774c58..cb40e46921a 100644
--- a/gcc/regcprop.c
+++ b/gcc/regcprop.c
@@ -20,6 +20,7 @@
#include "config.h"
#include "system.h"
#include "coretypes.h"
+#include "bitvec.h"
#include "tm.h"
#include "rtl.h"
#include "tm_p.h"
@@ -1242,13 +1243,11 @@ pass_cprop_hardreg::execute (function *fun)
{
struct value_data *all_vd;
basic_block bb;
- sbitmap visited;
bool analyze_called = false;
all_vd = XNEWVEC (struct value_data, last_basic_block_for_fn (fun));
- visited = sbitmap_alloc (last_basic_block_for_fn (fun));
- bitmap_clear (visited);
+ stack_bitvec visited (last_basic_block_for_fn (fun));
if (MAY_HAVE_DEBUG_INSNS)
debug_insn_changes_pool
@@ -1257,14 +1256,14 @@ pass_cprop_hardreg::execute (function *fun)
FOR_EACH_BB_FN (bb, fun)
{
- bitmap_set_bit (visited, bb->index);
+ visited[bb->index] = true;
/* If a block has a single predecessor, that we've already
processed, begin with the value data that was live at
the end of the predecessor block. */
/* ??? Ought to use more intelligent queuing of blocks. */
if (single_pred_p (bb)
- && bitmap_bit_p (visited, single_pred (bb)->index)
+ && visited[single_pred (bb)->index]
&& ! (single_pred_edge (bb)->flags & (EDGE_ABNORMAL_CALL | EDGE_EH)))
{
all_vd[bb->index] = all_vd[single_pred (bb)->index];
@@ -1292,7 +1291,7 @@ pass_cprop_hardreg::execute (function *fun)
if (MAY_HAVE_DEBUG_INSNS)
{
FOR_EACH_BB_FN (bb, fun)
- if (bitmap_bit_p (visited, bb->index)
+ if (visited[bb->index]
&& all_vd[bb->index].n_debug_insn_changes)
{
unsigned int regno;
@@ -1317,7 +1316,6 @@ pass_cprop_hardreg::execute (function *fun)
free_alloc_pool (debug_insn_changes_pool);
}
- sbitmap_free (visited);
free (all_vd);
return 0;
}
diff --git a/gcc/reload1.c b/gcc/reload1.c
index 5a010454c8e..2686a9f1dcb 100644
--- a/gcc/reload1.c
+++ b/gcc/reload1.c
@@ -20,6 +20,7 @@ along with GCC; see the file COPYING3. If not see
#include "config.h"
#include "system.h"
#include "coretypes.h"
+#include "bitvec.h"
#include "tm.h"
#include "machmode.h"
@@ -1337,11 +1338,9 @@ reload (rtx_insn *first, int global)
/* We've possibly turned single trapping insn into multiple ones. */
if (cfun->can_throw_non_call_exceptions)
{
- sbitmap blocks;
- blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
- bitmap_ones (blocks);
+ stack_bitvec blocks (last_basic_block_for_fn (cfun));
+ blocks.set ();
find_many_sub_basic_blocks (blocks);
- sbitmap_free (blocks);
}
if (inserted)
diff --git a/gcc/sched-int.h b/gcc/sched-int.h
index 1cb0e2d344d..0336675bb5c 100644
--- a/gcc/sched-int.h
+++ b/gcc/sched-int.h
@@ -1494,7 +1494,7 @@ extern void debug_rgn_dependencies (int);
extern void debug_dependencies (rtx_insn *, rtx_insn *);
extern void free_rgn_deps (void);
extern int contributes_to_priority (rtx_insn *, rtx_insn *);
-extern void extend_rgns (int *, int *, sbitmap, int *);
+extern void extend_rgns (int *, int *, bitvec *, int *);
extern void deps_join (struct deps_desc *, struct deps_desc *);
extern void rgn_setup_common_sched_info (void);
diff --git a/gcc/sched-rgn.c b/gcc/sched-rgn.c
index 76f78dfb504..ff05409bbef 100644
--- a/gcc/sched-rgn.c
+++ b/gcc/sched-rgn.c
@@ -46,6 +46,7 @@ along with GCC; see the file COPYING3. If not see
#include "config.h"
#include "system.h"
#include "coretypes.h"
+#include "bitvec.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "rtl.h"
@@ -585,10 +586,10 @@ too_large (int block, int *num_bbs, int *num_insns)
if (max_hdr[blk] == -1) \
max_hdr[blk] = hdr; \
else if (dfs_nr[max_hdr[blk]] > dfs_nr[hdr]) \
- bitmap_clear_bit (inner, hdr); \
+ inner[hdr] = false; \
else if (dfs_nr[max_hdr[blk]] < dfs_nr[hdr]) \
{ \
- bitmap_clear_bit (inner,max_hdr[blk]); \
+ inner[max_hdr[blk]] = false; \
max_hdr[blk] = hdr; \
} \
}
@@ -636,18 +637,6 @@ haifa_find_rgns (void)
int too_large_failure;
basic_block bb;
- /* Note if a block is a natural loop header. */
- sbitmap header;
-
- /* Note if a block is a natural inner loop header. */
- sbitmap inner;
-
- /* Note if a block is in the block queue. */
- sbitmap in_queue;
-
- /* Note if a block is in the block queue. */
- sbitmap in_stack;
-
/* Perform a DFS traversal of the cfg. Identify loop headers, inner loops
and a mapping from block to its loop header (if the block is contained
in a loop, else -1).
@@ -662,17 +651,18 @@ haifa_find_rgns (void)
dfs_nr = XCNEWVEC (int, last_basic_block_for_fn (cfun));
stack = XNEWVEC (edge_iterator, n_edges_for_fn (cfun));
- inner = sbitmap_alloc (last_basic_block_for_fn (cfun));
- bitmap_ones (inner);
+ /* Note if a block is a natural inner loop header. */
+ stack_bitvec inner (last_basic_block_for_fn (cfun));
+ inner.set ();
- header = sbitmap_alloc (last_basic_block_for_fn (cfun));
- bitmap_clear (header);
+ /* Note if a block is a natural loop header. */
+ bitvec header (last_basic_block_for_fn (cfun));
- in_queue = sbitmap_alloc (last_basic_block_for_fn (cfun));
- bitmap_clear (in_queue);
+ /* Note if a block is in the block queue. */
+ stack_bitvec in_queue (last_basic_block_for_fn (cfun));
- in_stack = sbitmap_alloc (last_basic_block_for_fn (cfun));
- bitmap_clear (in_stack);
+ /* Note if a block is in the block stack. */
+ stack_bitvec in_stack (last_basic_block_for_fn (cfun));
for (i = 0; i < last_basic_block_for_fn (cfun); i++)
max_hdr[i] = -1;
@@ -700,8 +690,8 @@ haifa_find_rgns (void)
gcc_assert (node != ENTRY_BLOCK);
child = ei_edge (current_edge)->dest->index;
gcc_assert (child != EXIT_BLOCK);
- bitmap_clear_bit (in_stack, child);
- if (max_hdr[child] >= 0 && bitmap_bit_p (in_stack, max_hdr[child]))
+ in_stack[child] = false;
+ if (max_hdr[child] >= 0 && in_stack[max_hdr[child]])
UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
ei_next (&current_edge);
}
@@ -717,7 +707,7 @@ haifa_find_rgns (void)
/* Process a node. */
node = ei_edge (current_edge)->src->index;
gcc_assert (node != ENTRY_BLOCK);
- bitmap_set_bit (in_stack, node);
+ in_stack[node] = true;
dfs_nr[node] = ++count;
/* We don't traverse to the exit block. */
@@ -732,10 +722,10 @@ haifa_find_rgns (void)
/* If the successor is in the stack, then we've found a loop.
Mark the loop, if it is not a natural loop, then it will
be rejected during the second traversal. */
- if (bitmap_bit_p (in_stack, child))
+ if (in_stack[child])
{
no_loops = 0;
- bitmap_set_bit (header, child);
+ header[child] = true;
UPDATE_LOOP_RELATIONS (node, child);
SET_EDGE_PASSED (current_edge);
ei_next (&current_edge);
@@ -747,7 +737,7 @@ haifa_find_rgns (void)
with a new edge. */
if (dfs_nr[child])
{
- if (max_hdr[child] >= 0 && bitmap_bit_p (in_stack, max_hdr[child]))
+ if (max_hdr[child] >= 0 && in_stack[max_hdr[child]])
UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
SET_EDGE_PASSED (current_edge);
ei_next (&current_edge);
@@ -801,11 +791,11 @@ haifa_find_rgns (void)
there basic blocks, which are forced to be region heads.
This is done to try to assemble few smaller regions
from a too_large region. */
- sbitmap extended_rgn_header = NULL;
+ bitvec extended_rgn_header;
bool extend_regions_p;
if (no_loops)
- bitmap_set_bit (header, 0);
+ header[0] = true;
/* Second traversal:find reducible inner loops and topologically sort
block of each region. */
@@ -816,16 +806,14 @@ haifa_find_rgns (void)
if (extend_regions_p)
{
degree1 = XNEWVEC (int, last_basic_block_for_fn (cfun));
- extended_rgn_header =
- sbitmap_alloc (last_basic_block_for_fn (cfun));
- bitmap_clear (extended_rgn_header);
+ extended_rgn_header.grow (last_basic_block_for_fn (cfun));
}
/* Find blocks which are inner loop headers. We still have non-reducible
loops to consider at this point. */
FOR_EACH_BB_FN (bb, cfun)
{
- if (bitmap_bit_p (header, bb->index) && bitmap_bit_p (inner, bb->index))
+ if (header[bb->index] && inner[bb->index])
{
edge e;
edge_iterator ei;
@@ -897,7 +885,7 @@ haifa_find_rgns (void)
&& single_succ (jbb) == EXIT_BLOCK_PTR_FOR_FN (cfun))
{
queue[++tail] = jbb->index;
- bitmap_set_bit (in_queue, jbb->index);
+ in_queue[jbb->index] = true;
if (too_large (jbb->index, &num_bbs, &num_insns))
{
@@ -921,7 +909,7 @@ haifa_find_rgns (void)
{
/* This is a loop latch. */
queue[++tail] = node;
- bitmap_set_bit (in_queue, node);
+ in_queue[node] = true;
if (too_large (node, &num_bbs, &num_insns))
{
@@ -980,10 +968,10 @@ haifa_find_rgns (void)
tail = -1;
break;
}
- else if (!bitmap_bit_p (in_queue, node) && node != bb->index)
+ else if (!in_queue[node] && node != bb->index)
{
queue[++tail] = node;
- bitmap_set_bit (in_queue, node);
+ in_queue[node] = true;
if (too_large (node, &num_bbs, &num_insns))
{
@@ -1049,7 +1037,7 @@ haifa_find_rgns (void)
of one too_large region. */
FOR_EACH_EDGE (e, ei, bb->succs)
if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
- bitmap_set_bit (extended_rgn_header, e->dest->index);
+ extended_rgn_header[e->dest->index] = true;
}
}
}
@@ -1059,10 +1047,9 @@ haifa_find_rgns (void)
{
free (degree1);
- bitmap_ior (header, header, extended_rgn_header);
- sbitmap_free (extended_rgn_header);
+ header |= extended_rgn_header;
- extend_rgns (degree, &idx, header, max_hdr);
+ extend_rgns (degree, &idx, &header, max_hdr);
}
}
@@ -1083,10 +1070,6 @@ haifa_find_rgns (void)
free (max_hdr);
free (degree);
free (stack);
- sbitmap_free (header);
- sbitmap_free (inner);
- sbitmap_free (in_queue);
- sbitmap_free (in_stack);
}
@@ -1172,7 +1155,7 @@ print_region_statistics (int *s1, int s1_sz, int *s2, int s2_sz)
(two blocks can reside within one region if they have
the same loop header). */
void
-extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
+extend_rgns (int *degree, int *idxp, bitvec *header, int *loop_hdr)
{
int *order, i, rescan = 0, idx = *idxp, iter = 0, max_iter, *max_hdr;
int nblocks = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
@@ -1218,7 +1201,7 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
edge_iterator ei;
int bbn = order[i];
- if (max_hdr[bbn] != -1 && !bitmap_bit_p (header, bbn))
+ if (max_hdr[bbn] != -1 && !(*header)[bbn])
{
int hdr = -1;
@@ -1257,7 +1240,7 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
{
/* If BB start its own region,
update set of headers with BB. */
- bitmap_set_bit (header, bbn);
+ (*header)[bbn] = true;
rescan = 1;
}
else
@@ -1509,7 +1492,6 @@ compute_trg_info (int trg)
edgelst el = { NULL, 0 };
int i, j, k, update_idx;
basic_block block;
- sbitmap visited;
edge_iterator ei;
edge e;
@@ -1532,7 +1514,7 @@ compute_trg_info (int trg)
sp->is_speculative = 0;
sp->src_prob = REG_BR_PROB_BASE;
- visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ stack_bitvec visited (last_basic_block_for_fn (cfun));
for (i = trg + 1; i < current_nr_blocks; i++)
{
@@ -1574,13 +1556,13 @@ compute_trg_info (int trg)
overrunning the end of the bblst_table. */
update_idx = 0;
- bitmap_clear (visited);
+ visited.clear ();
for (j = 0; j < el.nr_members; j++)
{
block = el.first_member[j]->src;
FOR_EACH_EDGE (e, ei, block->succs)
{
- if (!bitmap_bit_p (visited, e->dest->index))
+ if (!visited[e->dest->index])
{
for (k = 0; k < el.nr_members; k++)
if (e == el.first_member[k])
@@ -1589,7 +1571,7 @@ compute_trg_info (int trg)
if (k >= el.nr_members)
{
bblst_table[bblst_last++] = e->dest;
- bitmap_set_bit (visited, e->dest->index);
+ visited[e->dest->index] = true;
update_idx++;
}
}
@@ -1608,8 +1590,6 @@ compute_trg_info (int trg)
sp->src_prob = 0;
}
}
-
- sbitmap_free (visited);
}
/* Free the computed target info. */
@@ -2447,7 +2427,7 @@ sets_likely_spilled_1 (rtx x, const_rtx pat, void *data)
/* A bitmap to note insns that participate in any dependency. Used in
add_branch_dependences. */
-static sbitmap insn_referenced;
+static bitvec insn_referenced;
/* Add dependences so that branches are scheduled to run last in their
block. */
@@ -2502,7 +2482,7 @@ add_branch_dependences (rtx_insn *head, rtx_insn *tail)
{
if (! sched_insns_conditions_mutex_p (last, insn))
add_dependence (last, insn, REG_DEP_ANTI);
- bitmap_set_bit (insn_referenced, INSN_LUID (insn));
+ insn_referenced[INSN_LUID (insn)] = true;
}
CANT_MOVE (insn) = 1;
@@ -2526,7 +2506,7 @@ add_branch_dependences (rtx_insn *head, rtx_insn *tail)
{
insn = prev_nonnote_insn (insn);
- if (bitmap_bit_p (insn_referenced, INSN_LUID (insn))
+ if (insn_referenced[INSN_LUID (insn)]
|| DEBUG_INSN_P (insn))
continue;
@@ -3264,14 +3244,13 @@ sched_rgn_compute_dependencies (int rgn)
init_deps (bb_deps + bb, false);
/* Initialize bitmap used in add_branch_dependences. */
- insn_referenced = sbitmap_alloc (sched_max_luid);
- bitmap_clear (insn_referenced);
+ insn_referenced.grow (sched_max_luid);
/* Compute backward dependencies. */
for (bb = 0; bb < current_nr_blocks; bb++)
compute_block_dependences (bb);
- sbitmap_free (insn_referenced);
+ insn_referenced.clear_and_release ();
free_pending_lists ();
finish_deps_global ();
free (bb_deps);
diff --git a/gcc/sel-sched-ir.c b/gcc/sel-sched-ir.c
index 94f6c43a284..731b239bf3e 100644
--- a/gcc/sel-sched-ir.c
+++ b/gcc/sel-sched-ir.c
@@ -20,6 +20,7 @@ along with GCC; see the file COPYING3. If not see
#include "config.h"
#include "system.h"
#include "coretypes.h"
+#include "bitvec.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "rtl.h"
@@ -86,7 +87,7 @@ struct loop *current_loop_nest;
static vec<loop_p> loop_nests = vNULL;
/* Saves blocks already in loop regions, indexed by bb->index. */
-static sbitmap bbs_in_loop_rgns = NULL;
+static bitvec bbs_in_loop_rgns;
/* CFG hooks that are saved before changing create_basic_block hook. */
static struct cfg_hooks orig_cfg_hooks;
@@ -6041,7 +6042,7 @@ make_region_from_loop (struct loop *loop)
new_rgn_number = sel_create_new_region ();
sel_add_block_to_region (preheader_block, &bb_ord_index, new_rgn_number);
- bitmap_set_bit (bbs_in_loop_rgns, preheader_block->index);
+ bbs_in_loop_rgns[preheader_block->index] = true;
for (i = 0; i < loop->num_nodes; i++)
{
@@ -6052,11 +6053,11 @@ make_region_from_loop (struct loop *loop)
gcc_assert (new_rgn_number >= 0);
- if (! bitmap_bit_p (bbs_in_loop_rgns, loop_blocks[i]->index))
+ if (! bbs_in_loop_rgns[loop_blocks[i]->index])
{
sel_add_block_to_region (loop_blocks[i], &bb_ord_index,
new_rgn_number);
- bitmap_set_bit (bbs_in_loop_rgns, loop_blocks[i]->index);
+ bbs_in_loop_rgns[loop_blocks[i]->index] = true;
}
}
@@ -6101,7 +6102,7 @@ make_regions_from_loop_nest (struct loop *loop)
/* Traverse all inner nodes of the loop. */
for (cur_loop = loop->inner; cur_loop; cur_loop = cur_loop->next)
- if (! bitmap_bit_p (bbs_in_loop_rgns, cur_loop->header->index))
+ if (! bbs_in_loop_rgns[cur_loop->header->index])
return false;
/* At this moment all regular inner loops should have been pipelined.
@@ -6126,8 +6127,7 @@ sel_init_pipelining (void)
| LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS);
current_loop_nest = NULL;
- bbs_in_loop_rgns = sbitmap_alloc (last_basic_block_for_fn (cfun));
- bitmap_clear (bbs_in_loop_rgns);
+ bbs_in_loop_rgns.grow (last_basic_block_for_fn (cfun));
recompute_rev_top_order ();
}
@@ -6215,17 +6215,17 @@ make_regions_from_the_rest (void)
{
degree[bb->index] = 0;
- if (!bitmap_bit_p (bbs_in_loop_rgns, bb->index))
+ if (!bbs_in_loop_rgns[bb->index])
{
FOR_EACH_EDGE (e, ei, bb->preds)
- if (!bitmap_bit_p (bbs_in_loop_rgns, e->src->index))
+ if (!bbs_in_loop_rgns[e->src->index])
degree[bb->index]++;
}
else
degree[bb->index] = -1;
}
- extend_rgns (degree, &cur_rgn_blocks, bbs_in_loop_rgns, loop_hdr);
+ extend_rgns (degree, &cur_rgn_blocks, &bbs_in_loop_rgns, loop_hdr);
/* Any block that did not end up in a region is placed into a region
by itself. */
@@ -6286,8 +6286,7 @@ sel_find_rgns (void)
make_regions_from_the_rest ();
/* We don't need bbs_in_loop_rgns anymore. */
- sbitmap_free (bbs_in_loop_rgns);
- bbs_in_loop_rgns = NULL;
+ bbs_in_loop_rgns.clear_and_release ();
}
/* Add the preheader blocks from previous loop to current region taking
diff --git a/gcc/sel-sched.c b/gcc/sel-sched.c
index 988f9d50719..2ac1c4d4256 100644
--- a/gcc/sel-sched.c
+++ b/gcc/sel-sched.c
@@ -20,6 +20,7 @@ along with GCC; see the file COPYING3. If not see
#include "config.h"
#include "system.h"
#include "coretypes.h"
+#include "bitvec.h"
#include "tm.h"
#include "rtl-error.h"
#include "tm_p.h"
@@ -6759,14 +6760,14 @@ static int cur_seqno;
compute seqnos for visited insns, marking visited bbs in VISITED_BBS.
Clear visited blocks from BLOCKS_TO_RESCHEDULE. */
static void
-init_seqno_1 (basic_block bb, sbitmap visited_bbs, bitmap blocks_to_reschedule)
+init_seqno_1 (basic_block bb, bitvec *visited_bbs, bitmap blocks_to_reschedule)
{
int bbi = BLOCK_TO_BB (bb->index);
insn_t insn, note = bb_note (bb);
insn_t succ_insn;
succ_iterator si;
- bitmap_set_bit (visited_bbs, bbi);
+ (*visited_bbs)[bbi] = true;
if (blocks_to_reschedule)
bitmap_clear_bit (blocks_to_reschedule, bb->index);
@@ -6778,7 +6779,7 @@ init_seqno_1 (basic_block bb, sbitmap visited_bbs, bitmap blocks_to_reschedule)
gcc_assert (in_current_region_p (succ));
- if (!bitmap_bit_p (visited_bbs, succ_bbi))
+ if (!(*visited_bbs)[succ_bbi])
{
gcc_assert (succ_bbi > bbi);
@@ -6801,36 +6802,34 @@ init_seqno_1 (basic_block bb, sbitmap visited_bbs, bitmap blocks_to_reschedule)
static int
init_seqno (bitmap blocks_to_reschedule, basic_block from)
{
- sbitmap visited_bbs;
bitmap_iterator bi;
unsigned bbi;
- visited_bbs = sbitmap_alloc (current_nr_blocks);
+ stack_bitvec visited_bbs (current_nr_blocks);
if (blocks_to_reschedule)
{
- bitmap_ones (visited_bbs);
+ visited_bbs.set ();
EXECUTE_IF_SET_IN_BITMAP (blocks_to_reschedule, 0, bbi, bi)
{
gcc_assert (BLOCK_TO_BB (bbi) < current_nr_blocks);
- bitmap_clear_bit (visited_bbs, BLOCK_TO_BB (bbi));
+ visited_bbs[BLOCK_TO_BB (bbi)] = false;
}
}
else
{
- bitmap_clear (visited_bbs);
+ visited_bbs.clear ();
from = EBB_FIRST_BB (0);
}
cur_seqno = sched_max_luid - 1;
- init_seqno_1 (from, visited_bbs, blocks_to_reschedule);
+ init_seqno_1 (from, &visited_bbs, blocks_to_reschedule);
/* cur_seqno may be positive if the number of instructions is less than
sched_max_luid - 1 (when rescheduling or if some instructions have been
removed by the call to purge_empty_blocks in sel_sched_region_1). */
gcc_assert (cur_seqno >= 0);
- sbitmap_free (visited_bbs);
return sched_max_luid - 1;
}
diff --git a/gcc/store-motion.c b/gcc/store-motion.c
index 530766f800d..74fd340d073 100644
--- a/gcc/store-motion.c
+++ b/gcc/store-motion.c
@@ -20,6 +20,7 @@ along with GCC; see the file COPYING3. If not see
#include "config.h"
#include "system.h"
#include "coretypes.h"
+#include "bitvec.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "toplev.h"
@@ -870,7 +871,7 @@ remove_reachable_equiv_notes (basic_block bb, struct st_expr *smexpr)
edge_iterator *stack, ei;
int sp;
edge act;
- sbitmap visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ stack_bitvec visited (last_basic_block_for_fn (cfun));
rtx last, note;
rtx_insn *insn;
rtx mem = smexpr->pattern;
@@ -879,8 +880,6 @@ remove_reachable_equiv_notes (basic_block bb, struct st_expr *smexpr)
sp = 0;
ei = ei_start (bb->succs);
- bitmap_clear (visited);
-
act = (EDGE_COUNT (ei_container (ei)) > 0 ? EDGE_I (ei_container (ei), 0) : NULL);
while (1)
{
@@ -889,7 +888,6 @@ remove_reachable_equiv_notes (basic_block bb, struct st_expr *smexpr)
if (!sp)
{
free (stack);
- sbitmap_free (visited);
return;
}
act = ei_edge (stack[--sp]);
@@ -897,14 +895,14 @@ remove_reachable_equiv_notes (basic_block bb, struct st_expr *smexpr)
bb = act->dest;
if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
- || bitmap_bit_p (visited, bb->index))
+ || visited[bb->index])
{
if (!ei_end_p (ei))
ei_next (&ei);
act = (! ei_end_p (ei)) ? ei_edge (ei) : NULL;
continue;
}
- bitmap_set_bit (visited, bb->index);
+ visited[bb->index] = true;
if (bitmap_bit_p (st_antloc[bb->index], smexpr->index))
{
diff --git a/gcc/tree-eh.c b/gcc/tree-eh.c
index a111e9d93ae..47cfc695736 100644
--- a/gcc/tree-eh.c
+++ b/gcc/tree-eh.c
@@ -20,6 +20,7 @@ along with GCC; see the file COPYING3. If not see
#include "config.h"
#include "system.h"
#include "coretypes.h"
+#include "bitvec.h"
#include "hash-table.h"
#include "tm.h"
#include "hash-set.h"
@@ -3810,25 +3811,16 @@ make_pass_lower_eh_dispatch (gcc::context *ctxt)
The caller is responsible for freeing the returned sbitmaps. */
static void
-mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
+mark_reachable_handlers (bitvec *r_reachable, bitvec *lp_reachable)
{
- sbitmap r_reachable, lp_reachable;
basic_block bb;
- bool mark_landing_pads = (lp_reachablep != NULL);
- gcc_checking_assert (r_reachablep != NULL);
+ bool mark_landing_pads = (lp_reachable != NULL);
+ gcc_checking_assert (r_reachable != NULL);
- r_reachable = sbitmap_alloc (cfun->eh->region_array->length ());
- bitmap_clear (r_reachable);
- *r_reachablep = r_reachable;
+ r_reachable->grow (cfun->eh->region_array->length ());
if (mark_landing_pads)
- {
- lp_reachable = sbitmap_alloc (cfun->eh->lp_array->length ());
- bitmap_clear (lp_reachable);
- *lp_reachablep = lp_reachable;
- }
- else
- lp_reachable = NULL;
+ lp_reachable->grow (cfun->eh->lp_array->length ());
FOR_EACH_BB_FN (bb, cfun)
{
@@ -3845,15 +3837,15 @@ mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
/* Negative LP numbers are MUST_NOT_THROW regions which
are not considered BB enders. */
if (lp_nr < 0)
- bitmap_set_bit (r_reachable, -lp_nr);
+ (*r_reachable)[-lp_nr] = true;
/* Positive LP numbers are real landing pads, and BB enders. */
else if (lp_nr > 0)
{
gcc_assert (gsi_one_before_end_p (gsi));
eh_region region = get_eh_region_from_lp_number (lp_nr);
- bitmap_set_bit (r_reachable, region->index);
- bitmap_set_bit (lp_reachable, lp_nr);
+ (*r_reachable)[region->index] = true;
+ (*lp_reachable)[lp_nr] = true;
}
}
@@ -3861,13 +3853,12 @@ mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
switch (gimple_code (stmt))
{
case GIMPLE_RESX:
- bitmap_set_bit (r_reachable,
- gimple_resx_region (as_a <gresx *> (stmt)));
+ (*r_reachable)[gimple_resx_region (as_a <gresx *> (stmt))]
+ = true;
break;
case GIMPLE_EH_DISPATCH:
- bitmap_set_bit (r_reachable,
- gimple_eh_dispatch_region (
- as_a <geh_dispatch *> (stmt)));
+ (*r_reachable)[gimple_eh_dispatch_region (as_a <geh_dispatch *>
+ (stmt))] = true;
break;
case GIMPLE_CALL:
if (gimple_call_builtin_p (stmt, BUILT_IN_EH_COPY_VALUES))
@@ -3877,7 +3868,7 @@ mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
HOST_WIDE_INT ri = tree_to_shwi (rt);
gcc_assert (ri = (int)ri);
- bitmap_set_bit (r_reachable, ri);
+ (*r_reachable)[ri] = true;
}
break;
default:
@@ -3892,7 +3883,7 @@ mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
static void
remove_unreachable_handlers (void)
{
- sbitmap r_reachable, lp_reachable;
+ stack_bitvec r_reachable, lp_reachable;
eh_region region;
eh_landing_pad lp;
unsigned i;
@@ -3904,15 +3895,15 @@ remove_unreachable_handlers (void)
fprintf (dump_file, "Before removal of unreachable regions:\n");
dump_eh_tree (dump_file, cfun);
fprintf (dump_file, "Reachable regions: ");
- dump_bitmap_file (dump_file, r_reachable);
+ r_reachable.dump (dump_file);
fprintf (dump_file, "Reachable landing pads: ");
- dump_bitmap_file (dump_file, lp_reachable);
+ lp_reachable.dump (dump_file);
}
if (dump_file)
{
FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
- if (region && !bitmap_bit_p (r_reachable, region->index))
+ if (region && !r_reachable[region->index])
fprintf (dump_file,
"Removing unreachable region %d\n",
region->index);
@@ -3921,7 +3912,7 @@ remove_unreachable_handlers (void)
remove_unreachable_eh_regions (r_reachable);
FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
- if (lp && !bitmap_bit_p (lp_reachable, lp->index))
+ if (lp && !lp_reachable[lp->index])
{
if (dump_file)
fprintf (dump_file,
@@ -3937,9 +3928,6 @@ remove_unreachable_handlers (void)
fprintf (dump_file, "\n\n");
}
- sbitmap_free (r_reachable);
- sbitmap_free (lp_reachable);
-
#ifdef ENABLE_CHECKING
verify_eh_tree (cfun);
#endif
@@ -3980,7 +3968,7 @@ static void
remove_unreachable_handlers_no_lp (void)
{
eh_region region;
- sbitmap r_reachable;
+ stack_bitvec r_reachable;
unsigned i;
mark_reachable_handlers (&r_reachable, /*lp_reachablep=*/NULL);
@@ -3992,18 +3980,16 @@ remove_unreachable_handlers_no_lp (void)
if (region->landing_pads != NULL
|| region->type == ERT_MUST_NOT_THROW)
- bitmap_set_bit (r_reachable, region->index);
+ r_reachable[region->index] = true;
if (dump_file
- && !bitmap_bit_p (r_reachable, region->index))
+ && !r_reachable[region->index])
fprintf (dump_file,
"Removing unreachable region %d\n",
region->index);
}
remove_unreachable_eh_regions (r_reachable);
-
- sbitmap_free (r_reachable);
}
/* Undo critical edge splitting on an EH landing pad. Earlier, we
diff --git a/gcc/tree-into-ssa.c b/gcc/tree-into-ssa.c
index 258962882f4..97880a55a7a 100644
--- a/gcc/tree-into-ssa.c
+++ b/gcc/tree-into-ssa.c
@@ -22,6 +22,7 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tm.h"
+#include "bitvec.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
@@ -135,7 +136,7 @@ static sbitmap old_ssa_names;
the operations done on them are presence tests. */
static sbitmap new_ssa_names;
-static sbitmap interesting_blocks;
+static bitvec interesting_blocks;
/* Set of SSA names that have been marked to be released after they
were registered in the replacement table. They will be finally
@@ -715,7 +716,7 @@ mark_def_sites (basic_block bb, gimple stmt, bitmap kills)
set_rewrite_uses (stmt, true);
}
if (rewrite_uses_p (stmt))
- bitmap_set_bit (interesting_blocks, bb->index);
+ interesting_blocks[bb->index] = true;
return;
}
@@ -743,7 +744,7 @@ mark_def_sites (basic_block bb, gimple stmt, bitmap kills)
/* If we found the statement interesting then also mark the block BB
as interesting. */
if (rewrite_uses_p (stmt) || register_defs_p (stmt))
- bitmap_set_bit (interesting_blocks, bb->index);
+ interesting_blocks[bb->index] = true;
}
/* Structure used by prune_unused_phi_nodes to record bounds of the intervals
@@ -1480,7 +1481,7 @@ rewrite_dom_walker::before_dom_children (basic_block bb)
/* Step 2. Rewrite every variable used in each statement in the block
with its immediate reaching definitions. Update the current definition
of a variable when a new real or virtual definition is found. */
- if (bitmap_bit_p (interesting_blocks, bb->index))
+ if (interesting_blocks[bb->index])
for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
gsi_next (&gsi))
rewrite_stmt (&gsi);
@@ -2179,7 +2180,7 @@ rewrite_update_dom_walker::before_dom_children (basic_block bb)
}
/* Step 2. Rewrite every variable used in each statement in the block. */
- if (bitmap_bit_p (interesting_blocks, bb->index))
+ if (interesting_blocks[bb->index])
{
gcc_checking_assert (bitmap_bit_p (blocks_to_update, bb->index));
for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
@@ -2396,8 +2397,7 @@ pass_build_ssa::execute (function *fun)
/* Initialize the set of interesting blocks. The callback
mark_def_sites will add to this set those blocks that the renamer
should process. */
- interesting_blocks = sbitmap_alloc (last_basic_block_for_fn (fun));
- bitmap_clear (interesting_blocks);
+ interesting_blocks.grow (last_basic_block_for_fn (fun));
/* Initialize dominance frontier. */
dfs = XNEWVEC (bitmap_head, last_basic_block_for_fn (fun));
@@ -2422,7 +2422,7 @@ pass_build_ssa::execute (function *fun)
bitmap_clear (&dfs[bb->index]);
free (dfs);
- sbitmap_free (interesting_blocks);
+ interesting_blocks.clear_and_release ();
fini_ssa_renamer ();
@@ -3399,14 +3399,13 @@ update_ssa (unsigned update_flags)
get_var_info (sym)->info.current_def = NULL_TREE;
/* Now start the renaming process at START_BB. */
- interesting_blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
- bitmap_clear (interesting_blocks);
+ interesting_blocks.grow (last_basic_block_for_fn (cfun));
EXECUTE_IF_SET_IN_BITMAP (blocks_to_update, 0, i, bi)
- bitmap_set_bit (interesting_blocks, i);
+ interesting_blocks[i] = true;
rewrite_blocks (start_bb, REWRITE_UPDATE);
- sbitmap_free (interesting_blocks);
+ interesting_blocks.clear_and_release ();
/* Debugging dumps. */
if (dump_file)
diff --git a/gcc/tree-outof-ssa.c b/gcc/tree-outof-ssa.c
index e6310cdfd3c..f170a249755 100644
--- a/gcc/tree-outof-ssa.c
+++ b/gcc/tree-outof-ssa.c
@@ -22,6 +22,7 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tm.h"
+#include "bitvec.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
@@ -175,7 +176,7 @@ typedef struct _elim_graph {
vec<source_location> edge_locus;
/* Visited vector. */
- sbitmap visited;
+ bitvec visited;
/* Stack for visited nodes. */
vec<int> stack;
@@ -445,7 +446,7 @@ new_elim_graph (int size)
g->edge_locus.create (10);
g->stack.create (30);
- g->visited = sbitmap_alloc (size);
+ new (&g->visited) bitvec (size);
return g;
}
@@ -467,7 +468,7 @@ clear_elim_graph (elim_graph g)
static inline void
delete_elim_graph (elim_graph g)
{
- sbitmap_free (g->visited);
+ g->visited.~bitvec ();
g->stack.release ();
g->edge_list.release ();
g->const_copies.release ();
@@ -662,10 +663,10 @@ elim_forward (elim_graph g, int T)
int S;
source_location locus;
- bitmap_set_bit (g->visited, T);
+ g->visited[T] = true;
FOR_EACH_ELIM_GRAPH_SUCC (g, T, S, locus,
{
- if (!bitmap_bit_p (g->visited, S))
+ if (!g->visited[S])
elim_forward (g, S);
});
g->stack.safe_push (T);
@@ -682,7 +683,7 @@ elim_unvisited_predecessor (elim_graph g, int T)
FOR_EACH_ELIM_GRAPH_PRED (g, T, P, locus,
{
- if (!bitmap_bit_p (g->visited, P))
+ if (!g->visited[P])
return 1;
});
return 0;
@@ -696,10 +697,10 @@ elim_backward (elim_graph g, int T)
int P;
source_location locus;
- bitmap_set_bit (g->visited, T);
+ g->visited[T] = true;
FOR_EACH_ELIM_GRAPH_PRED (g, T, P, locus,
{
- if (!bitmap_bit_p (g->visited, P))
+ if (!g->visited[P])
{
elim_backward (g, P);
insert_partition_copy_on_edge (g->e, P, T, locus);
@@ -741,7 +742,7 @@ elim_create (elim_graph g, int T)
insert_part_to_rtx_on_edge (g->e, U, T, UNKNOWN_LOCATION);
FOR_EACH_ELIM_GRAPH_PRED (g, T, P, locus,
{
- if (!bitmap_bit_p (g->visited, P))
+ if (!g->visited[P])
{
elim_backward (g, P);
insert_rtx_to_part_on_edge (g->e, P, U, unsignedsrcp, locus);
@@ -753,7 +754,7 @@ elim_create (elim_graph g, int T)
S = elim_graph_remove_succ_edge (g, T, &locus);
if (S != -1)
{
- bitmap_set_bit (g->visited, T);
+ g->visited[T] = true;
insert_partition_copy_on_edge (g->e, T, S, locus);
}
}
@@ -782,20 +783,20 @@ eliminate_phi (edge e, elim_graph g)
{
int part;
- bitmap_clear (g->visited);
+ g->visited.clear ();
g->stack.truncate (0);
FOR_EACH_VEC_ELT (g->nodes, x, part)
{
- if (!bitmap_bit_p (g->visited, part))
+ if (!g->visited[part])
elim_forward (g, part);
}
- bitmap_clear (g->visited);
+ g->visited.clear ();
while (g->stack.length () > 0)
{
x = g->stack.pop ();
- if (!bitmap_bit_p (g->visited, x))
+ if (!g->visited[x])
elim_create (g, x);
}
}
diff --git a/gcc/tree-ssa-dce.c b/gcc/tree-ssa-dce.c
index c9cb0e48953..e0b73eee9a3 100644
--- a/gcc/tree-ssa-dce.c
+++ b/gcc/tree-ssa-dce.c
@@ -46,6 +46,7 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tm.h"
+#include "bitvec.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
@@ -118,11 +119,11 @@ static vec<gimple> worklist;
/* Vector indicating an SSA name has already been processed and marked
as necessary. */
-static sbitmap processed;
+static bitvec processed;
/* Vector indicating that the last statement of a basic block has already
been marked as necessary. */
-static sbitmap last_stmt_necessary;
+static bitvec last_stmt_necessary;
/* Vector indicating that BB contains statements that are live. */
static sbitmap bb_contains_live_stmts;
@@ -138,7 +139,7 @@ static control_dependences *cd;
/* Vector indicating that a basic block has already had all the edges
processed that it is control dependent on. */
-static sbitmap visited_control_parents;
+static bitvec visited_control_parents;
/* TRUE if this pass alters the CFG (by removing control statements).
FALSE otherwise.
@@ -185,14 +186,14 @@ mark_operand_necessary (tree op)
gcc_assert (op);
ver = SSA_NAME_VERSION (op);
- if (bitmap_bit_p (processed, ver))
+ if (processed[ver])
{
stmt = SSA_NAME_DEF_STMT (op);
gcc_assert (gimple_nop_p (stmt)
|| gimple_plf (stmt, STMT_NECESSARY));
return;
}
- bitmap_set_bit (processed, ver);
+ processed[ver] = true;
stmt = SSA_NAME_DEF_STMT (op);
gcc_assert (stmt);
@@ -342,7 +343,7 @@ mark_last_stmt_necessary (basic_block bb)
{
gimple stmt = last_stmt (bb);
- bitmap_set_bit (last_stmt_necessary, bb->index);
+ last_stmt_necessary[bb->index] = true;
bitmap_set_bit (bb_contains_live_stmts, bb->index);
/* We actually mark the statement only if it is a control statement. */
@@ -379,12 +380,12 @@ mark_control_dependent_edges_necessary (basic_block bb, bool ignore_self)
continue;
}
- if (!bitmap_bit_p (last_stmt_necessary, cd_bb->index))
+ if (!last_stmt_necessary[cd_bb->index])
mark_last_stmt_necessary (cd_bb);
}
if (!skipped)
- bitmap_set_bit (visited_control_parents, bb->index);
+ visited_control_parents[bb->index] = true;
}
@@ -577,7 +578,7 @@ mark_all_reaching_defs_necessary_1 (ao_ref *ref ATTRIBUTE_UNUSED,
/* We have to skip already visited (and thus necessary) statements
to make the chaining work after we dropped back to simple mode. */
if (chain_ovfl
- && bitmap_bit_p (processed, SSA_NAME_VERSION (vdef)))
+ && processed[SSA_NAME_VERSION (vdef)])
{
gcc_assert (gimple_nop_p (def_stmt)
|| gimple_plf (def_stmt, STMT_NECESSARY));
@@ -673,7 +674,7 @@ propagate_necessity (bool aggressive)
already done so. */
basic_block bb = gimple_bb (stmt);
if (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
- && !bitmap_bit_p (visited_control_parents, bb->index))
+ && !visited_control_parents[bb->index])
mark_control_dependent_edges_necessary (bb, false);
}
@@ -776,12 +777,11 @@ propagate_necessity (bool aggressive)
if (gimple_bb (stmt)
!= get_immediate_dominator (CDI_POST_DOMINATORS, arg_bb))
{
- if (!bitmap_bit_p (last_stmt_necessary, arg_bb->index))
+ if (!last_stmt_necessary[arg_bb->index])
mark_last_stmt_necessary (arg_bb);
}
else if (arg_bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
- && !bitmap_bit_p (visited_control_parents,
- arg_bb->index))
+ && !visited_control_parents[arg_bb->index])
mark_control_dependent_edges_necessary (arg_bb, true);
}
}
@@ -1387,7 +1387,7 @@ eliminate_unnecessary_stmts (void)
{
tree name = USE_FROM_PTR (use_p);
if (!SSA_NAME_IS_DEFAULT_DEF (name)
- && !bitmap_bit_p (processed, SSA_NAME_VERSION (name)))
+ && !processed[SSA_NAME_VERSION (name)])
{
dead = true;
break;
@@ -1410,7 +1410,7 @@ eliminate_unnecessary_stmts (void)
call (); saving one operand. */
if (name
&& TREE_CODE (name) == SSA_NAME
- && !bitmap_bit_p (processed, SSA_NAME_VERSION (name))
+ && !processed[SSA_NAME_VERSION (name)]
/* Avoid doing so for allocation calls which we
did not mark as necessary, it will confuse the
special logic we apply to malloc/free pair removal. */
@@ -1576,14 +1576,12 @@ tree_dce_init (bool aggressive)
if (aggressive)
{
- last_stmt_necessary = sbitmap_alloc (last_basic_block_for_fn (cfun));
- bitmap_clear (last_stmt_necessary);
+ last_stmt_necessary.grow (last_basic_block_for_fn (cfun));
bb_contains_live_stmts = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (bb_contains_live_stmts);
}
- processed = sbitmap_alloc (num_ssa_names + 1);
- bitmap_clear (processed);
+ processed.grow (num_ssa_names + 1);
worklist.create (64);
cfg_altered = false;
@@ -1597,13 +1595,13 @@ tree_dce_done (bool aggressive)
if (aggressive)
{
delete cd;
- sbitmap_free (visited_control_parents);
- sbitmap_free (last_stmt_necessary);
+ visited_control_parents.clear_and_release ();
+ last_stmt_necessary.clear_and_release ();
sbitmap_free (bb_contains_live_stmts);
bb_contains_live_stmts = NULL;
}
- sbitmap_free (processed);
+ processed.clear_and_release ();
worklist.release ();
}
@@ -1644,9 +1642,7 @@ perform_tree_ssa_dce (bool aggressive)
calculate_dominance_info (CDI_POST_DOMINATORS);
cd = new control_dependences (create_edge_list ());
- visited_control_parents =
- sbitmap_alloc (last_basic_block_for_fn (cfun));
- bitmap_clear (visited_control_parents);
+ visited_control_parents.grow (last_basic_block_for_fn (cfun));
mark_dfs_back_edges ();
}
diff --git a/gcc/tree-ssa-live.c b/gcc/tree-ssa-live.c
index df902292596..d19fd4700e6 100644
--- a/gcc/tree-ssa-live.c
+++ b/gcc/tree-ssa-live.c
@@ -37,6 +37,7 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-pretty-print.h"
#include "bitmap.h"
#include "sbitmap.h"
+#include "bitvec.h"
#include "predict.h"
#include "hard-reg-set.h"
#include "function.h"
@@ -1051,7 +1052,7 @@ delete_tree_live_info (tree_live_info_p live)
it each time. */
static void
-loe_visit_block (tree_live_info_p live, basic_block bb, sbitmap visited)
+loe_visit_block (tree_live_info_p live, basic_block bb, bitvec *visited)
{
edge e;
bool change;
@@ -1059,8 +1060,8 @@ loe_visit_block (tree_live_info_p live, basic_block bb, sbitmap visited)
basic_block pred_bb;
bitmap loe;
- gcc_checking_assert (!bitmap_bit_p (visited, bb->index));
- bitmap_set_bit (visited, bb->index);
+ gcc_checking_assert (!(*visited)[bb->index]);
+ (*visited)[bb->index] = true;
loe = live_on_entry (live, bb);
@@ -1078,10 +1079,9 @@ loe_visit_block (tree_live_info_p live, basic_block bb, sbitmap visited)
revisit stack. */
change = bitmap_ior_and_compl_into (live_on_entry (live, pred_bb),
loe, &live->liveout[pred_bb->index]);
- if (change
- && bitmap_bit_p (visited, pred_bb->index))
+ if ((*visited)[pred_bb->index] && change)
{
- bitmap_clear_bit (visited, pred_bb->index);
+ (*visited)[pred_bb->index] = false;
*(live->stack_top)++ = pred_bb->index;
}
}
@@ -1096,23 +1096,19 @@ live_worklist (tree_live_info_p live)
{
unsigned b;
basic_block bb;
- sbitmap visited = sbitmap_alloc (last_basic_block_for_fn (cfun) + 1);
-
- bitmap_clear (visited);
+ stack_bitvec visited (last_basic_block_for_fn (cfun) + 1);
/* Visit all the blocks in reverse order and propagate live on entry values
into the predecessors blocks. */
FOR_EACH_BB_REVERSE_FN (bb, cfun)
- loe_visit_block (live, bb, visited);
+ loe_visit_block (live, bb, &visited);
/* Process any blocks which require further iteration. */
while (live->stack_top != live->work_stack)
{
b = *--(live->stack_top);
- loe_visit_block (live, BASIC_BLOCK_FOR_FN (cfun, b), visited);
+ loe_visit_block (live, BASIC_BLOCK_FOR_FN (cfun, b), &visited);
}
-
- sbitmap_free (visited);
}
diff --git a/gcc/tree-ssa-loop-im.c b/gcc/tree-ssa-loop-im.c
index 9aba79ba776..ffd6d1706d9 100644
--- a/gcc/tree-ssa-loop-im.c
+++ b/gcc/tree-ssa-loop-im.c
@@ -24,6 +24,7 @@ along with GCC; see the file COPYING3. If not see
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
+#include "bitvec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
@@ -2366,7 +2367,7 @@ store_motion (void)
blocks that contain a nonpure call. */
static void
-fill_always_executed_in_1 (struct loop *loop, sbitmap contains_call)
+fill_always_executed_in_1 (struct loop *loop, const bitvec &contains_call)
{
basic_block bb = NULL, *bbs, last = NULL;
unsigned i;
@@ -2385,7 +2386,7 @@ fill_always_executed_in_1 (struct loop *loop, sbitmap contains_call)
if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
last = bb;
- if (bitmap_bit_p (contains_call, bb->index))
+ if (contains_call[bb->index])
break;
FOR_EACH_EDGE (e, ei, bb->succs)
@@ -2435,11 +2436,10 @@ fill_always_executed_in_1 (struct loop *loop, sbitmap contains_call)
static void
fill_always_executed_in (void)
{
- sbitmap contains_call = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ stack_bitvec contains_call (last_basic_block_for_fn (cfun));
basic_block bb;
struct loop *loop;
- bitmap_clear (contains_call);
FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi;
@@ -2450,13 +2450,11 @@ fill_always_executed_in (void)
}
if (!gsi_end_p (gsi))
- bitmap_set_bit (contains_call, bb->index);
+ contains_call[bb->index] = true;
}
for (loop = current_loops->tree_root->inner; loop; loop = loop->next)
fill_always_executed_in_1 (loop, contains_call);
-
- sbitmap_free (contains_call);
}
diff --git a/gcc/tree-ssa-loop-ivcanon.c b/gcc/tree-ssa-loop-ivcanon.c
index 0e1d75d3323..be4eeb34680 100644
--- a/gcc/tree-ssa-loop-ivcanon.c
+++ b/gcc/tree-ssa-loop-ivcanon.c
@@ -38,6 +38,7 @@ along with GCC; see the file COPYING3. If not see
#include "config.h"
#include "system.h"
#include "coretypes.h"
+#include "bitvec.h"
#include "tm.h"
#include "hash-set.h"
#include "machmode.h"
@@ -745,7 +746,6 @@ try_unroll_loop_completely (struct loop *loop,
if (n_unroll)
{
- sbitmap wont_exit;
edge e;
unsigned i;
bool large;
@@ -859,9 +859,9 @@ try_unroll_loop_completely (struct loop *loop,
"loop turned into non-loop; it never loops.\n");
initialize_original_copy_tables ();
- wont_exit = sbitmap_alloc (n_unroll + 1);
- bitmap_ones (wont_exit);
- bitmap_clear_bit (wont_exit, 0);
+ stack_bitvec wont_exit (n_unroll + 1);
+ wont_exit.set ();
+ wont_exit[0] = false;
if (!gimple_duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
n_unroll, wont_exit,
@@ -870,7 +870,6 @@ try_unroll_loop_completely (struct loop *loop,
| DLTHE_FLAG_COMPLETTE_PEEL))
{
free_original_copy_tables ();
- free (wont_exit);
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Failed to duplicate the loop\n");
return false;
@@ -883,7 +882,6 @@ try_unroll_loop_completely (struct loop *loop,
}
to_remove.release ();
- free (wont_exit);
free_original_copy_tables ();
}
@@ -961,7 +959,6 @@ try_peel_loop (struct loop *loop,
int npeel;
struct loop_size size;
int peeled_size;
- sbitmap wont_exit;
unsigned i;
vec<edge> to_remove = vNULL;
edge e;
@@ -1032,9 +1029,9 @@ try_peel_loop (struct loop *loop,
/* Duplicate possibly eliminating the exits. */
initialize_original_copy_tables ();
- wont_exit = sbitmap_alloc (npeel + 1);
- bitmap_ones (wont_exit);
- bitmap_clear_bit (wont_exit, 0);
+ stack_bitvec wont_exit (npeel + 1);
+ wont_exit.set ();
+ wont_exit[0] = false;
if (!gimple_duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
npeel, wont_exit,
exit, &to_remove,
@@ -1042,7 +1039,6 @@ try_peel_loop (struct loop *loop,
| DLTHE_FLAG_COMPLETTE_PEEL))
{
free_original_copy_tables ();
- free (wont_exit);
return false;
}
FOR_EACH_VEC_ELT (to_remove, i, e)
@@ -1050,7 +1046,6 @@ try_peel_loop (struct loop *loop,
bool ok = remove_path (e);
gcc_assert (ok);
}
- free (wont_exit);
free_original_copy_tables ();
if (dump_file && (dump_flags & TDF_DETAILS))
{
diff --git a/gcc/tree-ssa-loop-manip.c b/gcc/tree-ssa-loop-manip.c
index 228fac665d6..1b84d2efdf4 100644
--- a/gcc/tree-ssa-loop-manip.c
+++ b/gcc/tree-ssa-loop-manip.c
@@ -20,6 +20,7 @@ along with GCC; see the file COPYING3. If not see
#include "config.h"
#include "system.h"
#include "coretypes.h"
+#include "bitvec.h"
#include "tm.h"
#include "hash-set.h"
#include "machmode.h"
@@ -769,7 +770,8 @@ copy_phi_node_args (unsigned first_new_block)
bool
gimple_duplicate_loop_to_header_edge (struct loop *loop, edge e,
- unsigned int ndupl, sbitmap wont_exit,
+ unsigned int ndupl,
+ const bitvec &wont_exit,
edge orig, vec<edge> *to_remove,
int flags)
{
@@ -1055,7 +1057,6 @@ tree_transform_and_unroll_loop (struct loop *loop, unsigned factor,
unsigned est_niter, prob_entry, scale_unrolled, scale_rest, freq_e, freq_h;
unsigned new_est_niter, i, prob;
unsigned irr = loop_preheader_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP;
- sbitmap wont_exit;
auto_vec<edge> to_remove;
est_niter = expected_loop_iterations (loop);
@@ -1189,14 +1190,13 @@ tree_transform_and_unroll_loop (struct loop *loop, unsigned factor,
/* Unroll the loop and remove the exits in all iterations except for the
last one. */
- wont_exit = sbitmap_alloc (factor);
- bitmap_ones (wont_exit);
- bitmap_clear_bit (wont_exit, factor - 1);
+ stack_bitvec wont_exit (factor);
+ wont_exit.set ();
+ wont_exit[factor - 1] = false;
ok = gimple_duplicate_loop_to_header_edge
(loop, loop_latch_edge (loop), factor - 1,
wont_exit, new_exit, &to_remove, DLTHE_FLAG_UPDATE_FREQ);
- free (wont_exit);
gcc_assert (ok);
FOR_EACH_VEC_ELT (to_remove, i, e)
diff --git a/gcc/tree-ssa-loop-manip.h b/gcc/tree-ssa-loop-manip.h
index ad0c3815d4f..54d0a5cfd17 100644
--- a/gcc/tree-ssa-loop-manip.h
+++ b/gcc/tree-ssa-loop-manip.h
@@ -20,6 +20,8 @@ along with GCC; see the file COPYING3. If not see
#ifndef GCC_TREE_SSA_LOOP_MANIP_H
#define GCC_TREE_SSA_LOOP_MANIP_H
+class bitvec;
+
typedef void (*transform_callback)(struct loop *, void *);
extern void create_iv (tree, tree, tree, struct loop *, gimple_stmt_iterator *,
@@ -32,7 +34,7 @@ extern basic_block ip_normal_pos (struct loop *);
extern void standard_iv_increment_position (struct loop *,
gimple_stmt_iterator *, bool *);
extern bool gimple_duplicate_loop_to_header_edge (struct loop *, edge,
- unsigned int, sbitmap,
+ unsigned int, const bitvec &,
edge, vec<edge> *,
int);
extern bool can_unroll_loop_p (struct loop *loop, unsigned factor,
diff --git a/gcc/tree-ssa-pre.c b/gcc/tree-ssa-pre.c
index c985e798696..7a84460b5a6 100644
--- a/gcc/tree-ssa-pre.c
+++ b/gcc/tree-ssa-pre.c
@@ -23,6 +23,7 @@ along with GCC; see the file COPYING3. If not see
#include "system.h"
#include "coretypes.h"
#include "tm.h"
+#include "bitvec.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
@@ -2110,12 +2111,12 @@ prune_clobbered_mems (bitmap_set_t set, basic_block block)
}
}
-static sbitmap has_abnormal_preds;
+static bitvec has_abnormal_preds;
/* List of blocks that may have changed during ANTIC computation and
thus need to be iterated over. */
-static sbitmap changed_blocks;
+static bitvec changed_blocks;
/* Compute the ANTIC set for BLOCK.
@@ -2219,12 +2220,12 @@ compute_antic_aux (basic_block block, bool block_has_abnormal_pred_edge)
if (!bitmap_set_equal (old, ANTIC_IN (block)))
{
changed = true;
- bitmap_set_bit (changed_blocks, block->index);
+ changed_blocks[block->index] = true;
FOR_EACH_EDGE (e, ei, block->preds)
- bitmap_set_bit (changed_blocks, e->src->index);
+ changed_blocks[e->src->index] = true;
}
else
- bitmap_clear_bit (changed_blocks, block->index);
+ changed_blocks[block->index] = false;
maybe_dump_sets:
if (dump_file && (dump_flags & TDF_DETAILS))
@@ -2365,12 +2366,12 @@ compute_partial_antic_aux (basic_block block,
if (!bitmap_set_equal (old_PA_IN, PA_IN (block)))
{
changed = true;
- bitmap_set_bit (changed_blocks, block->index);
+ changed_blocks[block->index] = true;
FOR_EACH_EDGE (e, ei, block->preds)
- bitmap_set_bit (changed_blocks, e->src->index);
+ changed_blocks[e->src->index] = true;
}
else
- bitmap_clear_bit (changed_blocks, block->index);
+ changed_blocks[block->index] = false;
maybe_dump_sets:
if (dump_file && (dump_flags & TDF_DETAILS))
@@ -2399,8 +2400,7 @@ compute_antic (void)
/* If any predecessor edges are abnormal, we punt, so antic_in is empty.
We pre-build the map of blocks with incoming abnormal edges here. */
- has_abnormal_preds = sbitmap_alloc (last_basic_block_for_fn (cfun));
- bitmap_clear (has_abnormal_preds);
+ has_abnormal_preds.grow (last_basic_block_for_fn (cfun));
FOR_ALL_BB_FN (block, cfun)
{
@@ -2412,7 +2412,7 @@ compute_antic (void)
e->flags &= ~EDGE_DFS_BACK;
if (e->flags & EDGE_ABNORMAL)
{
- bitmap_set_bit (has_abnormal_preds, block->index);
+ has_abnormal_preds[block->index] = true;
break;
}
}
@@ -2427,8 +2427,8 @@ compute_antic (void)
/* At the exit block we anticipate nothing. */
BB_VISITED (EXIT_BLOCK_PTR_FOR_FN (cfun)) = 1;
- changed_blocks = sbitmap_alloc (last_basic_block_for_fn (cfun) + 1);
- bitmap_ones (changed_blocks);
+ changed_blocks.grow (last_basic_block_for_fn (cfun) + 1);
+ changed_blocks.set ();
while (changed)
{
if (dump_file && (dump_flags & TDF_DETAILS))
@@ -2441,12 +2441,11 @@ compute_antic (void)
changed = false;
for (i = postorder_num - 1; i >= 0; i--)
{
- if (bitmap_bit_p (changed_blocks, postorder[i]))
+ if (changed_blocks[postorder[i]])
{
basic_block block = BASIC_BLOCK_FOR_FN (cfun, postorder[i]);
changed |= compute_antic_aux (block,
- bitmap_bit_p (has_abnormal_preds,
- block->index));
+ has_abnormal_preds[block->index]);
}
}
/* Theoretically possible, but *highly* unlikely. */
@@ -2458,7 +2457,7 @@ compute_antic (void)
if (do_partial_partial)
{
- bitmap_ones (changed_blocks);
+ changed_blocks.set ();
mark_dfs_back_edges ();
num_iterations = 0;
changed = true;
@@ -2470,13 +2469,11 @@ compute_antic (void)
changed = false;
for (i = postorder_num - 1 ; i >= 0; i--)
{
- if (bitmap_bit_p (changed_blocks, postorder[i]))
+ if (changed_blocks[postorder[i]])
{
basic_block block = BASIC_BLOCK_FOR_FN (cfun, postorder[i]);
- changed
- |= compute_partial_antic_aux (block,
- bitmap_bit_p (has_abnormal_preds,
- block->index));
+ changed |= compute_partial_antic_aux
+ (block, has_abnormal_preds[block->index]);
}
}
/* Theoretically possible, but *highly* unlikely. */
@@ -2485,8 +2482,8 @@ compute_antic (void)
statistics_histogram_event (cfun, "compute_partial_antic iterations",
num_iterations);
}
- sbitmap_free (has_abnormal_preds);
- sbitmap_free (changed_blocks);
+ has_abnormal_preds.clear_and_release ();
+ changed_blocks.clear_and_release ();
}
diff --git a/gcc/tree-ssa-propagate.c b/gcc/tree-ssa-propagate.c
index c3f9d3e1cda..1760d018d51 100644
--- a/gcc/tree-ssa-propagate.c
+++ b/gcc/tree-ssa-propagate.c
@@ -22,6 +22,7 @@
#include "system.h"
#include "coretypes.h"
#include "tm.h"
+#include "bitvec.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
@@ -45,7 +46,6 @@
#include "gimple-pretty-print.h"
#include "dumpfile.h"
#include "bitmap.h"
-#include "sbitmap.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
@@ -157,7 +157,7 @@ static ssa_prop_visit_phi_fn ssa_prop_visit_phi;
#define STMT_IN_SSA_EDGE_WORKLIST GF_PLF_1
/* A bitmap to keep track of executable blocks in the CFG. */
-static sbitmap executable_blocks;
+static bitvec executable_blocks;
/* Array of control flow edges on the worklist. */
static vec<basic_block> cfg_blocks;
@@ -166,7 +166,7 @@ static unsigned int cfg_blocks_num = 0;
static int cfg_blocks_tail;
static int cfg_blocks_head;
-static sbitmap bb_in_list;
+static bitvec bb_in_list;
/* Worklist of SSA edges which will need reexamination as their
definition has changed. SSA edges are def-use edges in the SSA
@@ -210,7 +210,7 @@ cfg_blocks_add (basic_block bb)
gcc_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
&& bb != EXIT_BLOCK_PTR_FOR_FN (cfun));
- gcc_assert (!bitmap_bit_p (bb_in_list, bb->index));
+ gcc_assert (!bb_in_list[bb->index]);
if (cfg_blocks_empty_p ())
{
@@ -247,7 +247,7 @@ cfg_blocks_add (basic_block bb)
}
cfg_blocks[head ? cfg_blocks_head : cfg_blocks_tail] = bb;
- bitmap_set_bit (bb_in_list, bb->index);
+ bb_in_list[bb->index] = true;
}
@@ -265,7 +265,7 @@ cfg_blocks_get (void)
cfg_blocks_head = ((cfg_blocks_head + 1) % cfg_blocks.length ());
--cfg_blocks_num;
- bitmap_clear_bit (bb_in_list, bb->index);
+ bb_in_list[bb->index] = false;
return bb;
}
@@ -314,7 +314,7 @@ add_control_edge (edge e)
e->flags |= EDGE_EXECUTABLE;
/* If the block is already in the list, we're done. */
- if (bitmap_bit_p (bb_in_list, bb->index))
+ if (bb_in_list[bb->index])
return;
cfg_blocks_add (bb);
@@ -418,7 +418,7 @@ process_ssa_edge_worklist (vec<gimple> *worklist)
the destination block is executable. Otherwise, visit the
statement only if its block is marked executable. */
if (gimple_code (stmt) == GIMPLE_PHI
- || bitmap_bit_p (executable_blocks, bb->index))
+ || executable_blocks[bb->index])
simulate_stmt (stmt);
}
}
@@ -446,7 +446,7 @@ simulate_block (basic_block block)
/* If this is the first time we've simulated this block, then we
must simulate each of its statements. */
- if (!bitmap_bit_p (executable_blocks, block->index))
+ if (!executable_blocks[block->index])
{
gimple_stmt_iterator j;
unsigned int normal_edge_count;
@@ -454,7 +454,7 @@ simulate_block (basic_block block)
edge_iterator ei;
/* Note that we have simulated this block. */
- bitmap_set_bit (executable_blocks, block->index);
+ executable_blocks[block->index] = true;
for (j = gsi_start_bb (block); !gsi_end_p (j); gsi_next (&j))
{
@@ -514,11 +514,9 @@ ssa_prop_init (void)
interesting_ssa_edges.create (20);
varying_ssa_edges.create (20);
- executable_blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
- bitmap_clear (executable_blocks);
+ executable_blocks.grow (last_basic_block_for_fn (cfun));
- bb_in_list = sbitmap_alloc (last_basic_block_for_fn (cfun));
- bitmap_clear (bb_in_list);
+ bb_in_list.grow (last_basic_block_for_fn (cfun));
if (dump_file && (dump_flags & TDF_DETAILS))
dump_immediate_uses (dump_file);
@@ -557,8 +555,8 @@ ssa_prop_fini (void)
interesting_ssa_edges.release ();
varying_ssa_edges.release ();
cfg_blocks.release ();
- sbitmap_free (bb_in_list);
- sbitmap_free (executable_blocks);
+ bb_in_list.clear_and_release ();
+ executable_blocks.clear_and_release ();
}
diff --git a/gcc/tree-ssa-reassoc.c b/gcc/tree-ssa-reassoc.c
index 77640e5dd53..247c6d5781c 100644
--- a/gcc/tree-ssa-reassoc.c
+++ b/gcc/tree-ssa-reassoc.c
@@ -21,6 +21,7 @@ along with GCC; see the file COPYING3. If not see
#include "config.h"
#include "system.h"
#include "coretypes.h"
+#include "bitvec.h"
#include "hash-table.h"
#include "tm.h"
#include "rtl.h"
@@ -1439,9 +1440,7 @@ undistribute_ops_list (enum tree_code opcode,
unsigned int length = ops->length ();
operand_entry_t oe1;
unsigned i, j;
- sbitmap candidates, candidates2;
unsigned nr_candidates, nr_candidates2;
- sbitmap_iterator sbi0;
vec<operand_entry_t> *subops;
bool changed = false;
int next_oecount_id = 0;
@@ -1451,8 +1450,7 @@ undistribute_ops_list (enum tree_code opcode,
return false;
/* Build a list of candidates to process. */
- candidates = sbitmap_alloc (length);
- bitmap_clear (candidates);
+ stack_bitvec candidates (length);
nr_candidates = 0;
FOR_EACH_VEC_ELT (*ops, i, oe1)
{
@@ -1470,21 +1468,18 @@ undistribute_ops_list (enum tree_code opcode,
|| !is_reassociable_op (oe1def, dcode, loop))
continue;
- bitmap_set_bit (candidates, i);
+ candidates[i] = true;
nr_candidates++;
}
if (nr_candidates < 2)
- {
- sbitmap_free (candidates);
- return false;
- }
+ return false;
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "searching for un-distribute opportunities ");
print_generic_expr (dump_file,
- (*ops)[bitmap_first_set_bit (candidates)]->op, 0);
+ (*ops)[candidates.first_set_bit ()]->op, 0);
fprintf (dump_file, " %d\n", nr_candidates);
}
@@ -1497,40 +1492,41 @@ undistribute_ops_list (enum tree_code opcode,
them. This typedef is needed to workaround that limitation. */
typedef vec<operand_entry_t> vec_operand_entry_t_heap;
subops = XCNEWVEC (vec_operand_entry_t_heap, ops->length ());
- EXECUTE_IF_SET_IN_BITMAP (candidates, 0, i, sbi0)
- {
- gimple oedef;
- enum tree_code oecode;
- unsigned j;
+ for (size_t i = candidates.begin (); i < candidates.end (); i++)
+ if (candidates[i])
+ {
+ gimple oedef;
+ enum tree_code oecode;
+ unsigned j;
- oedef = SSA_NAME_DEF_STMT ((*ops)[i]->op);
- oecode = gimple_assign_rhs_code (oedef);
- linearize_expr_tree (&subops[i], oedef,
- associative_tree_code (oecode), false);
+ oedef = SSA_NAME_DEF_STMT ((*ops)[i]->op);
+ oecode = gimple_assign_rhs_code (oedef);
+ linearize_expr_tree (&subops[i], oedef,
+ associative_tree_code (oecode), false);
- FOR_EACH_VEC_ELT (subops[i], j, oe1)
- {
- oecount c;
- int *slot;
- int idx;
- c.oecode = oecode;
- c.cnt = 1;
- c.id = next_oecount_id++;
- c.op = oe1->op;
- cvec.safe_push (c);
- idx = cvec.length () + 41;
- slot = ctable.find_slot (idx, INSERT);
- if (!*slot)
- {
- *slot = idx;
- }
- else
- {
- cvec.pop ();
- cvec[*slot - 42].cnt++;
- }
- }
- }
+ FOR_EACH_VEC_ELT (subops[i], j, oe1)
+ {
+ oecount c;
+ int *slot;
+ int idx;
+ c.oecode = oecode;
+ c.cnt = 1;
+ c.id = next_oecount_id++;
+ c.op = oe1->op;
+ cvec.safe_push (c);
+ idx = cvec.length () + 41;
+ slot = ctable.find_slot (idx, INSERT);
+ if (!*slot)
+ {
+ *slot = idx;
+ }
+ else
+ {
+ cvec.pop ();
+ cvec[*slot - 42].cnt++;
+ }
+ }
+ }
/* Sort the counting table. */
cvec.qsort (oecount_cmp);
@@ -1550,7 +1546,7 @@ undistribute_ops_list (enum tree_code opcode,
}
/* Process the (operand, code) pairs in order of most occurrence. */
- candidates2 = sbitmap_alloc (length);
+ stack_bitvec candidates2 (length);
while (!cvec.is_empty ())
{
oecount *c = &cvec.last ();
@@ -1559,41 +1555,42 @@ undistribute_ops_list (enum tree_code opcode,
/* Now collect the operands in the outer chain that contain
the common operand in their inner chain. */
- bitmap_clear (candidates2);
+ candidates2.clear ();
nr_candidates2 = 0;
- EXECUTE_IF_SET_IN_BITMAP (candidates, 0, i, sbi0)
- {
- gimple oedef;
- enum tree_code oecode;
- unsigned j;
- tree op = (*ops)[i]->op;
-
- /* If we undistributed in this chain already this may be
- a constant. */
- if (TREE_CODE (op) != SSA_NAME)
- continue;
+ for (size_t i = candidates.begin (); i < candidates.end (); i++)
+ if (candidates[i])
+ {
+ gimple oedef;
+ enum tree_code oecode;
+ unsigned j;
+ tree op = (*ops)[i]->op;
+
+ /* If we undistributed in this chain already this may be
+ a constant. */
+ if (TREE_CODE (op) != SSA_NAME)
+ continue;
- oedef = SSA_NAME_DEF_STMT (op);
- oecode = gimple_assign_rhs_code (oedef);
- if (oecode != c->oecode)
- continue;
+ oedef = SSA_NAME_DEF_STMT (op);
+ oecode = gimple_assign_rhs_code (oedef);
+ if (oecode != c->oecode)
+ continue;
- FOR_EACH_VEC_ELT (subops[i], j, oe1)
- {
- if (oe1->op == c->op)
- {
- bitmap_set_bit (candidates2, i);
- ++nr_candidates2;
- break;
- }
- }
- }
+ FOR_EACH_VEC_ELT (subops[i], j, oe1)
+ {
+ if (oe1->op == c->op)
+ {
+ candidates2[i] = true;
+ ++nr_candidates2;
+ break;
+ }
+ }
+ }
if (nr_candidates2 >= 2)
{
operand_entry_t oe1, oe2;
gimple prod;
- int first = bitmap_first_set_bit (candidates2);
+ ssize_t first = candidates2.first_set_bit ();
/* Build the new addition chain. */
oe1 = (*ops)[first];
@@ -1603,22 +1600,23 @@ undistribute_ops_list (enum tree_code opcode,
print_generic_expr (dump_file, oe1->op, 0);
}
zero_one_operation (&oe1->op, c->oecode, c->op);
- EXECUTE_IF_SET_IN_BITMAP (candidates2, first+1, i, sbi0)
- {
- gimple sum;
- oe2 = (*ops)[i];
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, " + ");
- print_generic_expr (dump_file, oe2->op, 0);
- }
- zero_one_operation (&oe2->op, c->oecode, c->op);
- sum = build_and_add_sum (TREE_TYPE (oe1->op),
- oe1->op, oe2->op, opcode);
- oe2->op = build_zero_cst (TREE_TYPE (oe2->op));
- oe2->rank = 0;
- oe1->op = gimple_get_lhs (sum);
- }
+ for (size_t i = first + 1; i < candidates2.end (); i++)
+ if (candidates2[i])
+ {
+ gimple sum;
+ oe2 = (*ops)[i];
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, " + ");
+ print_generic_expr (dump_file, oe2->op, 0);
+ }
+ zero_one_operation (&oe2->op, c->oecode, c->op);
+ sum = build_and_add_sum (TREE_TYPE (oe1->op),
+ oe1->op, oe2->op, opcode);
+ oe2->op = build_zero_cst (TREE_TYPE (oe2->op));
+ oe2->rank = 0;
+ oe1->op = gimple_get_lhs (sum);
+ }
/* Apply the multiplication/division. */
prod = build_and_add_sum (TREE_TYPE (oe1->op),
@@ -1646,8 +1644,6 @@ undistribute_ops_list (enum tree_code opcode,
subops[i].release ();
free (subops);
cvec.release ();
- sbitmap_free (candidates);
- sbitmap_free (candidates2);
return changed;
}
diff --git a/gcc/tree-ssa-structalias.c b/gcc/tree-ssa-structalias.c
index fd0f5359dd7..9feb0ac965f 100644
--- a/gcc/tree-ssa-structalias.c
+++ b/gcc/tree-ssa-structalias.c
@@ -24,6 +24,7 @@
#include "tm.h"
#include "obstack.h"
#include "bitmap.h"
+#include "bitvec.h"
#include "sbitmap.h"
#include "flags.h"
#include "predict.h"
@@ -618,7 +619,7 @@ struct constraint_graph
/* Bitmap of nodes where the bit is set if the node is a direct
node. Used for variable substitution. */
- sbitmap direct_nodes;
+ bitvec direct_nodes;
/* Bitmap of nodes where the bit is set if the node is address
taken. Used for variable substitution. */
@@ -1254,14 +1255,13 @@ build_pred_graph (void)
graph->pointed_by = XCNEWVEC (bitmap, graph->size);
graph->points_to = XCNEWVEC (bitmap, graph->size);
graph->eq_rep = XNEWVEC (int, graph->size);
- graph->direct_nodes = sbitmap_alloc (graph->size);
+ new (&graph->direct_nodes) bitvec (graph->size);
graph->address_taken = BITMAP_ALLOC (&predbitmap_obstack);
- bitmap_clear (graph->direct_nodes);
for (j = 1; j < FIRST_REF_NODE; j++)
{
if (!get_varinfo (j)->is_special_var)
- bitmap_set_bit (graph->direct_nodes, j);
+ graph->direct_nodes[j] = true;
}
for (j = 0; j < graph->size; j++)
@@ -1289,7 +1289,7 @@ build_pred_graph (void)
if (rhs.offset == 0 && lhs.offset == 0 && lhs.type == SCALAR)
add_pred_graph_edge (graph, lhsvar, FIRST_REF_NODE + rhsvar);
else
- bitmap_clear_bit (graph->direct_nodes, lhsvar);
+ graph->direct_nodes[lhsvar] = false;
}
else if (rhs.type == ADDRESSOF)
{
@@ -1308,14 +1308,14 @@ build_pred_graph (void)
add_implicit_graph_edge (graph, FIRST_REF_NODE + lhsvar, rhsvar);
/* All related variables are no longer direct nodes. */
- bitmap_clear_bit (graph->direct_nodes, rhsvar);
+ graph->direct_nodes[rhsvar] = false;
v = get_varinfo (rhsvar);
if (!v->is_full_var)
{
v = get_varinfo (v->head);
do
{
- bitmap_clear_bit (graph->direct_nodes, v->id);
+ graph->direct_nodes[v->id] = false;
v = vi_next (v);
}
while (v != NULL);
@@ -1334,9 +1334,9 @@ build_pred_graph (void)
else if (lhs.offset != 0 || rhs.offset != 0)
{
if (rhs.offset != 0)
- bitmap_clear_bit (graph->direct_nodes, lhs.var);
+ graph->direct_nodes[lhs.var] = false;
else if (lhs.offset != 0)
- bitmap_clear_bit (graph->direct_nodes, rhs.var);
+ graph->direct_nodes[rhs.var] = false;
}
}
}
@@ -1392,7 +1392,7 @@ build_succ_graph (void)
t = find (storedanything_id);
for (i = integer_id + 1; i < FIRST_REF_NODE; ++i)
{
- if (!bitmap_bit_p (graph->direct_nodes, i)
+ if (!graph->direct_nodes[i]
&& get_varinfo (i)->may_have_pointers)
add_graph_edge (graph, find (i), t);
}
@@ -1409,8 +1409,8 @@ static bitmap changed;
struct scc_info
{
- sbitmap visited;
- sbitmap deleted;
+ bitvec visited;
+ bitvec deleted;
unsigned int *dfs;
unsigned int *node_mapping;
int current_index;
@@ -1436,7 +1436,7 @@ scc_visit (constraint_graph_t graph, struct scc_info *si, unsigned int n)
bitmap_iterator bi;
unsigned int my_dfs;
- bitmap_set_bit (si->visited, n);
+ si->visited[n] = true;
si->dfs[n] = si->current_index ++;
my_dfs = si->dfs[n];
@@ -1449,10 +1449,10 @@ scc_visit (constraint_graph_t graph, struct scc_info *si, unsigned int n)
break;
w = find (i);
- if (bitmap_bit_p (si->deleted, w))
+ if (si->deleted[w])
continue;
- if (!bitmap_bit_p (si->visited, w))
+ if (!si->visited[w])
scc_visit (graph, si, w);
unsigned int t = find (w);
@@ -1500,7 +1500,7 @@ scc_visit (constraint_graph_t graph, struct scc_info *si, unsigned int n)
}
}
}
- bitmap_set_bit (si->deleted, n);
+ si->deleted[n] = true;
}
else
si->scc_stack.safe_push (n);
@@ -1566,8 +1566,8 @@ unify_nodes (constraint_graph_t graph, unsigned int to, unsigned int from,
struct topo_info
{
- /* sbitmap of visited nodes. */
- sbitmap visited;
+ /* bitvec of visited nodes. */
+ bitvec visited;
/* Array that stores the topological order of the graph, *in
reverse*. */
vec<unsigned> topo_order;
@@ -1581,8 +1581,7 @@ init_topo_info (void)
{
size_t size = graph->size;
struct topo_info *ti = XNEW (struct topo_info);
- ti->visited = sbitmap_alloc (size);
- bitmap_clear (ti->visited);
+ new (&ti->visited) bitvec (size);
ti->topo_order.create (1);
return ti;
}
@@ -1593,7 +1592,7 @@ init_topo_info (void)
static void
free_topo_info (struct topo_info *ti)
{
- sbitmap_free (ti->visited);
+ ti->visited.~bitvec ();
ti->topo_order.release ();
free (ti);
}
@@ -1608,12 +1607,12 @@ topo_visit (constraint_graph_t graph, struct topo_info *ti,
bitmap_iterator bi;
unsigned int j;
- bitmap_set_bit (ti->visited, n);
+ ti->visited[n] = true;
if (graph->succs[n])
EXECUTE_IF_SET_IN_BITMAP (graph->succs[n], 0, j, bi)
{
- if (!bitmap_bit_p (ti->visited, j))
+ if (!ti->visited[j])
topo_visit (graph, ti, j);
}
@@ -1861,10 +1860,8 @@ init_scc_info (size_t size)
size_t i;
si->current_index = 0;
- si->visited = sbitmap_alloc (size);
- bitmap_clear (si->visited);
- si->deleted = sbitmap_alloc (size);
- bitmap_clear (si->deleted);
+ new (&si->visited) bitvec (size);
+ new (&si->deleted) bitvec (size);
si->node_mapping = XNEWVEC (unsigned int, size);
si->dfs = XCNEWVEC (unsigned int, size);
@@ -1880,8 +1877,8 @@ init_scc_info (size_t size)
static void
free_scc_info (struct scc_info *si)
{
- sbitmap_free (si->visited);
- sbitmap_free (si->deleted);
+ si->visited.~bitvec ();
+ si->deleted.~bitvec ();
free (si->node_mapping);
free (si->dfs);
si->scc_stack.release ();
@@ -1904,7 +1901,7 @@ find_indirect_cycles (constraint_graph_t graph)
struct scc_info *si = init_scc_info (size);
for (i = 0; i < MIN (LAST_REF_NODE, size); i ++ )
- if (!bitmap_bit_p (si->visited, i) && find (i) == i)
+ if (!si->visited[i] && find (i) == i)
scc_visit (graph, si, i);
free_scc_info (si);
@@ -1921,7 +1918,7 @@ compute_topo_order (constraint_graph_t graph,
unsigned int size = graph->size;
for (i = 0; i != size; ++i)
- if (!bitmap_bit_p (ti->visited, i) && find (i) == i)
+ if (!ti->visited[i] && find (i) == i)
topo_visit (graph, ti, i);
}
@@ -2056,7 +2053,7 @@ condense_visit (constraint_graph_t graph, struct scc_info *si, unsigned int n)
unsigned int my_dfs;
gcc_checking_assert (si->node_mapping[n] == n);
- bitmap_set_bit (si->visited, n);
+ si->visited[n] = true;
si->dfs[n] = si->current_index ++;
my_dfs = si->dfs[n];
@@ -2065,10 +2062,10 @@ condense_visit (constraint_graph_t graph, struct scc_info *si, unsigned int n)
{
unsigned int w = si->node_mapping[i];
- if (bitmap_bit_p (si->deleted, w))
+ if (si->deleted[w])
continue;
- if (!bitmap_bit_p (si->visited, w))
+ if (!si->visited[w])
condense_visit (graph, si, w);
unsigned int t = si->node_mapping[w];
@@ -2082,10 +2079,10 @@ condense_visit (constraint_graph_t graph, struct scc_info *si, unsigned int n)
{
unsigned int w = si->node_mapping[i];
- if (bitmap_bit_p (si->deleted, w))
+ if (si->deleted[w])
continue;
- if (!bitmap_bit_p (si->visited, w))
+ if (!si->visited[w])
condense_visit (graph, si, w);
unsigned int t = si->node_mapping[w];
@@ -2103,8 +2100,8 @@ condense_visit (constraint_graph_t graph, struct scc_info *si, unsigned int n)
unsigned int w = si->scc_stack.pop ();
si->node_mapping[w] = n;
- if (!bitmap_bit_p (graph->direct_nodes, w))
- bitmap_clear_bit (graph->direct_nodes, n);
+ if (!graph->direct_nodes[w])
+ graph->direct_nodes[n] = false;
/* Unify our nodes. */
if (graph->preds[w])
@@ -2128,7 +2125,7 @@ condense_visit (constraint_graph_t graph, struct scc_info *si, unsigned int n)
graph->points_to[w]);
}
}
- bitmap_set_bit (si->deleted, n);
+ si->deleted[n] = true;
}
else
si->scc_stack.safe_push (n);
@@ -2159,14 +2156,14 @@ label_visit (constraint_graph_t graph, struct scc_info *si, unsigned int n)
unsigned int i, first_pred;
bitmap_iterator bi;
- bitmap_set_bit (si->visited, n);
+ si->visited[n] = true;
/* Label and union our incoming edges's points to sets. */
first_pred = -1U;
EXECUTE_IF_IN_NONNULL_BITMAP (graph->preds[n], 0, i, bi)
{
unsigned int w = si->node_mapping[i];
- if (!bitmap_bit_p (si->visited, w))
+ if (!si->visited[w])
label_visit (graph, si, w);
/* Skip unused edges */
@@ -2193,7 +2190,7 @@ label_visit (constraint_graph_t graph, struct scc_info *si, unsigned int n)
}
/* Indirect nodes get fresh variables and a new pointer equiv class. */
- if (!bitmap_bit_p (graph->direct_nodes, n))
+ if (!graph->direct_nodes[n])
{
if (!graph->points_to[n])
{
@@ -2329,7 +2326,7 @@ perform_var_substitution (constraint_graph_t graph)
/* Condense the nodes, which means to find SCC's, count incoming
predecessors, and unite nodes in SCC's. */
for (i = 1; i < FIRST_REF_NODE; i++)
- if (!bitmap_bit_p (si->visited, si->node_mapping[i]))
+ if (!si->visited[si->node_mapping[i]])
condense_visit (graph, si, si->node_mapping[i]);
if (dump_file && (dump_flags & TDF_GRAPH))
@@ -2340,10 +2337,10 @@ perform_var_substitution (constraint_graph_t graph)
fprintf (dump_file, "\n\n");
}
- bitmap_clear (si->visited);
+ si->visited.clear ();
/* Actually the label the nodes for pointer equivalences */
for (i = 1; i < FIRST_REF_NODE; i++)
- if (!bitmap_bit_p (si->visited, si->node_mapping[i]))
+ if (!si->visited[si->node_mapping[i]])
label_visit (graph, si, si->node_mapping[i]);
/* Calculate location equivalence labels. */
@@ -2391,7 +2388,7 @@ perform_var_substitution (constraint_graph_t graph)
if (j != i)
{
fprintf (dump_file, "%s node id %d ",
- bitmap_bit_p (graph->direct_nodes, i)
+ graph->direct_nodes[i]
? "Direct" : "Indirect", i);
if (i < FIRST_REF_NODE)
fprintf (dump_file, "\"%s\"", get_varinfo (i)->name);
@@ -2409,7 +2406,7 @@ perform_var_substitution (constraint_graph_t graph)
{
fprintf (dump_file,
"Equivalence classes for %s node id %d ",
- bitmap_bit_p (graph->direct_nodes, i)
+ graph->direct_nodes[i]
? "direct" : "indirect", i);
if (i < FIRST_REF_NODE)
fprintf (dump_file, "\"%s\"", get_varinfo (i)->name);
@@ -2454,7 +2451,7 @@ free_var_substitution_info (struct scc_info *si)
free (graph->pointed_by);
free (graph->points_to);
free (graph->eq_rep);
- sbitmap_free (graph->direct_nodes);
+ graph->direct_nodes.~bitvec ();
delete pointer_equiv_class_table;
pointer_equiv_class_table = NULL;
delete location_equiv_class_table;
diff --git a/gcc/tree-stdarg.c b/gcc/tree-stdarg.c
index 0c70790f3eb..327afa22704 100644
--- a/gcc/tree-stdarg.c
+++ b/gcc/tree-stdarg.c
@@ -57,6 +57,7 @@ along with GCC; see the file COPYING3. If not see
#include "stringpool.h"
#include "tree-ssanames.h"
#include "sbitmap.h"
+#include "bitvec.h"
#include "tree-pass.h"
#include "tree-stdarg.h"
@@ -78,7 +79,6 @@ reachable_at_most_once (basic_block va_arg_bb, basic_block va_start_bb)
vec<edge> stack = vNULL;
edge e;
edge_iterator ei;
- sbitmap visited;
bool ret;
if (va_arg_bb == va_start_bb)
@@ -87,8 +87,7 @@ reachable_at_most_once (basic_block va_arg_bb, basic_block va_start_bb)
if (! dominated_by_p (CDI_DOMINATORS, va_arg_bb, va_start_bb))
return false;
- visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
- bitmap_clear (visited);
+ stack_bitvec visited (last_basic_block_for_fn (cfun));
ret = true;
FOR_EACH_EDGE (e, ei, va_arg_bb->preds)
@@ -119,16 +118,15 @@ reachable_at_most_once (basic_block va_arg_bb, basic_block va_start_bb)
gcc_assert (src != ENTRY_BLOCK_PTR_FOR_FN (cfun));
- if (! bitmap_bit_p (visited, src->index))
+ if (! visited[src->index])
{
- bitmap_set_bit (visited, src->index);
+ visited[src->index] = true;
FOR_EACH_EDGE (e, ei, src->preds)
stack.safe_push (e);
}
}
stack.release ();
- sbitmap_free (visited);
return ret;
}
diff --git a/gcc/tree-vect-slp.c b/gcc/tree-vect-slp.c
index 73ab24e8f3b..b0ef53d1d33 100644
--- a/gcc/tree-vect-slp.c
+++ b/gcc/tree-vect-slp.c
@@ -24,6 +24,7 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "dumpfile.h"
#include "tm.h"
+#include "bitvec.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
@@ -1194,7 +1195,6 @@ vect_supported_load_permutation_p (slp_instance slp_instn)
{
unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_instn);
unsigned int i, j, k, next;
- sbitmap load_index;
slp_tree node;
gimple stmt, load, next_load, first_load;
struct data_reference *dr;
@@ -1228,6 +1228,8 @@ vect_supported_load_permutation_p (slp_instance slp_instn)
node = SLP_INSTANCE_TREE (slp_instn);
stmt = SLP_TREE_SCALAR_STMTS (node)[0];
+ stack_bitvec load_index (group_size);
+
/* Reduction (there are no data-refs in the root).
In reduction chain the order of the loads is important. */
if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))
@@ -1252,24 +1254,15 @@ vect_supported_load_permutation_p (slp_instance slp_instn)
/* Check that the loads in the first sequence are different and there
are no gaps between them. */
- load_index = sbitmap_alloc (group_size);
- bitmap_clear (load_index);
FOR_EACH_VEC_ELT (node->load_permutation, i, lidx)
{
- if (bitmap_bit_p (load_index, lidx))
- {
- sbitmap_free (load_index);
- return false;
- }
- bitmap_set_bit (load_index, lidx);
+ if (load_index[lidx])
+ return false;
+ load_index[lidx] = true;
}
for (i = 0; i < group_size; i++)
- if (!bitmap_bit_p (load_index, i))
- {
- sbitmap_free (load_index);
- return false;
- }
- sbitmap_free (load_index);
+ if (!load_index[i])
+ return false;
/* This permutation is valid for reduction. Since the order of the
statements in the nodes is not important unless they are memory
@@ -1343,31 +1336,20 @@ vect_supported_load_permutation_p (slp_instance slp_instn)
if (!node->load_permutation.exists ())
return false;
- load_index = sbitmap_alloc (group_size);
- bitmap_clear (load_index);
+ load_index.clear ();
FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
{
unsigned int lidx = node->load_permutation[0];
- if (bitmap_bit_p (load_index, lidx))
- {
- sbitmap_free (load_index);
- return false;
- }
- bitmap_set_bit (load_index, lidx);
+ if (load_index[lidx])
+ return false;
+ load_index[lidx] = true;
FOR_EACH_VEC_ELT (node->load_permutation, j, k)
if (k != lidx)
- {
- sbitmap_free (load_index);
- return false;
- }
+ return false;
}
for (i = 0; i < group_size; i++)
- if (!bitmap_bit_p (load_index, i))
- {
- sbitmap_free (load_index);
- return false;
- }
- sbitmap_free (load_index);
+ if (!load_index[i])
+ return false;
FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
if (node->load_permutation.exists ()
diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
index 9556ede6247..443c2e84d06 100644
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -21,6 +21,7 @@ along with GCC; see the file COPYING3. If not see
#include "config.h"
#include "system.h"
#include "coretypes.h"
+#include "bitvec.h"
#include "tm.h"
#include "flags.h"
#include "hash-set.h"
@@ -124,15 +125,15 @@ typedef struct value_range_d value_range_t;
/* Set of SSA names found live during the RPO traversal of the function
for still active basic-blocks. */
-static sbitmap *live;
+static bitvec *live;
/* Return true if the SSA name NAME is live on the edge E. */
static bool
live_on_edge (edge e, tree name)
{
- return (live[e->dest->index]
- && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
+ return (live[e->dest->index].size ()
+ && live[e->dest->index][SSA_NAME_VERSION (name)]);
}
/* Local functions. */
@@ -6120,7 +6121,7 @@ find_switch_asserts (basic_block bb, gswitch *last)
P_4 will receive an ASSERT_EXPR. */
static void
-find_assert_locations_1 (basic_block bb, sbitmap live)
+find_assert_locations_1 (basic_block bb, bitvec *live)
{
gimple last;
@@ -6163,7 +6164,7 @@ find_assert_locations_1 (basic_block bb, sbitmap live)
/* If op is not live beyond this stmt, do not bother to insert
asserts for it. */
- if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
+ if (!(*live)[SSA_NAME_VERSION (op)])
continue;
/* If OP is used in such a way that we can infer a value
@@ -6208,9 +6209,9 @@ find_assert_locations_1 (basic_block bb, sbitmap live)
/* Update live. */
FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
- bitmap_set_bit (live, SSA_NAME_VERSION (op));
+ (*live)[SSA_NAME_VERSION (op)] = true;
FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
- bitmap_clear_bit (live, SSA_NAME_VERSION (op));
+ (*live)[SSA_NAME_VERSION (op)] = false;
}
/* Traverse all PHI nodes in BB, updating live. */
@@ -6229,10 +6230,10 @@ find_assert_locations_1 (basic_block bb, sbitmap live)
{
tree arg = USE_FROM_PTR (arg_p);
if (TREE_CODE (arg) == SSA_NAME)
- bitmap_set_bit (live, SSA_NAME_VERSION (arg));
+ (*live)[SSA_NAME_VERSION (arg)] = true;
}
- bitmap_clear_bit (live, SSA_NAME_VERSION (res));
+ (*live)[SSA_NAME_VERSION (res)] = false;
}
}
@@ -6247,7 +6248,7 @@ find_assert_locations (void)
int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
int rpo_cnt, i;
- live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
+ live = new bitvec[last_basic_block_for_fn (cfun)];
rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
for (i = 0; i < rpo_cnt; ++i)
bb_rpo[rpo[i]] = i;
@@ -6269,12 +6270,10 @@ find_assert_locations (void)
tree arg = gimple_phi_arg_def (phi, j);
if (TREE_CODE (arg) == SSA_NAME)
{
- if (live[i] == NULL)
- {
- live[i] = sbitmap_alloc (num_ssa_names);
- bitmap_clear (live[i]);
- }
- bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
+ if (live[i].size () == 0)
+ live[i].grow (num_ssa_names);
+
+ live[i][SSA_NAME_VERSION (arg)] = true;
}
}
}
@@ -6285,18 +6284,15 @@ find_assert_locations (void)
edge e;
edge_iterator ei;
- if (!live[rpo[i]])
- {
- live[rpo[i]] = sbitmap_alloc (num_ssa_names);
- bitmap_clear (live[rpo[i]]);
- }
+ if (!live[rpo[i]].size ())
+ live[rpo[i]].grow (num_ssa_names);
/* Process BB and update the live information with uses in
this block. */
- find_assert_locations_1 (bb, live[rpo[i]]);
+ find_assert_locations_1 (bb, &live[rpo[i]]);
/* Merge liveness into the predecessor blocks and free it. */
- if (!bitmap_empty_p (live[rpo[i]]))
+ if (live[rpo[i]].any_set_bits ())
{
int pred_rpo = i;
FOR_EACH_EDGE (e, ei, bb->preds)
@@ -6305,12 +6301,10 @@ find_assert_locations (void)
if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
continue;
- if (!live[pred])
- {
- live[pred] = sbitmap_alloc (num_ssa_names);
- bitmap_clear (live[pred]);
- }
- bitmap_ior (live[pred], live[pred], live[rpo[i]]);
+ if (!live[pred].size ())
+ live[pred].grow (num_ssa_names);
+
+ live[pred] |= live[rpo[i]];
if (bb_rpo[pred] < pred_rpo)
pred_rpo = bb_rpo[pred];
@@ -6321,29 +6315,19 @@ find_assert_locations (void)
last_rpo[rpo[i]] = pred_rpo;
}
else
- {
- sbitmap_free (live[rpo[i]]);
- live[rpo[i]] = NULL;
- }
+ live[rpo[i]].clear_and_release ();
/* We can free all successors live bitmaps if all their
predecessors have been visited already. */
FOR_EACH_EDGE (e, ei, bb->succs)
- if (last_rpo[e->dest->index] == i
- && live[e->dest->index])
- {
- sbitmap_free (live[e->dest->index]);
- live[e->dest->index] = NULL;
- }
+ if (last_rpo[e->dest->index] == i)
+ live[e->dest->index].clear_and_release ();
}
XDELETEVEC (rpo);
XDELETEVEC (bb_rpo);
XDELETEVEC (last_rpo);
- for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
- if (live[i])
- sbitmap_free (live[i]);
- XDELETEVEC (live);
+ delete[] live;
}
/* Create an ASSERT_EXPR for NAME and insert it in the location
diff --git a/gcc/var-tracking.c b/gcc/var-tracking.c
index 18eff20355f..9cdf0fe1da9 100644
--- a/gcc/var-tracking.c
+++ b/gcc/var-tracking.c
@@ -90,6 +90,7 @@
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
+#include "bitvec.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
@@ -6985,7 +6986,7 @@ vt_find_locations (void)
bb_heap_t *worklist = new bb_heap_t (LONG_MIN);
bb_heap_t *pending = new bb_heap_t (LONG_MIN);
bb_heap_t *fibheap_swap = NULL;
- sbitmap visited, in_worklist, in_pending, sbitmap_swap;
+ sbitmap in_worklist, in_pending, sbitmap_swap;
basic_block bb;
edge e;
int *bb_order;
@@ -7005,7 +7006,7 @@ vt_find_locations (void)
bb_order[rc_order[i]] = i;
free (rc_order);
- visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ stack_bitvec visited (last_basic_block_for_fn (cfun));
in_worklist = sbitmap_alloc (last_basic_block_for_fn (cfun));
in_pending = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (in_worklist);
@@ -7023,20 +7024,20 @@ vt_find_locations (void)
in_pending = in_worklist;
in_worklist = sbitmap_swap;
- bitmap_clear (visited);
+ visited.clear ();
while (!worklist->empty ())
{
bb = worklist->extract_min ();
bitmap_clear_bit (in_worklist, bb->index);
- gcc_assert (!bitmap_bit_p (visited, bb->index));
- if (!bitmap_bit_p (visited, bb->index))
+ gcc_assert (!visited[bb->index]);
+ if (!visited[bb->index])
{
bool changed;
edge_iterator ei;
int oldinsz, oldoutsz;
- bitmap_set_bit (visited, bb->index);
+ visited[bb->index] = true;
if (VTI (bb)->in.vars)
{
@@ -7128,7 +7129,7 @@ vt_find_locations (void)
if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
- if (bitmap_bit_p (visited, e->dest->index))
+ if (visited[e->dest->index])
{
if (!bitmap_bit_p (in_pending, e->dest->index))
{
@@ -7177,7 +7178,6 @@ vt_find_locations (void)
free (bb_order);
delete worklist;
delete pending;
- sbitmap_free (visited);
sbitmap_free (in_worklist);
sbitmap_free (in_pending);