author     bernds <bernds@138bc75d-0d04-0410-961f-82ee72b054a4>  2011-10-21 13:35:44 +0000
committer  bernds <bernds@138bc75d-0d04-0410-961f-82ee72b054a4>  2011-10-21 13:35:44 +0000
commit     effd16405e093ae6782c9e932205d0272ae4b386 (patch)
tree       1c0a8e176ba35a8459d7c1186a3754909d829c2b /gcc/haifa-sched.c
parent     285f2354dc334dac2c0068737cca7d6fa58b1898 (diff)
* reg-notes.def (DEP_CONTROL): New.
* sched-ebb.c (add_deps_for_risky_insns): Add a REG_DEP_CONTROL
  when not doing speculation.
* rtlanal.c (record_hard_reg_sets, find_all_hard_reg_sets,
  record_hard_reg_uses_1, record_hard_reg_uses): New functions.
* function.c (record_hard_reg_sets, record_hard_reg_uses,
  record_hard_reg_uses_1): Remove; move to rtlanal.c.
* lists.c (copy_INSN_LIST, concat_INSN_LIST): New functions.
* haifa-sched.c: Swap includes of "rtl.h" and "hard-reg-set.h".
  (MUST_RECOMPUTE_SPEC_P): New macro.
  (real_insn_for_shadow): New function.
  (cond_clobbered_p, recompute_todo_spec, check_clobbered_conditions,
  toggle_cancelled_flags): New static functions.
  (schedule_insn): Relax an assert to only check for empty hard back
  dependencies.  Skip cancelled dependencies.  Call
  check_clobbered_conditions.
  (copy_insn_list): Remove function, renamed and moved to lists.c.
  (save_backtrack_point): Use new spelling copy_INSN_LIST.
  (unschedule_insns_until): Ensure TODO_SPEC is reset properly.
  (restore_last_backtrack_point): Likewise.  Call
  toggle_cancelled_flags.
  (estimate_insn_tick): Ignore cancelled dependencies.
  (haifa_speculate_insn): Move declaration.
  (try_ready): Move code into recompute_todo_spec and call it.  Tweak
  some asserts.  Ensure predicated patterns are restored if necessary.
  Dump DEP_CONTROL flag.
  (haifa_change_pattern): Merge with sched_change_pattern.
  (sched_change_pattern): Remove function.
* sched-deps.c (NON_FLUSH_JUMP_KIND, NON_FLUSH_JUMP): Remove.  All
  uses changed to simply not test NON_FLUSH_JUMP_P.
  (ds_to_dk, dk_to_ds, dump_dep, ds_to_dt, dump_ds, check_dep): Handle
  REG_DEP_CONTROL.
  (dep_spec_p): If DO_PREDICATION, REG_DEP_CONTROL is speculative.
  (reg_pending_control_uses, control_dependency_cache): New static
  variables.
  (sched_get_reverse_condition_uncached): New function.
  (sd_find_dep_between): Remove pointless assert.  Look in
  control_dependency_cache.
  (ask_dependency_caches, set_dependency_caches, sd_delete_dep,
  extend_dependency_caches, sched_deps_finish): Handle REG_DEP_CONTROL
  and control_dependency_cache.
  (sd_unresolve_dep): Use dep_spec_p.
  (add_dependence): Now a wrapper around add_dependence_1, handling
  REG_DEP_CONTROL specially.
  (flush_pending_lists): Clear pending_jump_insns.
  (sched_analyze_1): Handle pending_jump_insns like a memory flush.
  (sched_analyze_2): Unconditionally add to pending memory flushes,
  keep previous behaviour but apply it to pending_jump_insns instead.
  (sched_analyze_insn): Defer adding jump reg dependencies using
  reg_pending_control_uses; add them to the control_uses list.  Handle
  pending_jump_insns and control_uses when adding dependence lists.
  (deps_analyze_insn): Update INSN_COND_DEPS.
  (deps_analyze_insn): Add jumps to pending_jump_insns rather than
  last_pending_memory_flush.
  (init_deps): Initialize pending_jump_insns.
  (free_deps): Free control_uses.
  (remove_from_deps): Remove from pending_jump_insns.
  (init_deps_global): Allocate reg_pending_control_uses.
  (finish_deps_global): Free it.
  (add_dependence_1): Renamed from add_dependence.  Handle
  REG_DEP_CONTROL.
* rtl.h (record_hard_reg_uses, find_all_hard_reg_sets): Declare.
  (copy_INSN_LIST, concat_INSN_LIST): Declare.
* sched-int.h (struct deps_reg): Add control_uses.
  (struct deps_desc): Add pending_jump_insns.
  (struct _haifa_deps_insn_data): Add cond_deps.
  (struct _haifa_insn_data): Add must_recompute_spec and
  predicated_pat.
  (INSN_COND_DEPS, PREDICATED_PAT): New macros.
  (BITS_PER_DEP_WEAK): Adjust for two extra bits in the word.
  (DEP_CONTROL): New macro.
  (DEP_TYPES): Include it.
  (HARD_DEP): Adjust definition.
  (DEP_CANCELLED): New macro.
  (enum SCHED_FLAGS): Add DO_PREDICATION.
  (sched_get_reverse_condition_uncached, real_insn_for_shadow):
  Declare.
* sched-rgn.c (concat_INSN_LIST): Remove function.
  (deps_join): Handle pending_jump_insns.
  (free_pending_lists): Likewise.
* config/c6x/c6x.c (c6x_set_sched_flags): Set DO_PREDICATION for
  final schedule.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@180302 138bc75d-0d04-0410-961f-82ee72b054a4
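The heart of this change: when the scheduler runs with DO_PREDICATION, a REG_DEP_CONTROL dependence on a conditional jump need not block the consumer.  The consumer can be wrapped in a COND_EXEC guarded by the reverse of the branch condition, and the dependence is then marked DEP_CANCELLED rather than deleted, so backtracking can later undo the predication.  The toy program below (not GCC code; all types and names are simplified stand-ins) models just that cancelled-bit bookkeeping:

/* Toy model of breaking a control dependency by predication: the
   consumer is guarded by the reverse branch condition, so its control
   dep is marked cancelled instead of removed.  */
#include <stdio.h>

enum { TOY_DEP_CONTROL = 1, TOY_DEP_CANCELLED = 2 };

struct toy_dep { int status; };

/* Predicate the consumer: cancel its control dependency.  */
static void
predicate_consumer (struct toy_dep *d)
{
  if (d->status & TOY_DEP_CONTROL)
    d->status |= TOY_DEP_CANCELLED;
}

/* The consumer is schedulable once every remaining dep is cancelled.  */
static int
ready_p (struct toy_dep *deps, int n)
{
  for (int i = 0; i < n; i++)
    if (!(deps[i].status & TOY_DEP_CANCELLED))
      return 0;
  return 1;
}

int
main (void)
{
  struct toy_dep d = { TOY_DEP_CONTROL };
  predicate_consumer (&d);
  printf ("ready: %d\n", ready_p (&d, 1));  /* prints "ready: 1" */
  return 0;
}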
Diffstat (limited to 'gcc/haifa-sched.c')
-rw-r--r--  gcc/haifa-sched.c  479
1 file changed, 381 insertions, 98 deletions
diff --git a/gcc/haifa-sched.c b/gcc/haifa-sched.c
index a5b4aebefee..d87a608b901 100644
--- a/gcc/haifa-sched.c
+++ b/gcc/haifa-sched.c
@@ -129,9 +129,9 @@ along with GCC; see the file COPYING3. If not see
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
+#include "hard-reg-set.h"
#include "rtl.h"
#include "tm_p.h"
-#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
@@ -213,6 +213,7 @@ struct common_sched_info_def *common_sched_info;
#define INTER_TICK(INSN) (HID (INSN)->inter_tick)
#define FEEDS_BACKTRACK_INSN(INSN) (HID (INSN)->feeds_backtrack_insn)
#define SHADOW_P(INSN) (HID (INSN)->shadow_p)
+#define MUST_RECOMPUTE_SPEC_P(INSN) (HID (INSN)->must_recompute_spec)
/* If INSN_TICK of an instruction is equal to INVALID_TICK,
then it should be recalculated from scratch. */
@@ -706,6 +707,24 @@ record_delay_slot_pair (rtx i1, rtx i2, int cycles, int stages)
*slot = p;
}
+/* Examine the delay pair hashtable to see if INSN is a shadow for another,
+ and return the other insn if so. Return NULL otherwise. */
+rtx
+real_insn_for_shadow (rtx insn)
+{
+ struct delay_pair *pair;
+
+ if (delay_htab == NULL)
+ return NULL_RTX;
+
+ pair
+ = (struct delay_pair *)htab_find_with_hash (delay_htab_i2, insn,
+ htab_hash_pointer (insn));
+ if (!pair || pair->stages > 0)
+ return NULL_RTX;
+ return pair->i1;
+}
+
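real_insn_for_shadow consults delay_htab_i2, the hash table keyed by the second insn of each recorded delay pair; pairs with stages > 0 come from modulo scheduling and are deliberately excluded.  A minimal sketch of the same lookup, replacing the libiberty hash table with a linear scan over toy stand-in types:

/* Toy sketch of the shadow lookup, using a linear scan in place of
   the delay_htab_i2 hash table.  Types here are simplified stand-ins,
   not the scheduler's real delay_pair.  */
struct toy_pair { void *i1, *i2; int cycles, stages; };

static void *
toy_real_insn_for_shadow (struct toy_pair *pairs, int n, void *insn)
{
  for (int i = 0; i < n; i++)
    if (pairs[i].i2 == insn && pairs[i].stages == 0)
      return pairs[i].i1;   /* the real insn this shadow stands for */
  return 0;                 /* not a shadow, or a modulo-sched pair */
}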
/* For a pair P of insns, return the fixed distance in cycles from the first
insn after which the second must be scheduled. */
static int
@@ -820,6 +839,7 @@ static void change_queue_index (rtx, int);
static void extend_h_i_d (void);
static void init_h_i_d (rtx);
+static int haifa_speculate_insn (rtx, ds_t, rtx *);
static void generate_recovery_code (rtx);
static void process_insn_forw_deps_be_in_spec (rtx, rtx, ds_t);
static void begin_speculative_block (rtx);
@@ -827,7 +847,7 @@ static void add_to_speculative_block (rtx);
static void init_before_recovery (basic_block *);
static void create_check_block_twin (rtx, bool);
static void fix_recovery_deps (basic_block);
-static void haifa_change_pattern (rtx, rtx);
+static bool haifa_change_pattern (rtx, rtx);
static void dump_new_block_header (int, basic_block, rtx, rtx);
static void restore_bb_notes (basic_block);
static void fix_jump_move (rtx);
@@ -1056,7 +1076,178 @@ print_curr_reg_pressure (void)
}
fprintf (sched_dump, "\n");
}
+
+/* Determine if INSN has a condition that is clobbered if a register
+ in SET_REGS is modified. */
+static bool
+cond_clobbered_p (rtx insn, HARD_REG_SET set_regs)
+{
+ rtx pat = PATTERN (insn);
+ gcc_assert (GET_CODE (pat) == COND_EXEC);
+ if (TEST_HARD_REG_BIT (set_regs, REGNO (XEXP (COND_EXEC_TEST (pat), 0))))
+ {
+ sd_iterator_def sd_it;
+ dep_t dep;
+ haifa_change_pattern (insn, ORIG_PAT (insn));
+ FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
+ DEP_STATUS (dep) &= ~DEP_CANCELLED;
+ TODO_SPEC (insn) = HARD_DEP;
+ if (sched_verbose >= 2)
+ fprintf (sched_dump,
+ ";;\t\tdequeue insn %s because of clobbered condition\n",
+ (*current_sched_info->print_insn) (insn, 0));
+ return true;
+ }
+
+ return false;
+}
+
+/* Look at the remaining dependencies for insn NEXT, and compute and return
+ the TODO_SPEC value we should use for it. This is called after one of
+ NEXT's dependencies has been resolved. */
+
+static ds_t
+recompute_todo_spec (rtx next)
+{
+ ds_t new_ds;
+ sd_iterator_def sd_it;
+ dep_t dep, control_dep = NULL;
+ int n_spec = 0;
+ int n_control = 0;
+ bool first_p = true;
+
+ if (sd_lists_empty_p (next, SD_LIST_BACK))
+ /* NEXT has all its dependencies resolved. */
+ return 0;
+
+ if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
+ return HARD_DEP;
+
+ /* Now we've got NEXT with speculative deps only.
+ 1. Look at the deps to see what we have to do.
+ 2. Check if we can do 'todo'. */
+ new_ds = 0;
+
+ FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
+ {
+ ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
+
+ if (DEBUG_INSN_P (DEP_PRO (dep)) && !DEBUG_INSN_P (next))
+ continue;
+
+ if (ds)
+ {
+ n_spec++;
+ if (first_p)
+ {
+ first_p = false;
+
+ new_ds = ds;
+ }
+ else
+ new_ds = ds_merge (new_ds, ds);
+ }
+ if (DEP_TYPE (dep) == REG_DEP_CONTROL)
+ {
+ n_control++;
+ control_dep = dep;
+ DEP_STATUS (dep) &= ~DEP_CANCELLED;
+ }
+ }
+
+ if (n_control == 1 && n_spec == 0)
+ {
+ rtx pro, other, new_pat;
+ rtx cond = NULL_RTX;
+ bool success;
+ rtx prev = NULL_RTX;
+ int i;
+ unsigned regno;
+
+ if ((current_sched_info->flags & DO_PREDICATION) == 0
+ || (ORIG_PAT (next) != NULL_RTX
+ && PREDICATED_PAT (next) == NULL_RTX))
+ return HARD_DEP;
+
+ pro = DEP_PRO (control_dep);
+ other = real_insn_for_shadow (pro);
+ if (other != NULL_RTX)
+ pro = other;
+
+ cond = sched_get_reverse_condition_uncached (pro);
+ regno = REGNO (XEXP (cond, 0));
+
+ /* Find the last scheduled insn that modifies the condition register.
+ If we have a true dependency on it, it sets it to the correct value,
+ otherwise it must be a later insn scheduled in-between that clobbers
+ the condition. */
+ FOR_EACH_VEC_ELT_REVERSE (rtx, scheduled_insns, i, prev)
+ {
+ sd_iterator_def sd_it;
+ dep_t dep;
+ HARD_REG_SET t;
+ bool found;
+
+ find_all_hard_reg_sets (prev, &t);
+ if (!TEST_HARD_REG_BIT (t, regno))
+ continue;
+
+ found = false;
+ FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
+ {
+ if (DEP_PRO (dep) == prev && DEP_TYPE (dep) == REG_DEP_TRUE)
+ {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ return HARD_DEP;
+ break;
+ }
+ if (ORIG_PAT (next) == NULL_RTX)
+ {
+ ORIG_PAT (next) = PATTERN (next);
+ new_pat = gen_rtx_COND_EXEC (VOIDmode, cond, PATTERN (next));
+ success = haifa_change_pattern (next, new_pat);
+ if (!success)
+ return HARD_DEP;
+ PREDICATED_PAT (next) = new_pat;
+ }
+ else if (PATTERN (next) != PREDICATED_PAT (next))
+ {
+ bool success = haifa_change_pattern (next,
+ PREDICATED_PAT (next));
+ gcc_assert (success);
+ }
+ DEP_STATUS (control_dep) |= DEP_CANCELLED;
+ return DEP_CONTROL;
+ }
+
+ if (PREDICATED_PAT (next) != NULL_RTX)
+ {
+ int tick = INSN_TICK (next);
+ bool success = haifa_change_pattern (next,
+ ORIG_PAT (next));
+ INSN_TICK (next) = tick;
+ gcc_assert (success);
+ }
+
+ /* We can't handle the case where there are both speculative and control
+ dependencies, so we return HARD_DEP in such a case. Also fail if
+ we have speculative dependencies with not enough points, or more than
+ one control dependency. */
+ if ((n_spec > 0 && n_control > 0)
+ || (n_spec > 0
+ /* Too few points? */
+ && ds_weak (new_ds) < spec_info->data_weakness_cutoff)
+ || (n_control > 1))
+ return HARD_DEP;
+
+ return new_ds;
+}
+
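recompute_todo_spec boils down to a small decision tree: nothing left resolves to 0, any hard back dependency to HARD_DEP, a single control dependency with no speculation is broken by predication and yields DEP_CONTROL, and mixed or too-weak speculation falls back to HARD_DEP.  A condensed, self-contained rendering of that tree (toy enum values; predication_ok stands in for the DO_PREDICATION flag and the condition-register scan above):

enum { TOY_NONE, TOY_SPECULATIVE, TOY_HARD_DEP, TOY_DEP_CONTROL };

/* Condensed decision tree of recompute_todo_spec; plain counts replace
   the scheduler's dependence lists.  */
static int
toy_todo_spec (int n_hard, int n_spec, int n_control,
	       int weakness, int cutoff, int predication_ok)
{
  if (n_hard == 0 && n_spec == 0 && n_control == 0)
    return TOY_NONE;                  /* all deps resolved */
  if (n_hard > 0)
    return TOY_HARD_DEP;              /* hard back deps remain */
  if (n_control == 1 && n_spec == 0)  /* sole control dep: predicate */
    return predication_ok ? TOY_DEP_CONTROL : TOY_HARD_DEP;
  if ((n_spec > 0 && n_control > 0)   /* mixed kinds: give up */
      || (n_spec > 0 && weakness < cutoff)  /* too few points */
      || n_control > 1)
    return TOY_HARD_DEP;
  return TOY_SPECULATIVE;             /* merged speculative status */
}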
/* Pointer to the last instruction scheduled. */
static rtx last_scheduled_insn;
@@ -1963,6 +2154,51 @@ sched_setup_bb_reg_pressure_info (basic_block bb, rtx after)
setup_insn_max_reg_pressure (after, false);
}
+/* If doing predication while scheduling, verify whether INSN, which
+ has just been scheduled, clobbers the conditions of any
+ instructions that must be predicated in order to break their
+ dependencies. If so, remove them from the queues so that they will
+ only be scheduled once their control dependency is resolved. */
+
+static void
+check_clobbered_conditions (rtx insn)
+{
+ HARD_REG_SET t;
+ int i;
+
+ if ((current_sched_info->flags & DO_PREDICATION) == 0)
+ return;
+
+ find_all_hard_reg_sets (insn, &t);
+
+ restart:
+ for (i = 0; i < ready.n_ready; i++)
+ {
+ rtx x = ready_element (&ready, i);
+ if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
+ {
+ ready_remove_insn (x);
+ goto restart;
+ }
+ }
+ for (i = 0; i <= max_insn_queue_index; i++)
+ {
+ rtx link;
+ int q = NEXT_Q_AFTER (q_ptr, i);
+
+ restart_queue:
+ for (link = insn_queue[q]; link; link = XEXP (link, 1))
+ {
+ rtx x = XEXP (link, 0);
+ if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
+ {
+ queue_remove (x);
+ goto restart_queue;
+ }
+ }
+ }
+}
+
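Both loops above use the restart-on-removal idiom: ready_remove_insn and queue_remove invalidate the traversal in progress, so the scan simply starts over from the top.  The same idiom on a plain array, as a self-contained toy (removal order differs; here the last element is swapped into the hole):

/* The restart-on-removal idiom on a plain array: removing an element
   shifts the remaining ones, so the scan restarts from scratch.  */
static void
toy_remove_matching (int *q, int *n, int bad)
{
 restart:
  for (int i = 0; i < *n; i++)
    if (q[i] == bad)
      {
	q[i] = q[--*n];     /* unordered removal: swap in the last */
	goto restart;
      }
}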
/* A structure that holds local state for the loop in schedule_block. */
struct sched_block_state
{
@@ -2023,7 +2259,7 @@ schedule_insn (rtx insn)
/* Scheduling instruction should have all its dependencies resolved and
should have been removed from the ready list. */
- gcc_assert (sd_lists_empty_p (insn, SD_LIST_BACK));
+ gcc_assert (sd_lists_empty_p (insn, SD_LIST_HARD_BACK));
/* Reset debug insns invalidated by moving this insn. */
if (MAY_HAVE_DEBUG_INSNS && !DEBUG_INSN_P (insn))
@@ -2033,6 +2269,12 @@ schedule_insn (rtx insn)
rtx dbg = DEP_PRO (dep);
struct reg_use_data *use, *next;
+ if (DEP_STATUS (dep) & DEP_CANCELLED)
+ {
+ sd_iterator_next (&sd_it);
+ continue;
+ }
+
gcc_assert (DEBUG_INSN_P (dbg));
if (sched_verbose >= 6)
@@ -2086,17 +2328,36 @@ schedule_insn (rtx insn)
INSN_TICK untouched. This is a machine-dependent issue, actually. */
INSN_TICK (insn) = clock_var;
+ check_clobbered_conditions (insn);
+
/* Update dependent instructions. */
for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
sd_iterator_cond (&sd_it, &dep);)
{
rtx next = DEP_CON (dep);
+ bool cancelled = (DEP_STATUS (dep) & DEP_CANCELLED) != 0;
/* Resolve the dependence between INSN and NEXT.
sd_resolve_dep () moves current dep to another list thus
advancing the iterator. */
sd_resolve_dep (sd_it);
+ if (cancelled)
+ {
+ if (QUEUE_INDEX (next) != QUEUE_SCHEDULED)
+ {
+ int tick = INSN_TICK (next);
+ gcc_assert (ORIG_PAT (next) != NULL_RTX);
+ haifa_change_pattern (next, ORIG_PAT (next));
+ INSN_TICK (next) = tick;
+ if (sd_lists_empty_p (next, SD_LIST_BACK))
+ TODO_SPEC (next) = 0;
+ else if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
+ TODO_SPEC (next) = HARD_DEP;
+ }
+ continue;
+ }
+
/* Don't bother trying to mark next as ready if insn is a debug
insn. If insn is the last hard dependency, it will have
already been discounted. */
@@ -2270,24 +2531,6 @@ mark_backtrack_feeds (rtx insn, int set_p)
}
}
-/* Make a copy of the INSN_LIST list LINK and return it. */
-static rtx
-copy_insn_list (rtx link)
-{
- rtx new_queue;
- rtx *pqueue = &new_queue;
-
- for (; link; link = XEXP (link, 1))
- {
- rtx x = XEXP (link, 0);
- rtx newlink = alloc_INSN_LIST (x, NULL);
- *pqueue = newlink;
- pqueue = &XEXP (newlink, 1);
- }
- *pqueue = NULL_RTX;
- return new_queue;
-}
-
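The deleted copy_insn_list lives on in lists.c as copy_INSN_LIST.  Its pointer-to-pointer tail (pqueue) is worth noting: the copy is built front-to-back in a single pass, with no reversal step.  A generic version of the same idiom on a toy list type:

#include <stdlib.h>

/* The pointer-to-pointer idiom from copy_insn_list, on a toy list:
   pqueue always points at the link that should receive the next node,
   so the copy comes out in order without a reversal pass.  */
struct toy_node { int value; struct toy_node *next; };

static struct toy_node *
toy_copy_list (const struct toy_node *link)
{
  struct toy_node *new_head;
  struct toy_node **pqueue = &new_head;

  for (; link; link = link->next)
    {
      struct toy_node *n = malloc (sizeof *n);
      n->value = link->value;
      *pqueue = n;
      pqueue = &n->next;
    }
  *pqueue = NULL;
  return new_head;
}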
/* Save the current scheduler state so that we can backtrack to it
later if necessary. PAIR gives the insns that make it necessary to
save this point. SCHED_BLOCK is the local state of schedule_block
@@ -2314,7 +2557,7 @@ save_backtrack_point (struct delay_pair *pair,
for (i = 0; i <= max_insn_queue_index; i++)
{
int q = NEXT_Q_AFTER (q_ptr, i);
- save->insn_queue[i] = copy_insn_list (insn_queue[q]);
+ save->insn_queue[i] = copy_INSN_LIST (insn_queue[q]);
}
save->clock_var = clock_var;
@@ -2351,6 +2594,49 @@ save_backtrack_point (struct delay_pair *pair,
}
}
+/* Walk the ready list and all queues. If any insns have unresolved backwards
+ dependencies, these must be cancelled deps, broken by predication. Set or
+ clear (depending on SET) the DEP_CANCELLED bit in DEP_STATUS. */
+
+static void
+toggle_cancelled_flags (bool set)
+{
+ int i;
+ sd_iterator_def sd_it;
+ dep_t dep;
+
+ if (ready.n_ready > 0)
+ {
+ rtx *first = ready_lastpos (&ready);
+ for (i = 0; i < ready.n_ready; i++)
+ FOR_EACH_DEP (first[i], SD_LIST_BACK, sd_it, dep)
+ if (!DEBUG_INSN_P (DEP_PRO (dep)))
+ {
+ if (set)
+ DEP_STATUS (dep) |= DEP_CANCELLED;
+ else
+ DEP_STATUS (dep) &= ~DEP_CANCELLED;
+ }
+ }
+ for (i = 0; i <= max_insn_queue_index; i++)
+ {
+ int q = NEXT_Q_AFTER (q_ptr, i);
+ rtx link;
+ for (link = insn_queue[q]; link; link = XEXP (link, 1))
+ {
+ rtx insn = XEXP (link, 0);
+ FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
+ if (!DEBUG_INSN_P (DEP_PRO (dep)))
+ {
+ if (set)
+ DEP_STATUS (dep) |= DEP_CANCELLED;
+ else
+ DEP_STATUS (dep) &= ~DEP_CANCELLED;
+ }
+ }
+ }
+}
+
/* Pop entries from the SCHEDULED_INSNS vector up to and including INSN.
Restore their dependencies to an unresolved state, and mark them as
queued nowhere. */
@@ -2358,6 +2644,12 @@ save_backtrack_point (struct delay_pair *pair,
static void
unschedule_insns_until (rtx insn)
{
+ VEC (rtx, heap) *recompute_vec;
+
+ recompute_vec = VEC_alloc (rtx, heap, 0);
+
+ /* Make two passes over the insns to be unscheduled. First, we clear out
+ dependencies and other trivial bookkeeping. */
for (;;)
{
rtx last;
@@ -2379,14 +2671,40 @@ unschedule_insns_until (rtx insn)
sd_iterator_cond (&sd_it, &dep);)
{
rtx con = DEP_CON (dep);
- TODO_SPEC (con) = HARD_DEP;
- INSN_TICK (con) = INVALID_TICK;
sd_unresolve_dep (sd_it);
+ if (!MUST_RECOMPUTE_SPEC_P (con))
+ {
+ MUST_RECOMPUTE_SPEC_P (con) = 1;
+ VEC_safe_push (rtx, heap, recompute_vec, con);
+ }
}
if (last == insn)
break;
}
+
+ /* A second pass, to update ready and speculation status for insns
+ depending on the unscheduled ones. The first pass must have
+ popped the scheduled_insns vector up to the point where we
+ restart scheduling, as recompute_todo_spec requires it to be
+ up-to-date. */
+ while (!VEC_empty (rtx, recompute_vec))
+ {
+ rtx con;
+
+ con = VEC_pop (rtx, recompute_vec);
+ MUST_RECOMPUTE_SPEC_P (con) = 0;
+ if (!sd_lists_empty_p (con, SD_LIST_HARD_BACK))
+ {
+ TODO_SPEC (con) = HARD_DEP;
+ INSN_TICK (con) = INVALID_TICK;
+ if (PREDICATED_PAT (con) != NULL_RTX)
+ haifa_change_pattern (con, ORIG_PAT (con));
+ }
+ else if (QUEUE_INDEX (con) != QUEUE_SCHEDULED)
+ TODO_SPEC (con) = recompute_todo_spec (con);
+ }
+ VEC_free (rtx, heap, recompute_vec);
}
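The rewritten function makes two passes, and the worklist in between is deduplicated by a per-insn flag (MUST_RECOMPUTE_SPEC_P), so each consumer is pushed at most once no matter how many of its producers were unscheduled.  A self-contained toy of that flag-guarded worklist pattern (fixed-size array; a real version would grow dynamically):

/* Flag-deduplicated worklist: each item is pushed at most once,
   guarded by its flag, then processed in a second pass once the
   surrounding data structures are consistent again.  */
#define TOY_MAX_ITEMS 64

struct toy_item { int flag; };

static struct toy_item *toy_worklist[TOY_MAX_ITEMS];
static int toy_n_work;

static void
toy_push_once (struct toy_item *it)
{
  if (!it->flag)
    {
      it->flag = 1;
      toy_worklist[toy_n_work++] = it;
    }
}

static void
toy_drain (void (*recompute) (struct toy_item *))
{
  while (toy_n_work > 0)
    {
      struct toy_item *it = toy_worklist[--toy_n_work];
      it->flag = 0;
      recompute (it);
    }
}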
/* Restore scheduler state from the topmost entry on the backtracking queue.
@@ -2396,7 +2714,6 @@ unschedule_insns_until (rtx insn)
static void
restore_last_backtrack_point (struct sched_block_state *psched_block)
-
{
rtx link;
int i;
@@ -2420,8 +2737,9 @@ restore_last_backtrack_point (struct sched_block_state *psched_block)
rtx *first = ready_lastpos (&ready);
for (i = 0; i < ready.n_ready; i++)
{
- QUEUE_INDEX (first[i]) = QUEUE_NOWHERE;
- INSN_TICK (first[i]) = INVALID_TICK;
+ rtx insn = first[i];
+ QUEUE_INDEX (insn) = QUEUE_NOWHERE;
+ INSN_TICK (insn) = INVALID_TICK;
}
}
for (i = 0; i <= max_insn_queue_index; i++)
@@ -2445,8 +2763,10 @@ restore_last_backtrack_point (struct sched_block_state *psched_block)
rtx *first = ready_lastpos (&ready);
for (i = 0; i < ready.n_ready; i++)
{
- QUEUE_INDEX (first[i]) = QUEUE_READY;
- INSN_TICK (first[i]) = save->clock_var;
+ rtx insn = first[i];
+ QUEUE_INDEX (insn) = QUEUE_READY;
+ TODO_SPEC (insn) = recompute_todo_spec (insn);
+ INSN_TICK (insn) = save->clock_var;
}
}
@@ -2462,11 +2782,14 @@ restore_last_backtrack_point (struct sched_block_state *psched_block)
{
rtx x = XEXP (link, 0);
QUEUE_INDEX (x) = i;
+ TODO_SPEC (x) = recompute_todo_spec (x);
INSN_TICK (x) = save->clock_var + i;
}
}
free (save->insn_queue);
+ toggle_cancelled_flags (true);
+
clock_var = save->clock_var;
last_clock_var = save->last_clock_var;
cycle_issued_insns = save->cycle_issued_insns;
@@ -2547,6 +2870,9 @@ estimate_insn_tick (bitmap processed, rtx insn, int budget)
rtx pro = DEP_PRO (dep);
int t;
+ if (DEP_STATUS (dep) & DEP_CANCELLED)
+ continue;
+
if (QUEUE_INDEX (pro) == QUEUE_SCHEDULED)
gcc_assert (INSN_TICK (pro) + dep_cost (dep) <= INSN_TICK (insn));
else
@@ -4217,6 +4543,7 @@ schedule_block (basic_block *target_bb)
gcc_assert (failed);
failed_insn = failed->delay_pair->i1;
+ toggle_cancelled_flags (false);
unschedule_insns_until (failed_insn);
while (failed != backtrack_queue)
free_topmost_backtrack_point (true);
@@ -4732,8 +5059,6 @@ fix_inter_tick (rtx head, rtx tail)
bitmap_clear (&processed);
}
-static int haifa_speculate_insn (rtx, ds_t, rtx *);
-
/* Check if NEXT is ready to be added to the ready or queue list.
If "yes", add it to the proper list.
Returns:
@@ -4747,57 +5072,15 @@ try_ready (rtx next)
old_ts = TODO_SPEC (next);
- gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
+ gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP | DEP_CONTROL))
&& ((old_ts & HARD_DEP)
- || (old_ts & SPECULATIVE)));
-
- if (sd_lists_empty_p (next, SD_LIST_BACK))
- /* NEXT has all its dependencies resolved. */
- new_ts = 0;
- else
- {
- /* One of the NEXT's dependencies has been resolved.
- Recalculate NEXT's status. */
-
- if (!sd_lists_empty_p (next, SD_LIST_HARD_BACK))
- new_ts = HARD_DEP;
- else
- /* Now we've got NEXT with speculative deps only.
- 1. Look at the deps to see what we have to do.
- 2. Check if we can do 'todo'. */
- {
- sd_iterator_def sd_it;
- dep_t dep;
- bool first_p = true;
-
- new_ts = 0;
-
- FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
- {
- ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
+ || (old_ts & SPECULATIVE)
+ || (old_ts & DEP_CONTROL)));
- if (DEBUG_INSN_P (DEP_PRO (dep))
- && !DEBUG_INSN_P (next))
- continue;
-
- if (first_p)
- {
- first_p = false;
-
- new_ts = ds;
- }
- else
- new_ts = ds_merge (new_ts, ds);
- }
-
- if (ds_weak (new_ts) < spec_info->data_weakness_cutoff)
- /* Too few points. */
- new_ts = HARD_DEP;
- }
- }
+ new_ts = recompute_todo_spec (next);
if (new_ts & HARD_DEP)
- gcc_assert (new_ts == HARD_DEP && new_ts == old_ts
+ gcc_assert (new_ts == old_ts
&& QUEUE_INDEX (next) == QUEUE_NOWHERE);
else if (current_sched_info->new_ready)
new_ts = current_sched_info->new_ready (next, new_ts);
@@ -4820,7 +5103,7 @@ try_ready (rtx next)
int res;
rtx new_pat;
- gcc_assert (!(new_ts & ~SPECULATIVE));
+ gcc_assert ((new_ts & SPECULATIVE) && !(new_ts & ~SPECULATIVE));
res = haifa_speculate_insn (next, new_ts, &new_pat);
@@ -4846,7 +5129,8 @@ try_ready (rtx next)
save it. */
ORIG_PAT (next) = PATTERN (next);
- haifa_change_pattern (next, new_pat);
+ res = haifa_change_pattern (next, new_pat);
+ gcc_assert (res);
break;
default:
@@ -4871,16 +5155,19 @@ try_ready (rtx next)
/*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
change_queue_index (next, QUEUE_NOWHERE);
+
return -1;
}
else if (!(new_ts & BEGIN_SPEC)
- && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
+ && ORIG_PAT (next) && PREDICATED_PAT (next) == NULL_RTX
+ && !IS_SPECULATION_CHECK_P (next))
/* We should change pattern of every previously speculative
instruction - and we determine if NEXT was speculative by using
ORIG_PAT field. Except one case - speculation checks have ORIG_PAT
pat too, so skip them. */
{
- haifa_change_pattern (next, ORIG_PAT (next));
+ bool success = haifa_change_pattern (next, ORIG_PAT (next));
+ gcc_assert (success);
ORIG_PAT (next) = 0;
}
@@ -4898,7 +5185,8 @@ try_ready (rtx next)
if (new_ts & BE_IN_CONTROL)
fprintf (spec_info->dump, "; in-control-spec;");
}
-
+ if (TODO_SPEC (next) & DEP_CONTROL)
+ fprintf (sched_dump, " predicated");
fprintf (sched_dump, "\n");
}
@@ -5874,38 +6162,33 @@ fix_recovery_deps (basic_block rec)
add_jump_dependencies (insn, jump);
}
-/* Change pattern of INSN to NEW_PAT. */
-void
-sched_change_pattern (rtx insn, rtx new_pat)
+/* Change pattern of INSN to NEW_PAT. Invalidate cached haifa
+ instruction data. */
+static bool
+haifa_change_pattern (rtx insn, rtx new_pat)
{
sd_iterator_def sd_it;
dep_t dep;
int t;
t = validate_change (insn, &PATTERN (insn), new_pat, 0);
- gcc_assert (t);
+ if (!t)
+ return false;
dfa_clear_single_insn_cache (insn);
- for (sd_it = sd_iterator_start (insn, (SD_LIST_FORW | SD_LIST_BACK
- | SD_LIST_RES_BACK));
- sd_iterator_cond (&sd_it, &dep);)
+ sd_it = sd_iterator_start (insn,
+ SD_LIST_FORW | SD_LIST_BACK | SD_LIST_RES_BACK);
+ while (sd_iterator_cond (&sd_it, &dep))
{
DEP_COST (dep) = UNKNOWN_DEP_COST;
sd_iterator_next (&sd_it);
}
-}
-
-/* Change pattern of INSN to NEW_PAT. Invalidate cached haifa
- instruction data. */
-static void
-haifa_change_pattern (rtx insn, rtx new_pat)
-{
- sched_change_pattern (insn, new_pat);
/* Invalidate INSN_COST, so it'll be recalculated. */
INSN_COST (insn) = -1;
/* Invalidate INSN_TICK, so it'll be recalculated. */
INSN_TICK (insn) = INVALID_TICK;
+ return true;
}
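With the merge, pattern changes report failure instead of asserting: predication may build a COND_EXEC the target cannot recognize, and recompute_todo_spec falls back to HARD_DEP in that case, while callers restoring a known-good ORIG_PAT assert success.  A tiny sketch of that try/assert split with stand-in types:

#include <assert.h>

/* Sketch of the try/assert split: the worker reports failure, and
   only callers that know the change must succeed assert on it.  */
static int
toy_change_pattern (int *pat, int new_pat, int (*accepts) (int))
{
  if (!accepts (new_pat))     /* stand-in for validate_change */
    return 0;                 /* caller may fall back to HARD_DEP */
  *pat = new_pat;
  /* ...a real version would also flush cached costs and ticks...  */
  return 1;
}

static int
toy_accept_all (int pat)
{
  (void) pat;
  return 1;
}

static void
toy_restore_original (int *pat, int orig)
{
  int ok = toy_change_pattern (pat, orig, toy_accept_all);
  assert (ok);                /* restoring the original cannot fail */
}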
/* -1 - can't speculate,