Diffstat (limited to 'gcc/haifa-sched.c')
-rw-r--r-- | gcc/haifa-sched.c | 354
1 file changed, 177 insertions, 177 deletions
diff --git a/gcc/haifa-sched.c b/gcc/haifa-sched.c
index 1f05a7231f6..204fab6aff9 100644
--- a/gcc/haifa-sched.c
+++ b/gcc/haifa-sched.c
@@ -185,7 +185,7 @@ fix_sched_param (const char *param, const char *val)
     warning (0, "fix_sched_param: unknown param: %s", param);
 }
 
-/* This is a placeholder for the scheduler parameters common 
+/* This is a placeholder for the scheduler parameters common
    to all schedulers.  */
 struct common_sched_info_def *common_sched_info;
 
@@ -295,7 +295,7 @@ static int q_size = 0;
    queue or ready list.
    QUEUE_READY - INSN is in ready list.
    N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles.  */
-   
+
 #define QUEUE_INDEX(INSN) (HID (INSN)->queue_index)
 
 /* The following variable value refers for all current and future
@@ -336,7 +336,7 @@ static int may_trap_exp (const_rtx, int);
 static int haifa_luid_for_non_insn (rtx x);
 
 /* Haifa version of sched_info hooks common to all headers.  */
-const struct common_sched_info_def haifa_common_sched_info = 
+const struct common_sched_info_def haifa_common_sched_info =
   {
     NULL, /* fix_recovery_cfg */
     NULL, /* add_block */
@@ -726,7 +726,7 @@ initiate_bb_reg_pressure_info (basic_block bb)
     for (i = 0; ; ++i)
       {
         unsigned int regno = EH_RETURN_DATA_REGNO (i);
-        
+
         if (regno == INVALID_REGNUM)
           break;
         if (! bitmap_bit_p (df_get_live_in (bb), regno))
@@ -740,7 +740,7 @@ static void
 save_reg_pressure (void)
 {
   int i;
-  
+
   for (i = 0; i < ira_reg_class_cover_size; i++)
     saved_reg_pressure[ira_reg_class_cover[i]]
       = curr_reg_pressure[ira_reg_class_cover[i]];
@@ -752,7 +752,7 @@ static void
 restore_reg_pressure (void)
 {
   int i;
-  
+
   for (i = 0; i < ira_reg_class_cover_size; i++)
     curr_reg_pressure[ira_reg_class_cover[i]]
       = saved_reg_pressure[ira_reg_class_cover[i]];
@@ -887,7 +887,7 @@ dep_cost_1 (dep_t link, dw_t dw)
           else if (bypass_p (insn))
             cost = insn_latency (insn, used);
         }
-        
+
       if (targetm.sched.adjust_cost_2)
         cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost,
                                             dw);
@@ -940,7 +940,7 @@ increase_insn_priority (rtx insn, int amount)
     }
   else
     {
-      /* In sel-sched.c INSN_PRIORITY is not kept up to date.  
+      /* In sel-sched.c INSN_PRIORITY is not kept up to date.
          Use EXPR_PRIORITY instead. */
       sel_add_to_insn_priority (insn, amount);
     }
@@ -1027,7 +1027,7 @@ priority (rtx insn)
              different than that of normal instructions.
              Instead of walking through INSN_FORW_DEPS (check) list, we
              walk through INSN_FORW_DEPS list of each instruction in the
-             corresponding recovery block.  */ 
+             corresponding recovery block.  */
           /* Selective scheduling does not define RECOVERY_BLOCK macro.  */
           rec = sel_sched_p () ? NULL : RECOVERY_BLOCK (insn);
 
@@ -1078,7 +1078,7 @@ priority (rtx insn)
                     this_priority = next_priority;
                 }
             }
-          
+
           twin = PREV_INSN (twin);
         }
       while (twin != prev_first);
@@ -1178,7 +1178,7 @@ rank_for_schedule (const void *x, const void *y)
     }
 
   /* The insn in a schedule group should be issued the first.  */
-  if (flag_sched_group_heuristic && 
+  if (flag_sched_group_heuristic &&
       SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
     return SCHED_GROUP_P (tmp2) ? 1 : -1;
 
@@ -1216,7 +1216,7 @@ rank_for_schedule (const void *x, const void *y)
 
   if (flag_sched_critical_path_heuristic && priority_val)
     return priority_val;
-    
+
   /* Prefer speculative insn with greater dependencies weakness.  */
   if (flag_sched_spec_insn_heuristic && spec_info)
     {
@@ -1229,7 +1229,7 @@ rank_for_schedule (const void *x, const void *y)
         dw1 = ds_weak (ds1);
       else
         dw1 = NO_DEP_WEAK;
-      
+
       ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
       if (ds2)
         dw2 = ds_weak (ds2);
@@ -1416,7 +1416,7 @@ HAIFA_INLINE static rtx
 ready_remove_first (struct ready_list *ready)
 {
   rtx t;
-  
+
   gcc_assert (ready->n_ready);
   t = ready->vec[ready->first--];
   ready->n_ready--;
@@ -1444,7 +1444,7 @@ rtx
 ready_element (struct ready_list *ready, int index)
 {
   gcc_assert (ready->n_ready && index < ready->n_ready);
-  
+
   return ready->vec[ready->first - index];
 }
 
@@ -1534,7 +1534,7 @@ advance_state (state_t state)
                       targetm.sched.dfa_pre_cycle_insn ());
 
   state_transition (state, NULL);
-  
+
   if (targetm.sched.dfa_post_cycle_insn)
     state_transition (state,
                       targetm.sched.dfa_post_cycle_insn ());
@@ -1728,7 +1728,7 @@ schedule_insn (rtx insn)
   if (INSN_TICK (insn) > clock_var)
     /* INSN has been prematurely moved from the queue to the ready list.
        This is possible only if following flag is set.  */
-    gcc_assert (flag_sched_stalled_insns); 
+    gcc_assert (flag_sched_stalled_insns);
 
   /* ??? Probably, if INSN is scheduled prematurely, we should leave
      INSN_TICK untouched.  This is a machine-dependent issue, actually.  */
@@ -1753,10 +1753,10 @@ schedule_insn (rtx insn)
 
       if (!IS_SPECULATION_BRANCHY_CHECK_P (insn))
         {
-          int effective_cost; 
-          
+          int effective_cost;
+
           effective_cost = try_ready (next);
-          
+
           if (effective_cost >= 0
               && SCHED_GROUP_P (next)
               && advance < effective_cost)
@@ -1804,7 +1804,7 @@ schedule_insn (rtx insn)
 /* Functions for handling of notes.  */
 
 /* Insert the INSN note at the end of the notes list.  */
-static void 
+static void
 add_to_note_list (rtx insn, rtx *note_list_end_p)
 {
   PREV_INSN (insn) = *note_list_end_p;
@@ -1832,7 +1832,7 @@ concat_note_lists (rtx from_end, rtx *to_endp)
   from_start = from_end;
 
   /* A note list should be traversed via PREV_INSN.  */
-  while (PREV_INSN (from_start) != NULL) 
+  while (PREV_INSN (from_start) != NULL)
     from_start = PREV_INSN (from_start);
 
   add_to_note_list (from_start, to_endp);
@@ -2123,17 +2123,17 @@ queue_to_ready (struct ready_list *ready)
 
 }
 
 /* Used by early_queue_to_ready.  Determines whether it is "ok" to
-   prematurely move INSN from the queue to the ready list.  Currently, 
-   if a target defines the hook 'is_costly_dependence', this function 
+   prematurely move INSN from the queue to the ready list.  Currently,
+   if a target defines the hook 'is_costly_dependence', this function
    uses the hook to check whether there exist any dependences which are
-   considered costly by the target, between INSN and other insns that 
+   considered costly by the target, between INSN and other insns that
    have already been scheduled.  Dependences are checked up to Y cycles
    back, with default Y=1; The flag -fsched-stalled-insns-dep=Y allows
-   controlling this value. 
-   (Other considerations could be taken into account instead (or in 
+   controlling this value.
+   (Other considerations could be taken into account instead (or in
    addition) depending on user flags and target hooks.  */
-static bool 
+static bool
 ok_for_early_queue_removal (rtx insn)
 {
   int n_cycles;
@@ -2173,9 +2173,9 @@ ok_for_early_queue_removal (rtx insn)
                 break;
             }
 
-          if (!prev_insn) 
+          if (!prev_insn)
             break;
-          prev_insn = PREV_INSN (prev_insn); 
+          prev_insn = PREV_INSN (prev_insn);
         }
     }
 
@@ -2186,7 +2186,7 @@ ok_for_early_queue_removal (rtx insn)
 
 /* Remove insns from the queue, before they become "ready" with respect
    to FU latency considerations.  */
-static int 
+static int
 early_queue_to_ready (state_t state, struct ready_list *ready)
 {
   rtx insn;
@@ -2200,20 +2200,20 @@ early_queue_to_ready (state_t state, struct ready_list *ready)
   int insns_removed = 0;
 
   /*
-     Flag '-fsched-stalled-insns=X' determines the aggressiveness of this 
-     function: 
+     Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
+     function:
 
-     X == 0: There is no limit on how many queued insns can be removed 
+     X == 0: There is no limit on how many queued insns can be removed
              prematurely.  (flag_sched_stalled_insns = -1).
 
-     X >= 1: Only X queued insns can be removed prematurely in each 
+     X >= 1: Only X queued insns can be removed prematurely in each
              invocation.  (flag_sched_stalled_insns = X).
 
     Otherwise: Early queue removal is disabled.
                (flag_sched_stalled_insns = 0)
   */
 
-  if (! flag_sched_stalled_insns) 
+  if (! flag_sched_stalled_insns)
     return 0;
 
   for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
@@ -2232,7 +2232,7 @@ early_queue_to_ready (state_t state, struct ready_list *ready)
                 print_rtl_single (sched_dump, insn);
 
               memcpy (temp_state, state, dfa_state_size);
-              if (recog_memoized (insn) < 0) 
+              if (recog_memoized (insn) < 0)
                 /* non-negative to indicate that it's not ready
                    to avoid infinite Q->R->Q->R... */
                 cost = 0;
@@ -2243,7 +2243,7 @@ early_queue_to_ready (state_t state, struct ready_list *ready)
                 fprintf (sched_dump, "transition cost = %d\n", cost);
 
               move_to_ready = false;
-              if (cost < 0) 
+              if (cost < 0)
                 {
                   move_to_ready = ok_for_early_queue_removal (insn);
                   if (move_to_ready == true)
@@ -2252,7 +2252,7 @@ early_queue_to_ready (state_t state, struct ready_list *ready)
                       q_size -= 1;
                       ready_add (ready, insn, false);
 
-                      if (prev_link) 
+                      if (prev_link)
                         XEXP (prev_link, 1) = next_link;
                       else
                         insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
@@ -2276,11 +2276,11 @@ early_queue_to_ready (state_t state, struct ready_list *ready)
 
               link = next_link;
             } /* while link */
-        } /* if link */ 
+        } /* if link */
 
     } /* for stalls.. */
 
-  return insns_removed; 
+  return insns_removed;
 }
 
 
@@ -2348,9 +2348,9 @@ move_insn (rtx insn, rtx last, rtx nt)
       int jump_p = 0;
 
       bb = BLOCK_FOR_INSN (insn);
-      
+
       /* BB_HEAD is either LABEL or NOTE.  */
-      gcc_assert (BB_HEAD (bb) != insn); 
+      gcc_assert (BB_HEAD (bb) != insn);
 
       if (BB_END (bb) == insn)
         /* If this is last instruction in BB, move end marker one
@@ -2364,7 +2364,7 @@ move_insn (rtx insn, rtx last, rtx nt)
                        && IS_SPECULATION_BRANCHY_CHECK_P (insn))
                       || (common_sched_info->sched_pass_id
                           == SCHED_EBB_PASS));
-          
+
           gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
 
           BB_END (bb) = PREV_INSN (insn);
@@ -2385,7 +2385,7 @@ move_insn (rtx insn, rtx last, rtx nt)
                  && (LABEL_P (note)
                      || BARRIER_P (note)))
             note = NEXT_INSN (note);
-          
+
           gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
         }
       else
@@ -2413,13 +2413,13 @@ move_insn (rtx insn, rtx last, rtx nt)
         }
 
       df_insn_change_bb (insn, bb);
-      
+
      /* Update BB_END, if needed.  */
       if (BB_END (bb) == last)
-        BB_END (bb) = insn; 
+        BB_END (bb) = insn;
     }
 
-  SCHED_GROUP_P (insn) = 0; 
+  SCHED_GROUP_P (insn) = 0;
 }
 
 /* Return true if scheduling INSN will finish current clock cycle.  */
@@ -2528,8 +2528,8 @@ max_issue (struct ready_list *ready, int privileged_n, state_t state,
   /* ??? We used to assert here that we never issue more insns than issue_rate.
      However, some targets (e.g. MIPS/SB1) claim lower issue rate than can be
      achieved to get better performance.  Until these targets are fixed to use
-     scheduler hooks to manipulate insns priority instead, the assert should 
-     be disabled. 
+     scheduler hooks to manipulate insns priority instead, the assert should
+     be disabled.
 
      gcc_assert (more_issue >= 0); */
 
@@ -2646,7 +2646,7 @@ max_issue (struct ready_list *ready, int privileged_n, state_t state,
     }
 
   /* Restore the original state of the DFA.  */
-  memcpy (state, choice_stack->state, dfa_state_size); 
+  memcpy (state, choice_stack->state, dfa_state_size);
 
   return best;
 }
@@ -2698,7 +2698,7 @@ choose_ready (struct ready_list *ready, rtx *insn_ptr)
       rtx insn;
       int try_data = 1, try_control = 1;
       ds_t ts;
-      
+
       insn = ready_element (ready, 0);
       if (INSN_CODE (insn) < 0)
         {
@@ -2717,16 +2717,16 @@ choose_ready (struct ready_list *ready, rtx *insn_ptr)
 
               x = ready_element (ready, i);
               s = TODO_SPEC (x);
-              
+
               if (spec_info->flags & PREFER_NON_DATA_SPEC
                   && !(s & DATA_SPEC))
-                { 
+                {
                   try_data = 0;
                   if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
                       || !try_control)
                     break;
                 }
-              
+
               if (spec_info->flags & PREFER_NON_CONTROL_SPEC
                   && !(s & CONTROL_SPEC))
                 {
@@ -2790,17 +2790,17 @@ choose_ready (struct ready_list *ready, rtx *insn_ptr)
         {
           *insn_ptr = ready_remove_first (ready);
           if (sched_verbose >= 4)
-            fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n", 
+            fprintf (sched_dump, ";;\t\tChosen insn (but can't issue) : %s \n",
                      (*current_sched_info->print_insn) (*insn_ptr, 0));
           return 0;
         }
       else
         {
-          if (sched_verbose >= 4) 
+          if (sched_verbose >= 4)
             fprintf (sched_dump, ";;\t\tChosen insn : %s\n",
                      (*current_sched_info->print_insn)
                      (ready_element (ready, index), 0));
-          
+
           *insn_ptr = ready_remove (ready, index);
           return 0;
         }
@@ -2871,7 +2871,7 @@ schedule_block (basic_block *target_bb)
   /* Start just before the beginning of time.  */
   clock_var = -1;
 
-  /* We need queue and ready lists and clock_var be initialized 
+  /* We need queue and ready lists and clock_var be initialized
      in try_ready () (which is called through init_ready_list ()).  */
   (*current_sched_info->init_ready_list) ();
 
@@ -3037,9 +3037,9 @@ schedule_block (basic_block *target_bb)
               print_curr_reg_pressure ();
             }
 
-          if (ready.n_ready == 0 
-              && can_issue_more 
-              && reload_completed) 
+          if (ready.n_ready == 0
+              && can_issue_more
+              && reload_completed)
             {
               /* Allow scheduling insns directly from the queue in case
                  there's nothing better to do (ready list is empty) but
@@ -3095,10 +3095,10 @@ schedule_block (basic_block *target_bb)
              to have the highest priority (so it will be returned by
              the ready_remove_first call above), we invoke
              ready_add (&ready, insn, true).
             But, still, there is one issue: INSN can be later
-            discarded by scheduler's front end through 
+            discarded by scheduler's front end through
             current_sched_info->can_schedule_ready_p, hence, won't
-            be issued next.  */ 
+            be issued next.  */
          {
            ready_add (&ready, insn, true);
            break;
@@ -3140,7 +3140,7 @@ schedule_block (basic_block *target_bb)
                   advance = cost;
                   break;
                 }
-              
+
               continue;
             }
 
@@ -3153,12 +3153,12 @@ schedule_block (basic_block *target_bb)
               continue;
             }
 
-          /* DECISION is made.  */ 
-  
+          /* DECISION is made.  */
+
           if (TODO_SPEC (insn) & SPECULATIVE)
             generate_recovery_code (insn);
 
-          if (control_flow_insn_p (last_scheduled_insn) 
+          if (control_flow_insn_p (last_scheduled_insn)
               /* This is used to switch basic blocks by request
                  from scheduler front-end (actually, sched-ebb.c only).
                  This is used to process blocks with single fallthru
@@ -3168,7 +3168,7 @@ schedule_block (basic_block *target_bb)
             {
               *target_bb = current_sched_info->advance_target_bb
                 (*target_bb, 0);
-              
+
               if (sched_verbose)
                 {
                   rtx x;
@@ -3180,15 +3180,15 @@ schedule_block (basic_block *target_bb)
 
               last_scheduled_insn = bb_note (*target_bb);
             }
- 
+
           /* Update counters, etc in the scheduler's front end.  */
           (*current_sched_info->begin_schedule_ready) (insn,
                                                        last_scheduled_insn);
- 
+
          move_insn (insn, last_scheduled_insn, current_sched_info->next_tail);
          reemit_notes (insn);
          last_scheduled_insn = insn;
-          
+
          if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
             {
               cycle_issued_insns++;
@@ -3282,19 +3282,19 @@ schedule_block (basic_block *target_bb)
     /* Sanity check -- queue must be empty now.  Meaningless if region has
        multiple bbs.  */
     gcc_assert (!q_size && !ready.n_ready && !ready.n_debug);
-  else 
+  else
     {
       /* We must maintain QUEUE_INDEX between blocks in region.  */
       for (i = ready.n_ready - 1; i >= 0; i--)
         {
           rtx x;
-          
+
          x = ready_element (&ready, i);
          QUEUE_INDEX (x) = QUEUE_NOWHERE;
          TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
         }
 
-      if (q_size) 
+      if (q_size)
         for (i = 0; i <= max_insn_queue_index; i++)
           {
             rtx link;
@@ -3356,7 +3356,7 @@ set_priorities (rtx head, rtx tail)
 {
   rtx insn;
   int n_insn;
-  int sched_max_insns_priority = 
+  int sched_max_insns_priority =
     current_sched_info->sched_max_insns_priority;
   rtx prev_head;
 
@@ -3398,7 +3398,7 @@ setup_sched_dump (void)
                 ? stderr : dump_file);
 }
 
-/* Initialize some global state for the scheduler.  This function works 
+/* Initialize some global state for the scheduler.  This function works
    with the common data shared between all the schedulers.  It is called
    from the scheduler specific initialization routine.  */
 
@@ -3478,8 +3478,8 @@ sched_init (void)
     }
 
   df_analyze ();
-  
-  /* Do not run DCE after reload, as this can kill nops inserted 
+
+  /* Do not run DCE after reload, as this can kill nops inserted
      by bundling.  */
   if (reload_completed)
     df_clear_flags (DF_LR_RUN_DCE);
@@ -3506,7 +3506,7 @@ sched_init (void)
       saved_reg_live = BITMAP_ALLOC (NULL);
       region_ref_regs = BITMAP_ALLOC (NULL);
     }
-  
+
   curr_state = xmalloc (dfa_state_size);
 }
 
@@ -3597,7 +3597,7 @@ haifa_sched_finish (void)
   sched_finish ();
 }
 
-/* Free global data used during insn scheduling.  This function works with 
+/* Free global data used during insn scheduling.  This function works with
    the common data shared between the schedulers.  */
 
 void
@@ -3644,7 +3644,7 @@ fix_inter_tick (rtx head, rtx tail)
   int next_clock = clock_var + 1;
 
   bitmap_initialize (&processed, 0);
-  
+
   /* Iterates over scheduled instructions and fix their INSN_TICKs and
      INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
      across different blocks.  */
@@ -3655,26 +3655,26 @@ fix_inter_tick (rtx head, rtx tail)
           int tick;
           sd_iterator_def sd_it;
           dep_t dep;
-          
+
           tick = INSN_TICK (head);
           gcc_assert (tick >= MIN_TICK);
-          
+
           /* Fix INSN_TICK of instruction from just scheduled block.  */
           if (!bitmap_bit_p (&processed, INSN_LUID (head)))
             {
               bitmap_set_bit (&processed, INSN_LUID (head));
               tick -= next_clock;
-              
+
               if (tick < MIN_TICK)
                 tick = MIN_TICK;
-              
-              INSN_TICK (head) = tick; 
+
+              INSN_TICK (head) = tick;
             }
-          
+
           FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
             {
               rtx next;
-              
+
               next = DEP_CON (dep);
               tick = INSN_TICK (next);
 
@@ -3686,10 +3686,10 @@ fix_inter_tick (rtx head, rtx tail)
                 {
                   bitmap_set_bit (&processed, INSN_LUID (next));
                   tick -= next_clock;
-                  
+
                   if (tick < MIN_TICK)
                     tick = MIN_TICK;
-                  
+
                   if (tick > INTER_TICK (next))
                     INTER_TICK (next) = tick;
                   else
@@ -3704,7 +3704,7 @@ fix_inter_tick (rtx head, rtx tail)
 }
 
 static int haifa_speculate_insn (rtx, ds_t, rtx *);
-  
+
 /* Check if NEXT is ready to be added to the ready or queue list.
    If "yes", add it to the proper list.
    Returns:
@@ -3713,7 +3713,7 @@ static int haifa_speculate_insn (rtx, ds_t, rtx *);
    0 < N - queued for N cycles.  */
 int
 try_ready (rtx next)
-{ 
+{
   ds_t old_ts, *ts;
 
   ts = &TODO_SPEC (next);
@@ -3722,7 +3722,7 @@ try_ready (rtx next)
   gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
               && ((old_ts & HARD_DEP)
                   || (old_ts & SPECULATIVE)));
-  
+
   if (sd_lists_empty_p (next, SD_LIST_BACK))
     /* NEXT has all its dependencies resolved.  */
     {
@@ -3798,11 +3798,11 @@ try_ready (rtx next)
     {
       int res;
       rtx new_pat;
-      
+
       gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));
-      
+
       res = haifa_speculate_insn (next, *ts, &new_pat);
-      
+
       switch (res)
         {
         case -1:
@@ -3811,47 +3811,47 @@ try_ready (rtx next)
              so we won't reanalyze anything.  */
           *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
           break;
-          
+
         case 0:
           /* We follow the rule, that every speculative insn
              has non-null ORIG_PAT.  */
           if (!ORIG_PAT (next))
             ORIG_PAT (next) = PATTERN (next);
           break;
-          
-        case 1: 
+
+        case 1:
           if (!ORIG_PAT (next))
             /* If we gonna to overwrite the original pattern of insn,
                save it.  */
             ORIG_PAT (next) = PATTERN (next);
-          
+
           haifa_change_pattern (next, new_pat);
           break;
-          
+
         default:
           gcc_unreachable ();
         }
     }
-  
+
   /* We need to restore pattern only if (*ts == 0), because otherwise it is
      either correct (*ts & SPECULATIVE), or we simply don't care
      (*ts & HARD_DEP).  */
-  
+
   gcc_assert (!ORIG_PAT (next)
               || !IS_SPECULATION_BRANCHY_CHECK_P (next));
-  
+
   if (*ts & HARD_DEP)
     {
       /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
          control-speculative NEXT could have been discarded by sched-rgn.c
         (the same case as when discarded by can_schedule_ready_p ()).  */
      /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
-      
+
      change_queue_index (next, QUEUE_NOWHERE);
      return -1;
     }
   else if (!(*ts & BEGIN_SPEC)
           && ORIG_PAT (next) && !IS_SPECULATION_CHECK_P (next))
-    /* We should change pattern of every previously speculative 
+    /* We should change pattern of every previously speculative
        instruction - and we determine if NEXT was speculative by using
        ORIG_PAT field.  Except one case - speculation checks have ORIG_PAT
        pat too, so skip them.  */
@@ -3861,12 +3861,12 @@ try_ready (rtx next)
     }
 
   if (sched_verbose >= 2)
-    { 
+    {
       int s = TODO_SPEC (next);
-      
+
       fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
                (*current_sched_info->print_insn) (next, 0));
-      
+
       if (spec_info && spec_info->dump)
         {
           if (s & BEGIN_DATA)
@@ -3878,10 +3878,10 @@ try_ready (rtx next)
         }
 
       fprintf (sched_dump, "\n");
-    } 
-  
+    }
+
   adjust_priority (next);
-  
+
   return fix_tick_ready (next);
 }
 
@@ -3904,10 +3904,10 @@ fix_tick_ready (rtx next)
       full_p = (tick == INVALID_TICK);
 
       FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
-        { 
+        {
           rtx pro = DEP_PRO (dep);
           int tick1;
-          
+
           gcc_assert (INSN_TICK (pro) >= MIN_TICK);
 
           tick1 = INSN_TICK (pro) + dep_cost (dep);
@@ -3940,10 +3940,10 @@ change_queue_index (rtx next, int delay)
 {
   int i = QUEUE_INDEX (next);
 
-  gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index 
+  gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
               && delay != 0);
   gcc_assert (i != QUEUE_SCHEDULED);
-  
+
   if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
       || (delay < 0 && delay == i))
     /* We have nothing to do.  */
@@ -3954,18 +3954,18 @@ change_queue_index (rtx next, int delay)
     ready_remove_insn (next);
   else if (i >= 0)
     queue_remove (next);
-  
+
   /* Add it to the proper place.  */
   if (delay == QUEUE_READY)
     ready_add (readyp, next, false);
   else if (delay >= 1)
     queue_insn (next, delay);
-  
+
   if (sched_verbose >= 2)
-    { 
+    {
       fprintf (sched_dump, ";;\t\ttick updated: insn %s",
                (*current_sched_info->print_insn) (next, 0));
-      
+
       if (delay == QUEUE_READY)
         fprintf (sched_dump, " into ready\n");
       else if (delay >= 1)
@@ -4047,10 +4047,10 @@ generate_recovery_code (rtx insn)
 {
   if (TODO_SPEC (insn) & BEGIN_SPEC)
     begin_speculative_block (insn);
-  
+
   /* Here we have insn with no dependencies to
      instructions other then CHECK_SPEC ones.  */
-  
+
   if (TODO_SPEC (insn) & BE_IN_SPEC)
     add_to_speculative_block (insn);
 }
@@ -4094,7 +4094,7 @@ process_insn_forw_deps_be_in_spec (rtx insn, rtx twin, ds_t fs)
           ds_t new_ds;
 
           new_ds = (ds & ~BEGIN_SPEC) | fs;
-          
+
           if (/* consumer can 'be in speculative'.  */
               sched_insn_is_legitimate_for_speculation_p (consumer,
                                                           new_ds))
@@ -4121,7 +4121,7 @@ static void
 begin_speculative_block (rtx insn)
 {
   if (TODO_SPEC (insn) & BEGIN_DATA)
-    nr_begin_data++; 
+    nr_begin_data++;
   if (TODO_SPEC (insn) & BEGIN_CONTROL)
     nr_begin_control++;
 
@@ -4152,7 +4152,7 @@ add_to_speculative_block (rtx insn)
 
   TODO_SPEC (insn) &= ~BE_IN_SPEC;
   gcc_assert (!TODO_SPEC (insn));
-  
+
   DONE_SPEC (insn) |= ts;
 
   /* First we convert all simple checks to branchy.  */
@@ -4263,7 +4263,7 @@ add_to_speculative_block (rtx insn)
 
       twin = XEXP (twins, 1);
       free_INSN_LIST_node (twins);
-      twins = twin; 
+      twins = twin;
     }
 
   calc_priorities (priorities_roots);
@@ -4354,16 +4354,16 @@ init_before_recovery (basic_block *before_recovery_ptr)
 
   if (e)
     {
-      /* We create two basic blocks: 
+      /* We create two basic blocks:
          1. Single instruction block is inserted right after E->SRC
-            and has jump to 
+            and has jump to
          2. Empty block right before EXIT_BLOCK.
          Between these two blocks recovery blocks will be emitted.  */
 
       basic_block single, empty;
       rtx x, label;
 
-      /* If the fallthrough edge to exit we've found is from the block we've 
+      /* If the fallthrough edge to exit we've found is from the block we've
          created before, don't do anything more.  */
       if (last == after_recovery)
         return;
@@ -4397,7 +4397,7 @@ init_before_recovery (basic_block *before_recovery_ptr)
       JUMP_LABEL (x) = label;
       LABEL_NUSES (label)++;
       haifa_init_insn (x);
-      
+
       emit_barrier_after (x);
 
       sched_init_only_bb (empty, NULL);
@@ -4413,8 +4413,8 @@ init_before_recovery (basic_block *before_recovery_ptr)
 
       if (sched_verbose >= 2 && spec_info->dump)
         fprintf (spec_info->dump,
-                 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n", 
-                 last->index, single->index, empty->index); 
+                 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
+                 last->index, single->index, empty->index);
     }
   else
     before_recovery = last;
@@ -4427,7 +4427,7 @@ sched_create_recovery_block (basic_block *before_recovery_ptr)
   rtx label;
   rtx barrier;
   basic_block rec;
-  
+
   haifa_recovery_bb_recently_added_p = true;
   haifa_recovery_bb_ever_added_p = true;
 
@@ -4445,8 +4445,8 @@ sched_create_recovery_block (basic_block *before_recovery_ptr)
 
   if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
     BB_SET_PARTITION (rec, BB_COLD_PARTITION);
-  
-  if (sched_verbose && spec_info->dump) 
+
+  if (sched_verbose && spec_info->dump)
     fprintf (spec_info->dump,
              ";;\t\tGenerated recovery block rec%d\n",
              rec->index);
@@ -4464,13 +4464,13 @@ sched_create_recovery_edges (basic_block first_bb, basic_block rec,
   int edge_flags;
 
   /* This is fixing of incoming edge.  */
-  /* ??? Which other flags should be specified?  */ 
+  /* ??? Which other flags should be specified?  */
   if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
     /* Partition type is the same, if it is "unpartitioned".  */
     edge_flags = EDGE_CROSSING;
   else
     edge_flags = 0;
-  
+
   make_edge (first_bb, rec, edge_flags);
   label = block_label (second_bb);
   jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
@@ -4491,9 +4491,9 @@ sched_create_recovery_edges (basic_block first_bb, basic_block rec,
       edge_flags = EDGE_CROSSING;
     }
   else
-    edge_flags = 0; 
+    edge_flags = 0;
 
-  make_single_succ_edge (rec, second_bb, edge_flags); 
+  make_single_succ_edge (rec, second_bb, edge_flags);
 }
 
 /* This function creates recovery code for INSN.  If MUTATE_P is nonzero,
@@ -4541,7 +4541,7 @@ create_check_block_twin (rtx insn, bool mutate_p)
   if (rec != EXIT_BLOCK_PTR)
     {
      /* To have mem_reg alive at the beginning of second_bb,
-        we emit check BEFORE insn, so insn after splitting 
+        we emit check BEFORE insn, so insn after splitting
         insn will be at the beginning of second_bb, which will
         provide us with the correct life information.  */
      check = emit_jump_insn_before (check, insn);
@@ -4619,14 +4619,14 @@ create_check_block_twin (rtx insn, bool mutate_p)
 
       sched_create_recovery_edges (first_bb, rec, second_bb);
 
-      sched_init_only_bb (second_bb, first_bb); 
+      sched_init_only_bb (second_bb, first_bb);
       sched_init_only_bb (rec, EXIT_BLOCK_PTR);
 
       jump = BB_END (rec);
       haifa_init_insn (jump);
     }
 
-  /* Move backward dependences from INSN to CHECK and 
+  /* Move backward dependences from INSN to CHECK and
      move forward dependences from INSN to TWIN.  */
 
  /* First, create dependencies between INSN's producers and CHECK & TWIN.  */
@@ -4639,7 +4639,7 @@ create_check_block_twin (rtx insn, bool mutate_p)
         check --TRUE--> producer  ??? or ANTI ???
         twin  --TRUE--> producer
         twin  --ANTI--> check
-        
+
         If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
         check --ANTI--> producer
         twin  --ANTI--> producer
@@ -4648,7 +4648,7 @@ create_check_block_twin (rtx insn, bool mutate_p)
         If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
         check ~~TRUE~~> producer
         twin  ~~TRUE~~> producer
-        twin  --ANTI--> check */ 
+        twin  --ANTI--> check */
 
       ds = DEP_STATUS (dep);
 
@@ -4665,7 +4665,7 @@ create_check_block_twin (rtx insn, bool mutate_p)
         {
           DEP_CON (new_dep) = twin;
           sd_add_dep (new_dep, false);
-        } 
+        }
     }
 
   /* Second, remove backward dependencies of INSN.  */
@@ -4686,11 +4686,11 @@ create_check_block_twin (rtx insn, bool mutate_p)
 
   /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x)
      are set only here.  */
-  
+
   gcc_assert (!DONE_SPEC (insn));
-  
+
   if (!mutate_p)
-    { 
+    {
       ds_t ts = TODO_SPEC (insn);
 
       DONE_SPEC (insn) = ts & BEGIN_SPEC;
@@ -4726,7 +4726,7 @@ create_check_block_twin (rtx insn, bool mutate_p)
     }
   else
     {
-      if (spec_info->dump) 
+      if (spec_info->dump)
         fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
                  (*current_sched_info->print_insn) (insn, 0));
 
@@ -4781,7 +4781,7 @@ fix_recovery_deps (basic_block rec)
   rtx link;
 
   bitmap_initialize (&in_ready, 0);
-  
+
  /* NOTE - a basic block note.  */
   note = NEXT_INSN (BB_HEAD (rec));
   gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
@@ -4816,7 +4816,7 @@ fix_recovery_deps (basic_block rec)
               sd_iterator_next (&sd_it);
             }
         }
-      
+
      insn = PREV_INSN (insn);
     }
   while (insn != note);
@@ -4831,10 +4831,10 @@ fix_recovery_deps (basic_block rec)
   /* Fixing jump's dependences.  */
   insn = BB_HEAD (rec);
   jump = BB_END (rec);
-  
+
   gcc_assert (LABEL_P (insn));
   insn = NEXT_INSN (insn);
-  
+
   gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
   add_jump_dependencies (insn, jump);
 }
@@ -4948,7 +4948,7 @@ unlink_bb_notes (basic_block first, basic_block last)
       if (LABEL_P (label))
         note = NEXT_INSN (label);
       else
-        note = label; 
+        note = label;
       gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
 
       prev = PREV_INSN (label);
@@ -4962,7 +4962,7 @@ unlink_bb_notes (basic_block first, basic_block last)
 
       if (last == first)
         break;
-      
+
       last = last->prev_bb;
     }
   while (1);
@@ -4977,14 +4977,14 @@ restore_bb_notes (basic_block first)
     return;
 
   /* We DON'T unlink basic block notes of the first block in the ebb.  */
-  first = first->next_bb; 
+  first = first->next_bb;
   /* Remember: FIRST is actually a second basic block in the ebb.  */
 
   while (first != EXIT_BLOCK_PTR
          && bb_header[first->index])
     {
       rtx prev, label, note, next;
-      
+
       label = bb_header[first->index];
       prev = PREV_INSN (label);
       next = NEXT_INSN (prev);
@@ -4992,7 +4992,7 @@ restore_bb_notes (basic_block first)
       if (LABEL_P (label))
         note = NEXT_INSN (label);
       else
-        note = label; 
+        note = label;
       gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
 
       bb_header[first->index] = 0;
@@ -5000,7 +5000,7 @@ restore_bb_notes (basic_block first)
       NEXT_INSN (prev) = label;
       NEXT_INSN (note) = next;
       PREV_INSN (next) = note;
-      
+
       first = first->next_bb;
     }
 
@@ -5022,7 +5022,7 @@ fix_jump_move (rtx jump)
 
   gcc_assert (common_sched_info->sched_pass_id == SCHED_EBB_PASS
               || IS_SPECULATION_BRANCHY_CHECK_P (jump));
-  
+
   if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
     /* if jump_bb_next is not empty.  */
     BB_END (jump_bb) = BB_END (jump_bb_next);
@@ -5051,9 +5051,9 @@ move_block_after_check (rtx jump)
   bb = BLOCK_FOR_INSN (PREV_INSN (jump));
   jump_bb = BLOCK_FOR_INSN (jump);
   jump_bb_next = jump_bb->next_bb;
-  
+
   update_bb_for_insn (jump_bb);
-  
+
   gcc_assert (IS_SPECULATION_CHECK_P (jump)
               || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next)));
 
@@ -5067,7 +5067,7 @@ move_block_after_check (rtx jump)
   move_succs (&t, jump_bb_next);
 
   df_mark_solutions_dirty ();
-  
+
   common_sched_info->fix_recovery_cfg
     (bb->index, jump_bb->index, jump_bb_next->index);
 }
@@ -5160,7 +5160,7 @@ add_jump_dependencies (rtx insn, rtx jump)
       insn = NEXT_INSN (insn);
       if (insn == jump)
         break;
-      
+
       if (dep_list_size (insn) == 0)
         {
           dep_def _new_dep, *new_dep = &_new_dep;
@@ -5234,23 +5234,23 @@ check_cfg (rtx head, rtx tail)
   next_tail = NEXT_INSN (tail);
 
   do
-    { 
-      not_last = head != tail; 
+    {
+      not_last = head != tail;
 
       if (not_first)
         gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
      if (not_last)
         gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);
 
-      if (LABEL_P (head) 
+      if (LABEL_P (head)
           || (NOTE_INSN_BASIC_BLOCK_P (head)
               && (!not_first
                   || (not_first && !LABEL_P (PREV_INSN (head))))))
         {
-          gcc_assert (bb == 0); 
+          gcc_assert (bb == 0);
           bb = BLOCK_FOR_INSN (head);
           if (bb != 0)
-            gcc_assert (BB_HEAD (bb) == head); 
+            gcc_assert (BB_HEAD (bb) == head);
           else
             /* This is the case of jump table.  See inside_basic_block_p ().  */
             gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
@@ -5266,7 +5266,7 @@ check_cfg (rtx head, rtx tail)
           gcc_assert (inside_basic_block_p (head)
                       || NOTE_P (head));
           gcc_assert (BLOCK_FOR_INSN (head) == bb);
-          
+
           if (LABEL_P (head))
             {
               head = NEXT_INSN (head);
@@ -5384,7 +5384,7 @@ sched_scan (const struct sched_scan_info_def *ssi,
     extend_insn ();
 
   if (bbs != NULL)
-    { 
+    {
       unsigned i;
      basic_block x;
 
@@ -5481,12 +5481,12 @@ sched_extend_target (void)
 static void
 extend_h_i_d (void)
 {
-  int reserve = (get_max_uid () + 1 
+  int reserve = (get_max_uid () + 1
                  - VEC_length (haifa_insn_data_def, h_i_d));
-  if (reserve > 0 
+  if (reserve > 0
       && ! VEC_space (haifa_insn_data_def, h_i_d, reserve))
     {
-      VEC_safe_grow_cleared (haifa_insn_data_def, heap, h_i_d, 
+      VEC_safe_grow_cleared (haifa_insn_data_def, heap, h_i_d,
                              3 * get_max_uid () / 2);
       sched_extend_target ();
     }
@@ -5585,7 +5585,7 @@ sched_split_block_1 (basic_block first_bb, rtx after)
   e = split_block (first_bb, after);
   gcc_assert (e->src == first_bb);
 
-  /* sched_split_block emits note if *check == BB_END. Probably it 
+  /* sched_split_block emits note if *check == BB_END. Probably it
      is better to rip that note off.  */
 
  return e->dest;
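Every hunk above is a pure whitespace change (177 matched -/+ pairs, trailing blanks stripped), but the touched comments document the scheduler's central data structure: a circular "stall" queue in which an insn queued for X cycles is filed in slot NEXT_Q_AFTER (q_ptr, X), and each clock advance rotates q_ptr and drains the now-current slot into the ready list (see the QUEUE_INDEX comment and the queue_to_ready / early_queue_to_ready hunks). The stand-alone C sketch below models just that mechanism. The identifiers mirror haifa-sched.c, but the queue length, the insn struct, and the driver are illustrative assumptions, not GCC's code.

/* Simplified model of haifa-sched.c's insn_queue[] / q_ptr / q_size.
   Hypothetical stand-alone sketch: GCC's queue holds rtx insn lists and
   also consults the target DFA before moving an insn to the ready list.  */

#include <stdio.h>
#include <stdlib.h>

#define MAX_QUEUE_INDEX 7  /* queue length - 1; a power of two minus one */
#define NEXT_Q(X)          (((X) + 1) & MAX_QUEUE_INDEX)
#define NEXT_Q_AFTER(X, C) (((X) + (C)) & MAX_QUEUE_INDEX)

struct insn { int uid; struct insn *next; };

static struct insn *insn_queue[MAX_QUEUE_INDEX + 1];
static int q_ptr, q_size;

/* Model of queue_insn: file INSN to become ready in N_CYCLES cycles.  */
static void
queue_insn (struct insn *insn, int n_cycles)
{
  int slot = NEXT_Q_AFTER (q_ptr, n_cycles);
  insn->next = insn_queue[slot];
  insn_queue[slot] = insn;
  q_size++;
}

/* Model of queue_to_ready: advance one cycle, drain the current slot.  */
static void
queue_to_ready (void)
{
  q_ptr = NEXT_Q (q_ptr);
  while (insn_queue[q_ptr])
    {
      struct insn *insn = insn_queue[q_ptr];
      insn_queue[q_ptr] = insn->next;
      q_size--;
      printf ("insn %d is now ready\n", insn->uid);
      free (insn);
    }
}

int
main (void)
{
  for (int uid = 1; uid <= 3; uid++)
    {
      struct insn *insn = malloc (sizeof *insn);
      insn->uid = uid;
      queue_insn (insn, uid);  /* ready after UID cycles */
    }
  for (int cycle = 1; q_size > 0; cycle++)
    {
      printf ("cycle %d:\n", cycle);
      queue_to_ready ();
    }
  return 0;
}

Because the queue length is a power of two, the wrap-around is a mask rather than a division; the real NEXT_Q_AFTER in haifa-sched.c masks with max_insn_queue_index in the same way. early_queue_to_ready, whose hunks dominate this diff, is the escape hatch from this discipline: under -fsched-stalled-insns it may pull insns out of the queue before their tick, subject to ok_for_early_queue_removal.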