author    rsandifo <rsandifo@138bc75d-0d04-0410-961f-82ee72b054a4>  2012-04-26 15:49:30 +0000
committer rsandifo <rsandifo@138bc75d-0d04-0410-961f-82ee72b054a4>  2012-04-26 15:49:30 +0000
commit    b30b031c80c745d45199d7052482b98d23413b50 (patch)
tree      e56ccaafc3cad4864a20e07e80010ba4569da004 /gcc/haifa-sched.c
parent    11189c7a936c0075b867159661da40a0a985d307 (diff)
gcc/
	* sched-int.h (_haifa_insn_data): Move priority_status.
	Add model_index.
	(INSN_MODEL_INDEX): New macro.
	* haifa-sched.c (insn_delay): New function.
	(sched_regno_pressure_class): Update commentary.
	(mark_regno_birth_or_death): Pass the liveness bitmap and pressure
	array as arguments, instead of using curr_reg_live and
	curr_reg_pressure.  Only update the pressure if the bit in the
	liveness set has changed.
	(initiate_reg_pressure_info): Always trust the live-in set for
	SCHED_PRESSURE_MODEL.
	(initiate_bb_reg_pressure_info): Update call to
	mark_regno_birth_or_death.
	(dep_list_size): Take the list as argument.
	(calculate_reg_deaths): New function, extracted from...
	(setup_insn_reg_pressure_info): ...here.
	(MODEL_BAR): New macro.
	(model_pressure_data, model_insn_info, model_pressure_limit)
	(model_pressure_group): New structures.
	(model_schedule, model_worklist, model_insns, model_num_insns)
	(model_curr_point, model_before_pressure, model_next_priority):
	New variables.
	(MODEL_PRESSURE_DATA, MODEL_MAX_PRESSURE, MODEL_REF_PRESSURE)
	(MODEL_INSN_INFO, MODEL_INSN): New macros.
	(model_index, model_update_limit_points_in_group): New functions.
	(model_update_limit_points, model_last_use_except): Likewise.
	(model_start_update_pressure, model_update_pressure): Likewise.
	(model_recompute, model_spill_cost, model_excess_group_cost): Likewise.
	(model_excess_cost, model_dump_pressure_points): Likewise.
	(model_set_excess_costs): Likewise.
	(rank_for_schedule): Extend SCHED_PRIORITY_WEIGHTED ordering to
	SCHED_PRIORITY_MODEL.  Use insn_delay.  Use the order in the model
	schedule as an alternative tie-breaker.  Update the call to
	dep_list_size.
	(ready_sort): Call model_set_excess_costs.
	(update_register_pressure): Update call to mark_regno_birth_or_death.
	Rely on that function to check liveness rather than doing it here.
	(model_classify_pressure, model_order_p, model_add_to_worklist_at)
	(model_remove_from_worklist, model_add_to_worklist, model_promote_insn)
	(model_add_to_schedule, model_analyze_insns, model_init_pressure_group)
	(model_record_pressure, model_record_pressures): New functions.
	(model_record_final_pressures, model_add_successors_to_worklist)
	(model_promote_predecessors, model_choose_insn): Likewise.
	(model_reset_queue_indices, model_dump_pressure_summary): Likewise.
	(model_start_schedule, model_finalize_pressure_group): Likewise.
	(model_end_schedule): Likewise.
	(schedule_insn): Say when we're scheduling the next instruction
	in the model schedule.
	(schedule_insn): Handle SCHED_PRESSURE_MODEL.
	(queue_to_ready): Do not add instructions that are
	MAX_SCHED_READY_INSNS beyond the current point of the model schedule.
	Always allow the next instruction in the model schedule to be added.
	(debug_ready_list): Print the INSN_REG_PRESSURE_EXCESS_COST_CHANGE
	and delay for SCHED_PRESSURE_MODEL too.
	(prune_ready_list): Extend SCHED_PRIORITY_WEIGHTED handling to
	SCHED_PRIORITY_MODEL, but also take the DFA into account.
	(schedule_block): Call model_start_schedule and model_end_schedule.
	Extend SCHED_PRIORITY_WEIGHTED stall handling to SCHED_PRIORITY_MODEL.
	(sched_init): Extend INSN_REG_PRESSURE_EXCESS_COST_CHANGE handling
	to SCHED_PRESSURE_MODEL, but don't allocate saved_reg_live or
	region_ref_regs.
	(sched_finish): Update accordingly.
	(fix_tick_ready): Extend INSN_REG_PRESSURE_EXCESS_COST_CHANGE handling
	to SCHED_PRESSURE_MODEL.
	(add_jump_dependencies): Update call to dep_list_size.
	(haifa_finish_h_i_d): Fix leak of max_reg_pressure.
	(haifa_init_insn): Extend INSN_REG_PRESSURE_EXCESS_COST_CHANGE
	handling to SCHED_PRESSURE_MODEL.
	* sched-deps.c (init_insn_reg_pressure_info): Likewise, but don't
	allocate INSN_MAX_REG_PRESSURE for SCHED_PRESSURE_MODEL.
	(sched_analyze_insn): Extend INSN_REG_PRESSURE_EXCESS_COST_CHANGE
	handling to SCHED_PRESSURE_MODEL.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@186882 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'gcc/haifa-sched.c')
-rw-r--r--  gcc/haifa-sched.c  1582
1 file changed, 1515 insertions, 67 deletions
diff --git a/gcc/haifa-sched.c b/gcc/haifa-sched.c
index 308141f1c24..869159c036f 100644
--- a/gcc/haifa-sched.c
+++ b/gcc/haifa-sched.c
@@ -398,6 +398,14 @@ basic_block (* sched_split_block) (basic_block, rtx);
/* Create empty basic block after the specified block. */
basic_block (* sched_create_empty_bb) (basic_block);
+/* Return the number of cycles until INSN is expected to be ready.
+ Return zero if it already is. */
+static int
+insn_delay (rtx insn)
+{
+ return MAX (INSN_TICK (insn) - clock_var, 0);
+}
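For example (illustrative numbers): with clock_var at 10, an insn whose INSN_TICK is 13 is 3 cycles away from being ready, while one whose INSN_TICK is at most 10 has a delay of 0.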
+
static int
may_trap_exp (const_rtx x, int is_store)
{
@@ -875,7 +883,7 @@ schedule_insns (void)
enum sched_pressure_algorithm sched_pressure;
/* Map regno -> its pressure class. The map is defined only when
- SCHED_PRESSURE is SCHED_PRESSURE_WEIGHTED. */
+ SCHED_PRESSURE != SCHED_PRESSURE_NONE. */
enum reg_class *sched_regno_pressure_class;
/* The current register pressure. Only elements corresponding pressure
@@ -903,10 +911,12 @@ sched_init_region_reg_pressure_info (void)
bitmap_clear (region_ref_regs);
}
-/* Update current register pressure related info after birth (if
- BIRTH_P) or death of register REGNO. */
-static void
-mark_regno_birth_or_death (int regno, bool birth_p)
+/* PRESSURE[CL] describes the pressure on register class CL. Update it
+ for the birth (if BIRTH_P) or death (if !BIRTH_P) of register REGNO.
+ LIVE tracks the set of live registers; if it is null, assume that
+ every birth or death is genuine. */
+static inline void
+mark_regno_birth_or_death (bitmap live, int *pressure, int regno, bool birth_p)
{
enum reg_class pressure_class;
@@ -917,17 +927,17 @@ mark_regno_birth_or_death (int regno, bool birth_p)
{
if (birth_p)
{
- bitmap_set_bit (curr_reg_live, regno);
- curr_reg_pressure[pressure_class]
- += (ira_reg_class_max_nregs
- [pressure_class][PSEUDO_REGNO_MODE (regno)]);
+ if (!live || bitmap_set_bit (live, regno))
+ pressure[pressure_class]
+ += (ira_reg_class_max_nregs
+ [pressure_class][PSEUDO_REGNO_MODE (regno)]);
}
else
{
- bitmap_clear_bit (curr_reg_live, regno);
- curr_reg_pressure[pressure_class]
- -= (ira_reg_class_max_nregs
- [pressure_class][PSEUDO_REGNO_MODE (regno)]);
+ if (!live || bitmap_clear_bit (live, regno))
+ pressure[pressure_class]
+ -= (ira_reg_class_max_nregs
+ [pressure_class][PSEUDO_REGNO_MODE (regno)]);
}
}
}
@@ -936,13 +946,13 @@ mark_regno_birth_or_death (int regno, bool birth_p)
{
if (birth_p)
{
- bitmap_set_bit (curr_reg_live, regno);
- curr_reg_pressure[pressure_class]++;
+ if (!live || bitmap_set_bit (live, regno))
+ pressure[pressure_class]++;
}
else
{
- bitmap_clear_bit (curr_reg_live, regno);
- curr_reg_pressure[pressure_class]--;
+ if (!live || bitmap_clear_bit (live, regno))
+ pressure[pressure_class]--;
}
}
}
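A standalone sketch (not part of the patch) of the rule the new interface follows: the pressure counter moves only when the liveness bit actually flips, and a null live set means every birth or death is trusted. Plain C arrays stand in for the GCC bitmap and pressure-class types.

#include <stdbool.h>
#include <stddef.h>

/* Set LIVE[REGNO] and return true if it was previously clear, mirroring
   the way bitmap_set_bit reports whether anything changed.  */
static bool
sketch_set_bit (bool *live, int regno)
{
  bool changed = !live[regno];
  live[regno] = true;
  return changed;
}

/* Record the birth of REGNO, which occupies NREGS registers of class CL.  */
static void
sketch_mark_birth (bool *live, int *pressure, int cl, int nregs, int regno)
{
  if (live == NULL || sketch_set_bit (live, regno))
    pressure[cl] += nregs;
}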
@@ -960,8 +970,10 @@ initiate_reg_pressure_info (bitmap live)
curr_reg_pressure[ira_pressure_classes[i]] = 0;
bitmap_clear (curr_reg_live);
EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
- if (current_nr_blocks == 1 || bitmap_bit_p (region_ref_regs, j))
- mark_regno_birth_or_death (j, true);
+ if (sched_pressure == SCHED_PRESSURE_MODEL
+ || current_nr_blocks == 1
+ || bitmap_bit_p (region_ref_regs, j))
+ mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure, j, true);
}
/* Mark registers in X as mentioned in the current region. */
@@ -1015,7 +1027,8 @@ initiate_bb_reg_pressure_info (basic_block bb)
if (regno == INVALID_REGNUM)
break;
if (! bitmap_bit_p (df_get_live_in (bb), regno))
- mark_regno_birth_or_death (regno, true);
+ mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
+ regno, true);
}
#endif
}
@@ -1445,19 +1458,19 @@ contributes_to_priority_p (dep_t dep)
return true;
}
-/* Compute the number of nondebug forward deps of an insn. */
+/* Compute the number of nondebug deps in list LIST for INSN. */
static int
-dep_list_size (rtx insn)
+dep_list_size (rtx insn, sd_list_types_def list)
{
sd_iterator_def sd_it;
dep_t dep;
int dbgcount = 0, nodbgcount = 0;
if (!MAY_HAVE_DEBUG_INSNS)
- return sd_lists_size (insn, SD_LIST_FORW);
+ return sd_lists_size (insn, list);
- FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
+ FOR_EACH_DEP (insn, list, sd_it, dep)
{
if (DEBUG_INSN_P (DEP_CON (dep)))
dbgcount++;
@@ -1465,7 +1478,7 @@ dep_list_size (rtx insn)
nodbgcount++;
}
- gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, SD_LIST_FORW));
+ gcc_assert (dbgcount + nodbgcount == sd_lists_size (insn, list));
return nodbgcount;
}
@@ -1484,7 +1497,7 @@ priority (rtx insn)
{
int this_priority = -1;
- if (dep_list_size (insn) == 0)
+ if (dep_list_size (insn, SD_LIST_FORW) == 0)
/* ??? We should set INSN_PRIORITY to insn_cost when an insn has
some forward deps but all of them are ignored by
contributes_to_priority hook. At the moment we set priority of
@@ -1580,6 +1593,22 @@ do { if ((N_READY) == 2) \
qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); } \
while (0)
+/* For each pressure class CL, set DEATH[CL] to the number of registers
+ in that class that die in INSN. */
+
+static void
+calculate_reg_deaths (rtx insn, int *death)
+{
+ int i;
+ struct reg_use_data *use;
+
+ for (i = 0; i < ira_pressure_classes_num; i++)
+ death[ira_pressure_classes[i]] = 0;
+ for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
+ if (dying_use_p (use))
+ mark_regno_birth_or_death (0, death, use->regno, true);
+}
+
/* Setup info about the current register pressure impact of scheduling
INSN at the current scheduling point. */
static void
@@ -1591,24 +1620,12 @@ setup_insn_reg_pressure_info (rtx insn)
enum reg_class cl;
struct reg_pressure_data *pressure_info;
int *max_reg_pressure;
- struct reg_use_data *use;
static int death[N_REG_CLASSES];
gcc_checking_assert (!DEBUG_INSN_P (insn));
excess_cost_change = 0;
- for (i = 0; i < ira_pressure_classes_num; i++)
- death[ira_pressure_classes[i]] = 0;
- for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
- if (dying_use_p (use))
- {
- cl = sched_regno_pressure_class[use->regno];
- if (use->regno < FIRST_PSEUDO_REGISTER)
- death[cl]++;
- else
- death[cl]
- += ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (use->regno)];
- }
+ calculate_reg_deaths (insn, death);
pressure_info = INSN_REG_PRESSURE (insn);
max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
@@ -1629,7 +1646,765 @@ setup_insn_reg_pressure_info (rtx insn)
}
INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insn) = excess_cost_change;
}
+
+/* This is the first page of code related to SCHED_PRESSURE_MODEL.
+ It tries to make the scheduler take register pressure into account
+ without introducing too many unnecessary stalls. It hooks into the
+ main scheduling algorithm at several points:
+
+ - Before scheduling starts, model_start_schedule constructs a
+ "model schedule" for the current block. This model schedule is
+ chosen solely to keep register pressure down. It does not take the
+ target's pipeline or the original instruction order into account,
+ except as a tie-breaker. It also doesn't work to a particular
+ pressure limit.
+
+ This model schedule gives us an idea of what pressure can be
+ achieved for the block and gives us an example of a schedule that
+ keeps to that pressure. It also makes the final schedule less
+ dependent on the original instruction order. This is important
+ because the original order can either be "wide" (many values live
+ at once, such as in user-scheduled code) or "narrow" (few values
+ live at once, such as after loop unrolling, where several
+ iterations are executed sequentially).
+
+ We do not apply this model schedule to the rtx stream. We simply
+ record it in model_schedule. We also compute the maximum pressure,
+ MP, that was seen during this schedule.
+
+ - Instructions are added to the ready queue even if they require
+ a stall. The length of the stall is instead computed as:
+
+ MAX (INSN_TICK (INSN) - clock_var, 0)
+
+ (= insn_delay). This allows rank_for_schedule to choose between
+ introducing a deliberate stall or increasing pressure.
+
+ - Before sorting the ready queue, model_set_excess_costs assigns
+ a pressure-based cost to each ready instruction in the queue.
+ This is the instruction's INSN_REG_PRESSURE_EXCESS_COST_CHANGE
+ (ECC for short) and is effectively measured in cycles.
+
+ - rank_for_schedule ranks instructions based on:
+
+ ECC (insn) + insn_delay (insn)
+
+ then as:
+
+ insn_delay (insn)
+
+ So, for example, an instruction X1 with an ECC of 1 that can issue
+ now will win over an instruction X0 with an ECC of zero that would
+ introduce a stall of one cycle. However, an instruction X2 with an
+ ECC of 2 that can issue now will lose to both X0 and X1.
+
+ - When an instruction is scheduled, model_recompute updates the model
+ schedule with the new pressures (some of which might now exceed the
+ original maximum pressure MP). model_update_limit_points then searches
+ for the new point of maximum pressure, if not already known. */
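A standalone sketch (not part of the patch) of the two-level key described above, using plain ints in place of GCC insns and an ascending-comparator sign convention rather than rank_for_schedule's own. The worked X0/X1/X2 example falls out of it: X1 {ecc 1, delay 0} ties with X0 {ecc 0, delay 1} on ecc + delay and wins on delay alone, while X2 {ecc 2, delay 0} loses to both.

struct ready_entry { int ecc; int delay; };

static int
model_rank_sketch (const struct ready_entry *a, const struct ready_entry *b)
{
  int diff = (a->ecc + a->delay) - (b->ecc + b->delay);
  if (diff != 0)
    return diff;                 /* smaller combined cost is scheduled first */
  return a->delay - b->delay;    /* then the smaller stall is scheduled first */
}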
+
+/* Used to separate high-verbosity debug information for SCHED_PRESSURE_MODEL
+ from surrounding debug information. */
+#define MODEL_BAR \
+ ";;\t\t+------------------------------------------------------\n"
+
+/* Information about the pressure on a particular register class at a
+ particular point of the model schedule. */
+struct model_pressure_data {
+ /* The pressure at this point of the model schedule, or -1 if the
+ point is associated with an instruction that has already been
+ scheduled. */
+ int ref_pressure;
+
+ /* The maximum pressure during or after this point of the model schedule. */
+ int max_pressure;
+};
+
+/* Per-instruction information that is used while building the model
+ schedule. Here, "schedule" refers to the model schedule rather
+ than the main schedule. */
+struct model_insn_info {
+ /* The instruction itself. */
+ rtx insn;
+
+ /* If this instruction is in model_worklist, these fields link to the
+ previous (higher-priority) and next (lower-priority) instructions
+ in the list. */
+ struct model_insn_info *prev;
+ struct model_insn_info *next;
+
+ /* While constructing the schedule, QUEUE_INDEX describes whether an
+ instruction has already been added to the schedule (QUEUE_SCHEDULED),
+ is in model_worklist (QUEUE_READY), or neither (QUEUE_NOWHERE).
+ old_queue records the value that QUEUE_INDEX had before scheduling
+ started, so that we can restore it once the schedule is complete. */
+ int old_queue;
+
+ /* The relative importance of an unscheduled instruction. Higher
+ values indicate greater importance. */
+ unsigned int model_priority;
+
+ /* The length of the longest path of satisfied true dependencies
+ that leads to this instruction. */
+ unsigned int depth;
+
+ /* The length of the longest path of dependencies of any kind
+ that leads from this instruction. */
+ unsigned int alap;
+
+ /* The number of predecessor nodes that must still be scheduled. */
+ int unscheduled_preds;
+};
+
+/* Information about the pressure limit for a particular register class.
+ This structure is used when applying a model schedule to the main
+ schedule. */
+struct model_pressure_limit {
+ /* The maximum register pressure seen in the original model schedule. */
+ int orig_pressure;
+
+ /* The maximum register pressure seen in the current model schedule
+ (which excludes instructions that have already been scheduled). */
+ int pressure;
+
+ /* The point of the current model schedule at which PRESSURE is first
+ reached. It is set to -1 if the value needs to be recomputed. */
+ int point;
+};
+
+/* Describes a particular way of measuring register pressure. */
+struct model_pressure_group {
+ /* Index PCI describes the maximum pressure on ira_pressure_classes[PCI]. */
+ struct model_pressure_limit limits[N_REG_CLASSES];
+
+ /* Index (POINT * ira_pressure_classes_num + PCI) describes the pressure
+ on register class ira_pressure_classes[PCI] at point POINT of the
+ current model schedule. A POINT of model_num_insns describes the
+ pressure at the end of the schedule. */
+ struct model_pressure_data *model;
+};
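A standalone sketch (not part of the patch) of the row-major layout behind model_pressure_group: one row per point of the model schedule, plus a final row for the end of the schedule, with one column per pressure class.

#include <stdlib.h>

struct sketch_pressure_cell { int ref_pressure; int max_pressure; };

static struct sketch_pressure_cell *
sketch_alloc (int num_insns, int num_classes)
{
  /* Mirrors the (model_num_insns + 1) * ira_pressure_classes_num
     allocation in model_init_pressure_group below.  */
  return calloc ((size_t) (num_insns + 1) * num_classes,
                 sizeof (struct sketch_pressure_cell));
}

static struct sketch_pressure_cell *
sketch_cell (struct sketch_pressure_cell *table, int num_classes,
             int point, int pci)
{
  /* Same indexing as MODEL_PRESSURE_DATA.  */
  return &table[point * num_classes + pci];
}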
+
+/* Index POINT gives the instruction at point POINT of the model schedule.
+ This array doesn't change during main scheduling. */
+static VEC (rtx, heap) *model_schedule;
+
+/* The list of instructions in the model worklist, sorted in order of
+ decreasing priority. */
+static struct model_insn_info *model_worklist;
+
+/* Index I describes the instruction with INSN_LUID I. */
+static struct model_insn_info *model_insns;
+
+/* The number of instructions in the model schedule. */
+static int model_num_insns;
+
+/* The index of the first instruction in model_schedule that hasn't yet been
+ added to the main schedule, or model_num_insns if all of them have. */
+static int model_curr_point;
+
+/* Describes the pressure before each instruction in the model schedule. */
+static struct model_pressure_group model_before_pressure;
+
+/* The first unused model_priority value (as used in model_insn_info). */
+static unsigned int model_next_priority;
+
+
+/* The model_pressure_data for ira_pressure_classes[PCI] in GROUP
+ at point POINT of the model schedule. */
+#define MODEL_PRESSURE_DATA(GROUP, POINT, PCI) \
+ (&(GROUP)->model[(POINT) * ira_pressure_classes_num + (PCI)])
+
+/* The maximum pressure on ira_pressure_classes[PCI] in GROUP at or
+ after point POINT of the model schedule. */
+#define MODEL_MAX_PRESSURE(GROUP, POINT, PCI) \
+ (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->max_pressure)
+
+/* The pressure on ira_pressure_classes[PCI] in GROUP at point POINT
+ of the model schedule. */
+#define MODEL_REF_PRESSURE(GROUP, POINT, PCI) \
+ (MODEL_PRESSURE_DATA (GROUP, POINT, PCI)->ref_pressure)
+
+/* Information about INSN that is used when creating the model schedule. */
+#define MODEL_INSN_INFO(INSN) \
+ (&model_insns[INSN_LUID (INSN)])
+
+/* The instruction at point POINT of the model schedule. */
+#define MODEL_INSN(POINT) \
+ (VEC_index (rtx, model_schedule, POINT))
+
+
+/* Return INSN's index in the model schedule, or model_num_insns if it
+ doesn't belong to that schedule. */
+
+static int
+model_index (rtx insn)
+{
+ if (INSN_MODEL_INDEX (insn) == 0)
+ return model_num_insns;
+ return INSN_MODEL_INDEX (insn) - 1;
+}
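Note on the encoding (an editorial observation, not patch text): model_add_to_schedule later in this patch stores the position biased by one (INSN_MODEL_INDEX (insn) = point + 1), so the default value of zero is free to mean "not in the model schedule" and decodes here to model_num_insns.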
+
+/* Make sure that GROUP->limits is up-to-date for the current point
+ of the model schedule. */
+
+static void
+model_update_limit_points_in_group (struct model_pressure_group *group)
+{
+ int pci, max_pressure, point;
+
+ for (pci = 0; pci < ira_pressure_classes_num; pci++)
+ {
+ /* We may have passed the final point at which the pressure in
+ group->limits[pci].pressure was reached. Update the limit if so. */
+ max_pressure = MODEL_MAX_PRESSURE (group, model_curr_point, pci);
+ group->limits[pci].pressure = max_pressure;
+
+ /* Find the point at which MAX_PRESSURE is first reached. We need
+ to search in three cases:
+
+ - We've already moved past the previous pressure point.
+ In this case we search forward from model_curr_point.
+
+ - We scheduled the previous point of maximum pressure ahead of
+ its position in the model schedule, but doing so didn't bring
+ the pressure point earlier. In this case we search forward
+ from that previous pressure point.
+
+ - Scheduling an instruction early caused the maximum pressure
+ to decrease. In this case we will have set the pressure
+ point to -1, and we search forward from model_curr_point. */
+ point = MAX (group->limits[pci].point, model_curr_point);
+ while (point < model_num_insns
+ && MODEL_REF_PRESSURE (group, point, pci) < max_pressure)
+ point++;
+ group->limits[pci].point = point;
+
+ gcc_assert (MODEL_REF_PRESSURE (group, point, pci) == max_pressure);
+ gcc_assert (MODEL_MAX_PRESSURE (group, point, pci) == max_pressure);
+ }
+}
+
+/* Make sure that all register-pressure limits are up-to-date for the
+ current position in the model schedule. */
+
+static void
+model_update_limit_points (void)
+{
+ model_update_limit_points_in_group (&model_before_pressure);
+}
+
+/* Return the model_index of the last unscheduled use in chain USE
+ outside of USE's instruction. Return -1 if there are no other uses,
+ or model_num_insns if the register is live at the end of the block. */
+
+static int
+model_last_use_except (struct reg_use_data *use)
+{
+ struct reg_use_data *next;
+ int last, index;
+
+ last = -1;
+ for (next = use->next_regno_use; next != use; next = next->next_regno_use)
+ if (NONDEBUG_INSN_P (next->insn)
+ && QUEUE_INDEX (next->insn) != QUEUE_SCHEDULED)
+ {
+ index = model_index (next->insn);
+ if (index == model_num_insns)
+ return model_num_insns;
+ if (last < index)
+ last = index;
+ }
+ return last;
+}
+
+/* An instruction with model_index POINT has just been scheduled, and it
+ adds DELTA to the pressure on ira_pressure_classes[PCI] after POINT - 1.
+ Update MODEL_REF_PRESSURE (GROUP, POINT, PCI) and
+ MODEL_MAX_PRESSURE (GROUP, POINT, PCI) accordingly. */
+
+static void
+model_start_update_pressure (struct model_pressure_group *group,
+ int point, int pci, int delta)
+{
+ int next_max_pressure;
+
+ if (point == model_num_insns)
+ {
+ /* The instruction wasn't part of the model schedule; it was moved
+ from a different block. Update the pressure for the end of
+ the model schedule. */
+ MODEL_REF_PRESSURE (group, point, pci) += delta;
+ MODEL_MAX_PRESSURE (group, point, pci) += delta;
+ }
+ else
+ {
+ /* Record that this instruction has been scheduled. Nothing now
+ changes between POINT and POINT + 1, so get the maximum pressure
+ from the latter. If the maximum pressure decreases, the new
+ pressure point may be before POINT. */
+ MODEL_REF_PRESSURE (group, point, pci) = -1;
+ next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
+ if (MODEL_MAX_PRESSURE (group, point, pci) > next_max_pressure)
+ {
+ MODEL_MAX_PRESSURE (group, point, pci) = next_max_pressure;
+ if (group->limits[pci].point == point)
+ group->limits[pci].point = -1;
+ }
+ }
+}
+
+/* Record that scheduling a later instruction has changed the pressure
+ at point POINT of the model schedule by DELTA (which might be 0).
+ Update GROUP accordingly. Return nonzero if these changes might
+ trigger changes to previous points as well. */
+
+static int
+model_update_pressure (struct model_pressure_group *group,
+ int point, int pci, int delta)
+{
+ int ref_pressure, max_pressure, next_max_pressure;
+
+ /* If POINT hasn't yet been scheduled, update its pressure. */
+ ref_pressure = MODEL_REF_PRESSURE (group, point, pci);
+ if (ref_pressure >= 0 && delta != 0)
+ {
+ ref_pressure += delta;
+ MODEL_REF_PRESSURE (group, point, pci) = ref_pressure;
+
+ /* Check whether the maximum pressure in the overall schedule
+ has increased. (This means that the MODEL_MAX_PRESSURE of
+ every point <= POINT will need to increase too; see below.) */
+ if (group->limits[pci].pressure < ref_pressure)
+ group->limits[pci].pressure = ref_pressure;
+
+ /* If we are at maximum pressure, and the maximum pressure
+ point was previously unknown or later than POINT,
+ bring it forward. */
+ if (group->limits[pci].pressure == ref_pressure
+ && !IN_RANGE (group->limits[pci].point, 0, point))
+ group->limits[pci].point = point;
+
+ /* If POINT used to be the point of maximum pressure, but isn't
+ any longer, we need to recalculate it using a forward walk. */
+ if (group->limits[pci].pressure > ref_pressure
+ && group->limits[pci].point == point)
+ group->limits[pci].point = -1;
+ }
+
+ /* Update the maximum pressure at POINT. Changes here might also
+ affect the maximum pressure at POINT - 1. */
+ next_max_pressure = MODEL_MAX_PRESSURE (group, point + 1, pci);
+ max_pressure = MAX (ref_pressure, next_max_pressure);
+ if (MODEL_MAX_PRESSURE (group, point, pci) != max_pressure)
+ {
+ MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
+ return 1;
+ }
+ return 0;
+}
+
+/* INSN has just been scheduled. Update the model schedule accordingly. */
+
+static void
+model_recompute (rtx insn)
+{
+ struct {
+ int last_use;
+ int regno;
+ } uses[FIRST_PSEUDO_REGISTER + MAX_RECOG_OPERANDS];
+ struct reg_use_data *use;
+ struct reg_pressure_data *reg_pressure;
+ int delta[N_REG_CLASSES];
+ int pci, point, mix, new_last, cl, ref_pressure, queue;
+ unsigned int i, num_uses, num_pending_births;
+ bool print_p;
+
+ /* The destinations of INSN were previously live from POINT onwards, but are
+ now live from model_curr_point onwards. Set up DELTA accordingly. */
+ point = model_index (insn);
+ reg_pressure = INSN_REG_PRESSURE (insn);
+ for (pci = 0; pci < ira_pressure_classes_num; pci++)
+ {
+ cl = ira_pressure_classes[pci];
+ delta[cl] = reg_pressure[pci].set_increase;
+ }
+
+ /* Record which registers previously died at POINT, but which now die
+ before POINT. Adjust DELTA so that it represents the effect of
+ this change after POINT - 1. Set NUM_PENDING_BIRTHS to the number of
+ registers that will be born in the range [model_curr_point, POINT). */
+ num_uses = 0;
+ num_pending_births = 0;
+ for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
+ {
+ new_last = model_last_use_except (use);
+ if (new_last < point)
+ {
+ gcc_assert (num_uses < ARRAY_SIZE (uses));
+ uses[num_uses].last_use = new_last;
+ uses[num_uses].regno = use->regno;
+ /* This register is no longer live after POINT - 1. */
+ mark_regno_birth_or_death (NULL, delta, use->regno, false);
+ num_uses++;
+ if (new_last >= 0)
+ num_pending_births++;
+ }
+ }
+
+ /* Update the MODEL_REF_PRESSURE and MODEL_MAX_PRESSURE for POINT.
+ Also set each group pressure limit for POINT. */
+ for (pci = 0; pci < ira_pressure_classes_num; pci++)
+ {
+ cl = ira_pressure_classes[pci];
+ model_start_update_pressure (&model_before_pressure,
+ point, pci, delta[cl]);
+ }
+
+ /* Walk the model schedule backwards, starting immediately before POINT. */
+ print_p = false;
+ if (point != model_curr_point)
+ do
+ {
+ point--;
+ insn = MODEL_INSN (point);
+ queue = QUEUE_INDEX (insn);
+
+ if (queue != QUEUE_SCHEDULED)
+ {
+ /* DELTA describes the effect of the move on the register pressure
+ after POINT. Make it describe the effect on the pressure
+ before POINT. */
+ i = 0;
+ while (i < num_uses)
+ {
+ if (uses[i].last_use == point)
+ {
+ /* This register is now live again. */
+ mark_regno_birth_or_death (NULL, delta,
+ uses[i].regno, true);
+
+ /* Remove this use from the array. */
+ uses[i] = uses[num_uses - 1];
+ num_uses--;
+ num_pending_births--;
+ }
+ else
+ i++;
+ }
+
+ if (sched_verbose >= 5)
+ {
+ char buf[2048];
+
+ if (!print_p)
+ {
+ fprintf (sched_dump, MODEL_BAR);
+ fprintf (sched_dump, ";;\t\t| New pressure for model"
+ " schedule\n");
+ fprintf (sched_dump, MODEL_BAR);
+ print_p = true;
+ }
+
+ print_pattern (buf, PATTERN (insn), 0);
+ fprintf (sched_dump, ";;\t\t| %3d %4d %-30s ",
+ point, INSN_UID (insn), buf);
+ for (pci = 0; pci < ira_pressure_classes_num; pci++)
+ {
+ cl = ira_pressure_classes[pci];
+ ref_pressure = MODEL_REF_PRESSURE (&model_before_pressure,
+ point, pci);
+ fprintf (sched_dump, " %s:[%d->%d]",
+ reg_class_names[ira_pressure_classes[pci]],
+ ref_pressure, ref_pressure + delta[cl]);
+ }
+ fprintf (sched_dump, "\n");
+ }
+ }
+
+ /* Adjust the pressure at POINT. Set MIX to nonzero if POINT - 1
+ might have changed as well. */
+ mix = num_pending_births;
+ for (pci = 0; pci < ira_pressure_classes_num; pci++)
+ {
+ cl = ira_pressure_classes[pci];
+ mix |= delta[cl];
+ mix |= model_update_pressure (&model_before_pressure,
+ point, pci, delta[cl]);
+ }
+ }
+ while (mix && point > model_curr_point);
+
+ if (print_p)
+ fprintf (sched_dump, MODEL_BAR);
+}
+
+/* model_spill_cost (CL, P, P') returns the cost of increasing the
+ pressure on CL from P to P'. We use this to calculate a "base ECC",
+ baseECC (CL, X), for each pressure class CL and each instruction X.
+ Supposing X changes the pressure on CL from P to P', and that the
+ maximum pressure on CL in the current model schedule is MP', then:
+
+ * if X occurs before or at the next point of maximum pressure in
+ the model schedule and P' > MP', then:
+
+ baseECC (CL, X) = model_spill_cost (CL, MP, P')
+
+ The idea is that the pressure after scheduling a fixed set of
+ instructions -- in this case, the set up to and including the
+ next maximum pressure point -- is going to be the same regardless
+ of the order; we simply want to keep the intermediate pressure
+ under control. Thus X has a cost of zero unless scheduling it
+ now would exceed MP'.
+
+ If all increases in the set are by the same amount, no zero-cost
+ instruction will ever cause the pressure to exceed MP'. However,
+ if X is instead moved past an instruction X' with pressure in the
+ range (MP' - (P' - P), MP'), the pressure at X' will increase
+ beyond MP'. Since baseECC is very much a heuristic anyway,
+ it doesn't seem worth the overhead of tracking cases like these.
+
+ The cost of exceeding MP' is always based on the original maximum
+ pressure MP. This is so that going 2 registers over the original
+ limit has the same cost regardless of whether it comes from two
+ separate +1 deltas or from a single +2 delta.
+
+ * if X occurs after the next point of maximum pressure in the model
+ schedule and P' > P, then:
+
+ baseECC (CL, X) = model_spill_cost (CL, MP, MP' + (P' - P))
+
+ That is, if we move X forward across a point of maximum pressure,
+ and if X increases the pressure by P' - P, then we conservatively
+ assume that scheduling X next would increase the maximum pressure
+ by P' - P. Again, the cost of doing this is based on the original
+ maximum pressure MP, for the same reason as above.
+
+ * if P' < P, P > MP, and X occurs at or after the next point of
+ maximum pressure, then:
+
+ baseECC (CL, X) = -model_spill_cost (CL, MAX (MP, P'), P)
+
+ That is, if we have already exceeded the original maximum pressure MP,
+ and if X might reduce the maximum pressure again -- or at least push
+ it further back, and thus allow more scheduling freedom -- it is given
+ a negative cost to reflect the improvement.
+
+ * otherwise,
+
+ baseECC (CL, X) = 0
+
+ In this case, X is not expected to affect the maximum pressure MP',
+ so it has zero cost.
+
+ We then create a combined value baseECC (X) that is the sum of
+ baseECC (CL, X) for each pressure class CL.
+
+ baseECC (X) could itself be used as the ECC value described above.
+ However, this is often too conservative, in the sense that it
+ tends to make high-priority instructions that increase pressure
+ wait too long in cases where introducing a spill would be better.
+ For this reason the final ECC is a priority-adjusted form of
+ baseECC (X). Specifically, we calculate:
+
+ P (X) = INSN_PRIORITY (X) - insn_delay (X) - baseECC (X)
+ baseP = MAX { P (X) | baseECC (X) <= 0 }
+
+ Then:
+
+ ECC (X) = MAX (MIN (baseP - P (X), baseECC (X)), 0)
+
+ Thus an instruction's effect on pressure is ignored if it has a high
+ enough priority relative to the ones that don't increase pressure.
+ Negative values of baseECC (X) do not increase the priority of X
+ itself, but they do make it harder for other instructions to
+ increase the pressure further.
+
+ This pressure cost is deliberately timid. The intention has been
+ to choose a heuristic that rarely interferes with the normal list
+ scheduler in cases where that scheduler would produce good code.
+ We simply want to curb some of its worst excesses. */
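A worked example with purely illustrative numbers: suppose class CL has 8 available registers, the original maximum pressure is MP = 10 and the current maximum is also MP' = 10. An instruction X scheduled before the next maximum-pressure point that raises the pressure from 9 to 11 has P' = 11 > MP', so baseECC (CL, X) = model_spill_cost (CL, 10, 11) = 1. With INSN_PRIORITY (X) = 6 and insn_delay (X) = 0, P (X) = 6 - 0 - 1 = 5; assuming the cheap instructions give baseP = 7, ECC (X) = MAX (MIN (7 - 5, 1), 0) = 1. Had X's priority been 9 instead, P (X) would be 8 and ECC (X) = MAX (MIN (-1, 1), 0) = 0, i.e. the pressure cost is waived for the higher-priority instruction.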
+
+/* Return the cost of increasing the pressure in class CL from FROM to TO.
+
+ Here we use the very simplistic cost model that every register above
+ ira_available_class_regs[CL] has a spill cost of 1. We could use other
+ measures instead, such as one based on MEMORY_MOVE_COST. However:
+
+ (1) In order for an instruction to be scheduled, the higher cost
+ would need to be justified in a single saving of that many stalls.
+ This is overly pessimistic, because the benefit of spilling is
+ often to avoid a sequence of several short stalls rather than
+ a single long one.
+
+ (2) The cost is still arbitrary. Because we are not allocating
+ registers during scheduling, we have no way of knowing for
+ sure how many memory accesses will be required by each spill,
+ where the spills will be placed within the block, or even
+ which block(s) will contain the spills.
+
+ So a higher cost than 1 is often too conservative in practice,
+ forcing blocks to contain unnecessary stalls instead of spill code.
+ The simple cost below seems to be the best compromise. It reduces
+ the interference with the normal list scheduler, which helps make
+ it more suitable for a default-on option. */
+
+static int
+model_spill_cost (int cl, int from, int to)
+{
+ from = MAX (from, ira_available_class_regs[cl]);
+ return MAX (to, from) - from;
+}
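For example, with ira_available_class_regs[CL] = 4 (illustrative), model_spill_cost (CL, 3, 6) = 2: the starting pressure is clamped up to 4 and only the two registers above that are charged. Similarly model_spill_cost (CL, 5, 6) = 1, while model_spill_cost (CL, 6, 5) = 0, since decreases are never charged.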
+
+/* Return baseECC (ira_pressure_classes[PCI], POINT), given that
+ P = curr_reg_pressure[ira_pressure_classes[PCI]] and that
+ P' = P + DELTA. */
+
+static int
+model_excess_group_cost (struct model_pressure_group *group,
+ int point, int pci, int delta)
+{
+ int pressure, cl;
+
+ cl = ira_pressure_classes[pci];
+ if (delta < 0 && point >= group->limits[pci].point)
+ {
+ pressure = MAX (group->limits[pci].orig_pressure,
+ curr_reg_pressure[cl] + delta);
+ return -model_spill_cost (cl, pressure, curr_reg_pressure[cl]);
+ }
+
+ if (delta > 0)
+ {
+ if (point > group->limits[pci].point)
+ pressure = group->limits[pci].pressure + delta;
+ else
+ pressure = curr_reg_pressure[cl] + delta;
+
+ if (pressure > group->limits[pci].pressure)
+ return model_spill_cost (cl, group->limits[pci].orig_pressure,
+ pressure);
+ }
+
+ return 0;
+}
+
+/* Return baseECC (MODEL_INSN (INSN)). Dump the costs to sched_dump
+ if PRINT_P. */
+
+static int
+model_excess_cost (rtx insn, bool print_p)
+{
+ int point, pci, cl, cost, this_cost, delta;
+ struct reg_pressure_data *insn_reg_pressure;
+ int insn_death[N_REG_CLASSES];
+
+ calculate_reg_deaths (insn, insn_death);
+ point = model_index (insn);
+ insn_reg_pressure = INSN_REG_PRESSURE (insn);
+ cost = 0;
+
+ if (print_p)
+ fprintf (sched_dump, ";;\t\t| %3d %4d | %4d %+3d |", point,
+ INSN_UID (insn), INSN_PRIORITY (insn), insn_delay (insn));
+
+ /* Sum up the individual costs for each register class. */
+ for (pci = 0; pci < ira_pressure_classes_num; pci++)
+ {
+ cl = ira_pressure_classes[pci];
+ delta = insn_reg_pressure[pci].set_increase - insn_death[cl];
+ this_cost = model_excess_group_cost (&model_before_pressure,
+ point, pci, delta);
+ cost += this_cost;
+ if (print_p)
+ fprintf (sched_dump, " %s:[%d base cost %d]",
+ reg_class_names[cl], delta, this_cost);
+ }
+
+ if (print_p)
+ fprintf (sched_dump, "\n");
+
+ return cost;
+}
+
+/* Dump the next points of maximum pressure for GROUP. */
+
+static void
+model_dump_pressure_points (struct model_pressure_group *group)
+{
+ int pci, cl;
+
+ fprintf (sched_dump, ";;\t\t| pressure points");
+ for (pci = 0; pci < ira_pressure_classes_num; pci++)
+ {
+ cl = ira_pressure_classes[pci];
+ fprintf (sched_dump, " %s:[%d->%d at ", reg_class_names[cl],
+ curr_reg_pressure[cl], group->limits[pci].pressure);
+ if (group->limits[pci].point < model_num_insns)
+ fprintf (sched_dump, "%d:%d]", group->limits[pci].point,
+ INSN_UID (MODEL_INSN (group->limits[pci].point)));
+ else
+ fprintf (sched_dump, "end]");
+ }
+ fprintf (sched_dump, "\n");
+}
+
+/* Set INSN_REG_PRESSURE_EXCESS_COST_CHANGE for INSNS[0...COUNT-1]. */
+
+static void
+model_set_excess_costs (rtx *insns, int count)
+{
+ int i, cost, priority_base, priority;
+ bool print_p;
+
+ /* Record the baseECC value for each instruction in the model schedule,
+ except that negative costs are converted to zero ones now rather than
+ later. Do not assign a cost to debug instructions, since they must
+ not change code-generation decisions. Experiments suggest we also
+ get better results by not assigning a cost to instructions from
+ a different block.
+
+ Set PRIORITY_BASE to baseP in the block comment above. This is the
+ maximum priority of the "cheap" instructions, which should always
+ include the next model instruction. */
+ priority_base = 0;
+ print_p = false;
+ for (i = 0; i < count; i++)
+ if (INSN_MODEL_INDEX (insns[i]))
+ {
+ if (sched_verbose >= 6 && !print_p)
+ {
+ fprintf (sched_dump, MODEL_BAR);
+ fprintf (sched_dump, ";;\t\t| Pressure costs for ready queue\n");
+ model_dump_pressure_points (&model_before_pressure);
+ fprintf (sched_dump, MODEL_BAR);
+ print_p = true;
+ }
+ cost = model_excess_cost (insns[i], print_p);
+ if (cost <= 0)
+ {
+ priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]) - cost;
+ priority_base = MAX (priority_base, priority);
+ cost = 0;
+ }
+ INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = cost;
+ }
+ if (print_p)
+ fprintf (sched_dump, MODEL_BAR);
+
+ /* Use MAX (baseECC, 0) and baseP to calculate ECC for each
+ instruction. */
+ for (i = 0; i < count; i++)
+ {
+ cost = INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]);
+ priority = INSN_PRIORITY (insns[i]) - insn_delay (insns[i]);
+ if (cost > 0 && priority > priority_base)
+ {
+ cost += priority_base - priority;
+ INSN_REG_PRESSURE_EXCESS_COST_CHANGE (insns[i]) = MAX (cost, 0);
+ }
+ }
+}
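Tracing the second loop with illustrative numbers: suppose the first pass found priority_base = 8. A ready instruction with a recorded cost of 3, INSN_PRIORITY 12 and a delay of 1 has priority 11 > 8, so its cost becomes MAX (3 + 8 - 11, 0) = 0 and its pressure effect is ignored. A lower-priority instruction with the same cost of 3 but priority 6 keeps its cost of 3.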
+
/* Returns a positive value if x is preferred; returns a negative value if
y is preferred. Should never return 0, since that will make the sort
unstable. */
@@ -1661,23 +2436,20 @@ rank_for_schedule (const void *x, const void *y)
/* Make sure that priority of TMP and TMP2 are initialized. */
gcc_assert (INSN_PRIORITY_KNOWN (tmp) && INSN_PRIORITY_KNOWN (tmp2));
- if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
+ if (sched_pressure != SCHED_PRESSURE_NONE)
{
int diff;
/* Prefer insn whose scheduling results in the smallest register
pressure excess. */
if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
- + (INSN_TICK (tmp) > clock_var
- ? INSN_TICK (tmp) - clock_var : 0)
+ + insn_delay (tmp)
- INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2)
- - (INSN_TICK (tmp2) > clock_var
- ? INSN_TICK (tmp2) - clock_var : 0))) != 0)
+ - insn_delay (tmp2))) != 0)
return diff;
}
-
- if (sched_pressure == SCHED_PRESSURE_WEIGHTED
+ if (sched_pressure != SCHED_PRESSURE_NONE
&& (INSN_TICK (tmp2) > clock_var || INSN_TICK (tmp) > clock_var))
{
if (INSN_TICK (tmp) <= clock_var)
@@ -1769,11 +2541,22 @@ rank_for_schedule (const void *x, const void *y)
return val;
}
+ /* Prefer instructions that occur earlier in the model schedule. */
+ if (sched_pressure == SCHED_PRESSURE_MODEL)
+ {
+ int diff;
+
+ diff = model_index (tmp) - model_index (tmp2);
+ if (diff != 0)
+ return diff;
+ }
+
/* Prefer the insn which has more later insns that depend on it.
This gives the scheduler more freedom when scheduling later
instructions at the expense of added register pressure. */
- val = (dep_list_size (tmp2) - dep_list_size (tmp));
+ val = (dep_list_size (tmp2, SD_LIST_FORW)
+ - dep_list_size (tmp, SD_LIST_FORW));
if (flag_sched_dep_count_heuristic && val != 0)
return val;
@@ -2001,6 +2784,9 @@ ready_sort (struct ready_list *ready)
if (!DEBUG_INSN_P (first[i]))
setup_insn_reg_pressure_info (first[i]);
}
+ if (sched_pressure == SCHED_PRESSURE_MODEL
+ && model_curr_point < model_num_insns)
+ model_set_excess_costs (first, ready->n_ready);
SCHED_SORT (first, ready->n_ready);
}
@@ -2063,10 +2849,12 @@ update_register_pressure (rtx insn)
gcc_checking_assert (!DEBUG_INSN_P (insn));
for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
- if (dying_use_p (use) && bitmap_bit_p (curr_reg_live, use->regno))
- mark_regno_birth_or_death (use->regno, false);
+ if (dying_use_p (use))
+ mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
+ use->regno, false);
for (set = INSN_REG_SET_LIST (insn); set != NULL; set = set->next_insn_set)
- mark_regno_birth_or_death (set->regno, true);
+ mark_regno_birth_or_death (curr_reg_live, curr_reg_pressure,
+ set->regno, true);
}
/* Set up or update (if UPDATE_P) max register pressure (see its
@@ -2188,6 +2976,613 @@ check_clobbered_conditions (rtx insn)
}
}
+/* Return (in order):
+
+ - positive if INSN adversely affects the pressure on one
+ register class
+
+ - negative if INSN reduces the pressure on one register class
+
+ - 0 if INSN doesn't affect the pressure on any register class. */
+
+static int
+model_classify_pressure (struct model_insn_info *insn)
+{
+ struct reg_pressure_data *reg_pressure;
+ int death[N_REG_CLASSES];
+ int pci, cl, sum;
+
+ calculate_reg_deaths (insn->insn, death);
+ reg_pressure = INSN_REG_PRESSURE (insn->insn);
+ sum = 0;
+ for (pci = 0; pci < ira_pressure_classes_num; pci++)
+ {
+ cl = ira_pressure_classes[pci];
+ if (death[cl] < reg_pressure[pci].set_increase)
+ return 1;
+ sum += reg_pressure[pci].set_increase - death[cl];
+ }
+ return sum;
+}
+
+/* Return true if INSN1 should come before INSN2 in the model schedule. */
+
+static int
+model_order_p (struct model_insn_info *insn1, struct model_insn_info *insn2)
+{
+ unsigned int height1, height2;
+ unsigned int priority1, priority2;
+
+ /* Prefer instructions with a higher model priority. */
+ if (insn1->model_priority != insn2->model_priority)
+ return insn1->model_priority > insn2->model_priority;
+
+ /* Combine the length of the longest path of satisfied true dependencies
+ that leads to each instruction (depth) with the length of the longest
+ path of any dependencies that leads from the instruction (alap).
+ Prefer instructions with the greatest combined length. If the combined
+ lengths are equal, prefer instructions with the greatest depth.
+
+ The idea is that, if we have a set S of "equal" instructions that each
+ have ALAP value X, and we pick one such instruction I, any true-dependent
+ successors of I that have ALAP value X - 1 should be preferred over S.
+ This encourages the schedule to be "narrow" rather than "wide".
+ However, if I is a low-priority instruction that we decided to
+ schedule because of its model_classify_pressure, and if there
+ is a set of higher-priority instructions T, the aforementioned
+ successors of I should not have the edge over T. */
+ height1 = insn1->depth + insn1->alap;
+ height2 = insn2->depth + insn2->alap;
+ if (height1 != height2)
+ return height1 > height2;
+ if (insn1->depth != insn2->depth)
+ return insn1->depth > insn2->depth;
+
+ /* We have no real preference between INSN1 and INSN2 as far as attempts
+ to reduce pressure go. Prefer instructions with higher priorities. */
+ priority1 = INSN_PRIORITY (insn1->insn);
+ priority2 = INSN_PRIORITY (insn2->insn);
+ if (priority1 != priority2)
+ return priority1 > priority2;
+
+ /* Use the original rtl sequence as a tie-breaker. */
+ return insn1 < insn2;
+}
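An illustrative comparison: with equal model_priority, an instruction with depth 3 and alap 5 (combined height 8) is preferred over one with depth 4 and alap 3 (height 7); if both heights were 8, the one with the greater depth would win; only when depth and alap also tie does INSN_PRIORITY decide, and finally the original rtl order.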
+
+/* Add INSN to the model worklist immediately after PREV. Add it to the
+ beginning of the list if PREV is null. */
+
+static void
+model_add_to_worklist_at (struct model_insn_info *insn,
+ struct model_insn_info *prev)
+{
+ gcc_assert (QUEUE_INDEX (insn->insn) == QUEUE_NOWHERE);
+ QUEUE_INDEX (insn->insn) = QUEUE_READY;
+
+ insn->prev = prev;
+ if (prev)
+ {
+ insn->next = prev->next;
+ prev->next = insn;
+ }
+ else
+ {
+ insn->next = model_worklist;
+ model_worklist = insn;
+ }
+ if (insn->next)
+ insn->next->prev = insn;
+}
+
+/* Remove INSN from the model worklist. */
+
+static void
+model_remove_from_worklist (struct model_insn_info *insn)
+{
+ gcc_assert (QUEUE_INDEX (insn->insn) == QUEUE_READY);
+ QUEUE_INDEX (insn->insn) = QUEUE_NOWHERE;
+
+ if (insn->prev)
+ insn->prev->next = insn->next;
+ else
+ model_worklist = insn->next;
+ if (insn->next)
+ insn->next->prev = insn->prev;
+}
+
+/* Add INSN to the model worklist. Start looking for a suitable position
+ between neighbors PREV and NEXT, testing at most MAX_SCHED_READY_INSNS
+ insns either side. A null PREV indicates the beginning of the list and
+ a null NEXT indicates the end. */
+
+static void
+model_add_to_worklist (struct model_insn_info *insn,
+ struct model_insn_info *prev,
+ struct model_insn_info *next)
+{
+ int count;
+
+ count = MAX_SCHED_READY_INSNS;
+ if (count > 0 && prev && model_order_p (insn, prev))
+ do
+ {
+ count--;
+ prev = prev->prev;
+ }
+ while (count > 0 && prev && model_order_p (insn, prev));
+ else
+ while (count > 0 && next && model_order_p (next, insn))
+ {
+ count--;
+ prev = next;
+ next = next->next;
+ }
+ model_add_to_worklist_at (insn, prev);
+}
+
+/* INSN may now have a higher priority (in the model_order_p sense)
+ than before. Move it up the worklist if necessary. */
+
+static void
+model_promote_insn (struct model_insn_info *insn)
+{
+ struct model_insn_info *prev;
+ int count;
+
+ prev = insn->prev;
+ count = MAX_SCHED_READY_INSNS;
+ while (count > 0 && prev && model_order_p (insn, prev))
+ {
+ count--;
+ prev = prev->prev;
+ }
+ if (prev != insn->prev)
+ {
+ model_remove_from_worklist (insn);
+ model_add_to_worklist_at (insn, prev);
+ }
+}
+
+/* Add INSN to the end of the model schedule. */
+
+static void
+model_add_to_schedule (rtx insn)
+{
+ unsigned int point;
+
+ gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
+ QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
+
+ point = VEC_length (rtx, model_schedule);
+ VEC_quick_push (rtx, model_schedule, insn);
+ INSN_MODEL_INDEX (insn) = point + 1;
+}
+
+/* Analyze the instructions that are to be scheduled, setting up
+ MODEL_INSN_INFO (...) and model_num_insns accordingly. Add ready
+ instructions to model_worklist. */
+
+static void
+model_analyze_insns (void)
+{
+ rtx start, end, iter;
+ sd_iterator_def sd_it;
+ dep_t dep;
+ struct model_insn_info *insn, *con;
+
+ model_num_insns = 0;
+ start = PREV_INSN (current_sched_info->next_tail);
+ end = current_sched_info->prev_head;
+ for (iter = start; iter != end; iter = PREV_INSN (iter))
+ if (NONDEBUG_INSN_P (iter))
+ {
+ insn = MODEL_INSN_INFO (iter);
+ insn->insn = iter;
+ FOR_EACH_DEP (iter, SD_LIST_FORW, sd_it, dep)
+ {
+ con = MODEL_INSN_INFO (DEP_CON (dep));
+ if (con->insn && insn->alap < con->alap + 1)
+ insn->alap = con->alap + 1;
+ }
+
+ insn->old_queue = QUEUE_INDEX (iter);
+ QUEUE_INDEX (iter) = QUEUE_NOWHERE;
+
+ insn->unscheduled_preds = dep_list_size (iter, SD_LIST_HARD_BACK);
+ if (insn->unscheduled_preds == 0)
+ model_add_to_worklist (insn, NULL, model_worklist);
+
+ model_num_insns++;
+ }
+}
+
+/* The global state describes the register pressure at the start of the
+ model schedule. Initialize GROUP accordingly. */
+
+static void
+model_init_pressure_group (struct model_pressure_group *group)
+{
+ int pci, cl;
+
+ for (pci = 0; pci < ira_pressure_classes_num; pci++)
+ {
+ cl = ira_pressure_classes[pci];
+ group->limits[pci].pressure = curr_reg_pressure[cl];
+ group->limits[pci].point = 0;
+ }
+ /* Use index model_num_insns to record the state after the last
+ instruction in the model schedule. */
+ group->model = XNEWVEC (struct model_pressure_data,
+ (model_num_insns + 1) * ira_pressure_classes_num);
+}
+
+/* Record that MODEL_REF_PRESSURE (GROUP, POINT, PCI) is PRESSURE.
+ Update the maximum pressure for the whole schedule. */
+
+static void
+model_record_pressure (struct model_pressure_group *group,
+ int point, int pci, int pressure)
+{
+ MODEL_REF_PRESSURE (group, point, pci) = pressure;
+ if (group->limits[pci].pressure < pressure)
+ {
+ group->limits[pci].pressure = pressure;
+ group->limits[pci].point = point;
+ }
+}
+
+/* INSN has just been added to the end of the model schedule. Record its
+ register-pressure information. */
+
+static void
+model_record_pressures (struct model_insn_info *insn)
+{
+ struct reg_pressure_data *reg_pressure;
+ int point, pci, cl, delta;
+ int death[N_REG_CLASSES];
+
+ point = model_index (insn->insn);
+ if (sched_verbose >= 2)
+ {
+ char buf[2048];
+
+ if (point == 0)
+ {
+ fprintf (sched_dump, "\n;;\tModel schedule:\n;;\n");
+ fprintf (sched_dump, ";;\t| idx insn | mpri hght dpth prio |\n");
+ }
+ print_pattern (buf, PATTERN (insn->insn), 0);
+ fprintf (sched_dump, ";;\t| %3d %4d | %4d %4d %4d %4d | %-30s ",
+ point, INSN_UID (insn->insn), insn->model_priority,
+ insn->depth + insn->alap, insn->depth,
+ INSN_PRIORITY (insn->insn), buf);
+ }
+ calculate_reg_deaths (insn->insn, death);
+ reg_pressure = INSN_REG_PRESSURE (insn->insn);
+ for (pci = 0; pci < ira_pressure_classes_num; pci++)
+ {
+ cl = ira_pressure_classes[pci];
+ delta = reg_pressure[pci].set_increase - death[cl];
+ if (sched_verbose >= 2)
+ fprintf (sched_dump, " %s:[%d,%+d]", reg_class_names[cl],
+ curr_reg_pressure[cl], delta);
+ model_record_pressure (&model_before_pressure, point, pci,
+ curr_reg_pressure[cl]);
+ }
+ if (sched_verbose >= 2)
+ fprintf (sched_dump, "\n");
+}
+
+/* All instructions have been added to the model schedule. Record the
+ final register pressure in GROUP and set up all MODEL_MAX_PRESSUREs. */
+
+static void
+model_record_final_pressures (struct model_pressure_group *group)
+{
+ int point, pci, max_pressure, ref_pressure, cl;
+
+ for (pci = 0; pci < ira_pressure_classes_num; pci++)
+ {
+ /* Record the final pressure for this class. */
+ cl = ira_pressure_classes[pci];
+ point = model_num_insns;
+ ref_pressure = curr_reg_pressure[cl];
+ model_record_pressure (group, point, pci, ref_pressure);
+
+ /* Record the original maximum pressure. */
+ group->limits[pci].orig_pressure = group->limits[pci].pressure;
+
+ /* Update the MODEL_MAX_PRESSURE for every point of the schedule. */
+ max_pressure = ref_pressure;
+ MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
+ while (point > 0)
+ {
+ point--;
+ ref_pressure = MODEL_REF_PRESSURE (group, point, pci);
+ max_pressure = MAX (max_pressure, ref_pressure);
+ MODEL_MAX_PRESSURE (group, point, pci) = max_pressure;
+ }
+ }
+}
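A standalone sketch (not part of the patch) of the backward running-max pass performed above, on plain int arrays: each point's maximum pressure is the larger of its own reference pressure and the maximum of every later point.

static void
sketch_fill_max_pressure (const int *ref, int *max, int num_points,
                          int final_pressure)
{
  int point, running;

  running = final_pressure;
  max[num_points] = running;        /* state after the last instruction */
  for (point = num_points - 1; point >= 0; point--)
    {
      if (ref[point] > running)
        running = ref[point];
      max[point] = running;
    }
}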
+
+/* Update all successors of INSN, given that INSN has just been scheduled. */
+
+static void
+model_add_successors_to_worklist (struct model_insn_info *insn)
+{
+ sd_iterator_def sd_it;
+ struct model_insn_info *con;
+ dep_t dep;
+
+ FOR_EACH_DEP (insn->insn, SD_LIST_FORW, sd_it, dep)
+ {
+ con = MODEL_INSN_INFO (DEP_CON (dep));
+ /* Ignore debug instructions, and instructions from other blocks. */
+ if (con->insn)
+ {
+ con->unscheduled_preds--;
+
+ /* Update the depth field of each true-dependent successor.
+ Increasing the depth gives them a higher priority than
+ before. */
+ if (DEP_TYPE (dep) == REG_DEP_TRUE && con->depth < insn->depth + 1)
+ {
+ con->depth = insn->depth + 1;
+ if (QUEUE_INDEX (con->insn) == QUEUE_READY)
+ model_promote_insn (con);
+ }
+
+ /* If this is a true dependency, or if there are no remaining
+ dependencies for CON (meaning that CON only had non-true
+ dependencies), make sure that CON is on the worklist.
+ We don't bother otherwise because it would tend to fill the
+ worklist with a lot of low-priority instructions that are not
+ yet ready to issue. */
+ if ((con->depth > 0 || con->unscheduled_preds == 0)
+ && QUEUE_INDEX (con->insn) == QUEUE_NOWHERE)
+ model_add_to_worklist (con, insn, insn->next);
+ }
+ }
+}
+
+/* Give INSN a higher priority than any current instruction, then give
+ unscheduled predecessors of INSN a higher priority still. If any of
+ those predecessors are not on the model worklist, do the same for their
+ predecessors, and so on. */
+
+static void
+model_promote_predecessors (struct model_insn_info *insn)
+{
+ struct model_insn_info *pro, *first;
+ sd_iterator_def sd_it;
+ dep_t dep;
+
+ if (sched_verbose >= 7)
+ fprintf (sched_dump, ";;\t+--- priority of %d = %d, priority of",
+ INSN_UID (insn->insn), model_next_priority);
+ insn->model_priority = model_next_priority++;
+ model_remove_from_worklist (insn);
+ model_add_to_worklist_at (insn, NULL);
+
+ first = NULL;
+ for (;;)
+ {
+ FOR_EACH_DEP (insn->insn, SD_LIST_HARD_BACK, sd_it, dep)
+ {
+ pro = MODEL_INSN_INFO (DEP_PRO (dep));
+ /* The first test is to ignore debug instructions, and instructions
+ from other blocks. */
+ if (pro->insn
+ && pro->model_priority != model_next_priority
+ && QUEUE_INDEX (pro->insn) != QUEUE_SCHEDULED)
+ {
+ pro->model_priority = model_next_priority;
+ if (sched_verbose >= 7)
+ fprintf (sched_dump, " %d", INSN_UID (pro->insn));
+ if (QUEUE_INDEX (pro->insn) == QUEUE_READY)
+ {
+ /* PRO is already in the worklist, but it now has
+ a higher priority than before. Move it to the
+ appropriate place. */
+ model_remove_from_worklist (pro);
+ model_add_to_worklist (pro, NULL, model_worklist);
+ }
+ else
+ {
+ /* PRO isn't in the worklist. Recursively process
+ its predecessors until we find one that is. */
+ pro->next = first;
+ first = pro;
+ }
+ }
+ }
+ if (!first)
+ break;
+ insn = first;
+ first = insn->next;
+ }
+ if (sched_verbose >= 7)
+ fprintf (sched_dump, " = %d\n", model_next_priority);
+ model_next_priority++;
+}
+
+/* Pick one instruction from model_worklist and process it. */
+
+static void
+model_choose_insn (void)
+{
+ struct model_insn_info *insn, *fallback;
+ int count;
+
+ if (sched_verbose >= 7)
+ {
+ fprintf (sched_dump, ";;\t+--- worklist:\n");
+ insn = model_worklist;
+ count = MAX_SCHED_READY_INSNS;
+ while (count > 0 && insn)
+ {
+ fprintf (sched_dump, ";;\t+--- %d [%d, %d, %d, %d]\n",
+ INSN_UID (insn->insn), insn->model_priority,
+ insn->depth + insn->alap, insn->depth,
+ INSN_PRIORITY (insn->insn));
+ count--;
+ insn = insn->next;
+ }
+ }
+
+ /* Look for a ready instruction whose model_classify_pressure is zero
+ or negative, picking the highest-priority one. Adding such an
+ instruction to the schedule now should do no harm, and may actually
+ do some good.
+
+ Failing that, see whether there is an instruction with the highest
+ extant model_priority that is not yet ready, but which would reduce
+ pressure if it became ready. This is designed to catch cases like:
+
+ (set (mem (reg R1)) (reg R2))
+
+ where the instruction is the last remaining use of R1 and where the
+ value of R2 is not yet available (or vice versa). The death of R1
+ means that this instruction already reduces pressure. It is of
+ course possible that the computation of R2 involves other registers
+ that are hard to kill, but such cases are rare enough for this
+ heuristic to be a win in general.
+
+ Failing that, just pick the highest-priority instruction in the
+ worklist. */
+ count = MAX_SCHED_READY_INSNS;
+ insn = model_worklist;
+ fallback = 0;
+ for (;;)
+ {
+ if (count == 0 || !insn)
+ {
+ insn = fallback ? fallback : model_worklist;
+ break;
+ }
+ if (insn->unscheduled_preds)
+ {
+ if (model_worklist->model_priority == insn->model_priority
+ && !fallback
+ && model_classify_pressure (insn) < 0)
+ fallback = insn;
+ }
+ else
+ {
+ if (model_classify_pressure (insn) <= 0)
+ break;
+ }
+ count--;
+ insn = insn->next;
+ }
+
+ if (sched_verbose >= 7 && insn != model_worklist)
+ {
+ if (insn->unscheduled_preds)
+ fprintf (sched_dump, ";;\t+--- promoting insn %d, with dependencies\n",
+ INSN_UID (insn->insn));
+ else
+ fprintf (sched_dump, ";;\t+--- promoting insn %d, which is ready\n",
+ INSN_UID (insn->insn));
+ }
+ if (insn->unscheduled_preds)
+ /* INSN isn't yet ready to issue. Give all its predecessors the
+ highest priority. */
+ model_promote_predecessors (insn);
+ else
+ {
+ /* INSN is ready. Add it to the end of model_schedule and
+ process its successors. */
+ model_add_successors_to_worklist (insn);
+ model_remove_from_worklist (insn);
+ model_add_to_schedule (insn->insn);
+ model_record_pressures (insn);
+ update_register_pressure (insn->insn);
+ }
+}
+
+/* Restore all QUEUE_INDEXs to the values that they had before
+ model_start_schedule was called. */
+
+static void
+model_reset_queue_indices (void)
+{
+ unsigned int i;
+ rtx insn;
+
+ FOR_EACH_VEC_ELT (rtx, model_schedule, i, insn)
+ QUEUE_INDEX (insn) = MODEL_INSN_INFO (insn)->old_queue;
+}
+
+/* We have calculated the model schedule and spill costs. Print a summary
+ to sched_dump. */
+
+static void
+model_dump_pressure_summary (void)
+{
+ int pci, cl;
+
+ fprintf (sched_dump, ";; Pressure summary:");
+ for (pci = 0; pci < ira_pressure_classes_num; pci++)
+ {
+ cl = ira_pressure_classes[pci];
+ fprintf (sched_dump, " %s:%d", reg_class_names[cl],
+ model_before_pressure.limits[pci].pressure);
+ }
+ fprintf (sched_dump, "\n\n");
+}
+
+/* Initialize the SCHED_PRESSURE_MODEL information for the current
+ scheduling region. */
+
+static void
+model_start_schedule (void)
+{
+  basic_block bb;
+
+  model_next_priority = 1;
+  model_schedule = VEC_alloc (rtx, heap, sched_max_luid);
+  model_insns = XCNEWVEC (struct model_insn_info, sched_max_luid);
+
+  bb = BLOCK_FOR_INSN (NEXT_INSN (current_sched_info->prev_head));
+  initiate_reg_pressure_info (df_get_live_in (bb));
+
+  model_analyze_insns ();
+  model_init_pressure_group (&model_before_pressure);
+  while (model_worklist)
+    model_choose_insn ();
+  gcc_assert (model_num_insns == (int) VEC_length (rtx, model_schedule));
+  if (sched_verbose >= 2)
+    fprintf (sched_dump, "\n");
+
+  model_record_final_pressures (&model_before_pressure);
+  model_reset_queue_indices ();
+
+  XDELETEVEC (model_insns);
+
+  model_curr_point = 0;
+  initiate_reg_pressure_info (df_get_live_in (bb));
+  if (sched_verbose >= 1)
+    model_dump_pressure_summary ();
+}
+
+/* Free the information associated with GROUP. */
+
+static void
+model_finalize_pressure_group (struct model_pressure_group *group)
+{
+ XDELETEVEC (group->model);
+}
+
+/* Free the information created by model_start_schedule. */
+
+static void
+model_end_schedule (void)
+{
+ model_finalize_pressure_group (&model_before_pressure);
+ VEC_free (rtx, heap, model_schedule);
+}
+
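model_start_schedule and model_end_schedule bracket the main scheduling loop; the later hunks in schedule_block show the two call sites. A rough stand-alone sketch of that bracketing only, where the stub functions and the three-iteration loop are placeholders rather than the real scheduler:

#include <stdio.h>

enum sched_pressure_algorithm
{
  SCHED_PRESSURE_NONE,
  SCHED_PRESSURE_WEIGHTED,
  SCHED_PRESSURE_MODEL
};

static enum sched_pressure_algorithm sched_pressure = SCHED_PRESSURE_MODEL;

/* Placeholders for the real routines in haifa-sched.c.  */
static void start_model (void) { puts ("build model schedule, reset pressure"); }
static void issue_one_insn (void) { puts ("pick and issue one insn"); }
static void end_model (void) { puts ("free model schedule and pressure data"); }

int
main (void)
{
  int remaining = 3;

  if (sched_pressure == SCHED_PRESSURE_MODEL)
    start_model ();            /* model_start_schedule in the patch.  */
  while (remaining-- > 0)
    issue_one_insn ();         /* the main loop in schedule_block.  */
  if (sched_pressure == SCHED_PRESSURE_MODEL)
    end_model ();              /* model_end_schedule in the patch.  */
  return 0;
}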
/* A structure that holds local state for the loop in schedule_block. */
struct sched_block_state
{
@@ -2240,6 +3635,10 @@ schedule_insn (rtx insn)
reg_class_names[ira_pressure_classes[i]],
pressure_info[i].set_increase, pressure_info[i].change);
}
+      if (sched_pressure == SCHED_PRESSURE_MODEL
+          && model_curr_point < model_num_insns
+          && model_index (insn) == model_curr_point)
+        fprintf (sched_dump, ":model %d", model_curr_point);
fputc ('\n', sched_dump);
}
@@ -2307,6 +3706,24 @@ schedule_insn (rtx insn)
gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
+  if (sched_pressure == SCHED_PRESSURE_MODEL
+      && model_curr_point < model_num_insns
+      && NONDEBUG_INSN_P (insn))
+    {
+      if (model_index (insn) == model_curr_point)
+        do
+          model_curr_point++;
+        while (model_curr_point < model_num_insns
+               && (QUEUE_INDEX (MODEL_INSN (model_curr_point))
+                   == QUEUE_SCHEDULED));
+      else
+        model_recompute (insn);
+      model_update_limit_points ();
+      update_register_pressure (insn);
+      if (sched_verbose >= 2)
+        print_curr_reg_pressure ();
+    }
+
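The do/while above advances model_curr_point past any model-schedule entries that were already issued ahead of their model position; an insn issued out of model order goes through model_recompute instead. With a hypothetical model order I0..I3, issuing I1 first leaves the point at 0, and issuing I0 afterwards moves it straight to 2. A stand-alone sketch of just the pointer-advance part, with an array index standing in for model_index and a flag array standing in for QUEUE_SCHEDULED:

#include <stdio.h>

#define N 4

/* 1 if the insn at this model position has already been issued.  */
static int scheduled[N];
static int model_curr_point;

/* Mimic the update above for the insn at model position INDEX.  */
static void
note_issue (int index)
{
  scheduled[index] = 1;
  if (index == model_curr_point)
    do
      model_curr_point++;
    while (model_curr_point < N && scheduled[model_curr_point]);
  /* else: the real code calls model_recompute (insn) here.  */
}

int
main (void)
{
  note_issue (1);                       /* Out of model order: point stays at 0.  */
  printf ("%d\n", model_curr_point);    /* prints 0 */
  note_issue (0);                       /* Skip 0 and the already-issued 1.  */
  printf ("%d\n", model_curr_point);    /* prints 2 */
  return 0;
}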
gcc_assert (INSN_TICK (insn) >= MIN_TICK);
if (INSN_TICK (insn) > clock_var)
/* INSN has been prematurely moved from the queue to the ready list.
@@ -3138,7 +4555,16 @@ queue_to_ready (struct ready_list *ready)
/* If the ready list is full, delay the insn for 1 cycle.
See the comment in schedule_block for the rationale. */
      if (!reload_completed
-          && ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
+          && (ready->n_ready - ready->n_debug > MAX_SCHED_READY_INSNS
+              || (sched_pressure == SCHED_PRESSURE_MODEL
+                  /* Limit pressure recalculations to MAX_SCHED_READY_INSNS
+                     instructions too.  */
+                  && model_index (insn) > (model_curr_point
+                                           + MAX_SCHED_READY_INSNS)))
+          && !(sched_pressure == SCHED_PRESSURE_MODEL
+               && model_curr_point < model_num_insns
+               /* Always allow the next model instruction to issue.  */
+               && model_index (insn) == model_curr_point)
           && !SCHED_GROUP_P (insn)
           && insn != skip_insn)
        queue_insn (insn, 1, "ready full");
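The enlarged condition above throttles the ready list in two ways: it delays instructions once the list holds more than MAX_SCHED_READY_INSNS entries, and under SCHED_PRESSURE_MODEL it also delays instructions sitting more than MAX_SCHED_READY_INSNS positions beyond the current model point, while always letting the next model instruction through. A stand-alone restatement of that test with plain ints; the helper name and parameters are invented for the example, and the real code additionally checks reload_completed, SCHED_GROUP_P and skip_insn.

#include <stdio.h>

/* Return nonzero if an insn at model position INSN_MODEL_IDX should be
   queued for one cycle rather than added to the ready list.  */
static int
should_delay_for_full_ready_list (int n_ready, int max_ready,
                                  int use_model, int insn_model_idx,
                                  int model_point, int model_len)
{
  int too_many = (n_ready > max_ready
                  /* Also cap how far past the current model point we look,
                     so that pressure recalculation stays bounded.  */
                  || (use_model
                      && insn_model_idx > model_point + max_ready));
  int is_next_model_insn = (use_model
                            && model_point < model_len
                            && insn_model_idx == model_point);
  /* The next instruction in the model schedule is always allowed in.  */
  return too_many && !is_next_model_insn;
}

int
main (void)
{
  /* 130 ready insns against a cap of 100: delayed, unless the insn is the
     next one in the model schedule.  */
  printf ("%d\n", should_delay_for_full_ready_list (130, 100, 1, 57, 12, 200)); /* 1 */
  printf ("%d\n", should_delay_for_full_ready_list (130, 100, 1, 12, 12, 200)); /* 0 */
  return 0;
}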
@@ -3366,12 +4792,12 @@ debug_ready_list (struct ready_list *ready)
fprintf (sched_dump, " %s:%d",
(*current_sched_info->print_insn) (p[i], 0),
INSN_LUID (p[i]));
- if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
+ if (sched_pressure != SCHED_PRESSURE_NONE)
fprintf (sched_dump, "(cost=%d",
INSN_REG_PRESSURE_EXCESS_COST_CHANGE (p[i]));
if (INSN_TICK (p[i]) > clock_var)
fprintf (sched_dump, ":delay=%d", INSN_TICK (p[i]) - clock_var);
- if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
+ if (sched_pressure != SCHED_PRESSURE_NONE)
fprintf (sched_dump, ")");
}
fprintf (sched_dump, "\n");
@@ -4001,8 +5427,17 @@ prune_ready_list (state_t temp_state, bool first_cycle_insn_p,
cost = 1;
reason = "asm";
}
-      else if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
-        cost = 0;
+      else if (sched_pressure != SCHED_PRESSURE_NONE)
+        {
+          if (sched_pressure == SCHED_PRESSURE_MODEL
+              && INSN_TICK (insn) <= clock_var)
+            {
+              memcpy (temp_state, curr_state, dfa_state_size);
+              if (state_transition (temp_state, insn) >= 0)
+                INSN_TICK (insn) = clock_var + 1;
+            }
+          cost = 0;
+        }
else
{
int delay_cost = 0;
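Under SCHED_PRESSURE_MODEL the new branch above probes the DFA on a scratch copy of the state and, if the instruction cannot issue in the current cycle, bumps INSN_TICK so that later code treats it as stalled rather than free. A stand-alone sketch of that probe-on-a-copy idiom follows; the two-wide "DFA" is a toy stand-in for state_transition and dfa_state_size, not the real automaton.

#include <stdio.h>
#include <string.h>

/* Toy "DFA state": counts how many insns have issued this cycle.  */
struct toy_state { int issued; };

/* Return >= 0 if no further insn can issue from STATE (mirroring
   state_transition's convention), < 0 if one can.  */
static int
toy_transition (struct toy_state *state)
{
  if (state->issued >= 2)       /* Pretend the machine is two-wide.  */
    return 1;
  state->issued++;
  return -1;
}

int
main (void)
{
  struct toy_state curr = { 2 };        /* Both issue slots already used.  */
  struct toy_state temp;
  int tick = 7, clock = 7;

  /* Probe on a copy so the committed state is untouched, as the patch does
     with memcpy (temp_state, curr_state, dfa_state_size).  */
  memcpy (&temp, &curr, sizeof temp);
  if (toy_transition (&temp) >= 0)
    tick = clock + 1;                   /* The insn must wait a cycle.  */

  printf ("tick=%d curr.issued=%d\n", tick, curr.issued); /* tick=8 curr.issued=2 */
  return 0;
}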
@@ -4175,6 +5610,9 @@ schedule_block (basic_block *target_bb)
in try_ready () (which is called through init_ready_list ()). */
(*current_sched_info->init_ready_list) ();
+  if (sched_pressure == SCHED_PRESSURE_MODEL)
+    model_start_schedule ();
+
/* The algorithm is O(n^2) in the number of ready insns at any given
time in the worst case. Before reload we are more likely to have
big lists so truncate them to a reasonable size. */
@@ -4420,7 +5858,7 @@ schedule_block (basic_block *target_bb)
else
insn = ready_remove_first (&ready);
- if (sched_pressure == SCHED_PRESSURE_WEIGHTED
+ if (sched_pressure != SCHED_PRESSURE_NONE
&& INSN_TICK (insn) > clock_var)
{
ready_add (&ready, insn, true);
@@ -4671,6 +6109,9 @@ schedule_block (basic_block *target_bb)
}
}
+  if (sched_pressure == SCHED_PRESSURE_MODEL)
+    model_end_schedule ();
+
if (success)
{
commit_schedule (prev_head, tail, target_bb);
@@ -4793,7 +6234,7 @@ sched_init (void)
else
sched_pressure = SCHED_PRESSURE_NONE;
- if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
+ if (sched_pressure != SCHED_PRESSURE_NONE)
ira_setup_eliminable_regset ();
/* Initialize SPEC_INFO. */
@@ -4871,7 +6312,7 @@ sched_init (void)
if (targetm.sched.init_global)
targetm.sched.init_global (sched_dump, sched_verbose, get_max_uid () + 1);
- if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
+ if (sched_pressure != SCHED_PRESSURE_NONE)
{
int i, max_regno = max_reg_num ();
@@ -4888,8 +6329,11 @@ sched_init (void)
? ira_pressure_class_translate[REGNO_REG_CLASS (i)]
: ira_pressure_class_translate[reg_allocno_class (i)]);
curr_reg_live = BITMAP_ALLOC (NULL);
-      saved_reg_live = BITMAP_ALLOC (NULL);
-      region_ref_regs = BITMAP_ALLOC (NULL);
+      if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
+        {
+          saved_reg_live = BITMAP_ALLOC (NULL);
+          region_ref_regs = BITMAP_ALLOC (NULL);
+        }
}
curr_state = xmalloc (dfa_state_size);
@@ -4988,14 +6432,17 @@ void
sched_finish (void)
{
haifa_finish_h_i_d ();
-  if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
+  if (sched_pressure != SCHED_PRESSURE_NONE)
     {
       if (regstat_n_sets_and_refs != NULL)
         regstat_free_n_sets_and_refs ();
-      free (sched_regno_pressure_class);
-      BITMAP_FREE (region_ref_regs);
-      BITMAP_FREE (saved_reg_live);
+      if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
+        {
+          BITMAP_FREE (region_ref_regs);
+          BITMAP_FREE (saved_reg_live);
+        }
       BITMAP_FREE (curr_reg_live);
+      free (sched_regno_pressure_class);
}
free (curr_state);
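After this hunk the frees in sched_finish mirror the allocations in sched_init: curr_reg_live and sched_regno_pressure_class exist whenever register pressure is tracked at all, while saved_reg_live and region_ref_regs are WEIGHTED-only. A small stand-alone sketch of that pairing; plain malloc/free and the toy names stand in for BITMAP_ALLOC/BITMAP_FREE and the real variables.

#include <stdlib.h>

enum mode { NONE, WEIGHTED, MODEL };

static void *curr_live, *saved_live, *region_refs;

static void
init_pressure_data (enum mode m)
{
  if (m == NONE)
    return;
  curr_live = malloc (1);               /* Needed by WEIGHTED and MODEL.  */
  if (m == WEIGHTED)
    {
      saved_live = malloc (1);          /* WEIGHTED-only working sets.  */
      region_refs = malloc (1);
    }
}

static void
finish_pressure_data (enum mode m)
{
  if (m == NONE)
    return;
  if (m == WEIGHTED)
    {
      free (region_refs);
      free (saved_live);
    }
  free (curr_live);                     /* Mirror of init_pressure_data.  */
}

int
main (void)
{
  init_pressure_data (MODEL);
  finish_pressure_data (MODEL);         /* No WEIGHTED-only frees, no leaks.  */
  return 0;
}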
@@ -5267,7 +6714,7 @@ fix_tick_ready (rtx next)
INSN_TICK (next) = tick;
delay = tick - clock_var;
- if (delay <= 0 || sched_pressure == SCHED_PRESSURE_WEIGHTED)
+ if (delay <= 0 || sched_pressure != SCHED_PRESSURE_NONE)
delay = QUEUE_READY;
change_queue_index (next, delay);
@@ -6522,7 +7969,7 @@ add_jump_dependencies (rtx insn, rtx jump)
if (insn == jump)
break;
- if (dep_list_size (insn) == 0)
+ if (dep_list_size (insn, SD_LIST_FORW) == 0)
{
dep_def _new_dep, *new_dep = &_new_dep;
@@ -6663,6 +8110,7 @@ haifa_finish_h_i_d (void)
FOR_EACH_VEC_ELT (haifa_insn_data_def, h_i_d, i, data)
{
+ free (data->max_reg_pressure);
free (data->reg_pressure);
for (use = data->reg_use_list; use != NULL; use = next)
{
@@ -6693,7 +8141,7 @@ haifa_init_insn (rtx insn)
/* Extend dependency caches by one element. */
extend_dependency_caches (1, false);
}
- if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
+ if (sched_pressure != SCHED_PRESSURE_NONE)
init_insn_reg_pressure_info (insn);
}