author    | nathan <nathan@138bc75d-0d04-0410-961f-82ee72b054a4> | 2004-09-09 17:19:16 +0000
committer | nathan <nathan@138bc75d-0d04-0410-961f-82ee72b054a4> | 2004-09-09 17:19:16 +0000
commit    | 04e579b6fdd5e00f2367ee4282d097bee5efeadc (patch)
tree      | 4493ed2091b537830e81c4e1100321bb065757b7 /gcc
parent    | 197e583ae4f10a38714ff799141b8a94cbfb89d8 (diff)
download  | gcc-04e579b6fdd5e00f2367ee4282d097bee5efeadc.tar.gz
* ra-build.c (copy_insn_p, remember_move, defuse_overlap_p_1,
live_out_1, prune_hardregs_for_mode, init_one_web_common,
reinit_one_web, add_subweb, add_subweb_2, init_web_parts,
record_conflict, compare_and_free_webs, init_webs_defs_uses,
parts_to_webs_1, parts_to_webs, reset_conflicts,
check_conflict_numbers, remember_web_was_spilled, handle_asm_insn,
ra_build_free): Use gcc_assert and gcc_unreachable instead of abort.
* ra-colorize.c (push_list, put_web, reset_lists, put_web_at_end,
put_move, remove_move, combine, select_spill, colorize_one_web,
try_recolor_web, insert_coalesced_conflicts, check_colors,
break_precolored_alias, restore_conflicts_from_coalesce,
sort_and_combine_web_pairs, check_uncoalesced_moves): Likewise.
* ra-rewrite.c (spill_coalescing, slots_overlap_p, emit_loads,
reloads_to_loads, rewrite_program2, emit_colors): Likewise.
* ra.c (first_hard_reg, create_insn_info, find_subweb, init_ra,
check_df): Likewise.
* real.c (do_add, do_multiply, do_divide, do_compare, do_fix_trunc,
real_arithmetic, real_compare, real_exponent, real_ldexp,
real_identical, real_to_integer, real_to_integer2, real_to_decimal,
real_to_hexadecimal, real_from_integer, ten_to_ptwo, ten_to_mptwo,
real_digit, real_nan, real_maxval, round_for_format, real_convert,
real_to_target, real_from_target, real_hash, encode_ieee_single,
encode_ieee_double, encode_ieee_extended, encode_ieee_quad,
encode_vax_f, encode_vax_d, encode_vax_g, encode_i370_single,
encode_i370_double, encode_c4x_single, encode_c4x_extended): Likewise.
* recog.c (validate_change, validate_replace_rtx_1, asm_operand_ok,
extract_insn, peep2_next_insn, peep2_reg_dead_p,
peep2_find_free_register, peephole2_optimize, store_data_bypass_p,
if_test_bypass_p): Likewise.
* reg-stack.c (record_label_references, get_asm_operand_n_inputs,
stack_result, remove_regno_note, get_hard_regnum, emit_pop_insn,
emit_swap_insn, swap_to_top, move_for_stack_reg,
subst_stack_regs_pat, subst_asm_stack_regs, change_stack,
compensate_edge, convert_regs_1): Likewise.
* regclass.c (init_reg_sets, init_reg_sets_1,
memory_move_secondary_cost): Likewise.
* regrename.c (note_sets, clear_dead_regs, scan_rtx_reg, scan_rtx):
Likewise.
* reload.c (push_secondary_reload, find_valid_class, push_reload,
operands_match_p, decompose, immune_p, find_reloads,
find_reloads_toplev, find_reloads_address_1, subst_reloads,
copy_replacements, refers_to_regno_for_reload_p,
reg_overlap_mentioned_for_reload_p): Likewise.
* reload1.c (compute_use_by_pseudos, replace_pseudos_in, reload,
count_pseudo, find_reg, eliminate_regs, eliminate_regs_in_insn,
verify_initial_elim_offsets, finish_spills, clear_reload_reg_in_use,
reload_reg_free_p, reload_reg_reaches_end_p, reloads_conflict,
choose_reload_regs, merge_assigned_reloads, emit_input_reload_insns,
do_output_reload, fixup_abnormal_edges): Likewise.
* reorg.c (stop_search_p, emit_delay_sequence, get_jump_flags,
fill_slots_from_thread, relax_delay_slots): Likewise.
* resource.c (mark_referenced_resources, mark_set_resources):
Likewise.
* rtl.c (copy_rtx, rtx_equal_p): Likewise.
* rtlanal.c (insn_dependent_p, reg_overlap_mentioned_p,
dead_or_set_p, find_reg_fusage, remove_note, replace_rtx,
subreg_lsb_1, subreg_regno_offset, subreg_offset_representable_p,
find_first_parameter_load, can_hoist_insn_p, hoist_update_store,
hoist_insn_after, hoist_insn_to_edge, nonzero_bits1): Likewise.
* rtlhooks.c (gen_lowpart_general): Likewise.
* sbitmap.c (sbitmap_difference): Likewise.
* sched-deps.c (add_dependence, sched_analyze_1, sched_analyze_2,
sched_analyze, add_forward_dependence): Likewise.
* sched-ebb.c (fix_basic_block_boundaries, schedule_ebb): Likewise.
* sched-rgn.c (debug_regions, compute_trg_info, schedule_region,
schedule_insns): Likewise.
* sched-vis.c (print_pattern): Likewise.
* sdbout.c (sdbout_symbol, sdbout_toplevel_data): Likewise.
* simplify-rtx.c (simplify_unary_operation, simplify_binary_operation,
simplify_const_relational_operation, simplify_ternary_operation,
simplify_immed_subreg, simplify_subreg, simplify_gen_subreg):
Likewise.
* sreal.c (copy, sreal_sub, sreal_div): Likewise.
* stmt.c (force_label_rtx, expand_goto, expand_asm_operands,
resolve_operand_name_1, expand_return, expand_decl,
expand_anon_union_decl, emit_case_bit_tests, expand_case): Likewise.
* stor-layout.c (put_pending_size, smallest_mode_for_size,
int_mode_for_mode, layout_decl, finish_builtin_struct, layout_type,
set_sizetype, get_mode_bounds): Likewise.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@87244 138bc75d-0d04-0410-961f-82ee72b054a4
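Every hunk in the diff below applies the same mechanical rewrite: a negated test followed by abort () becomes a positive gcc_assert of the invariant, and a branch that should never execute (typically a switch default) becomes gcc_unreachable (). The following is a minimal sketch of that idiom, not code from the patch: the helper functions are hypothetical, and the macro definitions are simplified stand-ins for the real gcc_assert and gcc_unreachable provided by GCC's system.h.

```c
/* Simplified stand-ins for GCC's gcc_assert / gcc_unreachable macros
   (illustrative only; the real definitions live in gcc/system.h).  */
#include <stdio.h>
#include <stdlib.h>

#define gcc_assert(EXPR) \
  ((EXPR) ? (void) 0 : (fprintf (stderr, "assertion failure\n"), abort ()))
#define gcc_unreachable() \
  (fprintf (stderr, "unreachable code reached\n"), abort ())

enum op { OP_ADD, OP_SUB };

/* Before: invariants checked with `if (!ok) abort ();' and impossible
   switch cases ending in a bare abort ().  */
static int
apply_old (enum op code, int a, int b)
{
  if (b < 0)
    abort ();
  switch (code)
    {
    case OP_ADD: return a + b;
    case OP_SUB: return a - b;
    default: abort ();
    }
}

/* After: the invariant is stated positively with gcc_assert, and the
   impossible case is marked with gcc_unreachable.  */
static int
apply_new (enum op code, int a, int b)
{
  gcc_assert (b >= 0);
  switch (code)
    {
    case OP_ADD: return a + b;
    case OP_SUB: return a - b;
    default: gcc_unreachable ();
    }
}

int
main (void)
{
  printf ("%d %d\n", apply_old (OP_ADD, 2, 3), apply_new (OP_SUB, 5, 2));
  return 0;
}
```

Stating the check positively keeps the intended invariant readable at the call site, and (in GCC proper) lets the assertion be compiled down to nothing when assertion checking is disabled.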
Diffstat (limited to 'gcc')
Mode       | File               | Lines changed
-----------|--------------------|--------------
-rw-r--r-- | gcc/ChangeLog      | 86
-rw-r--r-- | gcc/ra-build.c     | 199
-rw-r--r-- | gcc/ra-colorize.c  | 208
-rw-r--r-- | gcc/ra-rewrite.c   | 26
-rw-r--r-- | gcc/ra.c           | 55
-rw-r--r-- | gcc/real.c         | 117
-rw-r--r-- | gcc/recog.c        | 69
-rw-r--r-- | gcc/reg-stack.c    | 196
-rw-r--r-- | gcc/regclass.c     | 15
-rw-r--r-- | gcc/regrename.c    | 11
-rw-r--r-- | gcc/reload.c       | 424
-rw-r--r-- | gcc/reload1.c      | 105
-rw-r--r-- | gcc/reorg.c        | 21
-rw-r--r-- | gcc/resource.c     | 15
-rw-r--r-- | gcc/rtl.c          | 4
-rw-r--r-- | gcc/rtlanal.c      | 86
-rw-r--r-- | gcc/rtlhooks.c     | 11
-rw-r--r-- | gcc/sbitmap.c      | 3
-rw-r--r-- | gcc/sched-deps.c   | 57
-rw-r--r-- | gcc/sched-ebb.c    | 6
-rw-r--r-- | gcc/sched-rgn.c    | 20
-rw-r--r-- | gcc/sched-vis.c    | 3
-rw-r--r-- | gcc/sdbout.c       | 9
-rw-r--r-- | gcc/simplify-rtx.c | 234
-rw-r--r-- | gcc/sreal.c        | 43
-rw-r--r-- | gcc/stmt.c         | 102
-rw-r--r-- | gcc/stor-layout.c  | 84
27 files changed, 1031 insertions, 1178 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog index bf58622e8fa..43c658b00ab 100644 --- a/gcc/ChangeLog +++ b/gcc/ChangeLog @@ -1,3 +1,85 @@ +2004-09-09 Giovanni Bajo <giovannibajo@gcc.gnu.org> + + * ra-build.c (copy_insn_p, remember_move, defuse_overlap_p_1, + live_out_1, prune_hardregs_for_mode, init_one_web_common, + reinit_one_web, add_subweb, add_subweb_2, init_web_parts, + record_conflict, compare_and_free_webs, init_webs_defs_uses, + parts_to_webs_1, parts_to_webs, reset_conflicts, + check_conflict_numbers, remember_web_was_spilled, handle_asm_insn, + ra_build_free): Use gcc_assert and gcc_unreachable instead of abort. + * ra-colorize.c (push_list, put_web, reset_lists, put_web_at_end, + put_move, remove_move, combine, select_spill, colorize_one_web, + try_recolor_web, insert_coalesced_conflicts, check_colors, + break_precolored_alias, restore_conflicts_from_coalesce, + sort_and_combine_web_pairs, check_uncoalesced_moves): Likewise. + * ra-rewrite.c (spill_coalescing, slots_overlap_p, emit_loads, + reloads_to_loads, rewrite_program2, emit_colors): Likewise. + * ra.c (first_hard_reg, create_insn_info, find_subweb, init_ra, + check_df): Likewise. + * real.c (do_add, do_multiply, do_divide, do_compare, do_fix_trunc, + real_arithmetic, real_compare, real_exponent, real_ldexp, + real_identical, real_to_integer, real_to_integer2, real_to_decimal, + real_to_hexadecimal, real_from_integer, ten_to_ptwo, ten_to_mptwo, + real_digit, real_nan, real_maxval, round_for_format, real_convert, + real_to_target, real_from_target, real_hash, encode_ieee_single, + encode_ieee_double, encode_ieee_extended, encode_ieee_quad, + encode_vax_f, encode_vax_d, encode_vax_g, encode_i370_single, + encode_i370_double, encode_c4x_single, encode_c4x_extended): Likewise. + * recog.c (validate_change, validate_replace_rtx_1, asm_operand_ok, + extract_insn, peep2_next_insn, peep2_reg_dead_p, + peep2_find_free_register, peephole2_optimize, store_data_bypass_p, + if_test_bypass_p): Likewise. + * reg-stack.c (record_label_references, get_asm_operand_n_inputs, + stack_result, remove_regno_note, get_hard_regnum, emit_pop_insn, + emit_swap_insn, swap_to_top, move_for_stack_reg, + subst_stack_regs_pat, subst_asm_stack_regs, change_stack, + compensate_edge, convert_regs_1): Likewise. + * regclass.c (init_reg_sets, init_reg_sets_1, + memory_move_secondary_cost): Likewise. + * regrename.c (note_sets, clear_dead_regs, scan_rtx_reg, scan_rtx): + Likewise. + * reload.c (push_secondary_reload, find_valid_class, push_reload, + operands_match_p, decompose, immune_p, find_reloads, + find_reloads_toplev, find_reloads_address_1, subst_reloads, + copy_replacements, refers_to_regno_for_reload_p, + reg_overlap_mentioned_for_reload_p): Likewise. + * reload1.c (compute_use_by_pseudos, replace_pseudos_in, reload, + count_pseudo, find_reg, eliminate_regs, eliminate_regs_in_insn, + verify_initial_elim_offsets, finish_spills, clear_reload_reg_in_use, + reload_reg_free_p, reload_reg_reaches_end_p, reloads_conflict, + choose_reload_regs, merge_assigned_reloads, emit_input_reload_insns, + do_output_reload, fixup_abnormal_edges): Likewise. + * reorg.c (stop_search_p, emit_delay_sequence, get_jump_flags, + fill_slots_from_thread, relax_delay_slots): Likewise. + * resource.c (mark_referenced_resources, mark_set_resources): + Likewise. + * rtl.c (copy_rtx, rtx_equal_p): Likewise. 
+ * rtlanal.c (insn_dependent_p, reg_overlap_mentioned_p, + dead_or_set_p, find_reg_fusage, remove_note, replace_rtx, + subreg_lsb_1, subreg_regno_offset, subreg_offset_representable_p, + find_first_parameter_load, can_hoist_insn_p, hoist_update_store, + hoist_insn_after, hoist_insn_to_edge, nonzero_bits1): Likewise. + * rtlhooks.c (gen_lowpart_general): Likewise. + * sbitmap.c (sbitmap_difference): Likewise. + * sched-deps.c (add_dependence, sched_analyze_1, sched_analyze_2, + sched_analyze, add_forward_dependence): Likewise. + * sched-ebb.c (fix_basic_block_boundaries, schedule_ebb): Likewise. + * sched-rgn.c (debug_regions, compute_trg_info, schedule_region, + schedule_insns): Likewise. + * sched-vis.c (print_pattern): Likewise. + * sdbout.c (sdbout_symbol, sdbout_toplevel_data): Likewise. + * simplify-rtx.c (simplify_unary_operation, simplify_binary_operation, + simplify_const_relational_operation, simplify_ternary_operation, + simplify_immed_subreg, simplify_subreg, simplify_gen_subreg): + Likewise. + * sreal.c (copy, sreal_sub, sreal_div): Likewise. + * stmt.c (force_label_rtx, expand_goto, expand_asm_operands, + resolve_operand_name_1, expand_return, expand_decl, + expand_anon_union_decl, emit_case_bit_tests, expand_case): Likewise. + * stor-layout.c (put_pending_size, smallest_mode_for_size, + int_mode_for_mode, layout_decl, finish_builtin_struct, layout_type, + set_sizetype, get_mode_bounds): Likewise. + 2004-09-09 Zack Weinberg <zack@codesourcery.com> * defaults.h (MULTIPLE_SYMBOL_SPACES): Provide default. @@ -16,7 +98,7 @@ * config/ia64/ia64.c (ia64_gimplify_va_arg): Ditto. * tree.h: Declare new function. -2004-09-08 Nathan Sidwell <nathan@codesourcery.com> +2004-09-09 Nathan Sidwell <nathan@codesourcery.com> * cgraphunit.c (cgraph_mark_functions_to_output): Renable node dumping for development builds. @@ -27,7 +109,7 @@ * tree.c (iterative_hash_expr): Replace gcc_unreachable with gcc_assert. -2004-09-08 Nathan Sidwell <nathan@codesourcery.com> +2004-09-09 Nathan Sidwell <nathan@codesourcery.com> * gcse.c (INSN_CUID, insert_set_in_table, find_avail_set, cprop_insn, do_local_cprop, local_cprop_pass, find_bypass_set, diff --git a/gcc/ra-build.c b/gcc/ra-build.c index 63fb24e0597..130b37606a2 100644 --- a/gcc/ra-build.c +++ b/gcc/ra-build.c @@ -228,8 +228,7 @@ copy_insn_p (rtx insn, rtx *source, rtx *target) unsigned int d_regno, s_regno; int uid = INSN_UID (insn); - if (!INSN_P (insn)) - abort (); + gcc_assert (INSN_P (insn)); /* First look, if we already saw this insn. */ if (copy_cache[uid].seen) @@ -541,24 +540,25 @@ remember_move (rtx insn) if (!TEST_BIT (move_handled, INSN_UID (insn))) { rtx s, d; + int ret; + struct df_link *slink = DF_INSN_USES (df, insn); + struct df_link *link = DF_INSN_DEFS (df, insn); + SET_BIT (move_handled, INSN_UID (insn)); - if (copy_insn_p (insn, &s, &d)) - { - /* Some sanity test for the copy insn. */ - struct df_link *slink = DF_INSN_USES (df, insn); - struct df_link *link = DF_INSN_DEFS (df, insn); - if (!link || !link->ref || !slink || !slink->ref) - abort (); - /* The following (link->next != 0) happens when a hardreg - is used in wider mode (REG:DI %eax). Then df.* creates - a def/use for each hardreg contained therein. We only - allow hardregs here. */ - if (link->next - && DF_REF_REGNO (link->next->ref) >= FIRST_PSEUDO_REGISTER) - abort (); - } - else - abort (); + ret = copy_insn_p (insn, &s, &d); + gcc_assert (ret); + + /* Some sanity test for the copy insn. 
*/ + gcc_assert (link && link->ref); + gcc_assert (slink && slink->ref); + /* The following (link->next != 0) happens when a hardreg + is used in wider mode (REG:DI %eax). Then df.* creates + a def/use for each hardreg contained therein. We only + allow hardregs here. */ + gcc_assert (!link->next + || DF_REF_REGNO (link->next->ref) + < FIRST_PSEUDO_REGISTER); + /* XXX for now we don't remember move insns involving any subregs. Those would be difficult to coalesce (we would need to implement handling of all the subwebs in the allocator, including that such @@ -669,7 +669,7 @@ defuse_overlap_p_1 (rtx def, struct curr_use *use) return (old_u != use->undefined) ? 4 : -1; } default: - abort (); + gcc_unreachable (); } } @@ -821,8 +821,7 @@ live_out_1 (struct df *df ATTRIBUTE_UNUSED, struct curr_use *use, rtx insn) { /* If this insn doesn't completely define the USE, increment also it's spanned deaths count (if this insn contains a death). */ - if (uid >= death_insns_max_uid) - abort (); + gcc_assert (uid < death_insns_max_uid); if (TEST_BIT (insns_with_deaths, uid)) wp->spanned_deaths++; use->undefined = final_undef; @@ -1206,8 +1205,7 @@ prune_hardregs_for_mode (HARD_REG_SET *s, enum machine_mode mode) static void init_one_web_common (struct web *web, rtx reg) { - if (!REG_P (reg)) - abort (); + gcc_assert (REG_P (reg)); /* web->id isn't initialized here. */ web->regno = REGNO (reg); web->orig_x = reg; @@ -1272,8 +1270,7 @@ init_one_web_common (struct web *web, rtx reg) #endif web->num_freedom = hard_regs_count (web->usable_regs); web->num_freedom -= web->add_hardregs; - if (!web->num_freedom) - abort(); + gcc_assert (web->num_freedom); } COPY_HARD_REG_SET (web->orig_usable_regs, web->usable_regs); } @@ -1324,10 +1321,8 @@ reinit_one_web (struct web *web, rtx reg) web->stack_slot = NULL; web->pattern = NULL; web->alias = NULL; - if (web->moves) - abort (); - if (!web->useless_conflicts) - abort (); + gcc_assert (!web->moves); + gcc_assert (web->useless_conflicts); } /* Insert and returns a subweb corresponding to REG into WEB (which @@ -1337,8 +1332,7 @@ static struct web * add_subweb (struct web *web, rtx reg) { struct web *w; - if (GET_CODE (reg) != SUBREG) - abort (); + gcc_assert (GET_CODE (reg) == SUBREG); w = xmalloc (sizeof (struct web)); /* Copy most content from parent-web. */ *w = *web; @@ -1376,8 +1370,7 @@ add_subweb_2 (struct web *web, unsigned int size_word) mode = mode_for_size (size, GET_MODE_CLASS (GET_MODE (ref_rtx)), 0); if (mode == BLKmode) mode = mode_for_size (size, MODE_INT, 0); - if (mode == BLKmode) - abort (); + gcc_assert (mode != BLKmode); web = add_subweb (web, gen_rtx_SUBREG (mode, web->orig_x, BYTE_BEGIN (size_word))); web->artificial = 1; @@ -1396,8 +1389,7 @@ init_web_parts (struct df *df) { if (df->defs[no]) { - if (no < last_def_id && web_parts[no].ref != df->defs[no]) - abort (); + gcc_assert (no >= last_def_id || web_parts[no].ref == df->defs[no]); web_parts[no].ref = df->defs[no]; /* Uplink might be set from the last iteration. 
*/ if (!web_parts[no].uplink) @@ -1414,9 +1406,8 @@ init_web_parts (struct df *df) { if (df->uses[no]) { - if (no < last_use_id - && web_parts[no + df->def_id].ref != df->uses[no]) - abort (); + gcc_assert (no >= last_use_id + || web_parts[no + df->def_id].ref == df->uses[no]); web_parts[no + df->def_id].ref = df->uses[no]; if (!web_parts[no + df->def_id].uplink) num_webs++; @@ -1464,8 +1455,8 @@ static void copy_conflict_list (struct web *web) { struct conflict_link *cl; - if (web->orig_conflict_list || web->have_orig_conflicts) - abort (); + gcc_assert (!web->orig_conflict_list); + gcc_assert (!web->have_orig_conflicts); web->have_orig_conflicts = 1; for (cl = web->conflict_list; cl; cl = cl->next) { @@ -1572,8 +1563,7 @@ record_conflict (struct web *web1, struct web *web2) /* Trivial non-conflict or already recorded conflict. */ if (web1 == web2 || TEST_BIT (igraph, index)) return; - if (id1 == id2) - abort (); + gcc_assert (id1 != id2); /* As fixed_regs are no targets for allocation, conflicts with them are pointless. */ if ((web1->regno < FIRST_PSEUDO_REGISTER && fixed_regs[web1->regno]) @@ -1663,32 +1653,27 @@ compare_and_free_webs (struct web_link **link) { struct web *web1 = wl->web; struct web *web2 = ID2WEB (web1->id); - if (web1->regno != web2->regno - || web1->mode_changed != web2->mode_changed - || !rtx_equal_p (web1->orig_x, web2->orig_x) - || web1->type != web2->type - /* Only compare num_defs/num_uses with non-hardreg webs. - E.g. the number of uses of the framepointer changes due to - inserting spill code. */ - || (web1->type != PRECOLORED - && (web1->num_uses != web2->num_uses - || web1->num_defs != web2->num_defs)) - /* Similarly, if the framepointer was unreferenced originally - but we added spills, these fields may not match. */ - || (web1->type != PRECOLORED - && web1->crosses_call != web2->crosses_call) - || (web1->type != PRECOLORED - && web1->live_over_abnormal != web2->live_over_abnormal)) - abort (); + gcc_assert (web1->regno == web2->regno); + gcc_assert (web1->mode_changed == web2->mode_changed); + gcc_assert (rtx_equal_p (web1->orig_x, web2->orig_x)); + gcc_assert (web1->type == web2->type); if (web1->type != PRECOLORED) { unsigned int i; + + /* Only compare num_defs/num_uses with non-hardreg webs. + E.g. the number of uses of the framepointer changes due to + inserting spill code. */ + gcc_assert (web1->num_uses == web2->num_uses); + gcc_assert (web1->num_defs == web2->num_defs); + /* Similarly, if the framepointer was unreferenced originally + but we added spills, these fields may not match. 
*/ + gcc_assert (web1->crosses_call == web2->crosses_call); + gcc_assert (web1->live_over_abnormal == web2->live_over_abnormal); for (i = 0; i < web1->num_defs; i++) - if (web1->defs[i] != web2->defs[i]) - abort (); + gcc_assert (web1->defs[i] == web2->defs[i]); for (i = 0; i < web1->num_uses; i++) - if (web1->uses[i] != web2->uses[i]) - abort (); + gcc_assert (web1->uses[i] == web2->uses[i]); } if (web1->type == PRECOLORED) { @@ -1733,8 +1718,8 @@ init_webs_defs_uses (void) web->uses[use_i++] = link->ref; } web->temp_refs = NULL; - if (def_i != web->num_defs || use_i != web->num_uses) - abort (); + gcc_assert (def_i == web->num_defs); + gcc_assert (use_i == web->num_uses); } } @@ -1834,11 +1819,13 @@ parts_to_webs_1 (struct df *df, struct web_link **copy_webs, web->id = newid; web->temp_refs = NULL; webnum++; - if (web->regno < FIRST_PSEUDO_REGISTER && !hardreg2web[web->regno]) - hardreg2web[web->regno] = web; - else if (web->regno < FIRST_PSEUDO_REGISTER - && hardreg2web[web->regno] != web) - abort (); + if (web->regno < FIRST_PSEUDO_REGISTER) + { + if (!hardreg2web[web->regno]) + hardreg2web[web->regno] = web; + else + gcc_assert (hardreg2web[web->regno] == web); + } } /* If this reference already had a web assigned, we are done. @@ -1861,8 +1848,8 @@ parts_to_webs_1 (struct df *df, struct web_link **copy_webs, web->live_over_abnormal = 1; /* And check, that it's not a newly allocated web. This would be an inconsistency. */ - if (!web->old_web || web->type == PRECOLORED) - abort (); + gcc_assert (web->old_web); + gcc_assert (web->type != PRECOLORED); continue; } /* In case this was no web part root, we need to initialize WEB @@ -1884,8 +1871,7 @@ parts_to_webs_1 (struct df *df, struct web_link **copy_webs, /* And the test, that if def2web[i] was NULL above, that we are _not_ an old web. */ - if (web->old_web && web->type != PRECOLORED) - abort (); + gcc_assert (!web->old_web || web->type == PRECOLORED); /* Possible create a subweb, if this ref was a subreg. */ if (GET_CODE (reg) == SUBREG) @@ -1894,8 +1880,7 @@ parts_to_webs_1 (struct df *df, struct web_link **copy_webs, if (!subweb) { subweb = add_subweb (web, reg); - if (web->old_web) - abort (); + gcc_assert (!web->old_web); } } else @@ -1917,14 +1902,9 @@ parts_to_webs_1 (struct df *df, struct web_link **copy_webs, { struct web *compare = def2web[i]; if (i < last_def_id) - { - if (web->old_web && compare != subweb) - abort (); - } - if (!web->old_web && compare) - abort (); - if (compare && compare != subweb) - abort (); + gcc_assert (!web->old_web || compare == subweb); + gcc_assert (web->old_web || !compare); + gcc_assert (!compare || compare == subweb); } def2web[i] = subweb; web->num_defs++; @@ -1934,15 +1914,11 @@ parts_to_webs_1 (struct df *df, struct web_link **copy_webs, if (ra_pass > 1) { struct web *compare = use2web[ref_id]; - if (ref_id < last_use_id) - { - if (web->old_web && compare != subweb) - abort (); - } - if (!web->old_web && compare) - abort (); - if (compare && compare != subweb) - abort (); + + gcc_assert (ref_id >= last_use_id + || !web->old_web || compare == subweb); + gcc_assert (web->old_web || !compare); + gcc_assert (!compare || compare == subweb); } use2web[ref_id] = subweb; web->num_uses++; @@ -1952,8 +1928,7 @@ parts_to_webs_1 (struct df *df, struct web_link **copy_webs, } /* We better now have exactly as many webs as we had web part roots. 
*/ - if (webnum != num_webs) - abort (); + gcc_assert (webnum == num_webs); return webnum; } @@ -2001,8 +1976,7 @@ parts_to_webs (struct df *df) struct web *web; if (wp->uplink || !wp->ref) { - if (wp->sub_conflicts) - abort (); + gcc_assert (!wp->sub_conflicts); continue; } web = def2web[i]; @@ -2087,8 +2061,7 @@ reset_conflicts (void) web->conflict_list = web->orig_conflict_list; web->orig_conflict_list = NULL; } - if (web->orig_conflict_list) - abort (); + gcc_assert (!web->orig_conflict_list); /* New non-precolored webs, have no conflict list. */ if (web->type != PRECOLORED && !web->old_web) @@ -2097,8 +2070,7 @@ reset_conflicts (void) /* Useless conflicts will be rebuilt completely. But check for cleanliness, as the web might have come from the free list. */ - if (bitmap_first_set_bit (web->useless_conflicts) >= 0) - abort (); + gcc_assert (bitmap_first_set_bit (web->useless_conflicts) < 0); } else { @@ -2153,8 +2125,7 @@ check_conflict_numbers (void) for (cl = web->conflict_list; cl; cl = cl->next) if (cl->t->type != SELECT && cl->t->type != COALESCED) new_conf += 1 + cl->t->add_hardregs; - if (web->type != PRECOLORED && new_conf != web->num_conflicts) - abort (); + gcc_assert (web->type == PRECOLORED || new_conf == web->num_conflicts); } } #endif @@ -2316,8 +2287,7 @@ remember_web_was_spilled (struct web *web) AND_COMPL_HARD_REG_SET (web->usable_regs, invalid_mode_change_regs); #endif web->num_freedom = hard_regs_count (web->usable_regs); - if (!web->num_freedom) - abort(); + gcc_assert (web->num_freedom); COPY_HARD_REG_SET (web->orig_usable_regs, web->usable_regs); /* Now look for a class, which is subset of our constraints, to setup add_hardregs, and regclass for debug output. */ @@ -2345,8 +2315,7 @@ remember_web_was_spilled (struct web *web) web->add_hardregs = CLASS_MAX_NREGS (web->regclass, PSEUDO_REGNO_MODE (web->regno)) - 1; web->num_freedom -= web->add_hardregs; - if (!web->num_freedom) - abort(); + gcc_assert (web->num_freedom); adjust -= 0 * web->add_hardregs; web->num_conflicts -= adjust; } @@ -2853,10 +2822,8 @@ handle_asm_insn (struct df *df, rtx insn) link = link->next; if (!link || !link->ref) { - if (in_output) - in_output = 0; - else - abort (); + gcc_assert (in_output); + in_output = 0; } else break; @@ -3124,11 +3091,9 @@ ra_build_free (void) for (i = 0; i < num_webs; i++) { struct web *web = ID2WEB (i); - if (!web) - abort (); - if (i >= num_webs - num_subwebs - && (web->conflict_list || web->orig_conflict_list)) - abort (); + gcc_assert (web); + gcc_assert (i < num_webs - num_subwebs + || (!web->conflict_list && !web->orig_conflict_list)); web->moves = NULL; } /* All webs in the free list have no defs or uses anymore. 
*/ diff --git a/gcc/ra-colorize.c b/gcc/ra-colorize.c index e3118a0cdac..0fe848af0f1 100644 --- a/gcc/ra-colorize.c +++ b/gcc/ra-colorize.c @@ -105,8 +105,8 @@ static struct dlist *mv_frozen, *mv_active; static void push_list (struct dlist *x, struct dlist **list) { - if (x->next || x->prev) - abort (); + gcc_assert (!x->next); + gcc_assert (!x->prev); x->next = *list; if (*list) (*list)->prev = x; @@ -116,8 +116,8 @@ push_list (struct dlist *x, struct dlist **list) static void push_list_end (struct dlist *x, struct dlist **list) { - if (x->prev || x->next) - abort (); + gcc_assert (!x->prev); + gcc_assert (!x->next); if (!*list) { *list = x; @@ -195,7 +195,7 @@ put_web (struct web *web, enum ra_node_type type) push_list (web->dlink, &WEBS(SIMPLIFY)); break; default: - abort (); + gcc_unreachable (); } web->type = type; } @@ -211,9 +211,13 @@ reset_lists (void) { struct dlist *d; unsigned int i; - if (WEBS(SIMPLIFY) || WEBS(SIMPLIFY_SPILL) || WEBS(SIMPLIFY_FAT) - || WEBS(FREEZE) || WEBS(SPILL) || WEBS(SELECT)) - abort (); + + gcc_assert (!WEBS(SIMPLIFY)); + gcc_assert (!WEBS(SIMPLIFY_SPILL)); + gcc_assert (!WEBS(SIMPLIFY_FAT)); + gcc_assert (!WEBS(FREEZE)); + gcc_assert (!WEBS(SPILL)); + gcc_assert (!WEBS(SELECT)); while ((d = pop_list (&WEBS(COALESCED))) != NULL) { @@ -243,13 +247,16 @@ reset_lists (void) web->useless_conflicts = NULL; } - /* Sanity check, that we only have free, initial or precolored webs. */ +#ifdef ENABLE_CHECKING + /* Sanity check, that we only have free, initial or precolored webs. */ for (i = 0; i < num_webs; i++) { struct web *web = ID2WEB (i); - if (web->type != INITIAL && web->type != FREE && web->type != PRECOLORED) - abort (); + + gcc_assert (web->type == INITIAL || web->type == FREE + || web->type == PRECOLORED); } +#endif free_dlist (&mv_worklist); free_dlist (&mv_coalesced); free_dlist (&mv_constrained); @@ -265,8 +272,8 @@ put_web_at_end (struct web *web, enum ra_node_type type) { if (type == PRECOLORED) type = INITIAL; - else if (type == SIMPLIFY) - abort (); + else + gcc_assert (type != SIMPLIFY); push_list_end (web->dlink, &WEBS(type)); web->type = type; } @@ -306,7 +313,7 @@ put_move (struct move *move, enum move_type type) push_list (move->dlink, &mv_active); break; default: - abort (); + gcc_unreachable (); } move->type = type; } @@ -501,8 +508,7 @@ remove_move (struct web *web, struct move *move) struct move_list *ml; remove_move_1 (web, move); for (ml = web->moves; ml; ml = ml->next) - if (ml->move == move) - abort (); + gcc_assert (ml->move != move); } /* Merge the moves for the two webs into the first web's movelist. */ @@ -696,10 +702,10 @@ combine (struct web *u, struct web *v) { int i; struct conflict_link *wl; - if (u == v || v->type == COALESCED) - abort (); - if ((u->regno >= max_normal_pseudo) != (v->regno >= max_normal_pseudo)) - abort (); + gcc_assert (u != v); + gcc_assert (v->type != COALESCED); + gcc_assert ((u->regno >= max_normal_pseudo) + == (v->regno >= max_normal_pseudo)); remove_web_from_list (v); put_web (v, COALESCED); v->alias = u; @@ -793,10 +799,9 @@ combine (struct web *u, struct web *v) conflicts. */ u->num_freedom = hard_regs_count (u->usable_regs); u->num_freedom -= u->add_hardregs; - /* The next would mean an invalid coalesced move (both webs have no - possible hardreg in common), so abort. */ - if (!u->num_freedom) - abort(); + /* The next checks for an invalid coalesced move (both webs must have + possible hardregs in common). 
*/ + gcc_assert (u->num_freedom); if (u->num_conflicts >= NUM_REGS (u) && (u->type == FREEZE || simplify_p (u->type))) @@ -970,8 +975,7 @@ select_spill (void) bestd = bestd2; best = best2; } - if (!bestd) - abort (); + gcc_assert (bestd); /* Note the potential spill. */ DLIST_WEB (bestd)->was_spilled = 1; @@ -1429,7 +1433,7 @@ colorize_one_web (struct web *web, int hard) if (c < 0) { /* Guard against a simplified node being spilled. */ - /* Don't abort. This can happen, when e.g. enough registers + /* Don't assert. This can happen, when e.g. enough registers are available in colors, but they are not consecutive. This is a very serious issue if this web is a short live one, because even if we spill this one here, the situation won't become better @@ -1440,8 +1444,7 @@ colorize_one_web (struct web *web, int hard) again. That's why we try to find a neighbor, which spans more instructions that ourself, and got a color, and try to spill _that_. - if (DLIST_WEB (d)->was_spilled < 0) - abort (); */ + gcc_assert (DLIST_WEB (d)->was_spilled >= 0); */ if (hard && (!web->was_spilled || web->spill_temp)) { unsigned int loop; @@ -1536,8 +1539,7 @@ colorize_one_web (struct web *web, int hard) int old_c = try->color; if (try->type == COALESCED) { - if (alias (try)->type != PRECOLORED) - abort (); + gcc_assert (alias (try)->type == PRECOLORED); ra_debug_msg (DUMP_COLORIZE, " breaking alias %d -> %d\n", try->id, alias (try)->id); break_precolored_alias (try); @@ -1800,9 +1802,8 @@ try_recolor_web (struct web *web) above what happens, when wide webs are involved, and why in that case there might actually be some webs spilled although thought to be colorable. */ - if (cost > cost_neighbors[newcol] - && nregs == 1 && !TEST_HARD_REG_BIT (wide_seen, newcol)) - abort (); + gcc_assert (cost <= cost_neighbors[newcol] + || nregs != 1 || TEST_HARD_REG_BIT (wide_seen, newcol)); /* But if the new spill-cost is higher than our own, then really loose. Respill us and recolor neighbors as before. */ if (cost > web->spill_cost) @@ -1817,26 +1818,29 @@ try_recolor_web (struct web *web) struct web *web2 = alias (wl->t); if (old_colors[web2->id]) { - if (web2->type == SPILLED) + switch (web2->type) { + case SPILLED: remove_list (web2->dlink, &WEBS(SPILLED)); web2->color = old_colors[web2->id] - 1; put_web (web2, COLORED); + break; + case COLORED: + web2->color = old_colors[web2->id] - 1; + break; + case SELECT: + /* This means, that WEB2 once was a part of a coalesced + web, which got spilled in the above colorize_one_web() + call, and whose parts then got split and put back + onto the SELECT stack. As the cause for that splitting + (the coloring of WEB) was worthless, we should again + coalesce the parts, as they were before. For now we + simply leave them SELECTed, for our caller to take + care. */ + break; + default: + gcc_unreachable (); } - else if (web2->type == COLORED) - web2->color = old_colors[web2->id] - 1; - else if (web2->type == SELECT) - /* This means, that WEB2 once was a part of a coalesced - web, which got spilled in the above colorize_one_web() - call, and whose parts then got split and put back - onto the SELECT stack. As the cause for that splitting - (the coloring of WEB) was worthless, we should again - coalesce the parts, as they were before. For now we - simply leave them SELECTed, for our caller to take - care. 
*/ - ; - else - abort (); } } } @@ -1878,16 +1882,16 @@ insert_coalesced_conflicts (void) when first some webs were coalesced and conflicts propagated, then some combining narrowed usable_regs and further coalescing ignored those conflicts. Now there are - some edges to COALESCED webs but not to it's alias. - So abort only when they really should conflict. */ - if ((!(tweb->type == PRECOLORED - || TEST_BIT (sup_igraph, tweb->id * num_webs + wl->t->id)) - || !(wl->t->type == PRECOLORED - || TEST_BIT (sup_igraph, - wl->t->id * num_webs + tweb->id))) - && hard_regs_intersect_p (&tweb->usable_regs, - &wl->t->usable_regs)) - abort (); + some edges to COALESCED webs but not to its alias. + So assert they really don not conflict. */ + gcc_assert (((tweb->type == PRECOLORED + || TEST_BIT (sup_igraph, + tweb->id * num_webs + wl->t->id)) + && (wl->t->type == PRECOLORED + || TEST_BIT (sup_igraph, + wl->t->id * num_webs + tweb->id))) + || !hard_regs_intersect_p (&tweb->usable_regs, + &wl->t->usable_regs)); /*if (wl->sub == NULL) record_conflict (tweb, wl->t); else @@ -1970,18 +1974,32 @@ check_colors (void) struct web *aweb = alias (web); struct conflict_link *wl; int nregs, c; - if (aweb->type == SPILLED || web->regno >= max_normal_pseudo) + + if (web->regno >= max_normal_pseudo) continue; - else if (aweb->type == COLORED) - nregs = hard_regno_nregs[aweb->color][GET_MODE (web->orig_x)]; - else if (aweb->type == PRECOLORED) - nregs = 1; - else - abort (); + + switch (aweb->type) + { + case SPILLED: + continue; + + case COLORED: + nregs = hard_regno_nregs[aweb->color][GET_MODE (web->orig_x)]; + break; + + case PRECOLORED: + nregs = 1; + break; + + default: + gcc_unreachable (); + } + +#ifdef ENABLE_CHECKING /* The color must be valid for the original usable_regs. */ for (c = 0; c < nregs; c++) - if (!TEST_HARD_REG_BIT (web->usable_regs, aweb->color + c)) - abort (); + gcc_assert (TEST_HARD_REG_BIT (web->usable_regs, aweb->color + c)); +#endif /* Search the original (pre-coalesce) conflict list. 
In the current one some imprecise conflicts may be noted (due to combine() or insert_coalesced_conflicts() relocating partial conflicts) making @@ -2002,10 +2020,9 @@ check_colors (void) nregs2 = 1; else continue; - if (aweb->color >= web2->color + nregs2 - || web2->color >= aweb->color + nregs) - continue; - abort (); + gcc_assert (aweb->color >= web2->color + nregs2 + || web2->color >= aweb->color + nregs); + continue; } else { @@ -2026,10 +2043,9 @@ check_colors (void) && GET_MODE_SIZE (GET_MODE (sl->s->orig_x)) >= UNITS_PER_WORD) sofs = (SUBREG_BYTE (sl->s->orig_x) / UNITS_PER_WORD); - if ((tcol + tofs >= scol + sofs + ssize) - || (scol + sofs >= tcol + tofs + tsize)) - continue; - abort (); + gcc_assert ((tcol + tofs >= scol + sofs + ssize) + || (scol + sofs >= tcol + tofs + tsize)); + continue; } } } @@ -2070,8 +2086,7 @@ static void break_aliases_to_web (struct web *web) { struct dlist *d, *d_next; - if (web->type != SPILLED) - abort (); + gcc_assert (web->type == SPILLED); for (d = WEBS(COALESCED); d; d = d_next) { struct web *other = DLIST_WEB (d); @@ -2115,8 +2130,7 @@ break_precolored_alias (struct web *web) struct conflict_link *wl; unsigned int c = pre->color; unsigned int nregs = hard_regno_nregs[c][GET_MODE (web->orig_x)]; - if (pre->type != PRECOLORED) - abort (); + gcc_assert (pre->type == PRECOLORED); unalias_web (web); /* Now we need to look at each conflict X of WEB, if it conflicts with [PRE, PRE+nregs), and remove such conflicts, of X has not other @@ -2208,13 +2222,12 @@ restore_conflicts_from_coalesce (struct web *web) struct sub_conflict *sl; wl = *pcl; *pcl = wl->next; - if (!other->have_orig_conflicts && other->type != PRECOLORED) - abort (); + gcc_assert (other->have_orig_conflicts + || other->type == PRECOLORED); for (owl = other->orig_conflict_list; owl; owl = owl->next) if (owl->t == web) break; - if (owl) - abort (); + gcc_assert (!owl); opcl = &(other->conflict_list); while (*opcl) { @@ -2229,8 +2242,7 @@ restore_conflicts_from_coalesce (struct web *web) opcl = &((*opcl)->next); } } - if (!owl && other->type != PRECOLORED) - abort (); + gcc_assert (owl || other->type == PRECOLORED); /* wl and owl contain the edge data to be deleted. */ RESET_BIT (sup_igraph, web->id * num_webs + other->id); RESET_BIT (sup_igraph, other->id * num_webs + web->id); @@ -2429,8 +2441,7 @@ sort_and_combine_web_pairs (int for_move) sorted = xmalloc (num_web_pairs * sizeof (sorted[0])); for (p = web_pair_list, i = 0; p; p = p->next_list) sorted[i++] = p; - if (i != num_web_pairs) - abort (); + gcc_assert (i == num_web_pairs); qsort (sorted, num_web_pairs, sizeof (sorted[0]), comp_web_pairs); /* After combining one pair, we actually should adjust the savings @@ -2624,17 +2635,16 @@ check_uncoalesced_moves (void) s = t; t = h; } - if (s != t - && m->type != CONSTRAINED - /* Following can happen when a move was coalesced, but later - broken up again. Then s!=t, but m is still MV_COALESCED. */ - && m->type != MV_COALESCED - && t->type != PRECOLORED - && ((s->type == PRECOLORED && ok (t, s)) - || s->type != PRECOLORED) - && !TEST_BIT (sup_igraph, s->id * num_webs + t->id) - && !TEST_BIT (sup_igraph, t->id * num_webs + s->id)) - abort (); + gcc_assert (s == t + || m->type == CONSTRAINED + /* Following can happen when a move was coalesced, but + later broken up again. Then s!=t, but m is still + MV_COALESCED. 
*/ + || m->type == MV_COALESCED + || t->type == PRECOLORED + || (s->type == PRECOLORED && !ok (t, s)) + || TEST_BIT (sup_igraph, s->id * num_webs + t->id) + || TEST_BIT (sup_igraph, t->id * num_webs + s->id)); } } diff --git a/gcc/ra-rewrite.c b/gcc/ra-rewrite.c index 8a418ac0ec0..adc39831686 100644 --- a/gcc/ra-rewrite.c +++ b/gcc/ra-rewrite.c @@ -119,8 +119,8 @@ spill_coalescing (sbitmap coalesce, sbitmap spilled) T from the web which was coalesced into T, which at the time of combine() were not already on the SELECT stack or were itself coalesced to something other. */ - if (t->type != SPILLED || s->type != SPILLED) - abort (); + gcc_assert (t->type == SPILLED + && s->type == SPILLED); remove_list (t->dlink, &WEBS(SPILLED)); put_web (t, COALESCED); t->alias = s; @@ -570,8 +570,7 @@ slots_overlap_p (rtx s1, rtx s2) return 0; return 1; } - if (!MEM_P (s1) || GET_CODE (s2) != MEM) - abort (); + gcc_assert (MEM_P (s1) && GET_CODE (s2) == MEM); s1 = XEXP (s1, 0); s2 = XEXP (s2, 0); if (GET_CODE (s1) != PLUS || !REG_P (XEXP (s1, 0)) @@ -873,8 +872,7 @@ emit_loads (struct rewrite_info *ri, int nl_first_reload, rtx last_block_insn) if (!web) continue; supweb = find_web_for_subweb (web); - if (supweb->regno >= max_normal_pseudo) - abort (); + gcc_assert (supweb->regno < max_normal_pseudo); /* Check for web being a spilltemp, if we only want to load spilltemps. Also remember, that we emitted that load, which we don't need to do when we have a death, @@ -900,14 +898,12 @@ emit_loads (struct rewrite_info *ri, int nl_first_reload, rtx last_block_insn) (at least then disallow spilling them, which we already ensure when flag_ra_break_aliases), or not take the pattern but a stackslot. */ - if (aweb != supweb) - abort (); + gcc_assert (aweb == supweb); slot = copy_rtx (supweb->pattern); reg = copy_rtx (supweb->orig_x); /* Sanity check. orig_x should be a REG rtx, which should be shared over all RTL, so copy_rtx should have no effect. 
*/ - if (reg != supweb->orig_x) - abort (); + gcc_assert (reg == supweb->orig_x); } else { @@ -1022,8 +1018,7 @@ reloads_to_loads (struct rewrite_info *ri, struct ref **refs, { struct web *web2 = ID2WEB (j); struct web *aweb2 = alias (find_web_for_subweb (web2)); - if (spill_is_free (&(ri->colors_in_use), aweb2) == 0) - abort (); + gcc_assert (spill_is_free (&(ri->colors_in_use), aweb2) != 0); if (spill_same_color_p (supweb, aweb2) /* && interfere (web, web2) */) { @@ -1396,8 +1391,7 @@ rewrite_program2 (bitmap new_deaths) ri.need_load = 1; emit_loads (&ri, nl_first_reload, last_block_insn); - if (ri.nl_size != 0 /*|| ri.num_reloads != 0*/) - abort (); + gcc_assert (ri.nl_size == 0); if (!insn) break; } @@ -1677,8 +1671,8 @@ emit_colors (struct df *df) continue; if (web->type == COALESCED && alias (web)->type == COLORED) continue; - if (web->reg_rtx || web->regno < FIRST_PSEUDO_REGISTER) - abort (); + gcc_assert (!web->reg_rtx); + gcc_assert (web->regno >= FIRST_PSEUDO_REGISTER); if (web->regno >= max_normal_pseudo) { @@ -221,9 +221,11 @@ static int first_hard_reg (HARD_REG_SET rs) { int c; - for (c = 0; c < FIRST_PSEUDO_REGISTER && !TEST_HARD_REG_BIT (rs, c); c++) - if (c == FIRST_PSEUDO_REGISTER) - abort(); + + for (c = 0; c < FIRST_PSEUDO_REGISTER; c++) + if (TEST_HARD_REG_BIT (rs, c)) + break; + gcc_assert (c < FIRST_PSEUDO_REGISTER); return c; } @@ -291,8 +293,7 @@ create_insn_info (struct df *df) act_refs += n; insn_df[uid].num_uses = n; } - if (refs_for_insn_df + (df->def_id + df->use_id) < act_refs) - abort (); + gcc_assert (refs_for_insn_df + (df->def_id + df->use_id) >= act_refs); } /* Free the insn_df structures. */ @@ -315,8 +316,7 @@ struct web * find_subweb (struct web *web, rtx reg) { struct web *w; - if (GET_CODE (reg) != SUBREG) - abort (); + gcc_assert (GET_CODE (reg) == SUBREG); for (w = web->subreg_next; w; w = w->subreg_next) if (GET_MODE (w->orig_x) == GET_MODE (reg) && SUBREG_BYTE (w->orig_x) == SUBREG_BYTE (reg)) @@ -577,8 +577,7 @@ init_ra (void) an_unusable_color++) if (TEST_HARD_REG_BIT (never_use_colors, an_unusable_color)) break; - if (an_unusable_color == FIRST_PSEUDO_REGISTER) - abort (); + gcc_assert (an_unusable_color != FIRST_PSEUDO_REGISTER); orig_max_uid = get_max_uid (); compute_bb_for_insn (); @@ -589,7 +588,7 @@ init_ra (void) gcc_obstack_init (&ra_obstack); } -/* Check the consistency of DF. This aborts if it violates some +/* Check the consistency of DF. This asserts if it violates some invariances we expect. */ static void @@ -620,19 +619,21 @@ check_df (struct df *df) { bitmap_clear (b); for (link = DF_INSN_DEFS (df, insn); link; link = link->next) - if (!link->ref || bitmap_bit_p (empty_defs, DF_REF_ID (link->ref)) - || bitmap_bit_p (b, DF_REF_ID (link->ref))) - abort (); - else + { + gcc_assert (link->ref); + gcc_assert (!bitmap_bit_p (empty_defs, DF_REF_ID (link->ref))); + gcc_assert (!bitmap_bit_p (b, DF_REF_ID (link->ref))); bitmap_set_bit (b, DF_REF_ID (link->ref)); + } bitmap_clear (b); for (link = DF_INSN_USES (df, insn); link; link = link->next) - if (!link->ref || bitmap_bit_p (empty_uses, DF_REF_ID (link->ref)) - || bitmap_bit_p (b, DF_REF_ID (link->ref))) - abort (); - else + { + gcc_assert (link->ref); + gcc_assert (!bitmap_bit_p (empty_uses, DF_REF_ID (link->ref))); + gcc_assert (!bitmap_bit_p (b, DF_REF_ID (link->ref))); bitmap_set_bit (b, DF_REF_ID (link->ref)); + } } /* Now the same for the chains per register number. 
*/ @@ -640,19 +641,21 @@ check_df (struct df *df) { bitmap_clear (b); for (link = df->regs[regno].defs; link; link = link->next) - if (!link->ref || bitmap_bit_p (empty_defs, DF_REF_ID (link->ref)) - || bitmap_bit_p (b, DF_REF_ID (link->ref))) - abort (); - else + { + gcc_assert (link->ref); + gcc_assert (!bitmap_bit_p (empty_defs, DF_REF_ID (link->ref))); + gcc_assert (!bitmap_bit_p (b, DF_REF_ID (link->ref))); bitmap_set_bit (b, DF_REF_ID (link->ref)); + } bitmap_clear (b); for (link = df->regs[regno].uses; link; link = link->next) - if (!link->ref || bitmap_bit_p (empty_uses, DF_REF_ID (link->ref)) - || bitmap_bit_p (b, DF_REF_ID (link->ref))) - abort (); - else + { + gcc_assert (link->ref); + gcc_assert (!bitmap_bit_p (empty_uses, DF_REF_ID (link->ref))); + gcc_assert (!bitmap_bit_p (b, DF_REF_ID (link->ref))); bitmap_set_bit (b, DF_REF_ID (link->ref)); + } } BITMAP_XFREE (empty_uses); diff --git a/gcc/real.c b/gcc/real.c index 2eb2019399f..871fae73f34 100644 --- a/gcc/real.c +++ b/gcc/real.c @@ -577,7 +577,7 @@ do_add (REAL_VALUE_TYPE *r, const REAL_VALUE_TYPE *a, break; default: - abort (); + gcc_unreachable (); } /* Swap the arguments such that A has the larger exponent. */ @@ -708,7 +708,7 @@ do_multiply (REAL_VALUE_TYPE *r, const REAL_VALUE_TYPE *a, break; default: - abort (); + gcc_unreachable (); } if (r == a || r == b) @@ -850,7 +850,7 @@ do_divide (REAL_VALUE_TYPE *r, const REAL_VALUE_TYPE *a, break; default: - abort (); + gcc_unreachable (); } if (r == a || r == b) @@ -929,7 +929,7 @@ do_compare (const REAL_VALUE_TYPE *a, const REAL_VALUE_TYPE *b, break; default: - abort (); + gcc_unreachable (); } if (a->sign != b->sign) @@ -967,7 +967,7 @@ do_fix_trunc (REAL_VALUE_TYPE *r, const REAL_VALUE_TYPE *a) break; default: - abort (); + gcc_unreachable (); } } @@ -1031,7 +1031,7 @@ real_arithmetic (REAL_VALUE_TYPE *r, int icode, const REAL_VALUE_TYPE *op0, break; default: - abort (); + gcc_unreachable (); } } @@ -1084,7 +1084,7 @@ real_compare (int icode, const REAL_VALUE_TYPE *op0, return do_compare (op0, op1, 0) != 0; default: - abort (); + gcc_unreachable (); } } @@ -1103,7 +1103,7 @@ real_exponent (const REAL_VALUE_TYPE *r) case rvc_normal: return REAL_EXP (r); default: - abort (); + gcc_unreachable (); } } @@ -1131,7 +1131,7 @@ real_ldexp (REAL_VALUE_TYPE *r, const REAL_VALUE_TYPE *op0, int exp) break; default: - abort (); + gcc_unreachable (); } } @@ -1199,7 +1199,7 @@ real_identical (const REAL_VALUE_TYPE *a, const REAL_VALUE_TYPE *b) break; default: - abort (); + gcc_unreachable (); } for (i = 0; i < SIGSZ; ++i) @@ -1279,14 +1279,13 @@ real_to_integer (const REAL_VALUE_TYPE *r) if (HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG) i = r->sig[SIGSZ-1]; - else if (HOST_BITS_PER_WIDE_INT == 2*HOST_BITS_PER_LONG) + else { + gcc_assert (HOST_BITS_PER_WIDE_INT == 2 * HOST_BITS_PER_LONG); i = r->sig[SIGSZ-1]; i = i << (HOST_BITS_PER_LONG - 1) << 1; i |= r->sig[SIGSZ-2]; } - else - abort (); i >>= HOST_BITS_PER_WIDE_INT - REAL_EXP (r); @@ -1295,7 +1294,7 @@ real_to_integer (const REAL_VALUE_TYPE *r) return i; default: - abort (); + gcc_unreachable (); } } @@ -1346,8 +1345,9 @@ real_to_integer2 (HOST_WIDE_INT *plow, HOST_WIDE_INT *phigh, high = t.sig[SIGSZ-1]; low = t.sig[SIGSZ-2]; } - else if (HOST_BITS_PER_WIDE_INT == 2*HOST_BITS_PER_LONG) + else { + gcc_assert (HOST_BITS_PER_WIDE_INT == 2*HOST_BITS_PER_LONG); high = t.sig[SIGSZ-1]; high = high << (HOST_BITS_PER_LONG - 1) << 1; high |= t.sig[SIGSZ-2]; @@ -1356,8 +1356,6 @@ real_to_integer2 (HOST_WIDE_INT *plow, HOST_WIDE_INT *phigh, 
low = low << (HOST_BITS_PER_LONG - 1) << 1; low |= t.sig[SIGSZ-4]; } - else - abort (); if (r->sign) { @@ -1369,7 +1367,7 @@ real_to_integer2 (HOST_WIDE_INT *plow, HOST_WIDE_INT *phigh, break; default: - abort (); + gcc_unreachable (); } *plow = low; @@ -1446,7 +1444,7 @@ real_to_decimal (char *str, const REAL_VALUE_TYPE *r_orig, size_t buf_size, strcpy (str, (r.sign ? "-NaN" : "+NaN")); return; default: - abort (); + gcc_unreachable (); } /* Bound the number of digits printed by the size of the representation. */ @@ -1463,8 +1461,7 @@ real_to_decimal (char *str, const REAL_VALUE_TYPE *r_orig, size_t buf_size, /* Bound the number of digits printed by the size of the output buffer. */ max_digits = buf_size - 1 - 1 - 2 - max_digits - 1; - if (max_digits > buf_size) - abort (); + gcc_assert (max_digits <= buf_size); if (digits > max_digits) digits = max_digits; @@ -1607,8 +1604,7 @@ real_to_decimal (char *str, const REAL_VALUE_TYPE *r_orig, size_t buf_size, do_multiply (&r, &r, ten); digit = rtd_divmod (&r, &pten); dec_exp -= 1; - if (digit == 0) - abort (); + gcc_assert (digit != 0); } /* ... or overflow. */ @@ -1619,10 +1615,11 @@ real_to_decimal (char *str, const REAL_VALUE_TYPE *r_orig, size_t buf_size, *p++ = '0'; dec_exp += 1; } - else if (digit > 10) - abort (); else - *p++ = digit + '0'; + { + gcc_assert (digit <= 10); + *p++ = digit + '0'; + } /* Generate subsequent digits. */ while (--digits > 0) @@ -1713,7 +1710,7 @@ real_to_hexadecimal (char *str, const REAL_VALUE_TYPE *r, size_t buf_size, strcpy (str, (r->sign ? "-NaN" : "+NaN")); return; default: - abort (); + gcc_unreachable (); } if (digits == 0) @@ -1723,8 +1720,7 @@ real_to_hexadecimal (char *str, const REAL_VALUE_TYPE *r, size_t buf_size, sprintf (exp_buf, "p%+d", exp); max_digits = buf_size - strlen (exp_buf) - r->sign - 4 - 1; - if (max_digits > buf_size) - abort (); + gcc_assert (max_digits <= buf_size); if (digits > max_digits) digits = max_digits; @@ -1982,8 +1978,9 @@ real_from_integer (REAL_VALUE_TYPE *r, enum machine_mode mode, r->sig[SIGSZ-2] = low; memset (r->sig, 0, sizeof(long)*(SIGSZ-2)); } - else if (HOST_BITS_PER_LONG*2 == HOST_BITS_PER_WIDE_INT) + else { + gcc_assert (HOST_BITS_PER_LONG*2 == HOST_BITS_PER_WIDE_INT); r->sig[SIGSZ-1] = high >> (HOST_BITS_PER_LONG - 1) >> 1; r->sig[SIGSZ-2] = high; r->sig[SIGSZ-3] = low >> (HOST_BITS_PER_LONG - 1) >> 1; @@ -1991,8 +1988,6 @@ real_from_integer (REAL_VALUE_TYPE *r, enum machine_mode mode, if (SIGSZ > 4) memset (r->sig, 0, sizeof(long)*(SIGSZ-4)); } - else - abort (); normalize (r); } @@ -2008,8 +2003,8 @@ ten_to_ptwo (int n) { static REAL_VALUE_TYPE tens[EXP_BITS]; - if (n < 0 || n >= EXP_BITS) - abort (); + gcc_assert (n >= 0); + gcc_assert (n < EXP_BITS); if (tens[n].cl == rvc_zero) { @@ -2040,8 +2035,8 @@ ten_to_mptwo (int n) { static REAL_VALUE_TYPE tens[EXP_BITS]; - if (n < 0 || n >= EXP_BITS) - abort (); + gcc_assert (n >= 0); + gcc_assert (n < EXP_BITS); if (tens[n].cl == rvc_zero) do_divide (&tens[n], real_digit (1), ten_to_ptwo (n)); @@ -2056,8 +2051,8 @@ real_digit (int n) { static REAL_VALUE_TYPE num[10]; - if (n < 0 || n > 9) - abort (); + gcc_assert (n >= 0); + gcc_assert (n <= 9); if (n > 0 && num[n].cl == rvc_zero) real_from_integer (&num[n], VOIDmode, n, 0, 1); @@ -2111,8 +2106,7 @@ real_nan (REAL_VALUE_TYPE *r, const char *str, int quiet, const struct real_format *fmt; fmt = REAL_MODE_FORMAT (mode); - if (fmt == NULL) - abort (); + gcc_assert (fmt); if (*str == 0) { @@ -2163,7 +2157,7 @@ real_nan (REAL_VALUE_TYPE *r, const char *str, int quiet, 
add_significands (r, r, &u); break; default: - abort (); + gcc_unreachable (); } get_zero (&u, 0); @@ -2201,8 +2195,7 @@ real_maxval (REAL_VALUE_TYPE *r, int sign, enum machine_mode mode) int np2; fmt = REAL_MODE_FORMAT (mode); - if (fmt == NULL) - abort (); + gcc_assert (fmt); r->cl = rvc_normal; r->sign = sign; @@ -2271,7 +2264,7 @@ round_for_format (const struct real_format *fmt, REAL_VALUE_TYPE *r) break; default: - abort (); + gcc_unreachable (); } /* If we're not base2, normalize the exponent to a multiple of @@ -2375,8 +2368,7 @@ real_convert (REAL_VALUE_TYPE *r, enum machine_mode mode, const struct real_format *fmt; fmt = REAL_MODE_FORMAT (mode); - if (fmt == NULL) - abort (); + gcc_assert (fmt); *r = *a; round_for_format (fmt, r); @@ -2437,8 +2429,7 @@ real_to_target (long *buf, const REAL_VALUE_TYPE *r, enum machine_mode mode) const struct real_format *fmt; fmt = REAL_MODE_FORMAT (mode); - if (fmt == NULL) - abort (); + gcc_assert (fmt); return real_to_target_fmt (buf, r, fmt); } @@ -2462,8 +2453,7 @@ real_from_target (REAL_VALUE_TYPE *r, const long *buf, enum machine_mode mode) const struct real_format *fmt; fmt = REAL_MODE_FORMAT (mode); - if (fmt == NULL) - abort (); + gcc_assert (fmt); (*fmt->decode) (fmt, r, buf); } @@ -2512,7 +2502,7 @@ real_hash (const REAL_VALUE_TYPE *r) break; default: - abort (); + gcc_unreachable (); } if (sizeof(unsigned long) > sizeof(unsigned int)) @@ -2596,7 +2586,7 @@ encode_ieee_single (const struct real_format *fmt, long *buf, break; default: - abort (); + gcc_unreachable (); } buf[0] = image; @@ -2781,7 +2771,7 @@ encode_ieee_double (const struct real_format *fmt, long *buf, break; default: - abort (); + gcc_unreachable (); } if (FLOAT_WORDS_BIG_ENDIAN) @@ -3004,8 +2994,7 @@ encode_ieee_extended (const struct real_format *fmt, long *buf, else { exp += 16383 - 1; - if (exp < 0) - abort (); + gcc_assert (exp >= 0); } image_hi |= exp; @@ -3024,7 +3013,7 @@ encode_ieee_extended (const struct real_format *fmt, long *buf, break; default: - abort (); + gcc_unreachable (); } buf[0] = sig_lo, buf[1] = sig_hi, buf[2] = image_hi; @@ -3521,7 +3510,7 @@ encode_ieee_quad (const struct real_format *fmt, long *buf, break; default: - abort (); + gcc_unreachable (); } if (FLOAT_WORDS_BIG_ENDIAN) @@ -3738,7 +3727,7 @@ encode_vax_f (const struct real_format *fmt ATTRIBUTE_UNUSED, long *buf, break; default: - abort (); + gcc_unreachable (); } buf[0] = image; @@ -3809,7 +3798,7 @@ encode_vax_d (const struct real_format *fmt ATTRIBUTE_UNUSED, long *buf, break; default: - abort (); + gcc_unreachable (); } if (FLOAT_WORDS_BIG_ENDIAN) @@ -3909,7 +3898,7 @@ encode_vax_g (const struct real_format *fmt ATTRIBUTE_UNUSED, long *buf, break; default: - abort (); + gcc_unreachable (); } if (FLOAT_WORDS_BIG_ENDIAN) @@ -4060,7 +4049,7 @@ encode_i370_single (const struct real_format *fmt ATTRIBUTE_UNUSED, break; default: - abort (); + gcc_unreachable (); } buf[0] = image; @@ -4129,7 +4118,7 @@ encode_i370_double (const struct real_format *fmt ATTRIBUTE_UNUSED, break; default: - abort (); + gcc_unreachable (); } if (FLOAT_WORDS_BIG_ENDIAN) @@ -4272,7 +4261,7 @@ encode_c4x_single (const struct real_format *fmt ATTRIBUTE_UNUSED, break; default: - abort (); + gcc_unreachable (); } image = ((exp & 0xff) << 24) | (sig & 0xffffff); @@ -4350,7 +4339,7 @@ encode_c4x_extended (const struct real_format *fmt ATTRIBUTE_UNUSED, break; default: - abort (); + gcc_unreachable (); } exp = (exp & 0xff) << 24; diff --git a/gcc/recog.c b/gcc/recog.c index 5bd8458baf6..7d6ce19c943 100644 --- 
a/gcc/recog.c +++ b/gcc/recog.c @@ -211,8 +211,7 @@ validate_change (rtx object, rtx *loc, rtx new, int in_group) if (old == new || rtx_equal_p (old, new)) return 1; - if (in_group == 0 && num_changes != 0) - abort (); + gcc_assert (in_group != 0 || num_changes == 0); *loc = new; @@ -489,9 +488,9 @@ validate_replace_rtx_1 (rtx *loc, rtx from, rtx to, rtx object) && GET_CODE (SET_SRC (XVECEXP (x, 0, j))) == ASM_OPERANDS) { /* Verify that operands are really shared. */ - if (ASM_OPERANDS_INPUT_VEC (SET_SRC (XVECEXP (x, 0, 0))) != - ASM_OPERANDS_INPUT_VEC (SET_SRC (XVECEXP (x, 0, j)))) - abort (); + gcc_assert (ASM_OPERANDS_INPUT_VEC (SET_SRC (XVECEXP (x, 0, 0))) + == ASM_OPERANDS_INPUT_VEC (SET_SRC (XVECEXP + (x, 0, j)))); validate_replace_rtx_1 (&SET_DEST (XVECEXP (x, 0, j)), from, to, object); } @@ -1567,8 +1566,7 @@ asm_operand_ok (rtx op, const char *constraint) int result = 0; /* Use constrain_operands after reload. */ - if (reload_completed) - abort (); + gcc_assert (!reload_completed); while (*constraint) { @@ -2008,8 +2006,7 @@ extract_insn (rtx insn) /* This insn is an `asm' with operands. */ /* expand_asm_operands makes sure there aren't too many operands. */ - if (noperands > MAX_RECOG_OPERANDS) - abort (); + gcc_assert (noperands <= MAX_RECOG_OPERANDS); /* Now get the operand values and constraints out of the insn. */ decode_asm_operands (body, recog_data.operand, @@ -2057,8 +2054,7 @@ extract_insn (rtx insn) : recog_data.constraints[i][0] == '+' ? OP_INOUT : OP_IN); - if (recog_data.n_alternatives > MAX_RECOG_ALTERNATIVES) - abort (); + gcc_assert (recog_data.n_alternatives <= MAX_RECOG_ALTERNATIVES); } /* After calling extract_insn, you can use this function to extract some @@ -2815,8 +2811,7 @@ static int peep2_current; rtx peep2_next_insn (int n) { - if (n >= MAX_INSNS_PER_PEEP2 + 1) - abort (); + gcc_assert (n < MAX_INSNS_PER_PEEP2 + 1); n += peep2_current; if (n >= MAX_INSNS_PER_PEEP2 + 1) @@ -2833,15 +2828,13 @@ peep2_next_insn (int n) int peep2_regno_dead_p (int ofs, int regno) { - if (ofs >= MAX_INSNS_PER_PEEP2 + 1) - abort (); + gcc_assert (ofs < MAX_INSNS_PER_PEEP2 + 1); ofs += peep2_current; if (ofs >= MAX_INSNS_PER_PEEP2 + 1) ofs -= MAX_INSNS_PER_PEEP2 + 1; - if (peep2_insn_data[ofs].insn == NULL_RTX) - abort (); + gcc_assert (peep2_insn_data[ofs].insn != NULL_RTX); return ! 
REGNO_REG_SET_P (peep2_insn_data[ofs].live_before, regno); } @@ -2853,15 +2846,13 @@ peep2_reg_dead_p (int ofs, rtx reg) { int regno, n; - if (ofs >= MAX_INSNS_PER_PEEP2 + 1) - abort (); + gcc_assert (ofs < MAX_INSNS_PER_PEEP2 + 1); ofs += peep2_current; if (ofs >= MAX_INSNS_PER_PEEP2 + 1) ofs -= MAX_INSNS_PER_PEEP2 + 1; - if (peep2_insn_data[ofs].insn == NULL_RTX) - abort (); + gcc_assert (peep2_insn_data[ofs].insn != NULL_RTX); regno = REGNO (reg); n = hard_regno_nregs[regno][GET_MODE (reg)]; @@ -2891,8 +2882,8 @@ peep2_find_free_register (int from, int to, const char *class_str, HARD_REG_SET live; int i; - if (from >= MAX_INSNS_PER_PEEP2 + 1 || to >= MAX_INSNS_PER_PEEP2 + 1) - abort (); + gcc_assert (from < MAX_INSNS_PER_PEEP2 + 1); + gcc_assert (to < MAX_INSNS_PER_PEEP2 + 1); from += peep2_current; if (from >= MAX_INSNS_PER_PEEP2 + 1) @@ -2901,8 +2892,7 @@ peep2_find_free_register (int from, int to, const char *class_str, if (to >= MAX_INSNS_PER_PEEP2 + 1) to -= MAX_INSNS_PER_PEEP2 + 1; - if (peep2_insn_data[from].insn == NULL_RTX) - abort (); + gcc_assert (peep2_insn_data[from].insn != NULL_RTX); REG_SET_TO_HARD_REG_SET (live, peep2_insn_data[from].live_before); while (from != to) @@ -2911,8 +2901,7 @@ peep2_find_free_register (int from, int to, const char *class_str, if (++from >= MAX_INSNS_PER_PEEP2 + 1) from = 0; - if (peep2_insn_data[from].insn == NULL_RTX) - abort (); + gcc_assert (peep2_insn_data[from].insn != NULL_RTX); REG_SET_TO_HARD_REG_SET (this_live, peep2_insn_data[from].live_before); IOR_HARD_REG_SET (live, this_live); } @@ -3076,8 +3065,7 @@ peephole2_optimize (FILE *dump_file ATTRIBUTE_UNUSED) new_insn = NEXT_INSN (new_insn); } - if (new_insn == NULL_RTX) - abort (); + gcc_assert (new_insn != NULL_RTX); CALL_INSN_FUNCTION_USAGE (new_insn) = CALL_INSN_FUNCTION_USAGE (old_insn); @@ -3106,8 +3094,7 @@ peephole2_optimize (FILE *dump_file ATTRIBUTE_UNUSED) if (j >= MAX_INSNS_PER_PEEP2 + 1) j -= MAX_INSNS_PER_PEEP2 + 1; old_insn = peep2_insn_data[j].insn; - if (CALL_P (old_insn)) - abort (); + gcc_assert (!CALL_P (old_insn)); } break; } @@ -3277,8 +3264,7 @@ store_data_bypass_p (rtx out_insn, rtx in_insn) rtx out_set, in_set; in_set = single_set (in_insn); - if (! in_set) - abort (); + gcc_assert (in_set); if (!MEM_P (SET_DEST (in_set))) return false; @@ -3295,8 +3281,7 @@ store_data_bypass_p (rtx out_insn, rtx in_insn) int i; out_pat = PATTERN (out_insn); - if (GET_CODE (out_pat) != PARALLEL) - abort (); + gcc_assert (GET_CODE (out_pat) == PARALLEL); for (i = 0; i < XVECLEN (out_pat, 0); i++) { @@ -3305,8 +3290,7 @@ store_data_bypass_p (rtx out_insn, rtx in_insn) if (GET_CODE (exp) == CLOBBER) continue; - if (GET_CODE (exp) != SET) - abort (); + gcc_assert (GET_CODE (exp) == SET); if (reg_mentioned_p (SET_DEST (exp), SET_DEST (in_set))) return false; @@ -3329,9 +3313,8 @@ if_test_bypass_p (rtx out_insn, rtx in_insn) in_set = single_set (in_insn); if (! 
in_set) { - if (JUMP_P (in_insn) || CALL_P (in_insn)) - return false; - abort (); + gcc_assert (JUMP_P (in_insn) || CALL_P (in_insn)); + return false; } if (GET_CODE (SET_SRC (in_set)) != IF_THEN_ELSE) @@ -3351,8 +3334,7 @@ if_test_bypass_p (rtx out_insn, rtx in_insn) int i; out_pat = PATTERN (out_insn); - if (GET_CODE (out_pat) != PARALLEL) - abort (); + gcc_assert (GET_CODE (out_pat) == PARALLEL); for (i = 0; i < XVECLEN (out_pat, 0); i++) { @@ -3361,8 +3343,7 @@ if_test_bypass_p (rtx out_insn, rtx in_insn) if (GET_CODE (exp) == CLOBBER) continue; - if (GET_CODE (exp) != SET) - abort (); + gcc_assert (GET_CODE (exp) == SET); if (reg_mentioned_p (SET_DEST (out_set), XEXP (in_set, 1)) || reg_mentioned_p (SET_DEST (out_set), XEXP (in_set, 2))) diff --git a/gcc/reg-stack.c b/gcc/reg-stack.c index fc13759af49..f8fca7322cd 100644 --- a/gcc/reg-stack.c +++ b/gcc/reg-stack.c @@ -506,8 +506,7 @@ record_label_references (rtx insn, rtx pat) rtx label = XEXP (pat, 0); rtx ref; - if (!LABEL_P (label)) - abort (); + gcc_assert (LABEL_P (label)); /* If this is an undefined label, LABEL_REFS (label) contains garbage. */ @@ -782,21 +781,21 @@ check_asm_stack_operands (rtx insn) static int get_asm_operand_n_inputs (rtx body) { - if (GET_CODE (body) == SET && GET_CODE (SET_SRC (body)) == ASM_OPERANDS) - return ASM_OPERANDS_INPUT_LENGTH (SET_SRC (body)); - - else if (GET_CODE (body) == ASM_OPERANDS) - return ASM_OPERANDS_INPUT_LENGTH (body); - - else if (GET_CODE (body) == PARALLEL - && GET_CODE (XVECEXP (body, 0, 0)) == SET) - return ASM_OPERANDS_INPUT_LENGTH (SET_SRC (XVECEXP (body, 0, 0))); - - else if (GET_CODE (body) == PARALLEL - && GET_CODE (XVECEXP (body, 0, 0)) == ASM_OPERANDS) - return ASM_OPERANDS_INPUT_LENGTH (XVECEXP (body, 0, 0)); - - abort (); + switch (GET_CODE (body)) + { + case SET: + gcc_assert (GET_CODE (SET_SRC (body)) == ASM_OPERANDS); + return ASM_OPERANDS_INPUT_LENGTH (SET_SRC (body)); + + case ASM_OPERANDS: + return ASM_OPERANDS_INPUT_LENGTH (body); + + case PARALLEL: + return get_asm_operand_n_inputs (XVECEXP (body, 0, 0)); + + default: + gcc_unreachable (); + } } /* If current function returns its result in an fp stack register, @@ -838,16 +837,12 @@ stack_result (tree decl) static void replace_reg (rtx *reg, int regno) { - if (regno < FIRST_STACK_REG || regno > LAST_STACK_REG - || ! STACK_REG_P (*reg)) - abort (); + gcc_assert (regno >= FIRST_STACK_REG); + gcc_assert (regno <= LAST_STACK_REG); + gcc_assert (STACK_REG_P (*reg)); - switch (GET_MODE_CLASS (GET_MODE (*reg))) - { - default: abort (); - case MODE_FLOAT: - case MODE_COMPLEX_FLOAT:; - } + gcc_assert (GET_MODE_CLASS (GET_MODE (*reg)) == MODE_FLOAT + || GET_MODE_CLASS (GET_MODE (*reg)) == MODE_COMPLEX_FLOAT); *reg = FP_MODE_REG (regno, GET_MODE (*reg)); } @@ -871,7 +866,7 @@ remove_regno_note (rtx insn, enum reg_note note, unsigned int regno) else note_link = &XEXP (this, 1); - abort (); + gcc_unreachable (); } /* Find the hard register number of virtual register REG in REGSTACK. @@ -883,8 +878,7 @@ get_hard_regnum (stack regstack, rtx reg) { int i; - if (! 
STACK_REG_P (reg)) - abort (); + gcc_assert (STACK_REG_P (reg)); for (i = regstack->top; i >= 0; i--) if (regstack->reg[i] == REGNO (reg)) @@ -918,15 +912,13 @@ emit_pop_insn (rtx insn, stack regstack, rtx reg, enum emit_where where) pop_insn = emit_pop_insn (insn, regstack, reg1, where); if (get_hard_regnum (regstack, reg2) >= 0) pop_insn = emit_pop_insn (insn, regstack, reg2, where); - if (!pop_insn) - abort (); + gcc_assert (pop_insn); return pop_insn; } hard_regno = get_hard_regnum (regstack, reg); - if (hard_regno < FIRST_STACK_REG) - abort (); + gcc_assert (hard_regno >= FIRST_STACK_REG); pop_rtx = gen_rtx_SET (VOIDmode, FP_MODE_REG (hard_regno, DFmode), FP_MODE_REG (FIRST_STACK_REG, DFmode)); @@ -966,8 +958,7 @@ emit_swap_insn (rtx insn, stack regstack, rtx reg) hard_regno = get_hard_regnum (regstack, reg); - if (hard_regno < FIRST_STACK_REG) - abort (); + gcc_assert (hard_regno >= FIRST_STACK_REG); if (hard_regno == FIRST_STACK_REG) return; @@ -1055,8 +1046,7 @@ swap_to_top (rtx insn, stack regstack, rtx src1, rtx src2) /* Place operand 1 at the top of stack. */ regno = get_hard_regnum (&temp_stack, src1); - if (regno < 0) - abort (); + gcc_assert (regno >= 0); if (regno != FIRST_STACK_REG) { k = temp_stack.top - (regno - FIRST_STACK_REG); @@ -1069,8 +1059,7 @@ swap_to_top (rtx insn, stack regstack, rtx src1, rtx src2) /* Place operand 2 next on the stack. */ regno = get_hard_regnum (&temp_stack, src2); - if (regno < 0) - abort (); + gcc_assert (regno >= 0); if (regno != FIRST_STACK_REG + 1) { k = temp_stack.top - (regno - FIRST_STACK_REG); @@ -1110,16 +1099,15 @@ move_for_stack_reg (rtx insn, stack regstack, rtx pat) int i; /* If this is a no-op move, there must not be a REG_DEAD note. */ - if (REGNO (src) == REGNO (dest)) - abort (); + gcc_assert (REGNO (src) != REGNO (dest)); for (i = regstack->top; i >= 0; i--) if (regstack->reg[i] == REGNO (src)) break; /* The source must be live, and the dest must be dead. */ - if (i < 0 || get_hard_regnum (regstack, dest) >= FIRST_STACK_REG) - abort (); + gcc_assert (i >= 0); + gcc_assert (get_hard_regnum (regstack, dest) < FIRST_STACK_REG); /* It is possible that the dest is unused after this insn. If so, just pop the src. */ @@ -1156,8 +1144,7 @@ move_for_stack_reg (rtx insn, stack regstack, rtx pat) } /* The destination ought to be dead. */ - if (get_hard_regnum (regstack, dest) >= FIRST_STACK_REG) - abort (); + gcc_assert (get_hard_regnum (regstack, dest) < FIRST_STACK_REG); replace_reg (psrc, get_hard_regnum (regstack, src)); @@ -1201,26 +1188,24 @@ move_for_stack_reg (rtx insn, stack regstack, rtx pat) replace_reg (psrc, FIRST_STACK_REG); } - else if (STACK_REG_P (dest)) + else { + gcc_assert (STACK_REG_P (dest)); + /* Load from MEM, or possibly integer REG or constant, into the stack regs. The actual target is always the top of the stack. The stack mapping is changed to reflect that DEST is now at top of stack. */ /* The destination ought to be dead. */ - if (get_hard_regnum (regstack, dest) >= FIRST_STACK_REG) - abort (); + gcc_assert (get_hard_regnum (regstack, dest) < FIRST_STACK_REG); - if (regstack->top >= REG_STACK_SIZE) - abort (); + gcc_assert (regstack->top < REG_STACK_SIZE); regstack->reg[++regstack->top] = REGNO (dest); SET_HARD_REG_BIT (regstack->reg_set, REGNO (dest)); replace_reg (pdest, FIRST_STACK_REG); } - else - abort (); return control_flow_insn_deleted; } @@ -1456,8 +1441,8 @@ subst_stack_regs_pat (rtx insn, stack regstack, rtx pat) return control_flow_insn_deleted; } /* ??? Uninitialized USE should not happen. 
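Every hunk above applies the same mechanical rewrite: a negated test followed by abort () becomes a positive gcc_assert, and a branch that can never be taken (typically the default arm of a switch over known codes) becomes gcc_unreachable (). A minimal, self-contained sketch of the idiom; my_assert, my_unreachable, classify and the enum are illustrative stand-ins, not code from this patch:

#include <stdio.h>
#include <stdlib.h>

/* my_assert and my_unreachable model gcc_assert and gcc_unreachable from
   gcc/system.h; the real macros call fancy_abort, which reports the
   function, file and line of the failure.  */
#define my_assert(EXPR)                                               \
  ((void) ((EXPR) ? 0                                                 \
           : (fprintf (stderr, "assertion failed at %s:%d in %s\n",   \
                       __FILE__, __LINE__, __func__),                 \
              abort (), 0)))
#define my_unreachable() my_assert (0)

enum code { CODE_SET, CODE_CLOBBER };

static int
classify (enum code c, int top, int limit)
{
  /* Old style: if (top >= limit) abort ();  */
  my_assert (top < limit);

  switch (c)
    {
    case CODE_SET:
      return top + 1;
    case CODE_CLOBBER:
      return top;
    default:
      /* Old style: a bare abort ().  */
      my_unreachable ();
      return -1;  /* not reached; keeps -Wreturn-type quiet */
    }
}

int
main (void)
{
  printf ("%d\n", classify (CODE_SET, 3, 8));
  return 0;
}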
*/ - else if (get_hard_regnum (regstack, *src) == -1) - abort (); + else + gcc_assert (get_hard_regnum (regstack, *src) != -1); break; case CLOBBER: @@ -1483,8 +1468,7 @@ subst_stack_regs_pat (rtx insn, stack regstack, rtx pat) else { note = find_reg_note (insn, REG_UNUSED, *dest); - if (!note) - abort (); + gcc_assert (note); } remove_note (insn, note); replace_reg (dest, FIRST_STACK_REG + 1); @@ -1560,8 +1544,7 @@ subst_stack_regs_pat (rtx insn, stack regstack, rtx pat) case REG: /* This is a `tstM2' case. */ - if (*dest != cc0_rtx) - abort (); + gcc_assert (*dest == cc0_rtx); src1 = src; /* Fall through. */ @@ -1636,8 +1619,8 @@ subst_stack_regs_pat (rtx insn, stack regstack, rtx pat) src1_hard_regnum = get_hard_regnum (regstack, *src1); src2_hard_regnum = get_hard_regnum (regstack, *src2); - if (src1_hard_regnum == -1 || src2_hard_regnum == -1) - abort (); + gcc_assert (src1_hard_regnum != -1); + gcc_assert (src2_hard_regnum != -1); if (src1_hard_regnum != FIRST_STACK_REG && src2_hard_regnum != FIRST_STACK_REG) @@ -1743,8 +1726,7 @@ subst_stack_regs_pat (rtx insn, stack regstack, rtx pat) /* Input should never die, it is replaced with output. */ src1_note = find_regno_note (insn, REG_DEAD, REGNO (*src1)); - if (src1_note) - abort(); + gcc_assert (!src1_note); if (STACK_REG_P (*dest)) replace_reg (dest, FIRST_STACK_REG); @@ -1800,8 +1782,8 @@ subst_stack_regs_pat (rtx insn, stack regstack, rtx pat) /* Inputs should never die, they are replaced with outputs. */ - if ((src1_note) || (src2_note)) - abort(); + gcc_assert (!src1_note); + gcc_assert (!src2_note); swap_to_top (insn, regstack, *src1, *src2); @@ -1831,8 +1813,8 @@ subst_stack_regs_pat (rtx insn, stack regstack, rtx pat) /* Inputs should never die, they are replaced with outputs. */ - if ((src1_note) || (src2_note)) - abort(); + gcc_assert (!src1_note); + gcc_assert (!src2_note); swap_to_top (insn, regstack, *src1, *src2); @@ -1861,8 +1843,7 @@ subst_stack_regs_pat (rtx insn, stack regstack, rtx pat) /* Input should never die, it is replaced with output. */ src1_note = find_regno_note (insn, REG_DEAD, REGNO (*src1)); - if (src1_note) - abort(); + gcc_assert (!src1_note); /* Push the result back onto stack. Empty stack slot will be filled in second part of insn. */ @@ -1888,8 +1869,7 @@ subst_stack_regs_pat (rtx insn, stack regstack, rtx pat) /* Input should never die, it is replaced with output. */ src1_note = find_regno_note (insn, REG_DEAD, REGNO (*src1)); - if (src1_note) - abort(); + gcc_assert (!src1_note); /* Push the result back onto stack. Fill empty slot from first part of insn and fix top of stack pointer. */ @@ -1909,9 +1889,8 @@ subst_stack_regs_pat (rtx insn, stack regstack, rtx pat) The combination matches the PPRO fcomi instruction. */ pat_src = XVECEXP (pat_src, 0, 0); - if (GET_CODE (pat_src) != UNSPEC - || XINT (pat_src, 1) != UNSPEC_FNSTSW) - abort (); + gcc_assert (GET_CODE (pat_src) == UNSPEC); + gcc_assert (XINT (pat_src, 1) == UNSPEC_FNSTSW); /* Fall through. */ case UNSPEC_FNSTSW: @@ -1920,14 +1899,13 @@ subst_stack_regs_pat (rtx insn, stack regstack, rtx pat) up before now. 
*/ pat_src = XVECEXP (pat_src, 0, 0); - if (GET_CODE (pat_src) != COMPARE) - abort (); + gcc_assert (GET_CODE (pat_src) == COMPARE); compare_for_stack_reg (insn, regstack, pat_src); break; default: - abort (); + gcc_unreachable (); } break; @@ -1994,17 +1972,13 @@ subst_stack_regs_pat (rtx insn, stack regstack, rtx pat) int regno = REGNO (XEXP (src_note[i], 0)); /* If the register that dies is not at the top of - stack, then move the top of stack to the dead reg */ - if (regno != regstack->reg[regstack->top]) - { - remove_regno_note (insn, REG_DEAD, regno); - emit_pop_insn (insn, regstack, XEXP (src_note[i], 0), - EMIT_AFTER); - } - else - /* Top of stack never dies, as it is the - destination. */ - abort (); + stack, then move the top of stack to the dead reg. + Top of stack should never die, as it is the + destination. */ + gcc_assert (regno != regstack->reg[regstack->top]); + remove_regno_note (insn, REG_DEAD, regno); + emit_pop_insn (insn, regstack, XEXP (src_note[i], 0), + EMIT_AFTER); } } @@ -2017,7 +1991,7 @@ subst_stack_regs_pat (rtx insn, stack regstack, rtx pat) break; default: - abort (); + gcc_unreachable (); } break; } @@ -2074,8 +2048,7 @@ subst_asm_stack_regs (rtx insn, stack regstack) n_inputs = get_asm_operand_n_inputs (body); n_outputs = recog_data.n_operands - n_inputs; - if (alt < 0) - abort (); + gcc_assert (alt >= 0); /* Strip SUBREGs here to make the following code simpler. */ for (i = 0; i < recog_data.n_operands; i++) @@ -2168,8 +2141,7 @@ subst_asm_stack_regs (rtx insn, stack regstack) int regno = get_hard_regnum (&temp_stack, recog_data.operand[i]); - if (regno < 0) - abort (); + gcc_assert (regno >= 0); if ((unsigned int) regno != REGNO (recog_data.operand[i])) { @@ -2202,8 +2174,7 @@ subst_asm_stack_regs (rtx insn, stack regstack) { int regnum = get_hard_regnum (regstack, recog_data.operand[i]); - if (regnum < 0) - abort (); + gcc_assert (regnum >= 0); replace_reg (recog_data.operand_loc[i], regnum); } @@ -2213,8 +2184,7 @@ subst_asm_stack_regs (rtx insn, stack regstack) { int regnum = get_hard_regnum (regstack, note_reg[i]); - if (regnum < 0) - abort (); + gcc_assert (regnum >= 0); replace_reg (note_loc[i], regnum); } @@ -2228,7 +2198,7 @@ subst_asm_stack_regs (rtx insn, stack regstack) if (regnum >= 0) { /* Sigh - clobbers always have QImode. But replace_reg knows - that these regs can't be MODE_INT and will abort. Just put + that these regs can't be MODE_INT and will assert. Just put the right reg there without calling replace_reg. */ *clobber_loc[i] = FP_MODE_REG (regnum, DFmode); @@ -2469,10 +2439,9 @@ change_stack (rtx insn, stack old, stack new, enum emit_where where) not their depth or liveliness. */ GO_IF_HARD_REG_EQUAL (old->reg_set, new->reg_set, win); - abort (); + gcc_unreachable (); win: - if (old->top != new->top) - abort (); + gcc_assert (old->top == new->top); /* If the stack is not empty (new->top != -1), loop here emitting swaps until the stack is correct. @@ -2495,8 +2464,7 @@ change_stack (rtx insn, stack old, stack new, enum emit_where where) if (new->reg[reg] == old->reg[old->top]) break; - if (reg == -1) - abort (); + gcc_assert (reg != -1); emit_swap_insn (insn, old, FP_MODE_REG (old->reg[reg], DFmode)); @@ -2518,8 +2486,7 @@ change_stack (rtx insn, stack old, stack new, enum emit_where where) /* At this point there must be no differences. 
*/ for (reg = old->top; reg >= 0; reg--) - if (old->reg[reg] != new->reg[reg]) - abort (); + gcc_assert (old->reg[reg] == new->reg[reg]); } if (update_end) @@ -2731,7 +2698,7 @@ compensate_edge (edge e, FILE *file) CLEAR_HARD_REG_SET (tmp); GO_IF_HARD_REG_EQUAL (target_stack->reg_set, tmp, eh1); - abort (); + gcc_unreachable (); eh1: /* We are sure that there is st(0) live, otherwise we won't compensate. @@ -2740,7 +2707,7 @@ compensate_edge (edge e, FILE *file) if (TEST_HARD_REG_BIT (regstack.reg_set, FIRST_STACK_REG + 1)) SET_HARD_REG_BIT (tmp, FIRST_STACK_REG + 1); GO_IF_HARD_REG_EQUAL (regstack.reg_set, tmp, eh2); - abort (); + gcc_unreachable (); eh2: target_stack->top = -1; @@ -2766,8 +2733,7 @@ compensate_edge (edge e, FILE *file) /* We don't support abnormal edges. Global takes care to avoid any live register across them, so we should never have to insert instructions on such edges. */ - if (e->flags & EDGE_ABNORMAL) - abort (); + gcc_assert (!(e->flags & EDGE_ABNORMAL)); current_block = NULL; start_sequence (); @@ -2870,8 +2836,7 @@ convert_regs_1 (FILE *file, basic_block block) next = NEXT_INSN (insn); /* Ensure we have not missed a block boundary. */ - if (next == NULL) - abort (); + gcc_assert (next); if (insn == BB_END (block)) next = NULL; @@ -2953,8 +2918,7 @@ convert_regs_1 (FILE *file, basic_block block) asms, we zapped the instruction itself, but that didn't produce the same pattern of register kills as before. */ GO_IF_HARD_REG_EQUAL (regstack.reg_set, bi->out_reg_set, win); - if (!any_malformed_asm) - abort (); + gcc_assert (any_malformed_asm); win: bi->stack_out = regstack; @@ -2964,9 +2928,8 @@ convert_regs_1 (FILE *file, basic_block block) if (e->flags & EDGE_DFS_BACK || (e->dest == EXIT_BLOCK_PTR)) { - if (!BLOCK_INFO (e->dest)->done - && e->dest != block) - abort (); + gcc_assert (BLOCK_INFO (e->dest)->done + || e->dest == block); inserted |= compensate_edge (e, file); } } @@ -2975,8 +2938,7 @@ convert_regs_1 (FILE *file, basic_block block) if (e != beste && !(e->flags & EDGE_DFS_BACK) && e->src != ENTRY_BLOCK_PTR) { - if (!BLOCK_INFO (e->src)->done) - abort (); + gcc_assert (BLOCK_INFO (e->src)->done); inserted |= compensate_edge (e, file); } } diff --git a/gcc/regclass.c b/gcc/regclass.c index 7d6f3f4ec54..a73d8865bac 100644 --- a/gcc/regclass.c +++ b/gcc/regclass.c @@ -284,9 +284,8 @@ init_reg_sets (void) /* Sanity check: make sure the target macros FIXED_REGISTERS and CALL_USED_REGISTERS had the right number of initializers. */ - if (sizeof fixed_regs != sizeof initial_fixed_regs - || sizeof call_used_regs != sizeof initial_call_used_regs) - abort(); + gcc_assert (sizeof fixed_regs == sizeof initial_fixed_regs); + gcc_assert (sizeof call_used_regs == sizeof initial_call_used_regs); memcpy (fixed_regs, initial_fixed_regs, sizeof fixed_regs); memcpy (call_used_regs, initial_call_used_regs, sizeof call_used_regs); @@ -427,15 +426,11 @@ init_reg_sets_1 (void) for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) { -#ifdef ENABLE_CHECKING /* call_used_regs must include fixed_regs. */ - if (fixed_regs[i] && !call_used_regs[i]) - abort (); + gcc_assert (!fixed_regs[i] || call_used_regs[i]); #ifdef CALL_REALLY_USED_REGISTERS /* call_used_regs must include call_really_used_regs. 
*/ - if (call_really_used_regs[i] && !call_used_regs[i]) - abort (); -#endif + gcc_assert (!call_really_used_regs[i] || call_used_regs[i]); #endif if (fixed_regs[i]) @@ -656,7 +651,7 @@ memory_move_secondary_cost (enum machine_mode mode, enum reg_class class, int in what it is, so MEMORY_MOVE_COST really ought not to be calling here in that case. - I'm tempted to put in an abort here, but returning this will + I'm tempted to put in an assert here, but returning this will probably only give poor estimates, which is what we would've had before this code anyways. */ return partial_cost; diff --git a/gcc/regrename.c b/gcc/regrename.c index b8f1955db91..c2e773d875f 100644 --- a/gcc/regrename.c +++ b/gcc/regrename.c @@ -105,8 +105,7 @@ note_sets (rtx x, rtx set ATTRIBUTE_UNUSED, void *data) nregs = hard_regno_nregs[regno][GET_MODE (x)]; /* There must not be pseudos at this point. */ - if (regno + nregs > FIRST_PSEUDO_REGISTER) - abort (); + gcc_assert (regno + nregs <= FIRST_PSEUDO_REGISTER); while (nregs-- > 0) SET_HARD_REG_BIT (*pset, regno + nregs); @@ -127,8 +126,7 @@ clear_dead_regs (HARD_REG_SET *pset, enum machine_mode kind, rtx notes) int nregs = hard_regno_nregs[regno][GET_MODE (reg)]; /* There must not be pseudos at this point. */ - if (regno + nregs > FIRST_PSEUDO_REGISTER) - abort (); + gcc_assert (regno + nregs <= FIRST_PSEUDO_REGISTER); while (nregs-- > 0) CLEAR_HARD_REG_BIT (*pset, regno + nregs); @@ -442,8 +440,7 @@ scan_rtx_reg (rtx insn, rtx *loc, enum reg_class cl, if (action == mark_read) { - if (! exact_match) - abort (); + gcc_assert (exact_match); /* ??? Class NO_REGS can happen if the md file makes use of EXTRA_CONSTRAINTS to match registers. Which is arguably @@ -692,7 +689,7 @@ scan_rtx (rtx insn, rtx *loc, enum reg_class cl, case POST_MODIFY: case PRE_MODIFY: /* Should only happen inside MEM. */ - abort (); + gcc_unreachable (); case CLOBBER: scan_rtx (insn, &SET_DEST (x), cl, action, OP_OUT, 1); diff --git a/gcc/reload.c b/gcc/reload.c index 950d6fd3088..46b85f7f01f 100644 --- a/gcc/reload.c +++ b/gcc/reload.c @@ -388,17 +388,15 @@ push_secondary_reload (int in_p, rtx x, int opnum, int optional, : REG_CLASS_FROM_CONSTRAINT ((unsigned char) insn_letter, insn_constraint)); - if (insn_class == NO_REGS) - abort (); - if (in_p - && insn_data[(int) icode].operand[!in_p].constraint[0] != '=') - abort (); + gcc_assert (insn_class != NO_REGS); + gcc_assert (!in_p + || insn_data[(int) icode].operand[!in_p].constraint[0] + == '='); } /* The scratch register's constraint must start with "=&". */ - if (insn_data[(int) icode].operand[2].constraint[0] != '=' - || insn_data[(int) icode].operand[2].constraint[1] != '&') - abort (); + gcc_assert (insn_data[(int) icode].operand[2].constraint[0] == '=' + && insn_data[(int) icode].operand[2].constraint[1] == '&'); if (reg_class_subset_p (reload_class, insn_class)) mode = insn_data[(int) icode].operand[2].mode; @@ -431,9 +429,8 @@ push_secondary_reload (int in_p, rtx x, int opnum, int optional, Allow this when a reload_in/out pattern is being used. I.e. assume that the generated code handles this case. */ - if (in_p && class == reload_class && icode == CODE_FOR_nothing - && t_icode == CODE_FOR_nothing) - abort (); + gcc_assert (!in_p || class != reload_class || icode != CODE_FOR_nothing + || t_icode != CODE_FOR_nothing); /* If we need a tertiary reload, see if we have one we can reuse or else make a new one. 
*/ @@ -706,8 +703,7 @@ find_valid_class (enum machine_mode m1 ATTRIBUTE_UNUSED, int n, } } - if (best_size == 0) - abort (); + gcc_assert (best_size != 0); return best_class; } @@ -1071,8 +1067,7 @@ push_reload (rtx in, rtx out, rtx *inloc, rtx *outloc, if (MEM_P (in)) /* This is supposed to happen only for paradoxical subregs made by combine.c. (SUBREG (MEM)) isn't supposed to occur other ways. */ - if (GET_MODE_SIZE (GET_MODE (in)) > GET_MODE_SIZE (inmode)) - abort (); + gcc_assert (GET_MODE_SIZE (GET_MODE (in)) <= GET_MODE_SIZE (inmode)); #endif inmode = GET_MODE (in); } @@ -1166,9 +1161,9 @@ push_reload (rtx in, rtx out, rtx *inloc, rtx *outloc, outloc = &SUBREG_REG (out); out = *outloc; #if ! defined (LOAD_EXTEND_OP) && ! defined (WORD_REGISTER_OPERATIONS) - if (MEM_P (out) - && GET_MODE_SIZE (GET_MODE (out)) > GET_MODE_SIZE (outmode)) - abort (); + gcc_assert (!MEM_P (out) + || GET_MODE_SIZE (GET_MODE (out)) + <= GET_MODE_SIZE (outmode)); #endif outmode = GET_MODE (out); } @@ -1289,9 +1284,8 @@ push_reload (rtx in, rtx out, rtx *inloc, rtx *outloc, /* Optional output reloads are always OK even if we have no register class, since the function of these reloads is only to have spill_reg_store etc. set, so that the storing insn can be deleted later. */ - if (class == NO_REGS - && (optional == 0 || type != RELOAD_FOR_OUTPUT)) - abort (); + gcc_assert (class != NO_REGS + || (optional != 0 && type == RELOAD_FOR_OUTPUT)); i = find_reusable_reload (&in, out, class, type, opnum, dont_share); @@ -1445,8 +1439,7 @@ push_reload (rtx in, rtx out, rtx *inloc, rtx *outloc, /* If we did not find a nonzero amount-to-increment-by, that contradicts the belief that IN is being incremented in an address in this insn. */ - if (rld[i].inc == 0) - abort (); + gcc_assert (rld[i].inc != 0); } #endif @@ -2246,7 +2239,7 @@ operands_match_p (rtx x, rtx y) contain anything but integers and other rtx's, except for within LABEL_REFs and SYMBOL_REFs. 
*/ default: - abort (); + gcc_unreachable (); } } return 1 + success_2; @@ -2268,98 +2261,99 @@ decompose (rtx x) memset (&val, 0, sizeof (val)); - if (MEM_P (x)) - { - rtx base = NULL_RTX, offset = 0; - rtx addr = XEXP (x, 0); - - if (GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == PRE_INC - || GET_CODE (addr) == POST_DEC || GET_CODE (addr) == POST_INC) - { - val.base = XEXP (addr, 0); - val.start = -GET_MODE_SIZE (GET_MODE (x)); - val.end = GET_MODE_SIZE (GET_MODE (x)); - val.safe = REGNO (val.base) == STACK_POINTER_REGNUM; - return val; - } - - if (GET_CODE (addr) == PRE_MODIFY || GET_CODE (addr) == POST_MODIFY) - { - if (GET_CODE (XEXP (addr, 1)) == PLUS - && XEXP (addr, 0) == XEXP (XEXP (addr, 1), 0) - && CONSTANT_P (XEXP (XEXP (addr, 1), 1))) - { - val.base = XEXP (addr, 0); - val.start = -INTVAL (XEXP (XEXP (addr, 1), 1)); - val.end = INTVAL (XEXP (XEXP (addr, 1), 1)); - val.safe = REGNO (val.base) == STACK_POINTER_REGNUM; - return val; - } - } - - if (GET_CODE (addr) == CONST) - { - addr = XEXP (addr, 0); - all_const = 1; - } - if (GET_CODE (addr) == PLUS) - { - if (CONSTANT_P (XEXP (addr, 0))) - { - base = XEXP (addr, 1); - offset = XEXP (addr, 0); - } - else if (CONSTANT_P (XEXP (addr, 1))) - { - base = XEXP (addr, 0); - offset = XEXP (addr, 1); - } - } - - if (offset == 0) - { - base = addr; - offset = const0_rtx; - } - if (GET_CODE (offset) == CONST) - offset = XEXP (offset, 0); - if (GET_CODE (offset) == PLUS) - { - if (GET_CODE (XEXP (offset, 0)) == CONST_INT) - { - base = gen_rtx_PLUS (GET_MODE (base), base, XEXP (offset, 1)); - offset = XEXP (offset, 0); - } - else if (GET_CODE (XEXP (offset, 1)) == CONST_INT) - { - base = gen_rtx_PLUS (GET_MODE (base), base, XEXP (offset, 0)); - offset = XEXP (offset, 1); - } - else - { - base = gen_rtx_PLUS (GET_MODE (base), base, offset); - offset = const0_rtx; - } - } - else if (GET_CODE (offset) != CONST_INT) - { - base = gen_rtx_PLUS (GET_MODE (base), base, offset); - offset = const0_rtx; - } - - if (all_const && GET_CODE (base) == PLUS) - base = gen_rtx_CONST (GET_MODE (base), base); - - if (GET_CODE (offset) != CONST_INT) - abort (); - - val.start = INTVAL (offset); - val.end = val.start + GET_MODE_SIZE (GET_MODE (x)); - val.base = base; - return val; - } - else if (REG_P (x)) + switch (GET_CODE (x)) { + case MEM: + { + rtx base = NULL_RTX, offset = 0; + rtx addr = XEXP (x, 0); + + if (GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == PRE_INC + || GET_CODE (addr) == POST_DEC || GET_CODE (addr) == POST_INC) + { + val.base = XEXP (addr, 0); + val.start = -GET_MODE_SIZE (GET_MODE (x)); + val.end = GET_MODE_SIZE (GET_MODE (x)); + val.safe = REGNO (val.base) == STACK_POINTER_REGNUM; + return val; + } + + if (GET_CODE (addr) == PRE_MODIFY || GET_CODE (addr) == POST_MODIFY) + { + if (GET_CODE (XEXP (addr, 1)) == PLUS + && XEXP (addr, 0) == XEXP (XEXP (addr, 1), 0) + && CONSTANT_P (XEXP (XEXP (addr, 1), 1))) + { + val.base = XEXP (addr, 0); + val.start = -INTVAL (XEXP (XEXP (addr, 1), 1)); + val.end = INTVAL (XEXP (XEXP (addr, 1), 1)); + val.safe = REGNO (val.base) == STACK_POINTER_REGNUM; + return val; + } + } + + if (GET_CODE (addr) == CONST) + { + addr = XEXP (addr, 0); + all_const = 1; + } + if (GET_CODE (addr) == PLUS) + { + if (CONSTANT_P (XEXP (addr, 0))) + { + base = XEXP (addr, 1); + offset = XEXP (addr, 0); + } + else if (CONSTANT_P (XEXP (addr, 1))) + { + base = XEXP (addr, 0); + offset = XEXP (addr, 1); + } + } + + if (offset == 0) + { + base = addr; + offset = const0_rtx; + } + if (GET_CODE (offset) == CONST) + offset = XEXP 
(offset, 0); + if (GET_CODE (offset) == PLUS) + { + if (GET_CODE (XEXP (offset, 0)) == CONST_INT) + { + base = gen_rtx_PLUS (GET_MODE (base), base, XEXP (offset, 1)); + offset = XEXP (offset, 0); + } + else if (GET_CODE (XEXP (offset, 1)) == CONST_INT) + { + base = gen_rtx_PLUS (GET_MODE (base), base, XEXP (offset, 0)); + offset = XEXP (offset, 1); + } + else + { + base = gen_rtx_PLUS (GET_MODE (base), base, offset); + offset = const0_rtx; + } + } + else if (GET_CODE (offset) != CONST_INT) + { + base = gen_rtx_PLUS (GET_MODE (base), base, offset); + offset = const0_rtx; + } + + if (all_const && GET_CODE (base) == PLUS) + base = gen_rtx_CONST (GET_MODE (base), base); + + gcc_assert (GET_CODE (offset) == CONST_INT); + + val.start = INTVAL (offset); + val.end = val.start + GET_MODE_SIZE (GET_MODE (x)); + val.base = base; + } + break; + + case REG: val.reg_flag = 1; val.start = true_regnum (x); if (val.start < 0) @@ -2371,9 +2365,9 @@ decompose (rtx x) else /* A hard reg. */ val.end = val.start + hard_regno_nregs[val.start][GET_MODE (x)]; - } - else if (GET_CODE (x) == SUBREG) - { + break; + + case SUBREG: if (!REG_P (SUBREG_REG (x))) /* This could be more precise, but it's good enough. */ return decompose (SUBREG_REG (x)); @@ -2384,13 +2378,18 @@ decompose (rtx x) else /* A hard reg. */ val.end = val.start + hard_regno_nregs[val.start][GET_MODE (x)]; + break; + + case SCRATCH: + /* This hasn't been assigned yet, so it can't conflict yet. */ + val.safe = 1; + break; + + default: + gcc_assert (CONSTANT_P (x)); + val.safe = 1; + break; } - else if (CONSTANT_P (x) - /* This hasn't been assigned yet, so it can't conflict yet. */ - || GET_CODE (x) == SCRATCH) - val.safe = 1; - else - abort (); return val; } @@ -2407,8 +2406,7 @@ immune_p (rtx x, rtx y, struct decomposition ydata) if (ydata.safe) return 1; - if (!MEM_P (y)) - abort (); + gcc_assert (MEM_P (y)); /* If Y is memory and X is not, Y can't affect X. */ if (!MEM_P (x)) return 1; @@ -2623,8 +2621,7 @@ find_reloads (rtx insn, int replace, int ind_levels, int live_known, case '%': { /* The last operand should not be marked commutative. */ - if (i == noperands - 1) - abort (); + gcc_assert (i != noperands - 1); /* We currently only support one commutative pair of operands. Some existing asm code currently uses more @@ -2635,8 +2632,8 @@ find_reloads (rtx insn, int replace, int ind_levels, int live_known, future we may handle it correctly. */ if (commutative < 0) commutative = i; - else if (!this_insn_is_asm) - abort (); + else + gcc_assert (this_insn_is_asm); } break; /* Use of ISDIGIT is tempting here, but it may get expensive because @@ -2651,8 +2648,7 @@ find_reloads (rtx insn, int replace, int ind_levels, int live_known, recog_data.operand[i]); /* An operand may not match itself. */ - if (c == i) - abort (); + gcc_assert (c != i); /* If C can be commuted with C+1, and C might need to match I, then C+1 might also need to match I. 
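decompose, rewritten as a switch a few hunks above, boils an operand down to a byte interval [start, end) measured from a base register so that immune_p can test two operands for overlap. A toy model of the arithmetic for the simple (mem (plus (reg) (const_int N))) shape, with plain structs standing in for rtl; the interval test mirrors immune_p only for the case where both operands share the same base register:

#include <stdio.h>

struct span { int start, end; };   /* byte range [start, end) past the base */

/* Models the MEM arm of decompose when the address is base + offset.  */
static struct span
decompose_mem (int offset, int mode_size)
{
  struct span s;
  s.start = offset;
  s.end = offset + mode_size;
  return s;
}

/* Models immune_p's interval check: with a common base register, the
   accesses are independent iff the byte ranges do not overlap.  */
static int
disjoint_p (struct span a, struct span b)
{
  return a.end <= b.start || b.end <= a.start;
}

int
main (void)
{
  struct span a = decompose_mem (8, 8);   /* 8-byte access at base+8 */
  struct span b = decompose_mem (0, 8);   /* 8-byte access at base+0 */

  printf ("[%d,%d) vs [%d,%d): disjoint = %d\n",
          a.start, a.end, b.start, b.end, disjoint_p (a, b));
  return 0;
}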
*/ @@ -3510,17 +3506,14 @@ find_reloads (rtx insn, int replace, int ind_levels, int live_known, early_data = decompose (recog_data.operand[i]); - if (modified[i] == RELOAD_READ) - abort (); + gcc_assert (modified[i] != RELOAD_READ); if (this_alternative[i] == NO_REGS) { this_alternative_earlyclobber[i] = 0; - if (this_insn_is_asm) - error_for_asm (this_insn, - "`&' constraint used with no register class"); - else - abort (); + gcc_assert (this_insn_is_asm); + error_for_asm (this_insn, + "`&' constraint used with no register class"); } for (j = 0; j < noperands; j++) @@ -3895,10 +3888,9 @@ find_reloads (rtx insn, int replace, int ind_levels, int live_known, 0, 0, i, RELOAD_OTHER); operand_reloadnum[i] = output_reloadnum; } - else if (insn_code_number >= 0) - abort (); else { + gcc_assert (insn_code_number < 0); error_for_asm (insn, "inconsistent operand constraints in an `asm'"); /* Avoid further trouble with this insn. */ PATTERN (insn) = gen_rtx_USE (VOIDmode, const0_rtx); @@ -4354,10 +4346,9 @@ find_reloads (rtx insn, int replace, int ind_levels, int live_known, do after the insn (such as for output addresses) are fine. */ if (no_input_reloads) for (i = 0; i < n_reloads; i++) - if (rld[i].in != 0 - && rld[i].when_needed != RELOAD_FOR_OUTADDR_ADDRESS - && rld[i].when_needed != RELOAD_FOR_OUTPUT_ADDRESS) - abort (); + gcc_assert (rld[i].in == 0 + || rld[i].when_needed == RELOAD_FOR_OUTADDR_ADDRESS + || rld[i].when_needed == RELOAD_FOR_OUTPUT_ADDRESS); #endif /* Compute reload_mode and reload_nregs. */ @@ -4533,8 +4524,7 @@ find_reloads_toplev (rtx x, int opnum, enum reload_type type, tem = simplify_gen_subreg (GET_MODE (x), reg_equiv_constant[regno], GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x)); - if (!tem) - abort (); + gcc_assert (tem); return tem; } @@ -5392,6 +5382,8 @@ find_reloads_address_1 (enum machine_mode mode, rtx x, int context, { rtx op0 = XEXP (x, 0); rtx op1 = XEXP (x, 1); + int regno; + int reloadnum; if (GET_CODE (op1) != PLUS && GET_CODE (op1) != MINUS) return 0; @@ -5400,8 +5392,7 @@ find_reloads_address_1 (enum machine_mode mode, rtx x, int context, where a base register is {inc,dec}remented by the contents of another register or by a constant value. Thus, these operands must match. */ - if (op0 != XEXP (op1, 0)) - abort (); + gcc_assert (op0 == XEXP (op1, 0)); /* Require index register (or constant). Let's just handle the register case in the meantime... If the target allows @@ -5412,67 +5403,62 @@ find_reloads_address_1 (enum machine_mode mode, rtx x, int context, find_reloads_address_1 (mode, XEXP (op1, 1), 1, &XEXP (op1, 1), opnum, type, ind_levels, insn); - if (REG_P (XEXP (op1, 0))) - { - int regno = REGNO (XEXP (op1, 0)); - int reloadnum; - - /* A register that is incremented cannot be constant! */ - if (regno >= FIRST_PSEUDO_REGISTER - && reg_equiv_constant[regno] != 0) - abort (); - - /* Handle a register that is equivalent to a memory location - which cannot be addressed directly. */ - if (reg_equiv_memory_loc[regno] != 0 - && (reg_equiv_address[regno] != 0 - || num_not_at_initial_offset)) - { - rtx tem = make_memloc (XEXP (x, 0), regno); + gcc_assert (REG_P (XEXP (op1, 0))); - if (reg_equiv_address[regno] - || ! rtx_equal_p (tem, reg_equiv_mem[regno])) - { - /* First reload the memory location's address. - We can't use ADDR_TYPE (type) here, because we need to - write back the value after reading it, hence we actually - need two registers. 
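The find_reloads hunks just below and above keep a user-facing diagnostic while asserting only the compiler-internal half of the old test: a malformed constraint in an asm is the user's fault and still gets error_for_asm, whereas the same situation in a recognized insn would be a compiler bug and is now asserted. A compressed sketch of that split; check and report_user_error are placeholders modelling gcc_assert and error_for_asm:

#include <stdio.h>
#include <stdlib.h>

#define check(EXPR) ((void) ((EXPR) ? 0 : (abort (), 0)))

static void
report_user_error (const char *msg)
{
  fprintf (stderr, "error: %s\n", msg);
}

/* An operand left with no usable register class is only acceptable when
   it came from an asm statement; from generated RTL it is a bug.  */
static void
handle_no_regs (int this_insn_is_asm)
{
  check (this_insn_is_asm);
  report_user_error ("`&' constraint used with no register class");
}

int
main (void)
{
  handle_no_regs (1);   /* asm case: diagnose and carry on */
  return 0;
}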
*/ - find_reloads_address (GET_MODE (tem), &tem, XEXP (tem, 0), - &XEXP (tem, 0), opnum, - RELOAD_OTHER, - ind_levels, insn); - - /* Then reload the memory location into a base - register. */ - reloadnum = push_reload (tem, tem, &XEXP (x, 0), - &XEXP (op1, 0), - MODE_BASE_REG_CLASS (mode), - GET_MODE (x), GET_MODE (x), 0, - 0, opnum, RELOAD_OTHER); - - update_auto_inc_notes (this_insn, regno, reloadnum); - return 0; - } - } + regno = REGNO (XEXP (op1, 0)); + + /* A register that is incremented cannot be constant! */ + gcc_assert (regno < FIRST_PSEUDO_REGISTER + || reg_equiv_constant[regno] == 0); - if (reg_renumber[regno] >= 0) - regno = reg_renumber[regno]; + /* Handle a register that is equivalent to a memory location + which cannot be addressed directly. */ + if (reg_equiv_memory_loc[regno] != 0 + && (reg_equiv_address[regno] != 0 + || num_not_at_initial_offset)) + { + rtx tem = make_memloc (XEXP (x, 0), regno); - /* We require a base register here... */ - if (!REGNO_MODE_OK_FOR_BASE_P (regno, GET_MODE (x))) + if (reg_equiv_address[regno] + || ! rtx_equal_p (tem, reg_equiv_mem[regno])) { - reloadnum = push_reload (XEXP (op1, 0), XEXP (x, 0), - &XEXP (op1, 0), &XEXP (x, 0), - MODE_BASE_REG_CLASS (mode), - GET_MODE (x), GET_MODE (x), 0, 0, - opnum, RELOAD_OTHER); + /* First reload the memory location's address. + We can't use ADDR_TYPE (type) here, because we need to + write back the value after reading it, hence we actually + need two registers. */ + find_reloads_address (GET_MODE (tem), &tem, XEXP (tem, 0), + &XEXP (tem, 0), opnum, + RELOAD_OTHER, + ind_levels, insn); + + /* Then reload the memory location into a base + register. */ + reloadnum = push_reload (tem, tem, &XEXP (x, 0), + &XEXP (op1, 0), + MODE_BASE_REG_CLASS (mode), + GET_MODE (x), GET_MODE (x), 0, + 0, opnum, RELOAD_OTHER); update_auto_inc_notes (this_insn, regno, reloadnum); return 0; } } - else - abort (); + + if (reg_renumber[regno] >= 0) + regno = reg_renumber[regno]; + + /* We require a base register here... */ + if (!REGNO_MODE_OK_FOR_BASE_P (regno, GET_MODE (x))) + { + reloadnum = push_reload (XEXP (op1, 0), XEXP (x, 0), + &XEXP (op1, 0), &XEXP (x, 0), + MODE_BASE_REG_CLASS (mode), + GET_MODE (x), GET_MODE (x), 0, 0, + opnum, RELOAD_OTHER); + + update_auto_inc_notes (this_insn, regno, reloadnum); + return 0; + } } return 0; @@ -5487,9 +5473,8 @@ find_reloads_address_1 (enum machine_mode mode, rtx x, int context, rtx x_orig = x; /* A register that is incremented cannot be constant! */ - if (regno >= FIRST_PSEUDO_REGISTER - && reg_equiv_constant[regno] != 0) - abort (); + gcc_assert (regno < FIRST_PSEUDO_REGISTER + || reg_equiv_constant[regno] == 0); /* Handle a register that is equivalent to a memory location which cannot be addressed directly. */ @@ -5953,10 +5938,9 @@ subst_reloads (rtx insn) for (check_regno = 0; check_regno < max_regno; check_regno++) { #define CHECK_MODF(ARRAY) \ - if (ARRAY[check_regno] \ - && loc_mentioned_in_p (r->where, \ - ARRAY[check_regno])) \ - abort () + gcc_assert (!ARRAY[check_regno] \ + || !loc_mentioned_in_p (r->where, \ + ARRAY[check_regno])) CHECK_MODF (reg_equiv_constant); CHECK_MODF (reg_equiv_memory_loc); @@ -6011,8 +5995,8 @@ subst_reloads (rtx insn) *r->where = reloadreg; } /* If reload got no reg and isn't optional, something's wrong. */ - else if (! 
rld[r->what].optional) - abort (); + else + gcc_assert (rld[r->what].optional); } } @@ -6024,8 +6008,7 @@ copy_replacements (rtx x, rtx y) { /* We can't support X being a SUBREG because we might then need to know its location if something inside it was replaced. */ - if (GET_CODE (x) == SUBREG) - abort (); + gcc_assert (GET_CODE (x) != SUBREG); copy_replacements_1 (&x, &y, n_replacements); } @@ -6194,10 +6177,8 @@ refers_to_regno_for_reload_p (unsigned int regno, unsigned int endregno, reg_equiv_memory_loc[r], (rtx*) 0); - if (reg_equiv_constant[r]) - return 0; - - abort (); + gcc_assert (reg_equiv_constant[r]); + return 0; } return (endregno > r @@ -6322,9 +6303,8 @@ reg_overlap_mentioned_for_reload_p (rtx x, rtx in) { if (reg_equiv_memory_loc[regno]) return refers_to_mem_for_reload_p (in); - else if (reg_equiv_constant[regno]) - return 0; - abort (); + gcc_assert (reg_equiv_constant[regno]); + return 0; } } else if (MEM_P (x)) @@ -6332,8 +6312,10 @@ reg_overlap_mentioned_for_reload_p (rtx x, rtx in) else if (GET_CODE (x) == SCRATCH || GET_CODE (x) == PC || GET_CODE (x) == CC0) return reg_mentioned_p (x, in); - else if (GET_CODE (x) == PLUS) + else { + gcc_assert (GET_CODE (x) == PLUS); + /* We actually want to know if X is mentioned somewhere inside IN. We must not say that (plus (sp) (const_int 124)) is in (plus (sp) (const_int 64)), since that can lead to incorrect reload @@ -6349,8 +6331,6 @@ reg_overlap_mentioned_for_reload_p (rtx x, rtx in) else return (reg_overlap_mentioned_for_reload_p (XEXP (x, 0), in) || reg_overlap_mentioned_for_reload_p (XEXP (x, 1), in)); } - else - abort (); endregno = regno + (regno < FIRST_PSEUDO_REGISTER ? hard_regno_nregs[regno][GET_MODE (x)] : 1); diff --git a/gcc/reload1.c b/gcc/reload1.c index 4dd1153dfd8..63f829fff77 100644 --- a/gcc/reload1.c +++ b/gcc/reload1.c @@ -536,8 +536,7 @@ compute_use_by_pseudos (HARD_REG_SET *to, regset from) BASIC_BLOCK->global_live_at_start, which might still contain registers that have not actually been allocated since they have an equivalence. */ - if (! 
reload_completed) - abort (); + gcc_assert (reload_completed); } else { @@ -584,11 +583,12 @@ replace_pseudos_in (rtx *loc, enum machine_mode mem_mode, rtx usage) *loc = reg_equiv_mem[regno]; else if (reg_equiv_address[regno]) *loc = gen_rtx_MEM (GET_MODE (x), reg_equiv_address[regno]); - else if (!REG_P (regno_reg_rtx[regno]) - || REGNO (regno_reg_rtx[regno]) != regno) - *loc = regno_reg_rtx[regno]; else - abort (); + { + gcc_assert (!REG_P (regno_reg_rtx[regno]) + || REGNO (regno_reg_rtx[regno]) != regno); + *loc = regno_reg_rtx[regno]; + } return; } @@ -1071,8 +1071,7 @@ reload (rtx first, int global) reload_as_needed (global); - if (old_frame_size != get_frame_size ()) - abort (); + gcc_assert (old_frame_size == get_frame_size ()); if (num_eliminable) verify_initial_elim_offsets (); @@ -1576,8 +1575,7 @@ count_pseudo (int reg) SET_REGNO_REG_SET (&pseudos_counted, reg); - if (r < 0) - abort (); + gcc_assert (r >= 0); spill_add_cost[r] += freq; @@ -1750,9 +1748,8 @@ find_reg (struct insn_chain *chain, int order) for (i = 0; i < rl->nregs; i++) { - if (spill_cost[best_reg + i] != 0 - || spill_add_cost[best_reg + i] != 0) - abort (); + gcc_assert (spill_cost[best_reg + i] == 0); + gcc_assert (spill_add_cost[best_reg + i] == 0); SET_HARD_REG_BIT (used_spill_regs_local, best_reg + i); } return 1; @@ -2555,7 +2552,7 @@ eliminate_regs (rtx x, enum machine_mode mem_mode, rtx insn) case CLOBBER: case ASM_OPERANDS: case SET: - abort (); + gcc_unreachable (); default: break; @@ -2867,13 +2864,12 @@ eliminate_regs_in_insn (rtx insn, int replace) if (! insn_is_asm && icode < 0) { - if (GET_CODE (PATTERN (insn)) == USE - || GET_CODE (PATTERN (insn)) == CLOBBER - || GET_CODE (PATTERN (insn)) == ADDR_VEC - || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC - || GET_CODE (PATTERN (insn)) == ASM_INPUT) - return 0; - abort (); + gcc_assert (GET_CODE (PATTERN (insn)) == USE + || GET_CODE (PATTERN (insn)) == CLOBBER + || GET_CODE (PATTERN (insn)) == ADDR_VEC + || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC + || GET_CODE (PATTERN (insn)) == ASM_INPUT); + return 0; } if (old_set != 0 && REG_P (SET_DEST (old_set)) @@ -3027,8 +3023,7 @@ eliminate_regs_in_insn (rtx insn, int replace) PATTERN (insn) = gen_rtx_PARALLEL (VOIDmode, vec); add_clobbers (PATTERN (insn), INSN_CODE (insn)); } - if (INSN_CODE (insn) < 0) - abort (); + gcc_assert (INSN_CODE (insn) >= 0); } /* If we have a nonzero offset, and the source is already a simple REG, the following transformation would @@ -3299,13 +3294,11 @@ verify_initial_elim_offsets (void) for (ep = reg_eliminate; ep < ®_eliminate[NUM_ELIMINABLE_REGS]; ep++) { INITIAL_ELIMINATION_OFFSET (ep->from, ep->to, t); - if (t != ep->initial_offset) - abort (); + gcc_assert (t == ep->initial_offset); } #else INITIAL_FRAME_POINTER_OFFSET (t); - if (t != reg_eliminate[0].initial_offset) - abort (); + gcc_assert (t == reg_eliminate[0].initial_offset); #endif } @@ -3590,8 +3583,7 @@ finish_spills (int global) /* Record the current hard register the pseudo is allocated to in pseudo_previous_regs so we avoid reallocating it to the same hard reg in a later pass. */ - if (reg_renumber[i] < 0) - abort (); + gcc_assert (reg_renumber[i] >= 0); SET_HARD_REG_BIT (pseudo_previous_regs[i], reg_renumber[i]); /* Mark it as no longer having a hard register home. */ @@ -3670,7 +3662,7 @@ finish_spills (int global) /* Make sure we only enlarge the set. 
*/ GO_IF_HARD_REG_SUBSET (used_by_pseudos2, chain->used_spill_regs, ok); - abort (); + gcc_unreachable (); ok:; } } @@ -4265,7 +4257,7 @@ clear_reload_reg_in_use (unsigned int regno, int opnum, used_in_set = &reload_reg_used_in_insn; break; default: - abort (); + gcc_unreachable (); } /* We resolve conflicts with remaining reloads of the same type by excluding the intervals of reload registers by them from the @@ -4461,8 +4453,10 @@ reload_reg_free_p (unsigned int regno, int opnum, enum reload_type type) case RELOAD_FOR_OTHER_ADDRESS: return ! TEST_HARD_REG_BIT (reload_reg_used_in_other_addr, regno); + + default: + gcc_unreachable (); } - abort (); } /* Return 1 if the value in reload reg REGNO, as used by a reload @@ -4594,9 +4588,10 @@ reload_reg_reaches_end_p (unsigned int regno, int opnum, enum reload_type type) return 0; return 1; - } - abort (); + default: + gcc_unreachable (); + } } /* Return 1 if the reloads denoted by R1 and R2 cannot share a register. @@ -4671,7 +4666,7 @@ reloads_conflict (int r1, int r2) return 1; default: - abort (); + gcc_unreachable (); } } @@ -5591,17 +5586,16 @@ choose_reload_regs (struct insn_chain *chain) { if (REG_P (equiv)) regno = REGNO (equiv); - else if (GET_CODE (equiv) == SUBREG) + else { /* This must be a SUBREG of a hard register. Make a new REG since this might be used in an address and not all machines support SUBREGs there. */ + gcc_assert (GET_CODE (equiv) == SUBREG); regno = subreg_regno (equiv); equiv = gen_rtx_REG (rld[r].mode, regno); } - else - abort (); } /* If we found a spill reg, reject it unless it is free @@ -5805,15 +5799,13 @@ choose_reload_regs (struct insn_chain *chain) /* Some sanity tests to verify that the reloads found in the first pass are identical to the ones we have now. */ - if (chain->n_reloads != n_reloads) - abort (); + gcc_assert (chain->n_reloads == n_reloads); for (i = 0; i < n_reloads; i++) { if (chain->rld[i].regno < 0 || chain->rld[i].reg_rtx != 0) continue; - if (chain->rld[i].when_needed != rld[i].when_needed) - abort (); + gcc_assert (chain->rld[i].when_needed == rld[i].when_needed); for (j = 0; j < n_spills; j++) if (spill_regs[j] == chain->rld[i].regno) if (! set_reload_reg (j, i)) @@ -5926,10 +5918,9 @@ choose_reload_regs (struct insn_chain *chain) SET_HARD_REG_BIT (reg_is_output_reload, i + nr); } - if (rld[r].when_needed != RELOAD_OTHER - && rld[r].when_needed != RELOAD_FOR_OUTPUT - && rld[r].when_needed != RELOAD_FOR_INSN) - abort (); + gcc_assert (rld[r].when_needed == RELOAD_OTHER + || rld[r].when_needed == RELOAD_FOR_OUTPUT + || rld[r].when_needed == RELOAD_FOR_INSN); } } } @@ -6075,11 +6066,12 @@ merge_assigned_reloads (rtx insn) so abort. */ if (rld[j].reg_rtx) for (k = 0; k < j; k++) - if (rld[k].in != 0 && rld[k].reg_rtx != 0 - && rld[k].when_needed == rld[j].when_needed - && rtx_equal_p (rld[k].reg_rtx, rld[j].reg_rtx) - && ! 
rtx_equal_p (rld[k].in, rld[j].in)) - abort (); + gcc_assert (rld[k].in == 0 || rld[k].reg_rtx == 0 + || rld[k].when_needed != rld[j].when_needed + || !rtx_equal_p (rld[k].reg_rtx, + rld[j].reg_rtx) + || rtx_equal_p (rld[k].in, + rld[j].in)); } } } @@ -6293,7 +6285,7 @@ emit_input_reload_insns (struct insn_chain *chain, struct reload *rl, where = &other_input_address_reload_insns; break; default: - abort (); + gcc_unreachable (); } push_to_sequence (*where); @@ -6304,8 +6296,7 @@ emit_input_reload_insns (struct insn_chain *chain, struct reload *rl, /* We are not going to bother supporting the case where a incremented register can't be copied directly from OLDEQUIV since this seems highly unlikely. */ - if (rl->secondary_in_reload >= 0) - abort (); + gcc_assert (rl->secondary_in_reload < 0); if (reload_inherited[j]) oldequiv = reloadreg; @@ -6921,8 +6912,7 @@ do_output_reload (struct insn_chain *chain, struct reload *rl, int j) return; /* If is a JUMP_INSN, we can't support output reloads yet. */ - if (JUMP_P (insn)) - abort (); + gcc_assert (!JUMP_P (insn)); emit_output_reload_insns (chain, rld + j, j); } @@ -8066,8 +8056,7 @@ fixup_abnormal_edges (void) && !can_throw_internal (insn) && insn != BB_HEAD (bb)) insn = PREV_INSN (insn); - if (!CALL_P (insn) && !can_throw_internal (insn)) - abort (); + gcc_assert (CALL_P (insn) || can_throw_internal (insn)); BB_END (bb) = insn; inserted = true; insn = NEXT_INSN (insn); diff --git a/gcc/reorg.c b/gcc/reorg.c index d464a323958..fbaf968df73 100644 --- a/gcc/reorg.c +++ b/gcc/reorg.c @@ -253,7 +253,7 @@ stop_search_p (rtx insn, int labels_p) || asm_noperands (PATTERN (insn)) >= 0); default: - abort (); + gcc_unreachable (); } } @@ -564,8 +564,7 @@ emit_delay_sequence (rtx insn, rtx list, int length) if (had_barrier) emit_barrier_after (seq_insn); - if (i != length + 1) - abort (); + gcc_assert (i == length + 1); return seq_insn; } @@ -887,7 +886,7 @@ get_jump_flags (rtx insn, rtx label) break; default: - abort (); + gcc_unreachable (); } } else @@ -2549,9 +2548,8 @@ fill_slots_from_thread (rtx insn, rtx condition, rtx thread, int flags; /* Validate our arguments. */ - if ((condition == const_true_rtx && ! thread_if_true) - || (! own_thread && ! thread_if_true)) - abort (); + gcc_assert(condition != const_true_rtx || thread_if_true); + gcc_assert(own_thread || thread_if_true); flags = get_jump_flags (insn, JUMP_LABEL (insn)); @@ -2921,8 +2919,7 @@ fill_slots_from_thread (rtx insn, rtx condition, rtx thread, { rtx label; - if (! 
thread_if_true) - abort (); + gcc_assert (thread_if_true); if (new_thread && JUMP_P (new_thread) && (simplejump_p (new_thread) @@ -3228,8 +3225,7 @@ relax_delay_slots (rtx first) trial = PREV_INSN (insn); delete_related_insns (insn); - if (GET_CODE (pat) != SEQUENCE) - abort (); + gcc_assert (GET_CODE (pat) == SEQUENCE); after = trial; for (i = 0; i < XVECLEN (pat, 0); i++) { @@ -3347,8 +3343,7 @@ relax_delay_slots (rtx first) trial = PREV_INSN (insn); delete_related_insns (insn); - if (GET_CODE (pat) != SEQUENCE) - abort (); + gcc_assert (GET_CODE (pat) == SEQUENCE); after = trial; for (i = 0; i < XVECLEN (pat, 0); i++) { diff --git a/gcc/resource.c b/gcc/resource.c index 05ab25b63c4..7d6bbb759c5 100644 --- a/gcc/resource.c +++ b/gcc/resource.c @@ -233,8 +233,7 @@ mark_referenced_resources (rtx x, struct resources *res, unsigned int last_regno = regno + hard_regno_nregs[regno][GET_MODE (x)]; - if (last_regno > FIRST_PSEUDO_REGISTER) - abort (); + gcc_assert (last_regno <= FIRST_PSEUDO_REGISTER); for (r = regno; r < last_regno; r++) SET_HARD_REG_BIT (res->regs, r); } @@ -246,8 +245,7 @@ mark_referenced_resources (rtx x, struct resources *res, unsigned int last_regno = regno + hard_regno_nregs[regno][GET_MODE (x)]; - if (last_regno > FIRST_PSEUDO_REGISTER) - abort (); + gcc_assert (last_regno <= FIRST_PSEUDO_REGISTER); for (r = regno; r < last_regno; r++) SET_HARD_REG_BIT (res->regs, r); } @@ -340,8 +338,7 @@ mark_referenced_resources (rtx x, struct resources *res, { sequence = PATTERN (NEXT_INSN (insn)); seq_size = XVECLEN (sequence, 0); - if (GET_CODE (sequence) != SEQUENCE) - abort (); + gcc_assert (GET_CODE (sequence) == SEQUENCE); } res->memory = 1; @@ -771,8 +768,7 @@ mark_set_resources (rtx x, struct resources *res, int in_dest, unsigned int last_regno = regno + hard_regno_nregs[regno][GET_MODE (x)]; - if (last_regno > FIRST_PSEUDO_REGISTER) - abort (); + gcc_assert (last_regno <= FIRST_PSEUDO_REGISTER); for (r = regno; r < last_regno; r++) SET_HARD_REG_BIT (res->regs, r); } @@ -786,8 +782,7 @@ mark_set_resources (rtx x, struct resources *res, int in_dest, unsigned int last_regno = regno + hard_regno_nregs[regno][GET_MODE (x)]; - if (last_regno > FIRST_PSEUDO_REGISTER) - abort (); + gcc_assert (last_regno <= FIRST_PSEUDO_REGISTER); for (r = regno; r < last_regno; r++) SET_HARD_REG_BIT (res->regs, r); } diff --git a/gcc/rtl.c b/gcc/rtl.c index 8136514ea09..05075dd015a 100644 --- a/gcc/rtl.c +++ b/gcc/rtl.c @@ -296,7 +296,7 @@ copy_rtx (rtx orig) break; default: - abort (); + gcc_unreachable (); } } return copy; @@ -427,7 +427,7 @@ rtx_equal_p (rtx x, rtx y) contain anything but integers and other rtx's, except for within LABEL_REFs and SYMBOL_REFs. */ default: - abort (); + gcc_unreachable (); } } return 1; diff --git a/gcc/rtlanal.c b/gcc/rtlanal.c index 91fe437a973..21879d63684 100644 --- a/gcc/rtlanal.c +++ b/gcc/rtlanal.c @@ -1123,8 +1123,8 @@ insn_dependent_p (rtx x, rtx y) { rtx tmp; - if (! INSN_P (x) || ! INSN_P (y)) - abort (); + gcc_assert (INSN_P (x)); + gcc_assert (INSN_P (y)); tmp = PATTERN (y); note_stores (PATTERN (x), insn_dependent_p_1, &tmp); @@ -1578,11 +1578,7 @@ reg_overlap_mentioned_p (rtx x, rtx in) } default: -#ifdef ENABLE_CHECKING - if (!CONSTANT_P (x)) - abort (); -#endif - + gcc_assert (CONSTANT_P (x)); return 0; } } @@ -1744,8 +1740,7 @@ dead_or_set_p (rtx insn, rtx x) if (GET_CODE (x) == CC0) return 1; - if (!REG_P (x)) - abort (); + gcc_assert (REG_P (x)); regno = REGNO (x); last_regno = (regno >= FIRST_PSEUDO_REGISTER ? 
regno @@ -1927,8 +1922,7 @@ find_reg_fusage (rtx insn, enum rtx_code code, rtx datum) if (!CALL_P (insn)) return 0; - if (! datum) - abort (); + gcc_assert (datum); if (!REG_P (datum)) { @@ -2040,7 +2034,7 @@ remove_note (rtx insn, rtx note) return; } - abort (); + gcc_unreachable (); } /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and @@ -2520,8 +2514,7 @@ replace_rtx (rtx x, rtx from, rtx to) x = simplify_subreg (GET_MODE (x), new, GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x)); - if (! x) - abort (); + gcc_assert (x); } else SUBREG_REG (x) = new; @@ -2536,8 +2529,7 @@ replace_rtx (rtx x, rtx from, rtx to) { x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x), new, GET_MODE (XEXP (x, 0))); - if (! x) - abort (); + gcc_assert (x); } else XEXP (x, 0) = new; @@ -3189,11 +3181,10 @@ subreg_lsb_1 (enum machine_mode outer_mode, if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN) /* If the subreg crosses a word boundary ensure that it also begins and ends on a word boundary. */ - if ((subreg_byte % UNITS_PER_WORD - + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD - && (subreg_byte % UNITS_PER_WORD - || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)) - abort (); + gcc_assert (!((subreg_byte % UNITS_PER_WORD + + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD + && (subreg_byte % UNITS_PER_WORD + || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD))); if (WORDS_BIG_ENDIAN) word = (GET_MODE_SIZE (inner_mode) @@ -3236,8 +3227,7 @@ subreg_regno_offset (unsigned int xregno, enum machine_mode xmode, int mode_multiple, nregs_multiple; int y_offset; - if (xregno >= FIRST_PSEUDO_REGISTER) - abort (); + gcc_assert (xregno < FIRST_PSEUDO_REGISTER); nregs_xmode = hard_regno_nregs[xregno][xmode]; nregs_ymode = hard_regno_nregs[xregno][ymode]; @@ -3256,8 +3246,7 @@ subreg_regno_offset (unsigned int xregno, enum machine_mode xmode, /* size of ymode must not be greater than the size of xmode. */ mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode); - if (mode_multiple == 0) - abort (); + gcc_assert (mode_multiple != 0); y_offset = offset / GET_MODE_SIZE (ymode); nregs_multiple = nregs_xmode / nregs_ymode; @@ -3279,8 +3268,7 @@ subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode, int mode_multiple, nregs_multiple; int y_offset; - if (xregno >= FIRST_PSEUDO_REGISTER) - abort (); + gcc_assert (xregno < FIRST_PSEUDO_REGISTER); nregs_xmode = hard_regno_nregs[xregno][xmode]; nregs_ymode = hard_regno_nregs[xregno][ymode]; @@ -3296,15 +3284,12 @@ subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode, if (offset == subreg_lowpart_offset (ymode, xmode)) return true; -#ifdef ENABLE_CHECKING /* This should always pass, otherwise we don't know how to verify the constraint. These conditions may be relaxed but subreg_offset would need to be redesigned. */ - if (GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode) - || GET_MODE_SIZE (ymode) % nregs_ymode - || nregs_xmode % nregs_ymode) - abort (); -#endif + gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0); + gcc_assert ((GET_MODE_SIZE (ymode) % nregs_ymode) == 0); + gcc_assert ((nregs_xmode % nregs_ymode) == 0); /* The XMODE value can be seen as a vector of NREGS_XMODE values. The subreg must represent a lowpart of given field. @@ -3316,16 +3301,14 @@ subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode, /* size of ymode must not be greater than the size of xmode. 
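The reg_overlap_mentioned_p hunk above, the subreg checks around it, and the earlier init_reg_sets_1 hunk in regclass.c all drop their #ifdef ENABLE_CHECKING fences together with the abort () calls. That looks deliberate: gcc_assert is itself defined conditionally, so re-fencing it would be redundant. A rough model of the two definitions as they appear in gcc/system.h of this period (approximate; the real macro calls fancy_abort):

#if ENABLE_ASSERT_CHECKING
# define gcc_assert(EXPR)                                              \
    ((void) (!(EXPR)                                                   \
             ? (fancy_abort (__FILE__, __LINE__, __FUNCTION__), 0)     \
             : 0))
#else
/* The expression is still parsed (so no unused-variable warnings) but
   generates no code and is never evaluated.  */
# define gcc_assert(EXPR) ((void) (0 && (EXPR)))
#endif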
*/ mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode); - if (mode_multiple == 0) - abort (); + gcc_assert (mode_multiple != 0); y_offset = offset / GET_MODE_SIZE (ymode); nregs_multiple = nregs_xmode / nregs_ymode; -#ifdef ENABLE_CHECKING - if (offset % GET_MODE_SIZE (ymode) - || mode_multiple % nregs_multiple) - abort (); -#endif + + gcc_assert ((offset % GET_MODE_SIZE (ymode)) == 0); + gcc_assert ((mode_multiple % nregs_multiple) == 0); + return (!(y_offset % (mode_multiple / nregs_multiple))); } @@ -3380,8 +3363,7 @@ find_first_parameter_load (rtx call_insn, rtx boundary) if (GET_CODE (XEXP (p, 0)) == USE && REG_P (XEXP (XEXP (p, 0), 0))) { - if (REGNO (XEXP (XEXP (p, 0), 0)) >= FIRST_PSEUDO_REGISTER) - abort (); + gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER); /* We only care about registers which can hold function arguments. */ @@ -3409,8 +3391,7 @@ find_first_parameter_load (rtx call_insn, rtx boundary) CODE_LABEL. */ if (LABEL_P (before)) { - if (before != boundary) - abort (); + gcc_assert (before == boundary); break; } @@ -3571,7 +3552,7 @@ can_hoist_insn_p (rtx insn, rtx val, regset live) } break; default: - abort (); + gcc_unreachable (); } return true; } @@ -3603,8 +3584,7 @@ hoist_update_store (rtx insn, rtx *xp, rtx val, rtx new) x = *xp; } - if (!REG_P (x)) - abort (); + gcc_assert (REG_P (x)); /* We've verified that hard registers are dead, so we may keep the side effect. Otherwise replace it by new pseudo. */ @@ -3623,6 +3603,7 @@ hoist_insn_after (rtx insn, rtx after, rtx val, rtx new) rtx pat; int i; rtx note; + int applied; insn = emit_copy_of_insn_after (insn, after); pat = PATTERN (insn); @@ -3673,10 +3654,10 @@ hoist_insn_after (rtx insn, rtx after, rtx val, rtx new) } break; default: - abort (); + gcc_unreachable (); } - if (!apply_change_group ()) - abort (); + applied = apply_change_group (); + gcc_assert (applied); return insn; } @@ -3688,8 +3669,7 @@ hoist_insn_to_edge (rtx insn, edge e, rtx val, rtx new) /* We cannot insert instructions on an abnormal critical edge. It will be easier to find the culprit if we die now. */ - if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e)) - abort (); + gcc_assert (!(e->flags & EDGE_ABNORMAL) || !EDGE_CRITICAL_P (e)); /* Do not use emit_insn_on_edge as we want to preserve notes and similar stuff. We also emit CALL_INSNS and firends. */ @@ -4178,7 +4158,7 @@ nonzero_bits1 (rtx x, enum machine_mode mode, rtx known_x, result_low = MIN (low0, low1); break; default: - abort (); + gcc_unreachable (); } if (result_width < mode_width) diff --git a/gcc/rtlhooks.c b/gcc/rtlhooks.c index 5cb14efd140..49d4cfe7c59 100644 --- a/gcc/rtlhooks.c +++ b/gcc/rtlhooks.c @@ -47,15 +47,16 @@ gen_lowpart_general (enum machine_mode mode, rtx x) { /* Must be a hard reg that's not valid in MODE. */ result = gen_lowpart_common (mode, copy_to_reg (x)); - if (result == 0) - abort (); + gcc_assert (result != 0); return result; } - else if (MEM_P (x)) + else { - /* The only additional case we can do is MEM. */ int offset = 0; + /* The only additional case we can do is MEM. */ + gcc_assert (MEM_P (x)); + /* The following exposes the use of "x" to CSE. 
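The hoist_insn_after hunk above introduces a local applied variable instead of writing gcc_assert (apply_change_group ()) directly. Presumably that is because the assertion's argument is not evaluated in a build without assertion checking, so a call whose side effect must happen has to stay outside the macro. A self-contained sketch of the trap; my_assert and do_work are stand-ins for gcc_assert and apply_change_group:

#include <stdio.h>
#include <stdlib.h>

/* A model of an assert that vanishes in a non-checking build.  */
#ifdef CHECKING
# define my_assert(EXPR) ((void) ((EXPR) ? 0 : (abort (), 0)))
#else
# define my_assert(EXPR) ((void) (0 && (EXPR)))   /* EXPR never runs */
#endif

static int
do_work (int *counter)
{
  ++*counter;   /* the side effect that must not be skipped */
  return 1;     /* success */
}

int
main (void)
{
  int n = 0;
  int ok;

  /* Wrong: my_assert (do_work (&n)); -- in a non-checking build the
     call disappears and the counter is never bumped.  */

  /* Right: do the work unconditionally, assert on the result.  */
  ok = do_work (&n);
  my_assert (ok);

  printf ("n = %d\n", n);
  return 0;
}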
*/ if (GET_MODE_SIZE (GET_MODE (x)) <= UNITS_PER_WORD && SCALAR_INT_MODE_P (GET_MODE (x)) @@ -76,8 +77,6 @@ gen_lowpart_general (enum machine_mode mode, rtx x) return adjust_address (x, mode, offset); } - else - abort (); } rtx diff --git a/gcc/sbitmap.c b/gcc/sbitmap.c index dfd764528cb..f1d9c86ee30 100644 --- a/gcc/sbitmap.c +++ b/gcc/sbitmap.c @@ -301,8 +301,7 @@ sbitmap_difference (sbitmap dst, sbitmap a, sbitmap b) sbitmap_ptr bp = b->elms; /* A should be at least as large as DEST, to have a defined source. */ - if (a->size < dst_size) - abort (); + gcc_assert (a->size >= dst_size); /* If minuend is smaller, we simply pretend it to be zero bits, i.e. only copy the subtrahend into dest. */ if (b->size < min_size) diff --git a/gcc/sched-deps.c b/gcc/sched-deps.c index b76e217df81..6e68bde56bd 100644 --- a/gcc/sched-deps.c +++ b/gcc/sched-deps.c @@ -252,8 +252,8 @@ add_dependence (rtx insn, rtx elem, enum reg_note dep_type) { enum reg_note present_dep_type = 0; - if (anti_dependency_cache == NULL || output_dependency_cache == NULL) - abort (); + gcc_assert (anti_dependency_cache); + gcc_assert (output_dependency_cache); if (bitmap_bit_p (&true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem))) /* Do nothing (present_set_type is already 0). */ @@ -281,15 +281,21 @@ add_dependence (rtx insn, rtx elem, enum reg_note dep_type) may be changed. */ if (true_dependency_cache != NULL) { - if (REG_NOTE_KIND (link) == REG_DEP_ANTI) - bitmap_clear_bit (&anti_dependency_cache[INSN_LUID (insn)], - INSN_LUID (elem)); - else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT - && output_dependency_cache) - bitmap_clear_bit (&output_dependency_cache[INSN_LUID (insn)], - INSN_LUID (elem)); - else - abort (); + enum reg_note kind = REG_NOTE_KIND (link); + switch (kind) + { + case REG_DEP_ANTI: + bitmap_clear_bit (&anti_dependency_cache[INSN_LUID (insn)], + INSN_LUID (elem)); + break; + case REG_DEP_OUTPUT: + gcc_assert (output_dependency_cache); + bitmap_clear_bit (&output_dependency_cache[INSN_LUID (insn)], + INSN_LUID (elem)); + break; + default: + gcc_unreachable (); + } } #endif @@ -518,9 +524,8 @@ sched_analyze_1 (struct deps *deps, rtx x, rtx insn) purpose already. */ else if (regno >= deps->max_reg) { - if (GET_CODE (PATTERN (insn)) != USE - && GET_CODE (PATTERN (insn)) != CLOBBER) - abort (); + gcc_assert (GET_CODE (PATTERN (insn)) == USE + || GET_CODE (PATTERN (insn)) == CLOBBER); } else { @@ -659,9 +664,8 @@ sched_analyze_2 (struct deps *deps, rtx x, rtx insn) purpose already. */ else if (regno >= deps->max_reg) { - if (GET_CODE (PATTERN (insn)) != USE - && GET_CODE (PATTERN (insn)) != CLOBBER) - abort (); + gcc_assert (GET_CODE (PATTERN (insn)) == USE + || GET_CODE (PATTERN (insn)) == CLOBBER); } else { @@ -1363,7 +1367,7 @@ sched_analyze (struct deps *deps, rtx head, rtx tail) return; } } - abort (); + gcc_unreachable (); } @@ -1382,14 +1386,15 @@ add_forward_dependence (rtx from, rtx to, enum reg_note dep_type) However, if we have enabled checking we might as well go ahead and verify that add_dependence worked properly. 
*/ - if (NOTE_P (from) - || INSN_DELETED_P (from) - || (forward_dependency_cache != NULL - && bitmap_bit_p (&forward_dependency_cache[INSN_LUID (from)], - INSN_LUID (to))) - || (forward_dependency_cache == NULL - && find_insn_list (to, INSN_DEPEND (from)))) - abort (); + gcc_assert (!NOTE_P (from)); + gcc_assert (!INSN_DELETED_P (from)); + if (forward_dependency_cache) + gcc_assert (!bitmap_bit_p (&forward_dependency_cache[INSN_LUID (from)], + INSN_LUID (to))); + else + gcc_assert (!find_insn_list (to, INSN_DEPEND (from))); + + /* ??? If bitmap_bit_p is a predicate, what is this supposed to do? */ if (forward_dependency_cache != NULL) bitmap_bit_p (&forward_dependency_cache[INSN_LUID (from)], INSN_LUID (to)); diff --git a/gcc/sched-ebb.c b/gcc/sched-ebb.c index 6cbdc0124b3..2344d1e9de4 100644 --- a/gcc/sched-ebb.c +++ b/gcc/sched-ebb.c @@ -240,8 +240,7 @@ fix_basic_block_boundaries (basic_block bb, basic_block last, rtx head, for (; insn != aftertail; insn = NEXT_INSN (insn)) { - if (LABEL_P (insn)) - abort (); + gcc_assert (!LABEL_P (insn)); /* Create new basic blocks just before first insn. */ if (inside_basic_block_p (insn)) { @@ -542,8 +541,7 @@ schedule_ebb (rtx head, rtx tail) schedule_block (-1, n_insns); /* Sanity check: verify that all region insns were scheduled. */ - if (sched_n_insns != n_insns) - abort (); + gcc_assert (sched_n_insns == n_insns); head = current_sched_info->head; tail = current_sched_info->tail; diff --git a/gcc/sched-rgn.c b/gcc/sched-rgn.c index f2e5773e797..9bd6d527feb 100644 --- a/gcc/sched-rgn.c +++ b/gcc/sched-rgn.c @@ -517,9 +517,7 @@ debug_regions (void) { current_blocks = RGN_BLOCKS (rgn); - if (bb != BLOCK_TO_BB (BB_TO_BLOCK (bb))) - abort (); - + gcc_assert (bb == BLOCK_TO_BB (BB_TO_BLOCK (bb))); fprintf (sched_dump, " %d/%d ", bb, BB_TO_BLOCK (bb)); } @@ -1211,8 +1209,7 @@ compute_trg_info (int trg) sp->update_bbs.nr_members = update_idx; /* Make sure we didn't overrun the end of bblst_table. */ - if (bblst_last > bblst_size) - abort (); + gcc_assert (bblst_last <= bblst_size); } else { @@ -2501,8 +2498,7 @@ schedule_region (int rgn) } /* Sanity check: verify that all region insns were scheduled. */ - if (sched_rgn_n_insns != rgn_n_insns) - abort (); + gcc_assert (sched_rgn_n_insns == rgn_n_insns); /* Restore line notes. */ if (write_symbols != NO_DEBUG) @@ -2708,9 +2704,8 @@ schedule_insns (FILE *dump_file) sbitmap_zero (blocks); SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn)]); - if (deaths_in_region[rgn] - != count_or_remove_death_notes (blocks, 0)) - abort (); + gcc_assert (deaths_in_region[rgn] + == count_or_remove_death_notes (blocks, 0)); } free (deaths_in_region); } @@ -2733,10 +2728,7 @@ schedule_insns (FILE *dump_file) nr_inter, nr_spec); } else - { - if (nr_inter > 0) - abort (); - } + gcc_assert (nr_inter <= 0); fprintf (sched_dump, "\n\n"); } diff --git a/gcc/sched-vis.c b/gcc/sched-vis.c index c7c5427b863..aa228317ba0 100644 --- a/gcc/sched-vis.c +++ b/gcc/sched-vis.c @@ -566,8 +566,7 @@ print_pattern (char *buf, rtx x, int verbose) break; case SEQUENCE: /* Should never see SEQUENCE codes until after reorg. */ - abort (); - break; + gcc_unreachable (); case ASM_INPUT: sprintf (buf, "asm {%s}", XSTR (x, 0)); break; diff --git a/gcc/sdbout.c b/gcc/sdbout.c index ef254888f35..8689cc09be4 100644 --- a/gcc/sdbout.c +++ b/gcc/sdbout.c @@ -758,7 +758,7 @@ sdbout_symbol (tree decl, int local) case PARM_DECL: /* Parm decls go in their own separate chains and are output by sdbout_reg_parms and sdbout_parms. 
*/ - abort (); + gcc_unreachable (); case VAR_DECL: /* Don't mention a variable that is external. @@ -942,10 +942,9 @@ sdbout_toplevel_data (tree decl) if (DECL_IGNORED_P (decl)) return; - if (! (TREE_CODE (decl) == VAR_DECL - && MEM_P (DECL_RTL (decl)) - && DECL_INITIAL (decl))) - abort (); + gcc_assert (TREE_CODE (decl) == VAR_DECL); + gcc_assert (MEM_P (DECL_RTL (decl))); + gcc_assert (DECL_INITIAL (decl)); PUT_SDB_DEF (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))); PUT_SDB_VAL (XEXP (DECL_RTL (decl), 0)); diff --git a/gcc/simplify-rtx.c b/gcc/simplify-rtx.c index 285f898de80..eec2a5816a3 100644 --- a/gcc/simplify-rtx.c +++ b/gcc/simplify-rtx.c @@ -360,16 +360,15 @@ simplify_unary_operation (enum rtx_code code, enum machine_mode mode, if (code == VEC_DUPLICATE) { - if (!VECTOR_MODE_P (mode)) - abort (); - if (GET_MODE (trueop) != VOIDmode - && !VECTOR_MODE_P (GET_MODE (trueop)) - && GET_MODE_INNER (mode) != GET_MODE (trueop)) - abort (); - if (GET_MODE (trueop) != VOIDmode - && VECTOR_MODE_P (GET_MODE (trueop)) - && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop))) - abort (); + gcc_assert (VECTOR_MODE_P (mode)); + if (GET_MODE (trueop) != VOIDmode) + { + if (!VECTOR_MODE_P (GET_MODE (trueop))) + gcc_assert (GET_MODE_INNER (mode) == GET_MODE (trueop)); + else + gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER + (GET_MODE (trueop))); + } if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_VECTOR) { @@ -387,8 +386,8 @@ simplify_unary_operation (enum rtx_code code, enum machine_mode mode, int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode)); unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size); - if (in_n_elts >= n_elts || n_elts % in_n_elts) - abort (); + gcc_assert (in_n_elts < n_elts); + gcc_assert ((n_elts % in_n_elts) == 0); for (i = 0; i < n_elts; i++) RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts); } @@ -408,9 +407,7 @@ simplify_unary_operation (enum rtx_code code, enum machine_mode mode, rtvec v = rtvec_alloc (n_elts); unsigned int i; - if (op_n_elts != n_elts) - abort (); - + gcc_assert (op_n_elts == n_elts); for (i = 0; i < n_elts; i++) { rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode), @@ -541,15 +538,13 @@ simplify_unary_operation (enum rtx_code code, enum machine_mode mode, case ZERO_EXTEND: /* When zero-extending a CONST_INT, we need to know its original mode. */ - if (op_mode == VOIDmode) - abort (); + gcc_assert (op_mode != VOIDmode); if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT) { /* If we were really extending the mode, we would have to distinguish between zero-extension and sign-extension. */ - if (width != GET_MODE_BITSIZE (op_mode)) - abort (); + gcc_assert (width == GET_MODE_BITSIZE (op_mode)); val = arg0; } else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT) @@ -566,8 +561,7 @@ simplify_unary_operation (enum rtx_code code, enum machine_mode mode, /* If we were really extending the mode, we would have to distinguish between zero-extension and sign-extension. 
*/ - if (width != GET_MODE_BITSIZE (op_mode)) - abort (); + gcc_assert (width == GET_MODE_BITSIZE (op_mode)); val = arg0; } else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT) @@ -590,7 +584,7 @@ simplify_unary_operation (enum rtx_code code, enum machine_mode mode, return 0; default: - abort (); + gcc_unreachable (); } val = trunc_int_for_mode (val, mode); @@ -690,8 +684,7 @@ simplify_unary_operation (enum rtx_code code, enum machine_mode mode, break; case ZERO_EXTEND: - if (op_mode == VOIDmode) - abort (); + gcc_assert (op_mode != VOIDmode); if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT) return 0; @@ -766,7 +759,7 @@ simplify_unary_operation (enum rtx_code code, enum machine_mode mode, real_from_target (&d, tmp, mode); } default: - abort (); + gcc_unreachable (); } return CONST_DOUBLE_FROM_REAL_VALUE (d, mode); } @@ -864,7 +857,7 @@ simplify_unary_operation (enum rtx_code code, enum machine_mode mode, break; default: - abort (); + gcc_unreachable (); } return immed_double_const (xl, xh, mode); } @@ -1175,16 +1168,12 @@ simplify_binary_operation (enum rtx_code code, enum machine_mode mode, rtx trueop0, trueop1; rtx tem; -#ifdef ENABLE_CHECKING /* Relational operations don't work here. We must know the mode of the operands in order to do the comparison correctly. Assuming a full word can give incorrect results. Consider comparing 128 with -128 in QImode. */ - - if (GET_RTX_CLASS (code) == RTX_COMPARE - || GET_RTX_CLASS (code) == RTX_COMM_COMPARE) - abort (); -#endif + gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE); + gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE); /* Make sure the constant is second. */ if (GET_RTX_CLASS (code) == RTX_COMM_ARITH @@ -1211,9 +1200,8 @@ simplify_binary_operation (enum rtx_code code, enum machine_mode mode, rtvec v = rtvec_alloc (n_elts); unsigned int i; - if (op0_n_elts != n_elts || op1_n_elts != n_elts) - abort (); - + gcc_assert (op0_n_elts == n_elts); + gcc_assert (op1_n_elts == n_elts); for (i = 0; i < n_elts; i++) { rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode), @@ -1247,14 +1235,20 @@ simplify_binary_operation (enum rtx_code code, enum machine_mode mode, GET_MODE (op1)); for (i = 0; i < 4; i++) { - if (code == AND) + switch (code) + { + case AND: tmp0[i] &= tmp1[i]; - else if (code == IOR) + break; + case IOR: tmp0[i] |= tmp1[i]; - else if (code == XOR) + break; + case XOR: tmp0[i] ^= tmp1[i]; - else - abort (); + break; + default: + gcc_unreachable (); + } } real_from_target (&r, tmp0, mode); return CONST_DOUBLE_FROM_REAL_VALUE (r, mode); @@ -2142,24 +2136,22 @@ simplify_binary_operation (enum rtx_code code, enum machine_mode mode, case VEC_SELECT: if (!VECTOR_MODE_P (mode)) { - if (!VECTOR_MODE_P (GET_MODE (trueop0)) - || (mode - != GET_MODE_INNER (GET_MODE (trueop0))) - || GET_CODE (trueop1) != PARALLEL - || XVECLEN (trueop1, 0) != 1 - || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT) - abort (); + gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0))); + gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0))); + gcc_assert (GET_CODE (trueop1) == PARALLEL); + gcc_assert (XVECLEN (trueop1, 0) == 1); + gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT); if (GET_CODE (trueop0) == CONST_VECTOR) - return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0))); + return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP + (trueop1, 0, 0))); } else { - if (!VECTOR_MODE_P (GET_MODE (trueop0)) - || (GET_MODE_INNER (mode) - != GET_MODE_INNER (GET_MODE (trueop0))) - || GET_CODE (trueop1) != PARALLEL) - abort (); + 
gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0))); + gcc_assert (GET_MODE_INNER (mode) + == GET_MODE_INNER (GET_MODE (trueop0))); + gcc_assert (GET_CODE (trueop1) == PARALLEL); if (GET_CODE (trueop0) == CONST_VECTOR) { @@ -2168,15 +2160,14 @@ simplify_binary_operation (enum rtx_code code, enum machine_mode mode, rtvec v = rtvec_alloc (n_elts); unsigned int i; - if (XVECLEN (trueop1, 0) != (int) n_elts) - abort (); + gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts); for (i = 0; i < n_elts; i++) { rtx x = XVECEXP (trueop1, 0, i); - if (GET_CODE (x) != CONST_INT) - abort (); - RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x)); + gcc_assert (GET_CODE (x) == CONST_INT); + RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, + INTVAL (x)); } return gen_rtx_CONST_VECTOR (mode, v); @@ -2192,24 +2183,21 @@ simplify_binary_operation (enum rtx_code code, enum machine_mode mode, ? GET_MODE (trueop1) : GET_MODE_INNER (mode)); - if (!VECTOR_MODE_P (mode) - || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode) - != GET_MODE_SIZE (mode))) - abort (); - - if ((VECTOR_MODE_P (op0_mode) - && (GET_MODE_INNER (mode) - != GET_MODE_INNER (op0_mode))) - || (!VECTOR_MODE_P (op0_mode) - && GET_MODE_INNER (mode) != op0_mode)) - abort (); - - if ((VECTOR_MODE_P (op1_mode) - && (GET_MODE_INNER (mode) - != GET_MODE_INNER (op1_mode))) - || (!VECTOR_MODE_P (op1_mode) - && GET_MODE_INNER (mode) != op1_mode)) - abort (); + gcc_assert (VECTOR_MODE_P (mode)); + gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode) + == GET_MODE_SIZE (mode)); + + if (VECTOR_MODE_P (op0_mode)) + gcc_assert (GET_MODE_INNER (mode) + == GET_MODE_INNER (op0_mode)); + else + gcc_assert (GET_MODE_INNER (mode) == op0_mode); + + if (VECTOR_MODE_P (op1_mode)) + gcc_assert (GET_MODE_INNER (mode) + == GET_MODE_INNER (op1_mode)); + else + gcc_assert (GET_MODE_INNER (mode) == op1_mode); if ((GET_CODE (trueop0) == CONST_VECTOR || GET_CODE (trueop0) == CONST_INT @@ -2251,7 +2239,7 @@ simplify_binary_operation (enum rtx_code code, enum machine_mode mode, return 0; default: - abort (); + gcc_unreachable (); } return 0; @@ -2413,7 +2401,7 @@ simplify_binary_operation (enum rtx_code code, enum machine_mode mode, return 0; default: - abort (); + gcc_unreachable (); } val = trunc_int_for_mode (val, mode); @@ -2799,10 +2787,9 @@ simplify_const_relational_operation (enum rtx_code code, rtx trueop0; rtx trueop1; - if (mode == VOIDmode - && (GET_MODE (op0) != VOIDmode - || GET_MODE (op1) != VOIDmode)) - abort (); + gcc_assert (mode != VOIDmode + || (GET_MODE (op0) == VOIDmode + && GET_MODE (op1) == VOIDmode)); /* If op0 is a compare, extract the comparison arguments from it. 
*/ if (GET_CODE (op0) == COMPARE && op1 == const0_rtx) @@ -3101,7 +3088,7 @@ simplify_const_relational_operation (enum rtx_code code, case UNORDERED: return const0_rtx; default: - abort (); + gcc_unreachable (); } } @@ -3240,10 +3227,9 @@ simplify_ternary_operation (enum rtx_code code, enum machine_mode mode, break; case VEC_MERGE: - if (GET_MODE (op0) != mode - || GET_MODE (op1) != mode - || !VECTOR_MODE_P (mode)) - abort (); + gcc_assert (GET_MODE (op0) == mode); + gcc_assert (GET_MODE (op1) == mode); + gcc_assert (VECTOR_MODE_P (mode)); op2 = avoid_constant_pool_reference (op2); if (GET_CODE (op2) == CONST_INT) { @@ -3274,7 +3260,7 @@ simplify_ternary_operation (enum rtx_code code, enum machine_mode mode, break; default: - abort (); + gcc_unreachable (); } return 0; @@ -3328,11 +3314,10 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op, elems = &op; elem_bitsize = max_bitsize; } - - if (BITS_PER_UNIT % value_bit != 0) - abort (); /* Too complicated; reducing value_bit may help. */ - if (elem_bitsize % BITS_PER_UNIT != 0) - abort (); /* I don't know how to handle endianness of sub-units. */ + /* If this asserts, it is too complicated; reducing value_bit may help. */ + gcc_assert (BITS_PER_UNIT % value_bit == 0); + /* I don't know how to handle endianness of sub-units. */ + gcc_assert (elem_bitsize % BITS_PER_UNIT == 0); for (elem = 0; elem < num_elem; elem++) { @@ -3369,8 +3354,7 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op, { /* If this triggers, someone should have generated a CONST_INT instead. */ - if (elem_bitsize <= HOST_BITS_PER_WIDE_INT) - abort (); + gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT); for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit) *vp++ = CONST_DOUBLE_LOW (el) >> i; @@ -3385,15 +3369,14 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op, for (; i < max_bitsize; i += value_bit) *vp++ = 0; } - else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT) + else { long tmp[max_bitsize / 32]; int bitsize = GET_MODE_BITSIZE (GET_MODE (el)); - - if (bitsize > elem_bitsize) - abort (); - if (bitsize % value_bit != 0) - abort (); + + gcc_assert (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT); + gcc_assert (bitsize <= elem_bitsize); + gcc_assert (bitsize % value_bit == 0); real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el), GET_MODE (el)); @@ -3417,12 +3400,10 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op, for (; i < elem_bitsize; i += value_bit) *vp++ = 0; } - else - abort (); break; default: - abort (); + gcc_unreachable (); } } @@ -3442,8 +3423,7 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op, /* BYTE should still be inside OP. (Note that BYTE is unsigned, so if it's become negative it will instead be very large.) */ - if (byte >= GET_MODE_SIZE (innermode)) - abort (); + gcc_assert (byte < GET_MODE_SIZE (innermode)); /* Convert from bytes to chunks of size value_bit. 
*/ value_start = byte * (BITS_PER_UNIT / value_bit); @@ -3467,10 +3447,8 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op, outer_class = GET_MODE_CLASS (outer_submode); elem_bitsize = GET_MODE_BITSIZE (outer_submode); - if (elem_bitsize % value_bit != 0) - abort (); - if (elem_bitsize + value_start * value_bit > max_bitsize) - abort (); + gcc_assert (elem_bitsize % value_bit == 0); + gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize); for (elem = 0; elem < num_elem; elem++) { @@ -3540,7 +3518,7 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op, break; default: - abort (); + gcc_unreachable (); } } if (VECTOR_MODE_P (outermode)) @@ -3556,17 +3534,16 @@ simplify_subreg (enum machine_mode outermode, rtx op, enum machine_mode innermode, unsigned int byte) { /* Little bit of sanity checking. */ - if (innermode == VOIDmode || outermode == VOIDmode - || innermode == BLKmode || outermode == BLKmode) - abort (); + gcc_assert (innermode != VOIDmode); + gcc_assert (outermode != VOIDmode); + gcc_assert (innermode != BLKmode); + gcc_assert (outermode != BLKmode); - if (GET_MODE (op) != innermode - && GET_MODE (op) != VOIDmode) - abort (); + gcc_assert (GET_MODE (op) == innermode + || GET_MODE (op) == VOIDmode); - if (byte % GET_MODE_SIZE (outermode) - || byte >= GET_MODE_SIZE (innermode)) - abort (); + gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0); + gcc_assert (byte < GET_MODE_SIZE (innermode)); if (outermode == innermode && !byte) return op; @@ -3772,17 +3749,16 @@ simplify_gen_subreg (enum machine_mode outermode, rtx op, { rtx newx; /* Little bit of sanity checking. */ - if (innermode == VOIDmode || outermode == VOIDmode - || innermode == BLKmode || outermode == BLKmode) - abort (); + gcc_assert (innermode != VOIDmode); + gcc_assert (outermode != VOIDmode); + gcc_assert (innermode != BLKmode); + gcc_assert (outermode != BLKmode); - if (GET_MODE (op) != innermode - && GET_MODE (op) != VOIDmode) - abort (); + gcc_assert (GET_MODE (op) == innermode + || GET_MODE (op) == VOIDmode); - if (byte % GET_MODE_SIZE (outermode) - || byte >= GET_MODE_SIZE (innermode)) - abort (); + gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0); + gcc_assert (byte < GET_MODE_SIZE (innermode)); newx = simplify_subreg (outermode, op, innermode, byte); if (newx) diff --git a/gcc/sreal.c b/gcc/sreal.c index 8980659c99b..662d27d5548 100644 --- a/gcc/sreal.c +++ b/gcc/sreal.c @@ -94,17 +94,12 @@ copy (sreal *r, sreal *a) static inline void shift_right (sreal *x, int s) { -#ifdef ENABLE_CHECKING - if (s <= 0 || s > SREAL_BITS) - abort (); - if (x->exp + s > SREAL_MAX_EXP) - { - /* Exponent should never be so large because shift_right is used only by - sreal_add and sreal_sub ant thus the number cannot be shifted out from - exponent range. */ - abort (); - } -#endif + gcc_assert (s > 0); + gcc_assert (s <= SREAL_BITS); + /* Exponent should never be so large because shift_right is used only by + sreal_add and sreal_sub ant thus the number cannot be shifted out from + exponent range. 
*/ + gcc_assert (x->exp + s <= SREAL_MAX_EXP); x->exp += s; @@ -401,10 +396,7 @@ sreal_sub (sreal *r, sreal *a, sreal *b) sreal tmp; sreal *bb; - if (sreal_compare (a, b) < 0) - { - abort (); - } + gcc_assert (sreal_compare (a, b) >= 0); dexp = a->exp - b->exp; r->exp = a->exp; @@ -509,11 +501,8 @@ sreal_div (sreal *r, sreal *a, sreal *b) #if SREAL_PART_BITS < 32 unsigned HOST_WIDE_INT tmp, tmp1, tmp2; - if (b->sig_hi < SREAL_MIN_SIG) - { - abort (); - } - else if (a->sig_hi < SREAL_MIN_SIG) + gcc_assert (b->sig_hi >= SREAL_MIN_SIG); + if (a->sig_hi < SREAL_MIN_SIG) { r->sig_hi = 0; r->sig_lo = 0; @@ -546,16 +535,10 @@ sreal_div (sreal *r, sreal *a, sreal *b) normalize (r); } #else - if (b->sig == 0) - { - abort (); - } - else - { - r->sig = (a->sig << SREAL_PART_BITS) / b->sig; - r->exp = a->exp - b->exp - SREAL_PART_BITS; - normalize (r); - } + gcc_assert (b->sig != 0); + r->sig = (a->sig << SREAL_PART_BITS) / b->sig; + r->exp = a->exp - b->exp - SREAL_PART_BITS; + normalize (r); #endif return r; } diff --git a/gcc/stmt.c b/gcc/stmt.c index 02e6664b82c..dc2ca0d3b1b 100644 --- a/gcc/stmt.c +++ b/gcc/stmt.c @@ -132,8 +132,7 @@ static struct case_node *add_case_node (struct case_node *, tree, tree, tree); rtx label_rtx (tree label) { - if (TREE_CODE (label) != LABEL_DECL) - abort (); + gcc_assert (TREE_CODE (label) == LABEL_DECL); if (!DECL_RTL_SET_P (label)) { @@ -155,8 +154,7 @@ force_label_rtx (tree label) tree function = decl_function_context (label); struct function *p; - if (!function) - abort (); + gcc_assert (function); if (function != current_function_decl) p = find_function_data (function); @@ -241,8 +239,7 @@ expand_goto (tree label) /* Check for a nonlocal goto to a containing function. Should have gotten translated to __builtin_nonlocal_goto. */ tree context = decl_function_context (label); - if (context != 0 && context != current_function_decl) - abort (); + gcc_assert (!context || context == current_function_decl); #endif emit_jump (label_rtx (label)); @@ -785,11 +782,12 @@ expand_asm_operands (tree string, tree outputs, tree inputs, bool allows_reg; bool allows_mem; rtx op; + bool ok; - if (!parse_output_constraint (&constraints[i], i, ninputs, + ok = parse_output_constraint (&constraints[i], i, ninputs, noutputs, &allows_mem, &allows_reg, - &is_inout)) - abort (); + &is_inout); + gcc_assert (ok); /* If an output operand is not a decl or indirect ref and our constraint allows a register, make a temporary to act as an intermediate. @@ -866,11 +864,12 @@ expand_asm_operands (tree string, tree outputs, tree inputs, const char *constraint; tree val, type; rtx op; + bool ok; constraint = constraints[i + noutputs]; - if (! parse_input_constraint (&constraint, i, ninputs, noutputs, ninout, - constraints, &allows_mem, &allows_reg)) - abort (); + ok = parse_input_constraint (&constraint, i, ninputs, noutputs, ninout, + constraints, &allows_mem, &allows_reg); + gcc_assert (ok); generating_concat_p = 0; @@ -1330,8 +1329,7 @@ resolve_operand_name_1 (char *p, tree outputs, tree inputs) p = strchr (p, '\0'); /* Verify the no extra buffer space assumption. */ - if (p > q) - abort (); + gcc_assert (p <= q); /* Shift the rest of the buffer down to fill the gap. */ memmove (p, q + 1, strlen (q + 1) + 1); @@ -1733,9 +1731,8 @@ expand_return (tree retval) if (GET_MODE_SIZE (tmpmode) >= bytes) break; - /* No suitable mode found. */ - if (tmpmode == VOIDmode) - abort (); + /* A suitable mode should have been found. 
*/ + gcc_assert (tmpmode != VOIDmode); PUT_MODE (result_rtl, tmpmode); } @@ -1974,9 +1971,8 @@ expand_decl (tree decl) to the proper address. */ if (DECL_RTL_SET_P (decl)) { - if (!MEM_P (DECL_RTL (decl)) - || !REG_P (XEXP (DECL_RTL (decl), 0))) - abort (); + gcc_assert (MEM_P (DECL_RTL (decl))); + gcc_assert (REG_P (XEXP (DECL_RTL (decl), 0))); oldaddr = XEXP (DECL_RTL (decl), 0); } @@ -2122,6 +2118,7 @@ expand_anon_union_decl (tree decl, tree cleanup ATTRIBUTE_UNUSED, { tree decl_elt = TREE_VALUE (t); enum machine_mode mode = TYPE_MODE (TREE_TYPE (decl_elt)); + rtx decl_rtl; /* If any of the elements are addressable, so is the entire union. */ @@ -2139,24 +2136,18 @@ expand_anon_union_decl (tree decl, tree cleanup ATTRIBUTE_UNUSED, DECL_MODE (decl_elt) = mode = mode_for_size_tree (DECL_SIZE (decl_elt), MODE_INT, 1); - /* (SUBREG (MEM ...)) at RTL generation time is invalid, so we - instead create a new MEM rtx with the proper mode. */ - if (MEM_P (x)) + if (mode == GET_MODE (x)) + decl_rtl = x; + else if (MEM_P (x)) + /* (SUBREG (MEM ...)) at RTL generation time is invalid, so we + instead create a new MEM rtx with the proper mode. */ + decl_rtl = adjust_address_nv (x, mode, 0); + else { - if (mode == GET_MODE (x)) - SET_DECL_RTL (decl_elt, x); - else - SET_DECL_RTL (decl_elt, adjust_address_nv (x, mode, 0)); + gcc_assert (REG_P (x)); + decl_rtl = gen_lowpart_SUBREG (mode, x); } - else if (REG_P (x)) - { - if (mode == GET_MODE (x)) - SET_DECL_RTL (decl_elt, x); - else - SET_DECL_RTL (decl_elt, gen_lowpart_SUBREG (mode, x)); - } - else - abort (); + SET_DECL_RTL (decl_elt, decl_rtl); } } @@ -2280,10 +2271,9 @@ emit_case_bit_tests (tree index_type, tree index_expr, tree minval, if (i == count) { - if (count >= MAX_CASE_BIT_TESTS) - abort (); - test[i].hi = 0; - test[i].lo = 0; + gcc_assert (count < MAX_CASE_BIT_TESTS); + test[i].hi = 0; + test[i].lo = 0; test[i].label = label; test[i].bits = 1; count++; @@ -2378,8 +2368,8 @@ expand_case (tree exp) /* The switch body is lowered in gimplify.c, we should never have switches with a non-NULL SWITCH_BODY here. */ - if (SWITCH_BODY (exp) || !SWITCH_LABELS (exp)) - abort (); + gcc_assert (!SWITCH_BODY (exp)); + gcc_assert (SWITCH_LABELS (exp)); for (i = TREE_VEC_LENGTH (vec); --i >= 0; ) { @@ -2388,15 +2378,12 @@ expand_case (tree exp) /* Handle default labels specially. */ if (!CASE_HIGH (elt) && !CASE_LOW (elt)) { -#ifdef ENABLE_CHECKING - if (default_label_decl != 0) - abort (); -#endif - default_label_decl = CASE_LABEL (elt); + gcc_assert (!default_label_decl); + default_label_decl = CASE_LABEL (elt); } else case_list = add_case_node (case_list, CASE_LOW (elt), CASE_HIGH (elt), - CASE_LABEL (elt)); + CASE_LABEL (elt)); } do_pending_stack_adjust (); @@ -2411,6 +2398,8 @@ expand_case (tree exp) /* An ERROR_MARK occurs for various reasons including invalid data type. */ if (index_type != error_mark_node) { + int fail; + /* If we don't have a default-label, create one here, after the body of the switch. */ if (default_label_decl == 0) @@ -2431,10 +2420,8 @@ expand_case (tree exp) for (n = case_list; n; n = n->right) { /* Check low and high label values are integers. */ - if (TREE_CODE (n->low) != INTEGER_CST) - abort (); - if (TREE_CODE (n->high) != INTEGER_CST) - abort (); + gcc_assert (TREE_CODE (n->low) == INTEGER_CST); + gcc_assert (TREE_CODE (n->high) == INTEGER_CST); n->low = convert (index_type, n->low); n->high = convert (index_type, n->high); @@ -2605,6 +2592,7 @@ expand_case (tree exp) if (! 
try_casesi (index_type, index_expr, minval, range, table_label, default_label)) { + bool ok; index_type = integer_type_node; /* Index jumptables from zero for suitable values of @@ -2617,9 +2605,9 @@ expand_case (tree exp) range = maxval; } - if (! try_tablejump (index_type, index_expr, minval, range, - table_label, default_label)) - abort (); + ok = try_tablejump (index_type, index_expr, minval, range, + table_label, default_label); + gcc_assert (ok); } /* Get table of labels to jump to, in order of case index. */ @@ -2675,8 +2663,8 @@ expand_case (tree exp) before_case = NEXT_INSN (before_case); end = get_last_insn (); - if (squeeze_notes (&before_case, &end)) - abort (); + fail = squeeze_notes (&before_case, &end); + gcc_assert (!fail); reorder_insns (before_case, end, start); } diff --git a/gcc/stor-layout.c b/gcc/stor-layout.c index 01c9947a56f..12d6d484bc3 100644 --- a/gcc/stor-layout.c +++ b/gcc/stor-layout.c @@ -108,9 +108,7 @@ put_pending_size (tree expr) void put_pending_sizes (tree chain) { - if (pending_sizes) - abort (); - + gcc_assert (!pending_sizes); pending_sizes = chain; } @@ -220,7 +218,7 @@ smallest_mode_for_size (unsigned int size, enum mode_class class) if (GET_MODE_PRECISION (mode) >= size) return mode; - abort (); + gcc_unreachable (); } /* Find an integer mode of the exact same size, or BLKmode on failure. */ @@ -250,7 +248,7 @@ int_mode_for_mode (enum machine_mode mode) case MODE_CC: default: - abort (); + gcc_unreachable (); } return mode; @@ -302,10 +300,10 @@ layout_decl (tree decl, unsigned int known_align) if (code == CONST_DECL) return; - else if (code != VAR_DECL && code != PARM_DECL && code != RESULT_DECL - && code != TYPE_DECL && code != FIELD_DECL) - abort (); - + + gcc_assert (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL + || code == TYPE_DECL ||code == FIELD_DECL); + rtl = DECL_RTL_IF_SET (decl); if (type == error_mark_node) @@ -1477,8 +1475,7 @@ finish_builtin_struct (tree type, const char *name, tree fields, void layout_type (tree type) { - if (type == 0) - abort (); + gcc_assert (type); if (type == error_mark_node) return; @@ -1492,7 +1489,7 @@ layout_type (tree type) case LANG_TYPE: /* This kind of type is the responsibility of the language-specific code. */ - abort (); + gcc_unreachable (); case BOOLEAN_TYPE: /* Used for Java, Pascal, and Chill. */ if (TYPE_PRECISION (type) == 0) @@ -1536,8 +1533,7 @@ layout_type (tree type) tree nunits_tree = build_int_cst (NULL_TREE, nunits); tree innertype = TREE_TYPE (type); - if (nunits & (nunits - 1)) - abort (); + gcc_assert (!(nunits & (nunits - 1))); /* Find an appropriate mode for the vector type. */ if (TYPE_MODE (type) == VOIDmode) @@ -1758,33 +1754,37 @@ layout_type (tree type) break; case SET_TYPE: /* Used by Chill and Pascal. */ - if (TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) != INTEGER_CST - || TREE_CODE (TYPE_MIN_VALUE (TYPE_DOMAIN (type))) != INTEGER_CST) - abort (); - else - { + { + unsigned int alignment; + HOST_WIDE_INT size_in_bits; + HOST_WIDE_INT rounded_size; + + gcc_assert (TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) + == INTEGER_CST); + gcc_assert (TREE_CODE (TYPE_MIN_VALUE (TYPE_DOMAIN (type))) + == INTEGER_CST); + #ifndef SET_WORD_SIZE #define SET_WORD_SIZE BITS_PER_WORD #endif - unsigned int alignment - = set_alignment ? 
set_alignment : SET_WORD_SIZE; - HOST_WIDE_INT size_in_bits - = (tree_low_cst (TYPE_MAX_VALUE (TYPE_DOMAIN (type)), 0) - - tree_low_cst (TYPE_MIN_VALUE (TYPE_DOMAIN (type)), 0) + 1); - HOST_WIDE_INT rounded_size - = ((size_in_bits + alignment - 1) / alignment) * alignment; - - if (rounded_size > (int) alignment) - TYPE_MODE (type) = BLKmode; - else - TYPE_MODE (type) = mode_for_size (alignment, MODE_INT, 1); - - TYPE_SIZE (type) = bitsize_int (rounded_size); - TYPE_SIZE_UNIT (type) = size_int (rounded_size / BITS_PER_UNIT); - TYPE_ALIGN (type) = alignment; - TYPE_USER_ALIGN (type) = 0; - TYPE_PRECISION (type) = size_in_bits; - } + alignment = set_alignment ? set_alignment : SET_WORD_SIZE; + size_in_bits + = (tree_low_cst (TYPE_MAX_VALUE (TYPE_DOMAIN (type)), 0) + - tree_low_cst (TYPE_MIN_VALUE (TYPE_DOMAIN (type)), 0) + 1); + rounded_size + = ((size_in_bits + alignment - 1) / alignment) * alignment; + + if (rounded_size > (int) alignment) + TYPE_MODE (type) = BLKmode; + else + TYPE_MODE (type) = mode_for_size (alignment, MODE_INT, 1); + + TYPE_SIZE (type) = bitsize_int (rounded_size); + TYPE_SIZE_UNIT (type) = size_int (rounded_size / BITS_PER_UNIT); + TYPE_ALIGN (type) = alignment; + TYPE_USER_ALIGN (type) = 0; + TYPE_PRECISION (type) = size_in_bits; + } break; case FILE_TYPE: @@ -1796,7 +1796,7 @@ layout_type (tree type) break; default: - abort (); + gcc_unreachable (); } /* Compute the final TYPE_SIZE, TYPE_ALIGN, etc. for TYPE. For @@ -1886,8 +1886,7 @@ set_sizetype (tree type) 2 * HOST_BITS_PER_WIDE_INT); tree t; - if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (sizetype)) - abort (); + gcc_assert (TYPE_UNSIGNED (type) == TYPE_UNSIGNED (sizetype)); t = build_distinct_type_copy (type); /* We do want to use sizetype's cache, as we will be replacing that @@ -2109,8 +2108,7 @@ get_mode_bounds (enum machine_mode mode, int sign, unsigned size = GET_MODE_BITSIZE (mode); unsigned HOST_WIDE_INT min_val, max_val; - if (size > HOST_BITS_PER_WIDE_INT) - abort (); + gcc_assert (size <= HOST_BITS_PER_WIDE_INT); if (sign) { |
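
Editorial note: the hunks above apply a small set of mechanical patterns throughout — a guarded `abort ()` becomes a `gcc_assert` on the inverted condition, an unreachable `default:` becomes `gcc_unreachable ()`, compound guards are split into one assertion per condition, and calls whose result was only tested before aborting are first hoisted into a local (`apply_change_group`, `parse_output_constraint`, `try_tablejump`, `squeeze_notes`), presumably so the call is still executed when assertion checking is compiled out. Below is a minimal sketch of these conversions, not code from the patch: it assumes the `gcc_assert`/`gcc_unreachable` macros from gcc/system.h (stand-ins are provided so it builds on its own), and `frob`/`do_side_effect` are hypothetical helpers.

```c
#include <stdbool.h>
#include <stdlib.h>

/* Stand-ins so the sketch builds outside the GCC tree; the real macros
   live in gcc/system.h and may expand differently when assertion
   checking is disabled.  */
#ifndef gcc_assert
# include <assert.h>
# define gcc_assert(EXPR) assert (EXPR)
# define gcc_unreachable() assert (0)
#endif

static int frob (void) { return 0; }
static bool do_side_effect (void) { return true; }

/* Before: abort-style checks.  */
static void
old_style (int x, int code)
{
  if (x < 0 || x > 10)
    abort ();

  if (!do_side_effect ())
    abort ();

  switch (code)
    {
    case 0:
      frob ();
      break;
    default:
      abort ();
    }
}

/* After: the patterns used in this patch.  */
static void
new_style (int x, int code)
{
  bool ok;

  /* Compound guard split into one assertion per condition.  */
  gcc_assert (x >= 0);
  gcc_assert (x <= 10);

  /* The call is hoisted out of the assertion so its side effects
     survive even if the assertion itself is compiled away.  */
  ok = do_side_effect ();
  gcc_assert (ok);

  switch (code)
    {
    case 0:
      frob ();
      break;
    default:
      gcc_unreachable ();
    }
}

int
main (void)
{
  old_style (3, 0);
  new_style (3, 0);
  return 0;
}
```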