author	mrs <mrs@138bc75d-0d04-0410-961f-82ee72b054a4>	2013-11-06 02:15:30 +0000
committer	mrs <mrs@138bc75d-0d04-0410-961f-82ee72b054a4>	2013-11-06 02:15:30 +0000
commit	701c3ea94860c878112a2357bf576cce476f5223 (patch)
tree	e42c3f93e644d6029160f633bc071200be69bcc7 /gcc
parent	a04a7bec5afd7a0c4d7f32d84f7fa4832600ce70 (diff)
parent	793f83aeb2332046c68f1ea901230f353610fe46 (diff)
Merge in trunk.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/branches/wide-int@204436 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'gcc')
118 files changed, 3715 insertions, 1555 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 7e0b752c3ed..d5c8ceb1c5e 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,299 @@
+2013-11-05  Ian Lance Taylor  <iant@google.com>
+
+	* config/i386/sync.md (atomic_compare_and_swap<dwi>_doubleword):
+	If possible, add .cfi directives to record change to bx.
+	* config/i386/i386.c (ix86_emit_cfi): New function.
+	* config/i386/i386-protos.h (ix86_emit_cfi): Declare.
+
+2013-11-05  Steven Bosscher  <steven@gcc.gnu.org>
+
+	* rtlanal.c (tablejump_p): Expect a JUMP_TABLE_DATA to always follow
+	immediately after a label for a tablejump pattern.
+
+	* config/arm/arm.c (is_jump_table): Remove.
+	(create_fix_barrier): Use tablejump_p instead.
+	(arm_reorg): Likewise.
+	(thumb1_output_casesi): Expect JUMP_TABLE_DATA to always be NEXT_INSN.
+	(thumb2_output_casesi): Likewise.
+	* config/aarch64/aarch64.c (aarch64_output_casesi): Likewise.
+	* config/sh/sh.md (casesi_worker_1, casesi_worker_2,
+	casesi_shift_media, casesi_load_media): Likewise.
+	* config/iq2000/iq2000.md: Likewise (in anonymous define_insn).
+	* config/microblaze/microblaze.md: Likewise.
+
+2013-11-05  Tobias Burnus  <burnus@net-b.de>
+
+	* doc/invoke.texi (-Wdate-time): Document.
+
+2013-11-05  Richard Sandiford  <rdsandiford@googlemail.com>
+
+	* double-int.c (lshift_double, rshift_double): Remove
+	SHIFT_COUNT_TRUNCATED handling.
+
+2013-11-05  Jeff Law  <law@redhat.com>
+
+	* Makefile.in (OBJS): Add gimple-ssa-isolate-paths.o
+	* common.opt (-fisolate-erroneous-paths): Add option and
+	documentation.
+	* gimple-ssa-isolate-paths.c: New file.
+	* gimple.c (check_loadstore): New function.
+	(infer_nonnull_range): Moved into gimple.c from tree-vrp.c
+	Verify OP is in the argument list and the argument corresponding
+	to OP is a pointer type.  Use operand_equal_p rather than
+	pointer equality when testing if OP is on the nonnull list.
+	Use check_loadstore rather than count_ptr_derefs.  Handle
+	GIMPLE_RETURN statements.
+	* tree-vrp.c (infer_nonnull_range): Remove.
+	* gimple.h (infer_nonnull_range): Declare.
+	* opts.c (default_options_table): Add OPT_fisolate_erroneous_paths.
+	* passes.def: Add pass_isolate_erroneous_paths.
+	* timevar.def (TV_ISOLATE_ERRONEOUS_PATHS): New timevar.
+	* tree-pass.h (make_pass_isolate_erroneous_paths): Declare.
+	* tree-ssa.c (struct count_ptr_d): Remove.
+	(count_ptr_derefs, count_uses_and_derefs): Remove.
+	* tree-ssa.h (count_uses_and_derefs): Remove.
+
+2013-11-05  Jakub Jelinek  <jakub@redhat.com>
+
+	PR rtl-optimization/58997
+	* loop-iv.c (iv_subreg): For IV_UNKNOWN_EXTEND, expect
+	get_iv_value to be in iv->mode rather than iv->extend_mode.
+	(iv_extend): Likewise.  Otherwise, if iv->extend != extend,
+	use lowpart_subreg on get_iv_value before calling simplify_gen_unary.
+	* loop-unswitch.c (may_unswitch_on): Make sure op[i] is in the right
+	mode.
+
+2013-11-05  Andrew MacLeod  <amacleod@redhat.com>
+
+	* gimple.h: Move some prototypes to gimple-expr.h and add to include
+	list.
+	(extract_ops_from_tree, gimple_call_addr_fndecl, is_gimple_reg_type):
+	Move to gimple-expr.h.
+	* gimple-expr.h: New file.  Relocate some prototypes from gimple.h.
+	(types_compatible_p, is_gimple_reg_type, is_gimple_variable,
+	is_gimple_id, virtual_operand_p, is_gimple_addressable,
+	is_gimple_constant, extract_ops_from_tree, gimple_call_addr_fndecl):
+	Relocate here.
+	* gimple.c (extract_ops_from_tree_1, gimple_cond_get_ops_from_tree,
+	gimple_set_body, gimple_body, gimple_has_body_p, is_gimple_lvalue,
+	is_gimple_condexpr, is_gimple_addressable, is_gimple_constant,
+	is_gimple_address, is_gimple_invariant_address,
+	is_gimple_ip_invariant_address, is_gimple_min_invariant,
+	is_gimple_ip_invariant, is_gimple_variable, is_gimple_id,
+	virtual_operand_p, is_gimple_reg, is_gimple_val, is_gimple_asm_val,
+	is_gimple_min_lval, is_gimple_call_addr, is_gimple_mem_ref_addr,
+	gimple_decl_printable_name, useless_type_conversion_p,
+	types_compatible_p, gimple_can_coalesce_p, copy_var_decl): Move to
+	gimple-expr.[ch].
+	* gimple-expr.c: New File.
+	(useless_type_conversion_p, gimple_set_body, gimple_body,
+	gimple_has_body_p, gimple_decl_printable_name, copy_var_decl,
+	gimple_can_coalesce_p, extract_ops_from_tree_1,
+	gimple_cond_get_ops_from_tree, is_gimple_lvalue, is_gimple_condexpr,
+	is_gimple_address, is_gimple_invariant_address,
+	is_gimple_ip_invariant_address, is_gimple_min_invariant,
+	is_gimple_ip_invariant, is_gimple_reg, is_gimple_val,
+	is_gimple_asm_val, is_gimple_min_lval, is_gimple_call_addr,
+	is_gimple_mem_ref_addr): Relocate here.
+	* Makefile.in (OBJS): Add gimple-expr.o.
+
+2013-11-05  David Malcolm  <dmalcolm@redhat.com>
+
+	* gengtype-parse.c (struct_field_seq): Support empty structs.
+
+2013-11-05  Uros Bizjak  <ubizjak@gmail.com>
+
+	* config/i386/t-rtems (MULTILIB_MATCHES): Fix option typos.
+
+2013-11-05  Uros Bizjak  <ubizjak@gmail.com>
+
+	* config/i386/i386-c.c (ix86_target_macros): Define _SOFT_FLOAT
+	for !TARGET_80387.
+	* config/i386/rtemself.h (TARGET_OS_CPP_BUILTINS): Do not define
+	_SOFT_FLOAT here.
+	(LONG_DOUBLE_TYPE_SIZE): New define.
+	(LIBGCC2_LONG_DOUBLE_TYPE_SIZE): Ditto.
+
+2013-11-05  Paolo Carlini  <paolo.carlini@oracle.com>
+
+	PR c++/58724
+	* doc/extend.texi [visibility ("visibility_type")]: Add example
+	about visibility attribute on namespace declaration.
+
+2013-11-05  Richard Biener  <rguenther@suse.de>
+
+	PR ipa/58492
+	* passes.def (all_passes): Start with pass_fixup_cfg again.
+
+2013-11-05  Richard Biener  <rguenther@suse.de>
+
+	PR tree-optimization/58955
+	* tree-loop-distribution.c (pg_add_dependence_edges): Fix
+	edge direction.
+
+2013-11-05  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>
+
+	* config/rs6000/vector.md (vec_pack_sfix_trunc_v2df): Adjust for
+	little endian.
+	(vec_pack_ufix_trunc_v2df): Likewise.
+
+2013-11-05  H.J. Lu  <hongjiu.lu@intel.com>
+
+	PR middle-end/58981
+	* doc/md.texi (@code{movmem@var{m}}): Specify Pmode as mode of
+	pattern, instead of word_mode.
+
+	* expr.c (emit_block_move_via_movmem): Don't use mode wider than
+	Pmode for size.
+	(set_storage_via_setmem): Likewise.
+
+2013-11-05  Andrew MacLeod  <amacleod@redhat.com>
+
+	* tree-outof-ssa.c (queue_phi_copy_p): Combine phi_ssa_name_p from
+	gimple.h and the rest of the condition in eliminate_build.
+	(eliminate_build): Call new routine.
+	* gimple.h (phi_ssa_name_p): Delete.
+
+2013-11-05  Trevor Saunders  <tsaunders@mozilla.com>
+
+	* vec.c (vec_prefix::calculate_allocation): Don't try to handle the
+	case of no prefix and reserving zero slots, because when that's the
+	case we'll never get here.
+	* vec.h (va_heap::reserve): Don't try and handle
+	vec_prefix::calculate_allocation returning zero because that should
+	never happen.
+
+2013-11-05  Richard Biener  <rguenther@suse.de>
+
+	PR middle-end/58941
+	* tree-dfa.c (get_ref_base_and_extent): Merge common code
+	in MEM_REF and TARGET_MEM_REF handling.  Make sure to
+	process trailing array detection before diving into the
+	view-converted object (and possibly apply some extra offset).
+
+2013-11-05  Joseph Myers  <joseph@codesourcery.com>
+
+	* config/i386/i386.c (ix86_float_exceptions_rounding_supported_p):
+	New function.
+	(TARGET_FLOAT_EXCEPTIONS_ROUNDING_SUPPORTED_P): Define.
+
+2013-11-05  Marc Glisse  <marc.glisse@inria.fr>
+
+	PR tree-optimization/58958
+	* tree-ssa-alias.c (ao_ref_init_from_ptr_and_size): Use
+	get_addr_base_and_unit_offset instead of get_ref_base_and_extent.
+
+2013-11-05  Marc Glisse  <marc.glisse@inria.fr>
+
+	* tree-ssa-alias.h (ranges_overlap_p): Handle negative offsets.
+	* tree-ssa-alias.c (ao_ref_init_from_ptr_and_size): Likewise.
+
+2013-11-05  Jakub Jelinek  <jakub@redhat.com>
+
+	PR tree-optimization/58984
+	* ipa-prop.c (ipa_load_from_parm_agg_1): Add SIZE_P argument,
+	set *SIZE_P if non-NULL on success.
+	(ipa_load_from_parm_agg, ipa_analyze_indirect_call_uses): Adjust
+	callers.
+	(ipcp_transform_function): Likewise.  Punt if size of access
+	is different from TYPE_SIZE on v->value's type.
+
+2013-11-05  Tobias Burnus  <burnus@net-b.de>
+
+	* doc/invoke.texi (-fopenmp-simd): Document new option.
+	* gimplify.c (gimplify_body): Accept -fopenmp-simd.
+	* omp-low.c (execute_expand_omp, execute_lower_omp): Ditto.
+	* tree.c (attribute_value_equal): Ditto.
+
+2013-11-04  Wei Mi  <wmi@google.com>
+
+	* sched-rgn.c (add_branch_dependences): Keep insns in
+	a SCHED_GROUP at the end of BB to remain their location.
+
+2013-11-04  Wei Mi  <wmi@google.com>
+
+	* gcc/config/i386/i386.c (memory_address_length): Extract a part
+	of code to rip_relative_addr_p.
+	(rip_relative_addr_p): New Function.
+	(ix86_macro_fusion_p): Ditto.
+	(ix86_macro_fusion_pair_p): Ditto.
+	* gcc/config/i386/i386.h: Add new tune features about macro-fusion.
+	* gcc/config/i386/x86-tune.def (DEF_TUNE): Ditto.
+	* gcc/doc/tm.texi: Generated.
+	* gcc/doc/tm.texi.in: Ditto.
+	* gcc/haifa-sched.c (try_group_insn): New Function.
+	(group_insns_for_macro_fusion): Ditto.
+	(sched_init): Call group_insns_for_macro_fusion.
+	* gcc/target.def: Add two hooks: macro_fusion_p and
+	macro_fusion_pair_p.
+
+2013-11-04  Kostya Serebryany  <kcc@google.com>
+
+	Update to match the changed asan API.
+	* asan.c (asan_function_start): New function.
+	(asan_emit_stack_protection): Update the string stored in the
+	stack red zone to match new API.  Store the PC of the current
+	function in the red zone.
+	(asan_global_struct): Update the __asan_global definition to match
+	the new API.
+	(asan_add_global): Ditto.
+	* asan.h (asan_function_start): New prototype.
+	* final.c (final_start_function): Call asan_function_start.
+	* sanitizer.def (BUILT_IN_ASAN_INIT): Rename __asan_init_v1
+	to __asan_init_v3.
+
+2013-11-04  Wei Mi  <wmi@google.com>
+
+	* gcc/config/i386/i386-c.c (ix86_target_macros_internal): Separate
+	PROCESSOR_COREI7_AVX out from PROCESSOR_COREI7.
+	* gcc/config/i386/i386.c (ix86_option_override_internal): Ditto.
+	(ix86_issue_rate): Ditto.
+	(ix86_adjust_cost): Ditto.
+	(ia32_multipass_dfa_lookahead): Ditto.
+	(ix86_sched_init_global): Ditto.
+	(get_builtin_code_for_version): Ditto.
+	* gcc/config/i386/i386.h (enum target_cpu_default): Ditto.
+	(enum processor_type): Ditto.
+	* gcc/config/i386/x86-tune.def (DEF_TUNE): Ditto.
+
+2013-11-04  Vladimir Makarov  <vmakarov@redhat.com>
+
+	PR rtl-optimization/58967
+	* config/rs6000/rs6000.c (legitimate_lo_sum_address_p): Remove
+	!lra_in_progress for mode sizes bigger word.
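For context on Kostya Serebryany's asan entry above: the __asan_global record gains a module-name field in API v3. A minimal C sketch of the layout that entry and the asan.c hunk below describe -- an illustration of the described shape only, not the actual libsanitizer declaration; the struct name and the pointer-sized "uptr" typedef are assumptions:

/* Sketch of the v3 __asan_global record built by asan_global_struct ().
   Field names follow the asan.c hunk below.  */
typedef unsigned long uptr;     /* assumed pointer-sized unsigned integer */

struct asan_global_v3
{
  uptr __beg;                   /* Address of the global.  */
  uptr __size;                  /* Size of the global.  */
  uptr __size_with_redzone;     /* Size including the trailing redzone.  */
  const void *__name;           /* Name of the global variable.  */
  const void *__module_name;    /* New in v3: module where it is declared.  */
  uptr __has_dynamic_init;      /* Always set to 0 for now.  */
};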
+
+2013-11-04  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>
+
+	* config/rs6000/altivec.md (vec_widen_umult_hi_v16qi): Swap
+	arguments to merge instruction for little endian.
+	(vec_widen_umult_lo_v16qi): Likewise.
+	(vec_widen_smult_hi_v16qi): Likewise.
+	(vec_widen_smult_lo_v16qi): Likewise.
+	(vec_widen_umult_hi_v8hi): Likewise.
+	(vec_widen_umult_lo_v8hi): Likewise.
+	(vec_widen_smult_hi_v8hi): Likewise.
+	(vec_widen_smult_lo_v8hi): Likewise.
+
+2013-11-04  Ian Lance Taylor  <iant@google.com>
+
+	* builtins.def (ATTR_NOTHROWCALL_LEAF_LIST): Define.
+	* sync-builtins.def: Use ATTR_NOTHROWCALL_LEAF_LIST for all sync
+	builtins that take pointers.
+	* lto-opts.c (lto_write_options): Write -fnon-call-exceptions if set.
+	* lto-wrapper.c (merge_and_complain): Collect OPT_fnon_call_exceptions.
+	(run_gcc): Pass -fnon-call-exceptions.
+
+2013-11-04  Jakub Jelinek  <jakub@redhat.com>
+
+	* optabs.c (expand_vec_perm): Revert one incorrect line from
+	2013-10-31 change.
+
+	PR tree-optimization/58978
+	* tree-vrp.c (all_imm_uses_in_stmt_or_feed_cond): Don't modify
+	use_stmt by single_imm_use directly.  Only call single_imm_use
+	on SSA_NAMEs.
+
 2013-11-04  Vladimir Makarov  <vmakarov@redhat.com>
 
 	PR rtl-optimization/58968
@@ -6,13 +302,11 @@
 2013-11-04  Joseph Myers  <joseph@codesourcery.com>
 
-	* doc/cpp.texi (__GCC_IEC_559, __GCC_IEC_559_COMPLEX): Document
-	macros.
+	* doc/cpp.texi (__GCC_IEC_559, __GCC_IEC_559_COMPLEX): Document macros.
 	* target.def (float_exceptions_rounding_supported_p): New hook.
 	* targhooks.c (default_float_exceptions_rounding_supported_p): New
 	function.
-	* targhooks.h (default_float_exceptions_rounding_supported_p):
-	Declare.
+	* targhooks.h (default_float_exceptions_rounding_supported_p): Declare.
 	* doc/tm.texi.in (TARGET_FLOAT_EXCEPTIONS_ROUNDING_SUPPORTED_P):
 	New @hook.
 	* doc/tm.texi: Regenerate.
@@ -65,8 +359,7 @@
 	Implement -fsanitize=vla-bound.
 	* opts.c (common_handle_option): Handle vla-bound.
-	* sanitizer.def (BUILT_IN_UBSAN_HANDLE_VLA_BOUND_NOT_POSITIVE):
-	Define.
+	* sanitizer.def (BUILT_IN_UBSAN_HANDLE_VLA_BOUND_NOT_POSITIVE): Define.
 	* flag-types.h (enum sanitize_code): Add SANITIZE_VLA.
 	* asan.c (initialize_sanitizer_builtins): Build BT_FN_VOID_PTR_PTR.
diff --git a/gcc/DATESTAMP b/gcc/DATESTAMP
index 6e5286a0dd2..5a256115674 100644
--- a/gcc/DATESTAMP
+++ b/gcc/DATESTAMP
@@ -1 +1 @@
-20131104
+20131106
diff --git a/gcc/Makefile.in b/gcc/Makefile.in
index 48c6be26246..e4436cd5d47 100644
--- a/gcc/Makefile.in
+++ b/gcc/Makefile.in
@@ -1230,10 +1230,12 @@ OBJS = \
 	ggc-common.o \
 	gimple.o \
 	gimple-builder.o \
+	gimple-expr.o \
 	gimple-iterator.o \
 	gimple-fold.o \
 	gimple-low.o \
 	gimple-pretty-print.o \
+	gimple-ssa-isolate-paths.o \
 	gimple-ssa-strength-reduction.o \
 	gimple-streamer-in.o \
 	gimple-streamer-out.o \
diff --git a/gcc/asan.c b/gcc/asan.c
index 5dcc981cd06..028c08e0e87 100644
--- a/gcc/asan.c
+++ b/gcc/asan.c
@@ -59,11 +59,13 @@ along with GCC; see the file COPYING3.  If not see
      if ((X & 7) + N - 1 > ShadowValue)
        __asan_report_loadN(X);
  Stores are instrumented similarly, but using __asan_report_storeN functions.
- A call too __asan_init() is inserted to the list of module CTORs.
+ A call too __asan_init_vN() is inserted to the list of module CTORs.
+ N is the version number of the AddressSanitizer API. The changes between the
+ API versions are listed in libsanitizer/asan/asan_interface_internal.h.
The run-time library redefines malloc (so that redzone are inserted around the allocated memory) and free (so that reuse of free-ed memory is delayed), - provides __asan_report* and __asan_init functions. + provides __asan_report* and __asan_init_vN functions. Read more: http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm @@ -125,9 +127,11 @@ along with GCC; see the file COPYING3. If not see where '(...){n}' means the content inside the parenthesis occurs 'n' times, with 'n' being the number of variables on the stack. + + 3/ The following 8 bytes contain the PC of the current function which + will be used by the run-time library to print an error message. - 3/ The following 16 bytes of the red zone have no particular - format. + 4/ The following 8 bytes are reserved for internal use by the run-time. The shadow memory for that stack layout is going to look like this: @@ -205,6 +209,9 @@ along with GCC; see the file COPYING3. If not see // Name of the global variable. const void *__name; + // Name of the module where the global variable is declared. + const void *__module_name; + // This is always set to NULL for now. uptr __has_dynamic_init; } @@ -914,6 +921,15 @@ asan_clear_shadow (rtx shadow_mem, HOST_WIDE_INT len) add_int_reg_note (jump, REG_BR_PROB, REG_BR_PROB_BASE * 80 / 100); } +void +asan_function_start (void) +{ + section *fnsec = function_section (current_function_decl); + switch_to_section (fnsec); + ASM_OUTPUT_DEBUG_LABEL (asm_out_file, "LASANPC", + current_function_funcdef_no); +} + /* Insert code to protect stack vars. The prologue sequence should be emitted directly, epilogue sequence returned. BASE is the register holding the stack base, against which OFFSETS array offsets are relative to, OFFSETS @@ -929,12 +945,13 @@ asan_emit_stack_protection (rtx base, HOST_WIDE_INT *offsets, tree *decls, int length) { rtx shadow_base, shadow_mem, ret, mem; + char buf[30]; unsigned char shadow_bytes[4]; HOST_WIDE_INT base_offset = offsets[length - 1], offset, prev_offset; HOST_WIDE_INT last_offset, last_size; int l; unsigned char cur_shadow_byte = ASAN_STACK_MAGIC_LEFT; - tree str_cst; + tree str_cst, decl, id; if (shadow_ptr_types[0] == NULL_TREE) asan_init_shadow_ptr_types (); @@ -942,11 +959,6 @@ asan_emit_stack_protection (rtx base, HOST_WIDE_INT *offsets, tree *decls, /* First of all, prepare the description string. 
*/ pretty_printer asan_pp; - if (DECL_NAME (current_function_decl)) - pp_tree_identifier (&asan_pp, DECL_NAME (current_function_decl)); - else - pp_string (&asan_pp, "<unknown>"); - pp_space (&asan_pp); pp_decimal_int (&asan_pp, length / 2 - 1); pp_space (&asan_pp); for (l = length - 2; l; l -= 2) @@ -976,6 +988,20 @@ asan_emit_stack_protection (rtx base, HOST_WIDE_INT *offsets, tree *decls, emit_move_insn (mem, gen_int_mode (ASAN_STACK_FRAME_MAGIC, ptr_mode)); mem = adjust_address (mem, VOIDmode, GET_MODE_SIZE (ptr_mode)); emit_move_insn (mem, expand_normal (str_cst)); + mem = adjust_address (mem, VOIDmode, GET_MODE_SIZE (ptr_mode)); + ASM_GENERATE_INTERNAL_LABEL (buf, "LASANPC", current_function_funcdef_no); + id = get_identifier (buf); + decl = build_decl (DECL_SOURCE_LOCATION (current_function_decl), + VAR_DECL, id, char_type_node); + SET_DECL_ASSEMBLER_NAME (decl, id); + TREE_ADDRESSABLE (decl) = 1; + TREE_READONLY (decl) = 1; + DECL_ARTIFICIAL (decl) = 1; + DECL_IGNORED_P (decl) = 1; + TREE_STATIC (decl) = 1; + TREE_PUBLIC (decl) = 0; + TREE_USED (decl) = 1; + emit_move_insn (mem, expand_normal (build_fold_addr_expr (decl))); shadow_base = expand_binop (Pmode, lshr_optab, base, GEN_INT (ASAN_SHADOW_SHIFT), NULL_RTX, 1, OPTAB_DIRECT); @@ -1924,20 +1950,21 @@ transform_statements (void) uptr __size; uptr __size_with_redzone; const void *__name; + const void *__module_name; uptr __has_dynamic_init; } type. */ static tree asan_global_struct (void) { - static const char *field_names[5] + static const char *field_names[6] = { "__beg", "__size", "__size_with_redzone", - "__name", "__has_dynamic_init" }; - tree fields[5], ret; + "__name", "__module_name", "__has_dynamic_init" }; + tree fields[6], ret; int i; ret = make_node (RECORD_TYPE); - for (i = 0; i < 5; i++) + for (i = 0; i < 6; i++) { fields[i] = build_decl (UNKNOWN_LOCATION, FIELD_DECL, @@ -1962,21 +1989,20 @@ asan_add_global (tree decl, tree type, vec<constructor_elt, va_gc> *v) { tree init, uptr = TREE_TYPE (DECL_CHAIN (TYPE_FIELDS (type))); unsigned HOST_WIDE_INT size; - tree str_cst, refdecl = decl; + tree str_cst, module_name_cst, refdecl = decl; vec<constructor_elt, va_gc> *vinner = NULL; - pretty_printer asan_pp; + pretty_printer asan_pp, module_name_pp; if (DECL_NAME (decl)) pp_tree_identifier (&asan_pp, DECL_NAME (decl)); else pp_string (&asan_pp, "<unknown>"); - pp_space (&asan_pp); - pp_left_paren (&asan_pp); - pp_string (&asan_pp, main_input_filename); - pp_right_paren (&asan_pp); str_cst = asan_pp_string (&asan_pp); + pp_string (&module_name_pp, main_input_filename); + module_name_cst = asan_pp_string (&module_name_pp); + if (asan_needs_local_alias (decl)) { char buf[20]; @@ -2004,6 +2030,8 @@ asan_add_global (tree decl, tree type, vec<constructor_elt, va_gc> *v) CONSTRUCTOR_APPEND_ELT (vinner, NULL_TREE, build_int_cst (uptr, size)); CONSTRUCTOR_APPEND_ELT (vinner, NULL_TREE, fold_convert (const_ptr_type_node, str_cst)); + CONSTRUCTOR_APPEND_ELT (vinner, NULL_TREE, + fold_convert (const_ptr_type_node, module_name_cst)); CONSTRUCTOR_APPEND_ELT (vinner, NULL_TREE, build_int_cst (uptr, 0)); init = build_constructor (type, vinner); CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, init); @@ -2158,7 +2186,7 @@ add_string_csts (void **slot, void *data) static GTY(()) tree asan_ctor_statements; /* Module-level instrumentation. - - Insert __asan_init() into the list of CTORs. + - Insert __asan_init_vN() into the list of CTORs. - TODO: insert redzones around globals. 
*/ diff --git a/gcc/asan.h b/gcc/asan.h index 62dbe984967..e56468424c9 100644 --- a/gcc/asan.h +++ b/gcc/asan.h @@ -21,6 +21,7 @@ along with GCC; see the file COPYING3. If not see #ifndef TREE_ASAN #define TREE_ASAN +extern void asan_function_start (void); extern void asan_finish_file (void); extern rtx asan_emit_stack_protection (rtx, HOST_WIDE_INT *, tree *, int); extern bool asan_protect_global (tree); diff --git a/gcc/builtins.def b/gcc/builtins.def index e2d8849c768..3082548731d 100644 --- a/gcc/builtins.def +++ b/gcc/builtins.def @@ -213,6 +213,12 @@ along with GCC; see the file COPYING3. If not see #undef ATTR_MATHFN_FPROUNDING_STORE #define ATTR_MATHFN_FPROUNDING_STORE ATTR_NOTHROW_LEAF_LIST +/* Define an attribute list for leaf functions that do not throw + exceptions normally, but may throw exceptions when using + -fnon-call-exceptions. */ +#define ATTR_NOTHROWCALL_LEAF_LIST (flag_non_call_exceptions ? \ + ATTR_LEAF_LIST : ATTR_NOTHROW_LEAF_LIST) + /* Make sure 0 is not a legitimate builtin. */ DEF_BUILTIN_STUB(BUILT_IN_NONE, (const char *)0) diff --git a/gcc/c-family/ChangeLog b/gcc/c-family/ChangeLog index 067c1341180..68929521087 100644 --- a/gcc/c-family/ChangeLog +++ b/gcc/c-family/ChangeLog @@ -1,3 +1,21 @@ +2013-11-05 Tobias Burnus <burnus@net-b.de> + + * c.opt (-Wdate-time): New option + * c-opts.c (sanitize_cpp_opts): Pass on to libcpp. + +2013-11-05 Joseph Myers <joseph@codesourcery.com> + + * c-cppbuiltin.c (cpp_iec_559_value): Test + flag_excess_precision_cmdline not flag_excess_precision. + +2013-11-05 Tobias Burnus <burnus@net-b.de> + + * c.opt (fopenmp-simd): New option. + * c-pragma.c (omp_pragmas): Move pragmas which can contain simd to ... + (omp_pragmas): ... this new struct. + (c_pp_lookup_pragma): Also walk omp_pragmas. + (init_pragma): Init pragmas for -fopenmp-simd. + 2013-11-04 Marek Polacek <polacek@redhat.com> PR c++/58979 diff --git a/gcc/c-family/c-cppbuiltin.c b/gcc/c-family/c-cppbuiltin.c index 94f72eceb2e..ec8b1834b7e 100644 --- a/gcc/c-family/c-cppbuiltin.c +++ b/gcc/c-family/c-cppbuiltin.c @@ -734,7 +734,7 @@ cpp_iec_559_value (void) if (flag_iso && !c_dialect_cxx () && TARGET_FLT_EVAL_METHOD != 0 - && flag_excess_precision != EXCESS_PRECISION_STANDARD) + && flag_excess_precision_cmdline != EXCESS_PRECISION_STANDARD) ret = 0; /* Various options are contrary to IEEE 754 semantics. */ diff --git a/gcc/c-family/c-opts.c b/gcc/c-family/c-opts.c index 702fe1a8bdf..2de5425e654 100644 --- a/gcc/c-family/c-opts.c +++ b/gcc/c-family/c-opts.c @@ -1198,6 +1198,7 @@ sanitize_cpp_opts (void) cpp_opts->unsigned_char = !flag_signed_char; cpp_opts->stdc_0_in_system_headers = STDC_0_IN_SYSTEM_HEADERS; + cpp_opts->warn_date_time = cpp_warn_date_time; /* Wlong-long is disabled by default. 
It is enabled by: [-Wpedantic | -Wtraditional] -std=[gnu|c]++98 ; or diff --git a/gcc/c-family/c-pragma.c b/gcc/c-family/c-pragma.c index 8915f441d7d..8bda6eddfda 100644 --- a/gcc/c-family/c-pragma.c +++ b/gcc/c-family/c-pragma.c @@ -1172,31 +1172,35 @@ static const struct omp_pragma_def omp_pragmas[] = { { "cancel", PRAGMA_OMP_CANCEL }, { "cancellation", PRAGMA_OMP_CANCELLATION_POINT }, { "critical", PRAGMA_OMP_CRITICAL }, - { "declare", PRAGMA_OMP_DECLARE_REDUCTION }, - { "distribute", PRAGMA_OMP_DISTRIBUTE }, { "end", PRAGMA_OMP_END_DECLARE_TARGET }, { "flush", PRAGMA_OMP_FLUSH }, - { "for", PRAGMA_OMP_FOR }, { "master", PRAGMA_OMP_MASTER }, { "ordered", PRAGMA_OMP_ORDERED }, - { "parallel", PRAGMA_OMP_PARALLEL }, { "section", PRAGMA_OMP_SECTION }, { "sections", PRAGMA_OMP_SECTIONS }, - { "simd", PRAGMA_OMP_SIMD }, { "single", PRAGMA_OMP_SINGLE }, - { "target", PRAGMA_OMP_TARGET }, - { "task", PRAGMA_OMP_TASK }, { "taskgroup", PRAGMA_OMP_TASKGROUP }, { "taskwait", PRAGMA_OMP_TASKWAIT }, { "taskyield", PRAGMA_OMP_TASKYIELD }, - { "teams", PRAGMA_OMP_TEAMS }, { "threadprivate", PRAGMA_OMP_THREADPRIVATE } }; +static const struct omp_pragma_def omp_pragmas_simd[] = { + { "declare", PRAGMA_OMP_DECLARE_REDUCTION }, + { "distribute", PRAGMA_OMP_DISTRIBUTE }, + { "for", PRAGMA_OMP_FOR }, + { "parallel", PRAGMA_OMP_PARALLEL }, + { "simd", PRAGMA_OMP_SIMD }, + { "target", PRAGMA_OMP_TARGET }, + { "task", PRAGMA_OMP_TASK }, + { "teams", PRAGMA_OMP_TEAMS }, +}; void c_pp_lookup_pragma (unsigned int id, const char **space, const char **name) { const int n_omp_pragmas = sizeof (omp_pragmas) / sizeof (*omp_pragmas); + const int n_omp_pragmas_simd = sizeof (omp_pragmas_simd) + / sizeof (*omp_pragmas); int i; for (i = 0; i < n_omp_pragmas; ++i) @@ -1207,6 +1211,14 @@ c_pp_lookup_pragma (unsigned int id, const char **space, const char **name) return; } + for (i = 0; i < n_omp_pragmas_simd; ++i) + if (omp_pragmas_simd[i].id == id) + { + *space = "omp"; + *name = omp_pragmas_simd[i].name; + return; + } + if (id >= PRAGMA_FIRST_EXTERNAL && (id < PRAGMA_FIRST_EXTERNAL + registered_pp_pragmas.length ())) { @@ -1359,6 +1371,16 @@ init_pragma (void) cpp_register_deferred_pragma (parse_in, "omp", omp_pragmas[i].name, omp_pragmas[i].id, true, true); } + if (flag_openmp || flag_openmp_simd) + { + const int n_omp_pragmas_simd = sizeof (omp_pragmas_simd) + / sizeof (*omp_pragmas); + int i; + + for (i = 0; i < n_omp_pragmas_simd; ++i) + cpp_register_deferred_pragma (parse_in, "omp", omp_pragmas_simd[i].name, + omp_pragmas_simd[i].id, true, true); + } if (!flag_preprocess_only) cpp_register_deferred_pragma (parse_in, "GCC", "pch_preprocess", diff --git a/gcc/c-family/c.opt b/gcc/c-family/c.opt index b862eb9e276..46391fa496c 100644 --- a/gcc/c-family/c.opt +++ b/gcc/c-family/c.opt @@ -640,6 +640,10 @@ Wpragmas C ObjC C++ ObjC++ Var(warn_pragmas) Init(1) Warning Warn about misuses of pragmas +Wdate-time +Common Var(cpp_warn_date_time) Warning +Warn about __TIME__, __DATE__ and __TIMESTAMP__ usage + Wproperty-assign-default ObjC ObjC++ Var(warn_property_assign_default) Init(1) Warning Warn if a property for an Objective-C object has no assign semantics specified @@ -1069,6 +1073,10 @@ fopenmp C ObjC C++ ObjC++ Var(flag_openmp) Enable OpenMP (implies -frecursive in Fortran) +fopenmp-simd +C ObjC C++ ObjC++ Var(flag_openmp_simd) +Enable OpenMP's SIMD directives + foperator-names C++ ObjC++ Recognize C++ keywords like \"compl\" and \"xor\" diff --git a/gcc/c/ChangeLog b/gcc/c/ChangeLog index efb4ba88c38..9a9eed39de5 100644 
--- a/gcc/c/ChangeLog +++ b/gcc/c/ChangeLog @@ -1,3 +1,10 @@ +2013-11-05 Tobias Burnus <burnus@net-b.de> + + * c-parser.c (c_parser_omp_for, c_parser_omp_parallel, + c_parser_omp_distribute, c_parser_omp_teams, + c_parser_omp_target, c_parser_omp_declare): Handle + -fopenmp-simd. + 2013-11-03 Marek Polacek <polacek@redhat.com> * c-decl.c (grokdeclarator): Add VLA instrumentation. diff --git a/gcc/c/c-parser.c b/gcc/c/c-parser.c index 6f52ba94e77..c0346ffe9cc 100644 --- a/gcc/c/c-parser.c +++ b/gcc/c/c-parser.c @@ -11616,6 +11616,8 @@ c_parser_omp_for (location_t loc, c_parser *parser, cclauses = cclauses_buf; c_parser_consume_token (parser); + if (!flag_openmp) /* flag_openmp_simd */ + return c_parser_omp_simd (loc, parser, p_name, mask, cclauses); block = c_begin_compound_stmt (true); ret = c_parser_omp_simd (loc, parser, p_name, mask, cclauses); block = c_end_compound_stmt (loc, block, true); @@ -11630,6 +11632,11 @@ c_parser_omp_for (location_t loc, c_parser *parser, return ret; } } + if (!flag_openmp) /* flag_openmp_simd */ + { + c_parser_skip_to_pragma_eol (parser); + return NULL_TREE; + } clauses = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL); if (cclauses) @@ -11825,6 +11832,8 @@ c_parser_omp_parallel (location_t loc, c_parser *parser, cclauses = cclauses_buf; c_parser_consume_token (parser); + if (!flag_openmp) /* flag_openmp_simd */ + return c_parser_omp_for (loc, parser, p_name, mask, cclauses); block = c_begin_omp_parallel (); c_parser_omp_for (loc, parser, p_name, mask, cclauses); stmt @@ -11839,6 +11848,11 @@ c_parser_omp_parallel (location_t loc, c_parser *parser, c_parser_skip_to_pragma_eol (parser); return NULL_TREE; } + else if (!flag_openmp) /* flag_openmp_simd */ + { + c_parser_skip_to_pragma_eol (parser); + return NULL_TREE; + } else if (c_parser_next_token_is (parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); @@ -12069,6 +12083,14 @@ c_parser_omp_distribute (location_t loc, c_parser *parser, if (cclauses == NULL) cclauses = cclauses_buf; c_parser_consume_token (parser); + if (!flag_openmp) /* flag_openmp_simd */ + { + if (simd) + return c_parser_omp_simd (loc, parser, p_name, mask, cclauses); + else + return c_parser_omp_parallel (loc, parser, p_name, mask, + cclauses); + } block = c_begin_compound_stmt (true); if (simd) ret = c_parser_omp_simd (loc, parser, p_name, mask, cclauses); @@ -12086,6 +12108,11 @@ c_parser_omp_distribute (location_t loc, c_parser *parser, return ret; } } + if (!flag_openmp) /* flag_openmp_simd */ + { + c_parser_skip_to_pragma_eol (parser); + return NULL_TREE; + } clauses = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL); if (cclauses) @@ -12134,6 +12161,8 @@ c_parser_omp_teams (location_t loc, c_parser *parser, cclauses = cclauses_buf; c_parser_consume_token (parser); + if (!flag_openmp) /* flag_openmp_simd */ + return c_parser_omp_distribute (loc, parser, p_name, mask, cclauses); block = c_begin_compound_stmt (true); ret = c_parser_omp_distribute (loc, parser, p_name, mask, cclauses); block = c_end_compound_stmt (loc, block, true); @@ -12147,6 +12176,11 @@ c_parser_omp_teams (location_t loc, c_parser *parser, return add_stmt (ret); } } + if (!flag_openmp) /* flag_openmp_simd */ + { + c_parser_skip_to_pragma_eol (parser); + return NULL_TREE; + } clauses = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL); if (cclauses) @@ -12258,24 +12292,16 @@ c_parser_omp_target (c_parser *parser, enum pragma_context context) { const char *p = 
IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); - if (strcmp (p, "data") == 0) - { - c_parser_consume_token (parser); - c_parser_omp_target_data (loc, parser); - return true; - } - else if (strcmp (p, "update") == 0) - { - c_parser_consume_token (parser); - return c_parser_omp_target_update (loc, parser, context); - } - else if (strcmp (p, "teams") == 0) + if (strcmp (p, "teams") == 0) { tree cclauses[C_OMP_CLAUSE_SPLIT_COUNT]; char p_name[sizeof ("#pragma omp target teams distribute " "parallel for simd")]; c_parser_consume_token (parser); + if (!flag_openmp) /* flag_openmp_simd */ + return c_parser_omp_teams (loc, parser, p_name, + OMP_TARGET_CLAUSE_MASK, cclauses); strcpy (p_name, "#pragma omp target"); keep_next_level (); tree block = c_begin_compound_stmt (true); @@ -12291,6 +12317,22 @@ c_parser_omp_target (c_parser *parser, enum pragma_context context) add_stmt (stmt); return true; } + else if (!flag_openmp) /* flag_openmp_simd */ + { + c_parser_skip_to_pragma_eol (parser); + return NULL_TREE; + } + else if (strcmp (p, "data") == 0) + { + c_parser_consume_token (parser); + c_parser_omp_target_data (loc, parser); + return true; + } + else if (strcmp (p, "update") == 0) + { + c_parser_consume_token (parser); + return c_parser_omp_target_update (loc, parser, context); + } } tree stmt = make_node (OMP_TARGET); @@ -12918,6 +12960,11 @@ c_parser_omp_declare (c_parser *parser, enum pragma_context context) c_parser_omp_declare_reduction (parser, context); return; } + if (!flag_openmp) /* flag_openmp_simd */ + { + c_parser_skip_to_pragma_eol (parser); + return; + } if (strcmp (p, "target") == 0) { c_parser_consume_token (parser); diff --git a/gcc/common.opt b/gcc/common.opt index 3a40db203fa..bda479071c4 100644 --- a/gcc/common.opt +++ b/gcc/common.opt @@ -2109,6 +2109,12 @@ foptimize-strlen Common Report Var(flag_optimize_strlen) Optimization Enable string length optimizations on trees +fisolate-erroneous-paths +Common Report Var(flag_isolate_erroneous_paths) Optimization +Detect paths which trigger erroneous or undefined behaviour. Isolate those +paths from the main control flow and turn the statement with erroneous or +undefined behaviour into a trap. 
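A hypothetical example of the kind of path the new help text above describes -- illustrative C, not part of this patch; whether a given case is transformed depends on what the new pass can prove:

/* With -fisolate-erroneous-paths, the path on which P is known to be
   null can be split off and the dereference on that path replaced by
   a trap, since executing it is undefined behaviour.  */
int
f (int *p, int flag)
{
  if (flag)
    p = 0;      /* P is provably null on this path...  */
  return *p;    /* ...so the isolated copy of this load may become a trap.  */
}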
+ ftree-loop-distribution Common Report Var(flag_tree_loop_distribution) Optimization Enable loop distribution on trees diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c index 704e872fc61..a63798b48d0 100644 --- a/gcc/config/aarch64/aarch64.c +++ b/gcc/config/aarch64/aarch64.c @@ -4329,7 +4329,7 @@ aarch64_output_casesi (rtx *operands) { char buf[100]; char label[100]; - rtx diff_vec = PATTERN (next_active_insn (operands[2])); + rtx diff_vec = PATTERN (NEXT_INSN (operands[2])); int index; static const char *const patterns[4][2] = { diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c index 654dcef4ab7..624cab9c1ac 100644 --- a/gcc/config/arm/arm.c +++ b/gcc/config/arm/arm.c @@ -95,13 +95,11 @@ static bool arm_print_operand_punct_valid_p (unsigned char code); static const char *fp_const_from_val (REAL_VALUE_TYPE *); static arm_cc get_arm_condition_code (rtx); static HOST_WIDE_INT int_log2 (HOST_WIDE_INT); -static rtx is_jump_table (rtx); static const char *output_multi_immediate (rtx *, const char *, const char *, int, HOST_WIDE_INT); static const char *shift_op (rtx, HOST_WIDE_INT *); static struct machine_function *arm_init_machine_status (void); static void thumb_exit (FILE *, int); -static rtx is_jump_table (rtx); static HOST_WIDE_INT get_jump_table_size (rtx); static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT); static Mnode *add_minipool_forward_ref (Mfix *); @@ -15464,23 +15462,6 @@ Mfix * minipool_fix_tail; /* The fix entry for the current minipool, once it has been placed. */ Mfix * minipool_barrier; -/* Determines if INSN is the start of a jump table. Returns the end - of the TABLE or NULL_RTX. */ -static rtx -is_jump_table (rtx insn) -{ - rtx table; - - if (jump_to_label_p (insn) - && ((table = next_active_insn (JUMP_LABEL (insn))) - == next_active_insn (insn)) - && table != NULL - && JUMP_TABLE_DATA_P (table)) - return table; - - return NULL_RTX; -} - #ifndef JUMP_TABLES_IN_TEXT_SECTION #define JUMP_TABLES_IN_TEXT_SECTION 0 #endif @@ -16089,8 +16070,7 @@ create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address) count += get_attr_length (from); /* If there is a jump table, add its length. */ - tmp = is_jump_table (from); - if (tmp != NULL) + if (tablejump_p (from, NULL, &tmp)) { count += get_jump_table_size (tmp); @@ -16696,7 +16676,7 @@ arm_reorg (void) /* If the insn is a vector jump, add the size of the table and skip the table. 
*/ - if ((table = is_jump_table (insn)) != NULL) + if (tablejump_p (insn, NULL, &table)) { address += get_jump_table_size (table); insn = table; @@ -28606,7 +28586,7 @@ arm_output_iwmmxt_tinsr (rtx *operands) const char * thumb1_output_casesi (rtx *operands) { - rtx diff_vec = PATTERN (next_active_insn (operands[0])); + rtx diff_vec = PATTERN (NEXT_INSN (operands[0])); gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC); @@ -28629,7 +28609,7 @@ thumb1_output_casesi (rtx *operands) const char * thumb2_output_casesi (rtx *operands) { - rtx diff_vec = PATTERN (next_active_insn (operands[2])); + rtx diff_vec = PATTERN (NEXT_INSN (operands[2])); gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC); diff --git a/gcc/config/i386/i386-c.c b/gcc/config/i386/i386-c.c index a6eaf8ad34a..1c053b1e51e 100644 --- a/gcc/config/i386/i386-c.c +++ b/gcc/config/i386/i386-c.c @@ -141,6 +141,10 @@ ix86_target_macros_internal (HOST_WIDE_INT isa_flag, def_or_undef (parse_in, "__corei7"); def_or_undef (parse_in, "__corei7__"); break; + case PROCESSOR_COREI7_AVX: + def_or_undef (parse_in, "__corei7_avx"); + def_or_undef (parse_in, "__corei7_avx__"); + break; case PROCESSOR_HASWELL: def_or_undef (parse_in, "__core_avx2"); def_or_undef (parse_in, "__core_avx2__"); @@ -238,6 +242,9 @@ ix86_target_macros_internal (HOST_WIDE_INT isa_flag, case PROCESSOR_COREI7: def_or_undef (parse_in, "__tune_corei7__"); break; + case PROCESSOR_COREI7_AVX: + def_or_undef (parse_in, "__tune_corei7_avx__"); + break; case PROCESSOR_HASWELL: def_or_undef (parse_in, "__tune_core_avx2__"); break; @@ -467,6 +474,9 @@ ix86_target_macros (void) builtin_define_std ("i386"); } + if (!TARGET_80387) + cpp_define (parse_in, "_SOFT_FLOAT"); + if (TARGET_LONG_DOUBLE_64) cpp_define (parse_in, "__LONG_DOUBLE_64__"); diff --git a/gcc/config/i386/i386-protos.h b/gcc/config/i386/i386-protos.h index 5799251404b..fdf9d5804df 100644 --- a/gcc/config/i386/i386-protos.h +++ b/gcc/config/i386/i386-protos.h @@ -143,6 +143,7 @@ extern void ix86_split_lshr (rtx *, rtx, enum machine_mode); extern rtx ix86_find_base_term (rtx); extern bool ix86_check_movabs (rtx, int); extern void ix86_split_idivmod (enum machine_mode, rtx[], bool); +extern bool ix86_emit_cfi (); extern rtx assign_386_stack_local (enum machine_mode, enum ix86_stack_slot); extern int ix86_attr_length_immediate_default (rtx, bool); diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c index bc93d7f849a..79db0df61c6 100644 --- a/gcc/config/i386/i386.c +++ b/gcc/config/i386/i386.c @@ -1834,8 +1834,9 @@ const struct processor_costs *ix86_cost = &pentium_cost; #define m_P4_NOCONA (m_PENT4 | m_NOCONA) #define m_CORE2 (1<<PROCESSOR_CORE2) #define m_COREI7 (1<<PROCESSOR_COREI7) +#define m_COREI7_AVX (1<<PROCESSOR_COREI7_AVX) #define m_HASWELL (1<<PROCESSOR_HASWELL) -#define m_CORE_ALL (m_CORE2 | m_COREI7 | m_HASWELL) +#define m_CORE_ALL (m_CORE2 | m_COREI7 | m_COREI7_AVX | m_HASWELL) #define m_ATOM (1<<PROCESSOR_ATOM) #define m_SLM (1<<PROCESSOR_SLM) @@ -2300,6 +2301,8 @@ static const struct ptt processor_target_table[PROCESSOR_max] = {&core_cost, 16, 10, 16, 10, 16}, /* Core i7 */ {&core_cost, 16, 10, 16, 10, 16}, + /* Core i7 avx */ + {&core_cost, 16, 10, 16, 10, 16}, /* Core avx2 */ {&core_cost, 16, 10, 16, 10, 16}, {&generic_cost, 16, 10, 16, 10, 16}, @@ -2329,6 +2332,7 @@ static const char *const cpu_names[TARGET_CPU_DEFAULT_max] = "nocona", "core2", "corei7", + "corei7-avx", "core-avx2", "atom", "slm", @@ -3017,12 +3021,12 @@ ix86_option_override_internal (bool main_args_p, {"corei7", PROCESSOR_COREI7, 
CPU_COREI7, PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_SSSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_CX16 | PTA_POPCNT | PTA_FXSR}, - {"corei7-avx", PROCESSOR_COREI7, CPU_COREI7, + {"corei7-avx", PROCESSOR_COREI7_AVX, CPU_COREI7, PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_SSSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_AVX | PTA_CX16 | PTA_POPCNT | PTA_AES | PTA_PCLMUL | PTA_FXSR | PTA_XSAVE | PTA_XSAVEOPT}, - {"core-avx-i", PROCESSOR_COREI7, CPU_COREI7, + {"core-avx-i", PROCESSOR_COREI7_AVX, CPU_COREI7, PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_SSSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_AVX | PTA_CX16 | PTA_POPCNT | PTA_AES | PTA_PCLMUL | PTA_FSGSBASE @@ -17341,6 +17345,14 @@ ix86_split_idivmod (enum machine_mode mode, rtx operands[], emit_label (end_label); } +/* Whether it is OK to emit CFI directives when emitting asm code. */ + +bool +ix86_emit_cfi () +{ + return dwarf2out_do_cfi_asm (); +} + #define LEA_MAX_STALL (3) #define LEA_SEARCH_THRESHOLD (LEA_MAX_STALL << 1) @@ -24520,6 +24532,42 @@ ix86_instantiate_decls (void) instantiate_decl_rtl (s->rtl); } +/* Check whether x86 address PARTS is a pc-relative address. */ + +static bool +rip_relative_addr_p (struct ix86_address *parts) +{ + rtx base, index, disp; + + base = parts->base; + index = parts->index; + disp = parts->disp; + + if (disp && !base && !index) + { + if (TARGET_64BIT) + { + rtx symbol = disp; + + if (GET_CODE (disp) == CONST) + symbol = XEXP (disp, 0); + if (GET_CODE (symbol) == PLUS + && CONST_INT_P (XEXP (symbol, 1))) + symbol = XEXP (symbol, 0); + + if (GET_CODE (symbol) == LABEL_REF + || (GET_CODE (symbol) == SYMBOL_REF + && SYMBOL_REF_TLS_MODEL (symbol) == 0) + || (GET_CODE (symbol) == UNSPEC + && (XINT (symbol, 1) == UNSPEC_GOTPCREL + || XINT (symbol, 1) == UNSPEC_PCREL + || XINT (symbol, 1) == UNSPEC_GOTNTPOFF))) + return true; + } + } + return false; +} + /* Calculate the length of the memory address in the instruction encoding. Includes addr32 prefix, does not include the one-byte modrm, opcode, or other prefixes. We never generate addr32 prefix for LEA insn. */ @@ -24591,25 +24639,8 @@ memory_address_length (rtx addr, bool lea) else if (disp && !base && !index) { len += 4; - if (TARGET_64BIT) - { - rtx symbol = disp; - - if (GET_CODE (disp) == CONST) - symbol = XEXP (disp, 0); - if (GET_CODE (symbol) == PLUS - && CONST_INT_P (XEXP (symbol, 1))) - symbol = XEXP (symbol, 0); - - if (GET_CODE (symbol) != LABEL_REF - && (GET_CODE (symbol) != SYMBOL_REF - || SYMBOL_REF_TLS_MODEL (symbol) != 0) - && (GET_CODE (symbol) != UNSPEC - || (XINT (symbol, 1) != UNSPEC_GOTPCREL - && XINT (symbol, 1) != UNSPEC_PCREL - && XINT (symbol, 1) != UNSPEC_GOTNTPOFF))) - len++; - } + if (rip_relative_addr_p (&parts)) + len++; } else { @@ -24808,6 +24839,7 @@ ix86_issue_rate (void) case PROCESSOR_CORE2: case PROCESSOR_COREI7: + case PROCESSOR_COREI7_AVX: case PROCESSOR_HASWELL: return 4; @@ -25104,6 +25136,7 @@ ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost) case PROCESSOR_CORE2: case PROCESSOR_COREI7: + case PROCESSOR_COREI7_AVX: case PROCESSOR_HASWELL: memory = get_attr_memory (insn); @@ -25182,6 +25215,7 @@ ia32_multipass_dfa_lookahead (void) case PROCESSOR_CORE2: case PROCESSOR_COREI7: + case PROCESSOR_COREI7_AVX: case PROCESSOR_HASWELL: case PROCESSOR_ATOM: case PROCESSOR_SLM: @@ -25198,6 +25232,119 @@ ia32_multipass_dfa_lookahead (void) } } +/* Return true if target platform supports macro-fusion. 
*/ + +static bool +ix86_macro_fusion_p () +{ + return TARGET_FUSE_CMP_AND_BRANCH; +} + +/* Check whether current microarchitecture support macro fusion + for insn pair "CONDGEN + CONDJMP". Refer to + "Intel Architectures Optimization Reference Manual". */ + +static bool +ix86_macro_fusion_pair_p (rtx condgen, rtx condjmp) +{ + rtx src, dest; + rtx single_set = single_set (condgen); + enum rtx_code ccode; + rtx compare_set = NULL_RTX, test_if, cond; + rtx alu_set = NULL_RTX, addr = NULL_RTX; + + if (get_attr_type (condgen) != TYPE_TEST + && get_attr_type (condgen) != TYPE_ICMP + && get_attr_type (condgen) != TYPE_INCDEC + && get_attr_type (condgen) != TYPE_ALU) + return false; + + if (single_set == NULL_RTX + && !TARGET_FUSE_ALU_AND_BRANCH) + return false; + + if (single_set != NULL_RTX) + compare_set = single_set; + else + { + int i; + rtx pat = PATTERN (condgen); + for (i = 0; i < XVECLEN (pat, 0); i++) + if (GET_CODE (XVECEXP (pat, 0, i)) == SET) + { + rtx set_src = SET_SRC (XVECEXP (pat, 0, i)); + if (GET_CODE (set_src) == COMPARE) + compare_set = XVECEXP (pat, 0, i); + else + alu_set = XVECEXP (pat, 0, i); + } + } + if (compare_set == NULL_RTX) + return false; + src = SET_SRC (compare_set); + if (GET_CODE (src) != COMPARE) + return false; + + /* Macro-fusion for cmp/test MEM-IMM + conditional jmp is not + supported. */ + if ((MEM_P (XEXP (src, 0)) + && CONST_INT_P (XEXP (src, 1))) + || (MEM_P (XEXP (src, 1)) + && CONST_INT_P (XEXP (src, 0)))) + return false; + + /* No fusion for RIP-relative address. */ + if (MEM_P (XEXP (src, 0))) + addr = XEXP (XEXP (src, 0), 0); + else if (MEM_P (XEXP (src, 1))) + addr = XEXP (XEXP (src, 1), 0); + + if (addr) { + ix86_address parts; + int ok = ix86_decompose_address (addr, &parts); + gcc_assert (ok); + + if (rip_relative_addr_p (&parts)) + return false; + } + + test_if = SET_SRC (pc_set (condjmp)); + cond = XEXP (test_if, 0); + ccode = GET_CODE (cond); + /* Check whether conditional jump use Sign or Overflow Flags. */ + if (!TARGET_FUSE_CMP_AND_BRANCH_SOFLAGS + && (ccode == GE + || ccode == GT + || ccode == LE + || ccode == LT)) + return false; + + /* Return true for TYPE_TEST and TYPE_ICMP. */ + if (get_attr_type (condgen) == TYPE_TEST + || get_attr_type (condgen) == TYPE_ICMP) + return true; + + /* The following is the case that macro-fusion for alu + jmp. */ + if (!TARGET_FUSE_ALU_AND_BRANCH || !alu_set) + return false; + + /* No fusion for alu op with memory destination operand. */ + dest = SET_DEST (alu_set); + if (MEM_P (dest)) + return false; + + /* Macro-fusion for inc/dec + unsigned conditional jump is not + supported. */ + if (get_attr_type (condgen) == TYPE_INCDEC + && (ccode == GEU + || ccode == GTU + || ccode == LEU + || ccode == LTU)) + return false; + + return true; +} + /* Try to reorder ready list to take advantage of Atom pipelined IMUL execution. It is applied if (1) IMUL instruction is on the top of list; @@ -25822,6 +25969,7 @@ ix86_sched_init_global (FILE *dump ATTRIBUTE_UNUSED, { case PROCESSOR_CORE2: case PROCESSOR_COREI7: + case PROCESSOR_COREI7_AVX: case PROCESSOR_HASWELL: /* Do not perform multipass scheduling for pre-reload schedule to save compile time. 
*/ @@ -29672,6 +29820,10 @@ get_builtin_code_for_version (tree decl, tree *predicate_list) arg_str = "corei7"; priority = P_PROC_SSE4_2; break; + case PROCESSOR_COREI7_AVX: + arg_str = "corei7-avx"; + priority = P_PROC_SSE4_2; + break; case PROCESSOR_ATOM: arg_str = "atom"; priority = P_PROC_SSSE3; @@ -43375,6 +43527,18 @@ ix86_memmodel_check (unsigned HOST_WIDE_INT val) return val; } +/* Implement TARGET_FLOAT_EXCEPTIONS_ROUNDING_SUPPORTED_P. */ + +static bool +ix86_float_exceptions_rounding_supported_p (void) +{ + /* For x87 floating point with standard excess precision handling, + there is no adddf3 pattern (since x87 floating point only has + XFmode operations) so the default hook implementation gets this + wrong. */ + return TARGET_80387 || TARGET_SSE_MATH; +} + /* Initialize the GCC target structure. */ #undef TARGET_RETURN_IN_MEMORY #define TARGET_RETURN_IN_MEMORY ix86_return_in_memory @@ -43475,6 +43639,10 @@ ix86_memmodel_check (unsigned HOST_WIDE_INT val) #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \ ia32_multipass_dfa_lookahead +#undef TARGET_SCHED_MACRO_FUSION_P +#define TARGET_SCHED_MACRO_FUSION_P ix86_macro_fusion_p +#undef TARGET_SCHED_MACRO_FUSION_PAIR_P +#define TARGET_SCHED_MACRO_FUSION_PAIR_P ix86_macro_fusion_pair_p #undef TARGET_FUNCTION_OK_FOR_SIBCALL #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall @@ -43747,6 +43915,10 @@ ix86_memmodel_check (unsigned HOST_WIDE_INT val) #undef TARGET_SPILL_CLASS #define TARGET_SPILL_CLASS ix86_spill_class +#undef TARGET_FLOAT_EXCEPTIONS_ROUNDING_SUPPORTED_P +#define TARGET_FLOAT_EXCEPTIONS_ROUNDING_SUPPORTED_P \ + ix86_float_exceptions_rounding_supported_p + struct gcc_target targetm = TARGET_INITIALIZER; #include "gt-i386.h" diff --git a/gcc/config/i386/i386.h b/gcc/config/i386/i386.h index c979ee534a8..9836a40b46c 100644 --- a/gcc/config/i386/i386.h +++ b/gcc/config/i386/i386.h @@ -304,6 +304,7 @@ extern const struct processor_costs ix86_size_cost; #define TARGET_NOCONA (ix86_tune == PROCESSOR_NOCONA) #define TARGET_CORE2 (ix86_tune == PROCESSOR_CORE2) #define TARGET_COREI7 (ix86_tune == PROCESSOR_COREI7) +#define TARGET_COREI7_AVX (ix86_tune == PROCESSOR_COREI7_AVX) #define TARGET_HASWELL (ix86_tune == PROCESSOR_HASWELL) #define TARGET_GENERIC (ix86_tune == PROCESSOR_GENERIC) #define TARGET_AMDFAM10 (ix86_tune == PROCESSOR_AMDFAM10) @@ -415,8 +416,17 @@ extern unsigned char ix86_tune_features[X86_TUNE_LAST]; ix86_tune_features[X86_TUNE_USE_VECTOR_FP_CONVERTS] #define TARGET_USE_VECTOR_CONVERTS \ ix86_tune_features[X86_TUNE_USE_VECTOR_CONVERTS] +#define TARGET_FUSE_CMP_AND_BRANCH_32 \ + ix86_tune_features[X86_TUNE_FUSE_CMP_AND_BRANCH_32] +#define TARGET_FUSE_CMP_AND_BRANCH_64 \ + ix86_tune_features[X86_TUNE_FUSE_CMP_AND_BRANCH_64] #define TARGET_FUSE_CMP_AND_BRANCH \ - ix86_tune_features[X86_TUNE_FUSE_CMP_AND_BRANCH] + (TARGET_64BIT ? 
TARGET_FUSE_CMP_AND_BRANCH_64 \ + : TARGET_FUSE_CMP_AND_BRANCH_32) +#define TARGET_FUSE_CMP_AND_BRANCH_SOFLAGS \ + ix86_tune_features[X86_TUNE_FUSE_CMP_AND_BRANCH_SOFLAGS] +#define TARGET_FUSE_ALU_AND_BRANCH \ + ix86_tune_features[X86_TUNE_FUSE_ALU_AND_BRANCH] #define TARGET_OPT_AGU ix86_tune_features[X86_TUNE_OPT_AGU] #define TARGET_VECTORIZE_DOUBLE \ ix86_tune_features[X86_TUNE_VECTORIZE_DOUBLE] @@ -613,6 +623,7 @@ enum target_cpu_default TARGET_CPU_DEFAULT_nocona, TARGET_CPU_DEFAULT_core2, TARGET_CPU_DEFAULT_corei7, + TARGET_CPU_DEFAULT_corei7_avx, TARGET_CPU_DEFAULT_haswell, TARGET_CPU_DEFAULT_atom, TARGET_CPU_DEFAULT_slm, @@ -2229,6 +2240,7 @@ enum processor_type PROCESSOR_NOCONA, PROCESSOR_CORE2, PROCESSOR_COREI7, + PROCESSOR_COREI7_AVX, PROCESSOR_HASWELL, PROCESSOR_GENERIC, PROCESSOR_AMDFAM10, diff --git a/gcc/config/i386/rtemself.h b/gcc/config/i386/rtemself.h index 08ef18ccec5..087179191cb 100644 --- a/gcc/config/i386/rtemself.h +++ b/gcc/config/i386/rtemself.h @@ -26,7 +26,15 @@ along with GCC; see the file COPYING3. If not see builtin_define ("__rtems__"); \ builtin_define ("__USE_INIT_FINI__"); \ builtin_assert ("system=rtems"); \ - if (!TARGET_80387) \ - builtin_define ("_SOFT_FLOAT"); \ } \ while (0) + +#undef LONG_DOUBLE_TYPE_SIZE +#define LONG_DOUBLE_TYPE_SIZE (TARGET_80387 ? 80 : 64) + +#undef LIBGCC2_LONG_DOUBLE_TYPE_SIZE +#ifdef _SOFT_FLOAT +#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 64 +#else +#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 80 +#endif diff --git a/gcc/config/i386/sync.md b/gcc/config/i386/sync.md index 9e5835662e1..8408a2bfe43 100644 --- a/gcc/config/i386/sync.md +++ b/gcc/config/i386/sync.md @@ -430,10 +430,21 @@ const char *xchg = "xchg{<imodesuffix>}\t%%<regprefix>bx, %5"; if (swap) - output_asm_insn (xchg, operands); + { + output_asm_insn (xchg, operands); + if (ix86_emit_cfi ()) + { + output_asm_insn (".cfi_remember_state", operands); + output_asm_insn (".cfi_register\t%%<regprefix>bx, %5", operands); + } + } output_asm_insn ("lock{%;} %K7cmpxchg<doublemodesuffix>b\t%2", operands); if (swap) - output_asm_insn (xchg, operands); + { + output_asm_insn (xchg, operands); + if (ix86_emit_cfi ()) + output_asm_insn (".cfi_restore_state", operands); + } return ""; }) diff --git a/gcc/config/i386/t-rtems b/gcc/config/i386/t-rtems index 6161ec10090..fef4c22e9c1 100644 --- a/gcc/config/i386/t-rtems +++ b/gcc/config/i386/t-rtems @@ -17,11 +17,10 @@ # <http://www.gnu.org/licenses/>. # -MULTILIB_OPTIONS = mtune=i486/mtune=pentium/mtune=pentiumpro \ -msoft-float +MULTILIB_OPTIONS = mtune=i486/mtune=pentium/mtune=pentiumpro msoft-float MULTILIB_DIRNAMES= m486 mpentium mpentiumpro soft-float -MULTILIB_MATCHES = msoft-float=mno-m80387 -MULTILIB_MATCHES += mtune?pentium=mtune?k6 mtune?pentiumpro=mtune?mathlon +MULTILIB_MATCHES = msoft-float=mno-80387 +MULTILIB_MATCHES += mtune?pentium=mtune?k6 mtune?pentiumpro=mtune?athlon MULTILIB_EXCEPTIONS = \ mtune=pentium/*msoft-float* \ mtune=pentiumpro/*msoft-float* diff --git a/gcc/config/i386/x86-tune.def b/gcc/config/i386/x86-tune.def index 6e5d2fe0aff..1a85ce266df 100644 --- a/gcc/config/i386/x86-tune.def +++ b/gcc/config/i386/x86-tune.def @@ -92,11 +92,29 @@ DEF_TUNE (X86_TUNE_MOVX, "movx", DEF_TUNE (X86_TUNE_MEMORY_MISMATCH_STALL, "memory_mismatch_stall", m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_SLM | m_AMD_MULTIPLE | m_GENERIC) -/* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction - with a subsequent conditional jump instruction into a single - compare-and-branch uop. 
+/* X86_TUNE_FUSE_CMP_AND_BRANCH_32: Fuse compare with a subsequent + conditional jump instruction for 32 bit TARGET. FIXME: revisit for generic. */ -DEF_TUNE (X86_TUNE_FUSE_CMP_AND_BRANCH, "fuse_cmp_and_branch", m_BDVER | m_CORE_ALL) +DEF_TUNE (X86_TUNE_FUSE_CMP_AND_BRANCH_32, "fuse_cmp_and_branch_32", + m_CORE_ALL | m_BDVER) + +/* X86_TUNE_FUSE_CMP_AND_BRANCH_64: Fuse compare with a subsequent + conditional jump instruction for TARGET_64BIT. + FIXME: revisit for generic. */ +DEF_TUNE (X86_TUNE_FUSE_CMP_AND_BRANCH_64, "fuse_cmp_and_branch_64", + m_COREI7 | m_COREI7_AVX | m_HASWELL | m_BDVER) + +/* X86_TUNE_FUSE_CMP_AND_BRANCH_SOFLAGS: Fuse compare with a + subsequent conditional jump instruction when the condition jump + check sign flag (SF) or overflow flag (OF). */ +DEF_TUNE (X86_TUNE_FUSE_CMP_AND_BRANCH_SOFLAGS, "fuse_cmp_and_branch_soflags", + m_COREI7 | m_COREI7_AVX | m_HASWELL | m_BDVER) + +/* X86_TUNE_FUSE_ALU_AND_BRANCH: Fuse alu with a subsequent conditional + jump instruction when the alu instruction produces the CCFLAG consumed by + the conditional jump instruction. */ +DEF_TUNE (X86_TUNE_FUSE_ALU_AND_BRANCH, "fuse_alu_and_branch", + m_COREI7_AVX | m_HASWELL) /* X86_TUNE_REASSOC_INT_TO_PARALLEL: Try to produce parallel computations during reassociation of integer computation. */ @@ -300,12 +318,12 @@ DEF_TUNE (X86_TUNE_GENERAL_REGS_SSE_SPILL, "general_regs_sse_spill", /* X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL: Use movups for misaligned loads instead of a sequence loading registers by parts. */ DEF_TUNE (X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL, "sse_unaligned_load_optimal", - m_COREI7 | m_AMDFAM10 | m_BDVER | m_BTVER | m_SLM | m_GENERIC) + m_COREI7 | m_COREI7_AVX | m_AMDFAM10 | m_BDVER | m_BTVER | m_SLM | m_GENERIC) /* X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL: Use movups for misaligned stores instead of a sequence loading registers by parts. */ DEF_TUNE (X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL, "sse_unaligned_store_optimal", - m_COREI7 | m_BDVER | m_SLM | m_GENERIC) + m_COREI7 | m_COREI7_AVX | m_BDVER | m_SLM | m_GENERIC) /* Use packed single precision instructions where posisble. I.e. movups instead of movupd. 
*/ diff --git a/gcc/config/iq2000/iq2000.md b/gcc/config/iq2000/iq2000.md index 9e61cdd5bfc..7a516c1b508 100644 --- a/gcc/config/iq2000/iq2000.md +++ b/gcc/config/iq2000/iq2000.md @@ -1398,9 +1398,8 @@ (plus:SI (match_operand:SI 0 "register_operand" "d") (label_ref:SI (match_operand 1 "" "")))) (use (label_ref:SI (match_dup 1)))] - "!(Pmode == DImode) && next_active_insn (insn) != 0 - && GET_CODE (PATTERN (next_active_insn (insn))) == ADDR_DIFF_VEC - && PREV_INSN (next_active_insn (insn)) == operands[1]" + "!(Pmode == DImode) && NEXT_INSN (operands[1]) != 0 + && GET_CODE (PATTERN (NEXT_INSN (operands[1]))) == ADDR_DIFF_VEC" "* { return \"j\\t%0\"; diff --git a/gcc/config/microblaze/microblaze.md b/gcc/config/microblaze/microblaze.md index 40d27393619..8a526f1d2f8 100644 --- a/gcc/config/microblaze/microblaze.md +++ b/gcc/config/microblaze/microblaze.md @@ -1798,9 +1798,8 @@ (plus:SI (match_operand:SI 0 "register_operand" "d") (label_ref:SI (match_operand 1 "" "")))) (use (label_ref:SI (match_dup 1)))] - "next_active_insn (insn) != 0 - && GET_CODE (PATTERN (next_active_insn (insn))) == ADDR_DIFF_VEC - && PREV_INSN (next_active_insn (insn)) == operands[1] + "NEXT_INSN (operands[1]) != 0 + && GET_CODE (PATTERN (NEXT_INSN (operands[1]))) == ADDR_DIFF_VEC && flag_pic" { output_asm_insn ("addk\t%0,%0,r20",operands); diff --git a/gcc/config/rs6000/altivec.md b/gcc/config/rs6000/altivec.md index 37c59980ecf..697a2ad9ac1 100644 --- a/gcc/config/rs6000/altivec.md +++ b/gcc/config/rs6000/altivec.md @@ -2185,7 +2185,10 @@ emit_insn (gen_vec_widen_umult_even_v16qi (ve, operands[1], operands[2])); emit_insn (gen_vec_widen_umult_odd_v16qi (vo, operands[1], operands[2])); - emit_insn (gen_altivec_vmrghh (operands[0], ve, vo)); + if (BYTES_BIG_ENDIAN) + emit_insn (gen_altivec_vmrghh (operands[0], ve, vo)); + else + emit_insn (gen_altivec_vmrghh (operands[0], vo, ve)); DONE; }") @@ -2202,7 +2205,10 @@ emit_insn (gen_vec_widen_umult_even_v16qi (ve, operands[1], operands[2])); emit_insn (gen_vec_widen_umult_odd_v16qi (vo, operands[1], operands[2])); - emit_insn (gen_altivec_vmrglh (operands[0], ve, vo)); + if (BYTES_BIG_ENDIAN) + emit_insn (gen_altivec_vmrglh (operands[0], ve, vo)); + else + emit_insn (gen_altivec_vmrglh (operands[0], vo, ve)); DONE; }") @@ -2219,7 +2225,10 @@ emit_insn (gen_vec_widen_smult_even_v16qi (ve, operands[1], operands[2])); emit_insn (gen_vec_widen_smult_odd_v16qi (vo, operands[1], operands[2])); - emit_insn (gen_altivec_vmrghh (operands[0], ve, vo)); + if (BYTES_BIG_ENDIAN) + emit_insn (gen_altivec_vmrghh (operands[0], ve, vo)); + else + emit_insn (gen_altivec_vmrghh (operands[0], vo, ve)); DONE; }") @@ -2236,7 +2245,10 @@ emit_insn (gen_vec_widen_smult_even_v16qi (ve, operands[1], operands[2])); emit_insn (gen_vec_widen_smult_odd_v16qi (vo, operands[1], operands[2])); - emit_insn (gen_altivec_vmrglh (operands[0], ve, vo)); + if (BYTES_BIG_ENDIAN) + emit_insn (gen_altivec_vmrglh (operands[0], ve, vo)); + else + emit_insn (gen_altivec_vmrglh (operands[0], vo, ve)); DONE; }") @@ -2253,7 +2265,10 @@ emit_insn (gen_vec_widen_umult_even_v8hi (ve, operands[1], operands[2])); emit_insn (gen_vec_widen_umult_odd_v8hi (vo, operands[1], operands[2])); - emit_insn (gen_altivec_vmrghw (operands[0], ve, vo)); + if (BYTES_BIG_ENDIAN) + emit_insn (gen_altivec_vmrghw (operands[0], ve, vo)); + else + emit_insn (gen_altivec_vmrghw (operands[0], vo, ve)); DONE; }") @@ -2270,7 +2285,10 @@ emit_insn (gen_vec_widen_umult_even_v8hi (ve, operands[1], operands[2])); emit_insn (gen_vec_widen_umult_odd_v8hi 
(vo, operands[1], operands[2])); - emit_insn (gen_altivec_vmrglw (operands[0], ve, vo)); + if (BYTES_BIG_ENDIAN) + emit_insn (gen_altivec_vmrglw (operands[0], ve, vo)); + else + emit_insn (gen_altivec_vmrglw (operands[0], vo, ve)); DONE; }") @@ -2287,7 +2305,10 @@ emit_insn (gen_vec_widen_smult_even_v8hi (ve, operands[1], operands[2])); emit_insn (gen_vec_widen_smult_odd_v8hi (vo, operands[1], operands[2])); - emit_insn (gen_altivec_vmrghw (operands[0], ve, vo)); + if (BYTES_BIG_ENDIAN) + emit_insn (gen_altivec_vmrghw (operands[0], ve, vo)); + else + emit_insn (gen_altivec_vmrghw (operands[0], vo, ve)); DONE; }") @@ -2304,7 +2325,10 @@ emit_insn (gen_vec_widen_smult_even_v8hi (ve, operands[1], operands[2])); emit_insn (gen_vec_widen_smult_odd_v8hi (vo, operands[1], operands[2])); - emit_insn (gen_altivec_vmrglw (operands[0], ve, vo)); + if (BYTES_BIG_ENDIAN) + emit_insn (gen_altivec_vmrglw (operands[0], ve, vo)); + else + emit_insn (gen_altivec_vmrglw (operands[0], vo, ve)); DONE; }") diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c index 37de331c8b6..8ca79fd79f7 100644 --- a/gcc/config/rs6000/rs6000.c +++ b/gcc/config/rs6000/rs6000.c @@ -6413,7 +6413,7 @@ legitimate_lo_sum_address_p (enum machine_mode mode, rtx x, int strict) return false; if (GET_MODE_NUNITS (mode) != 1) return false; - if (! lra_in_progress && GET_MODE_SIZE (mode) > UNITS_PER_WORD + if (GET_MODE_SIZE (mode) > UNITS_PER_WORD && !(/* ??? Assume floating point reg based on mode? */ TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT && (mode == DFmode || mode == DDmode))) diff --git a/gcc/config/rs6000/vector.md b/gcc/config/rs6000/vector.md index 0a1130f7bff..68214d91316 100644 --- a/gcc/config/rs6000/vector.md +++ b/gcc/config/rs6000/vector.md @@ -850,7 +850,12 @@ emit_insn (gen_vsx_xvcvdpsxws (r1, operands[1])); emit_insn (gen_vsx_xvcvdpsxws (r2, operands[2])); - rs6000_expand_extract_even (operands[0], r1, r2); + + if (BYTES_BIG_ENDIAN) + rs6000_expand_extract_even (operands[0], r1, r2); + else + rs6000_expand_extract_even (operands[0], r2, r1); + DONE; }) @@ -865,7 +870,12 @@ emit_insn (gen_vsx_xvcvdpuxws (r1, operands[1])); emit_insn (gen_vsx_xvcvdpuxws (r2, operands[2])); - rs6000_expand_extract_even (operands[0], r1, r2); + + if (BYTES_BIG_ENDIAN) + rs6000_expand_extract_even (operands[0], r1, r2); + else + rs6000_expand_extract_even (operands[0], r2, r1); + DONE; }) diff --git a/gcc/config/sh/sh.md b/gcc/config/sh/sh.md index 38397f376ba..9bc3a915784 100644 --- a/gcc/config/sh/sh.md +++ b/gcc/config/sh/sh.md @@ -10823,7 +10823,7 @@ label: (clobber (match_scratch:SI 3 "=X,1"))] "TARGET_SH1" { - rtx diff_vec = PATTERN (next_active_insn (operands[2])); + rtx diff_vec = PATTERN (NEXT_INSN (operands[2])); gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC); @@ -10857,7 +10857,7 @@ label: (clobber (match_operand:SI 4 "" "=X,1"))] "TARGET_SH2 && reload_completed && flag_pic" { - rtx diff_vec = PATTERN (next_active_insn (operands[2])); + rtx diff_vec = PATTERN (NEXT_INSN (operands[2])); gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC); switch (GET_MODE (diff_vec)) @@ -10895,7 +10895,7 @@ label: UNSPEC_CASESI)))] "TARGET_SHMEDIA" { - rtx diff_vec = PATTERN (next_active_insn (operands[2])); + rtx diff_vec = PATTERN (NEXT_INSN (operands[2])); gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC); @@ -10922,7 +10922,7 @@ label: (label_ref:DI (match_operand 3 "" ""))] UNSPEC_CASESI)))] "TARGET_SHMEDIA" { - rtx diff_vec = PATTERN (next_active_insn (operands[3])); + rtx diff_vec = PATTERN (NEXT_INSN 
(operands[3])); gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC); diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog index 8a3df56e63f..887ea615944 100644 --- a/gcc/cp/ChangeLog +++ b/gcc/cp/ChangeLog @@ -1,3 +1,20 @@ +2013-11-05 Jason Merrill <jason@redhat.com> + + PR c++/58868 + * decl.c (check_initializer): Don't use build_vec_init for arrays + of trivial type. + +2013-11-05 Paolo Carlini <paolo.carlini@oracle.com> + + PR c++/58724 + * name-lookup.c (handle_namespace_attrs): Use get_attribute_name. + +2013-11-05 Tobias Burnus <burnus@net-b.de> + + * parser.c (cp_parser_omp_for, cp_parser_omp_parallel, + cp_parser_omp_distribute, cp_parser_omp_teams, cp_parser_omp_target, + cp_parser_omp_declare): Handle -fopenmp-simd. + 2013-11-04 Eric Botcazou <ebotcazou@adacore.com> * decl2.c (cpp_check): Change type of first parameter and deal with diff --git a/gcc/cp/decl.c b/gcc/cp/decl.c index 81a35004730..27eda06cc7f 100644 --- a/gcc/cp/decl.c +++ b/gcc/cp/decl.c @@ -5684,6 +5684,7 @@ check_initializer (tree decl, tree init, int flags, vec<tree, va_gc> **cleanups) && !(init && BRACE_ENCLOSED_INITIALIZER_P (init) && CP_AGGREGATE_TYPE_P (type) && (CLASS_TYPE_P (type) + || !TYPE_NEEDS_CONSTRUCTING (type) || type_has_extended_temps (type)))) { init_code = build_aggr_init_full_exprs (decl, init, flags); diff --git a/gcc/cp/name-lookup.c b/gcc/cp/name-lookup.c index 025a03cd9fa..ced596e310a 100644 --- a/gcc/cp/name-lookup.c +++ b/gcc/cp/name-lookup.c @@ -3571,7 +3571,7 @@ handle_namespace_attrs (tree ns, tree attributes) for (d = attributes; d; d = TREE_CHAIN (d)) { - tree name = TREE_PURPOSE (d); + tree name = get_attribute_name (d); tree args = TREE_VALUE (d); if (is_attribute_p ("visibility", name)) diff --git a/gcc/cp/parser.c b/gcc/cp/parser.c index 06d779698f4..e4f77e8d68c 100644 --- a/gcc/cp/parser.c +++ b/gcc/cp/parser.c @@ -29133,6 +29133,9 @@ cp_parser_omp_for (cp_parser *parser, cp_token *pragma_tok, cclauses = cclauses_buf; cp_lexer_consume_token (parser->lexer); + if (!flag_openmp) /* flag_openmp_simd */ + return cp_parser_omp_simd (parser, pragma_tok, p_name, mask, + cclauses); sb = begin_omp_structured_block (); save = cp_parser_begin_omp_structured_block (parser); ret = cp_parser_omp_simd (parser, pragma_tok, p_name, mask, @@ -29150,6 +29153,11 @@ cp_parser_omp_for (cp_parser *parser, cp_token *pragma_tok, return ret; } } + if (!flag_openmp) /* flag_openmp_simd */ + { + cp_parser_require_pragma_eol (parser, pragma_tok); + return NULL_TREE; + } clauses = cp_parser_omp_all_clauses (parser, mask, p_name, pragma_tok, cclauses == NULL); @@ -29333,6 +29341,8 @@ cp_parser_omp_parallel (cp_parser *parser, cp_token *pragma_tok, cclauses = cclauses_buf; cp_lexer_consume_token (parser->lexer); + if (!flag_openmp) /* flag_openmp_simd */ + return cp_parser_omp_for (parser, pragma_tok, p_name, mask, cclauses); block = begin_omp_parallel (); save = cp_parser_begin_omp_structured_block (parser); cp_parser_omp_for (parser, pragma_tok, p_name, mask, cclauses); @@ -29348,6 +29358,11 @@ cp_parser_omp_parallel (cp_parser *parser, cp_token *pragma_tok, cp_parser_skip_to_pragma_eol (parser, pragma_tok); return NULL_TREE; } + else if (!flag_openmp) /* flag_openmp_simd */ + { + cp_parser_require_pragma_eol (parser, pragma_tok); + return NULL_TREE; + } else if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) { tree id = cp_lexer_peek_token (parser->lexer)->u.value; @@ -29576,6 +29591,15 @@ cp_parser_omp_distribute (cp_parser *parser, cp_token *pragma_tok, if (cclauses == NULL) cclauses = cclauses_buf; 
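/* (An illustrative aside on the !flag_openmp early returns threaded
   through these parsers: they implement -fopenmp-simd.  Under that
   option a combined construct is reduced to its simd part and the
   remaining clauses are parsed but dropped.  A hypothetical
   translation unit:

       #pragma omp parallel for simd
       for (int i = 0; i < n; i++)
         a[i] = b[i] + c[i];

   With -fopenmp the whole combined construct is honoured; with only
   -fopenmp-simd just the simd half takes effect, so no threads are
   spawned.  Sketch only; n, a, b and c are placeholder names.)  */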
cp_lexer_consume_token (parser->lexer); + if (!flag_openmp) /* flag_openmp_simd */ + { + if (simd) + return cp_parser_omp_simd (parser, pragma_tok, p_name, mask, + cclauses); + else + return cp_parser_omp_parallel (parser, pragma_tok, p_name, mask, + cclauses); + } sb = begin_omp_structured_block (); save = cp_parser_begin_omp_structured_block (parser); if (simd) @@ -29597,6 +29621,11 @@ cp_parser_omp_distribute (cp_parser *parser, cp_token *pragma_tok, return ret; } } + if (!flag_openmp) /* flag_openmp_simd */ + { + cp_parser_require_pragma_eol (parser, pragma_tok); + return NULL_TREE; + } clauses = cp_parser_omp_all_clauses (parser, mask, p_name, pragma_tok, cclauses == NULL); @@ -29652,6 +29681,9 @@ cp_parser_omp_teams (cp_parser *parser, cp_token *pragma_tok, cclauses = cclauses_buf; cp_lexer_consume_token (parser->lexer); + if (!flag_openmp) /* flag_openmp_simd */ + return cp_parser_omp_distribute (parser, pragma_tok, p_name, mask, + cclauses); sb = begin_omp_structured_block (); save = cp_parser_begin_omp_structured_block (parser); ret = cp_parser_omp_distribute (parser, pragma_tok, p_name, mask, @@ -29668,6 +29700,11 @@ cp_parser_omp_teams (cp_parser *parser, cp_token *pragma_tok, return add_stmt (ret); } } + if (!flag_openmp) /* flag_openmp_simd */ + { + cp_parser_require_pragma_eol (parser, pragma_tok); + return NULL_TREE; + } clauses = cp_parser_omp_all_clauses (parser, mask, p_name, pragma_tok, cclauses == NULL); @@ -29777,18 +29814,7 @@ cp_parser_omp_target (cp_parser *parser, cp_token *pragma_tok, tree id = cp_lexer_peek_token (parser->lexer)->u.value; const char *p = IDENTIFIER_POINTER (id); - if (strcmp (p, "data") == 0) - { - cp_lexer_consume_token (parser->lexer); - cp_parser_omp_target_data (parser, pragma_tok); - return true; - } - else if (strcmp (p, "update") == 0) - { - cp_lexer_consume_token (parser->lexer); - return cp_parser_omp_target_update (parser, pragma_tok, context); - } - else if (strcmp (p, "teams") == 0) + if (strcmp (p, "teams") == 0) { tree cclauses[C_OMP_CLAUSE_SPLIT_COUNT]; char p_name[sizeof ("#pragma omp target teams distribute " @@ -29797,6 +29823,9 @@ cp_parser_omp_target (cp_parser *parser, cp_token *pragma_tok, cp_lexer_consume_token (parser->lexer); strcpy (p_name, "#pragma omp target"); keep_next_level (true); + if (!flag_openmp) /* flag_openmp_simd */ + return cp_parser_omp_teams (parser, pragma_tok, p_name, + OMP_TARGET_CLAUSE_MASK, cclauses); tree sb = begin_omp_structured_block (); unsigned save = cp_parser_begin_omp_structured_block (parser); tree ret = cp_parser_omp_teams (parser, pragma_tok, p_name, @@ -29812,6 +29841,22 @@ cp_parser_omp_target (cp_parser *parser, cp_token *pragma_tok, add_stmt (stmt); return true; } + else if (!flag_openmp) /* flag_openmp_simd */ + { + cp_parser_require_pragma_eol (parser, pragma_tok); + return NULL_TREE; + } + else if (strcmp (p, "data") == 0) + { + cp_lexer_consume_token (parser->lexer); + cp_parser_omp_target_data (parser, pragma_tok); + return true; + } + else if (strcmp (p, "update") == 0) + { + cp_lexer_consume_token (parser->lexer); + return cp_parser_omp_target_update (parser, pragma_tok, context); + } } tree stmt = make_node (OMP_TARGET); @@ -30411,6 +30456,11 @@ cp_parser_omp_declare (cp_parser *parser, cp_token *pragma_tok, context); return; } + if (!flag_openmp) /* flag_openmp_simd */ + { + cp_parser_require_pragma_eol (parser, pragma_tok); + return; + } if (strcmp (p, "target") == 0) { cp_lexer_consume_token (parser->lexer); diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi index 
799ede1ec95..0d72819b1b0 100644 --- a/gcc/doc/extend.texi +++ b/gcc/doc/extend.texi @@ -4275,6 +4275,12 @@ the One Definition Rule; for example, it is usually not useful to mark an inline method as hidden without marking the whole class as hidden. A C++ namespace declaration can also have the visibility attribute. + +@smallexample +namespace nspace1 __attribute__ ((visibility ("protected"))) +@{ /* @r{Do something.} */; @} +@end smallexample + This attribute applies only to the particular namespace body, not to other definitions of the same namespace; it is equivalent to using @samp{#pragma GCC visibility} before and after the namespace diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi index e84bca30ab2..863e518b96f 100644 --- a/gcc/doc/invoke.texi +++ b/gcc/doc/invoke.texi @@ -168,8 +168,8 @@ in the following sections. @gccoptlist{-ansi -std=@var{standard} -fgnu89-inline @gol -aux-info @var{filename} -fallow-parameterless-variadic-functions @gol -fno-asm -fno-builtin -fno-builtin-@var{function} @gol --fhosted -ffreestanding -fopenmp -fms-extensions -fplan9-extensions @gol --trigraphs -traditional -traditional-cpp @gol +-fhosted -ffreestanding -fopenmp -fopenmp-simd -fms-extensions @gol +-fplan9-extensions -trigraphs -traditional -traditional-cpp @gol -fallow-single-precision -fcond-mismatch -flax-vector-conversions @gol -fsigned-bitfields -fsigned-char @gol -funsigned-bitfields -funsigned-char} @@ -240,7 +240,7 @@ Objective-C and Objective-C++ Dialects}. -Wno-attributes -Wno-builtin-macro-redefined @gol -Wc++-compat -Wc++11-compat -Wcast-align -Wcast-qual @gol -Wchar-subscripts -Wclobbered -Wcomment -Wconditionally-supported @gol --Wconversion -Wcoverage-mismatch -Wdelete-incomplete -Wno-cpp @gol +-Wconversion -Wcoverage-mismatch -Wdate-time -Wdelete-incomplete -Wno-cpp @gol -Wno-deprecated -Wno-deprecated-declarations -Wdisabled-optimization @gol -Wno-div-by-zero -Wdouble-promotion -Wempty-body -Wenum-compare @gol -Wno-endif-labels -Werror -Werror=* @gol @@ -1839,7 +1839,16 @@ Enable handling of OpenMP directives @code{#pragma omp} in C/C++ and compiler generates parallel code according to the OpenMP Application Program Interface v4.0 @w{@uref{http://www.openmp.org/}}. This option implies @option{-pthread}, and thus is only supported on targets that -have support for @option{-pthread}. +have support for @option{-pthread}. @option{-fopenmp} implies +@option{-fopenmp-simd}. + +@item -fopenmp-simd +@opindex fopenmp-simd +@cindex OpenMP SIMD +@cindex SIMD +Enable handling of OpenMP's SIMD directives with @code{#pragma omp} +in C/C++ and @code{!$omp} in Fortran. Other OpenMP directives +are ignored. @item -fcilkplus @opindex fcilkplus @@ -4517,6 +4526,13 @@ types. @option{-Wconversion-null} is enabled by default. Warn when a literal '0' is used as null pointer constant. This can be useful to facilitate the conversion to @code{nullptr} in C++11. +@item -Wdate-time +@opindex Wdate-time +@opindex Wno-date-time +Warn when macros @code{__TIME__}, @code{__DATE__} or @code{__TIMESTAMP__} +are encountered as they might prevent bit-wise-identical reproducible +compilations. + @item -Wdelete-incomplete @r{(C++ and Objective-C++ only)} @opindex Wdelete-incomplete @opindex Wno-delete-incomplete diff --git a/gcc/doc/md.texi b/gcc/doc/md.texi index 1d26d72ab23..1a06e3d6e74 100644 --- a/gcc/doc/md.texi +++ b/gcc/doc/md.texi @@ -5291,12 +5291,13 @@ are the first two operands, and both are @code{mem:BLK}s with an address in mode @code{Pmode}.
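(For instance, assuming a hypothetical 256-byte @code{struct blob}, a block copy such as

    struct blob { char bytes[256]; };
    void copy (struct blob *d, const struct blob *s) { *d = *s; }

may be expanded through this pattern, operands 0 and 1 being the two @code{mem:BLK} references; a sketch only, with no guarantee that every such copy reaches @code{movmem@var{m}}.)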
The number of bytes to move is the third operand, in mode @var{m}. -Usually, you specify @code{word_mode} for @var{m}. However, if you can +Usually, you specify @code{Pmode} for @var{m}. However, if you can generate better code knowing the range of valid lengths is smaller than -those representable in a full word, you should provide a pattern with a +those representable in a full Pmode pointer, you should provide +a pattern with a mode corresponding to the range of values you can handle efficiently (e.g., @code{QImode} for values in the range 0--127; note we avoid numbers -that appear negative) and also a pattern with @code{word_mode}. +that appear negative) and also a pattern with @code{Pmode}. The fourth operand is the known shared alignment of the source and destination, in the form of a @code{const_int} rtx. Thus, if the diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi index b59368cf753..7fdca198690 100644 --- a/gcc/doc/tm.texi +++ b/gcc/doc/tm.texi @@ -6574,6 +6574,17 @@ scheduling one insn causes other insns to become ready in the same cycle. These other insns can then be taken into account properly. @end deftypefn +@deftypefn {Target Hook} bool TARGET_SCHED_MACRO_FUSION_P (void) +This hook is used to check whether the target platform supports macro fusion. +@end deftypefn + +@deftypefn {Target Hook} bool TARGET_SCHED_MACRO_FUSION_PAIR_P (rtx @var{condgen}, rtx @var{condjmp}) +This hook is used to check whether two insns could be macro fused for +the target microarchitecture. If this hook returns true for the given insn pair +(@var{condgen} and @var{condjmp}), the scheduler will put them into a sched +group, and they will not be scheduled apart. +@end deftypefn + @deftypefn {Target Hook} void TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK (rtx @var{head}, rtx @var{tail}) This hook is called after evaluation forward dependencies of insns in chain given by two parameter values (@var{head} and @var{tail} diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in index d3c2f9a6735..90662120975 100644 --- a/gcc/doc/tm.texi.in +++ b/gcc/doc/tm.texi.in @@ -4938,6 +4938,10 @@ them: try the first ones in this list first. @hook TARGET_SCHED_REORDER2 +@hook TARGET_SCHED_MACRO_FUSION_P + +@hook TARGET_SCHED_MACRO_FUSION_PAIR_P + @hook TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK @hook TARGET_SCHED_INIT diff --git a/gcc/double-int.c b/gcc/double-int.c index 32dc514a3dd..3803a63e3a9 100644 --- a/gcc/double-int.c +++ b/gcc/double-int.c @@ -20,7 +20,7 @@ along with GCC; see the file COPYING3. If not see #include "config.h" #include "system.h" #include "coretypes.h" -#include "tm.h" /* For SHIFT_COUNT_TRUNCATED. */ +#include "tm.h" /* For BITS_PER_UNIT and *_BIG_ENDIAN. */ #include "tree.h" static int add_double_with_sign (unsigned HOST_WIDE_INT, HOST_WIDE_INT, @@ -237,9 +237,6 @@ rshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1, ?
-((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1)) : 0); - if (SHIFT_COUNT_TRUNCATED) - count %= prec; - if (count >= HOST_BITS_PER_DOUBLE_INT) { /* Shifting by the host word size is undefined according to the @@ -295,9 +292,6 @@ lshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1, { unsigned HOST_WIDE_INT signmask; - if (SHIFT_COUNT_TRUNCATED) - count %= prec; - if (count >= HOST_BITS_PER_DOUBLE_INT) { /* Shifting by the host word size is undefined according to the diff --git a/gcc/expr.c b/gcc/expr.c index c751004c68e..cb3abcdab89 100644 --- a/gcc/expr.c +++ b/gcc/expr.c @@ -1266,11 +1266,12 @@ emit_block_move_via_movmem (rtx x, rtx y, rtx size, unsigned int align, /* We don't need MODE to be narrower than BITS_PER_HOST_WIDE_INT here because if SIZE is less than the mode mask, as it is returned by the macro, it will definitely be less than the - actual mode mask. */ + actual mode mask. Since SIZE is within the Pmode address + space, we limit MODE to Pmode. */ && ((CONST_INT_P (size) && ((unsigned HOST_WIDE_INT) INTVAL (size) <= (GET_MODE_MASK (mode) >> 1))) - || GET_MODE_BITSIZE (mode) >= BITS_PER_WORD)) + || GET_MODE_BITSIZE (mode) >= GET_MODE_BITSIZE (Pmode))) { struct expand_operand ops[6]; unsigned int nops; @@ -2849,14 +2850,15 @@ set_storage_via_setmem (rtx object, rtx size, rtx val, unsigned int align, enum insn_code code = direct_optab_handler (setmem_optab, mode); if (code != CODE_FOR_nothing - /* We don't need MODE to be narrower than - BITS_PER_HOST_WIDE_INT here because if SIZE is less than - the mode mask, as it is returned by the macro, it will - definitely be less than the actual mode mask. */ + /* We don't need MODE to be narrower than BITS_PER_HOST_WIDE_INT + here because if SIZE is less than the mode mask, as it is + returned by the macro, it will definitely be less than the + actual mode mask. Since SIZE is within the Pmode address + space, we limit MODE to Pmode. */ && ((CONST_INT_P (size) && ((unsigned HOST_WIDE_INT) INTVAL (size) <= (GET_MODE_MASK (mode) >> 1))) - || GET_MODE_BITSIZE (mode) >= BITS_PER_WORD)) + || GET_MODE_BITSIZE (mode) >= GET_MODE_BITSIZE (Pmode))) { struct expand_operand ops[6]; unsigned int nops; diff --git a/gcc/final.c b/gcc/final.c index 052103cd0ea..f7425429b81 100644 --- a/gcc/final.c +++ b/gcc/final.c @@ -78,6 +78,7 @@ along with GCC; see the file COPYING3. If not see #include "cfgloop.h" #include "params.h" #include "tree-pretty-print.h" /* for dump_function_header */ +#include "asan.h" #include "wide-int-print.h" #ifdef XCOFF_DEBUGGING_INFO @@ -1739,6 +1740,9 @@ final_start_function (rtx first, FILE *file, high_block_linenum = high_function_linenum = last_linenum; + if (flag_sanitize & SANITIZE_ADDRESS) + asan_function_start (); + if (!DECL_IGNORED_P (current_function_decl)) debug_hooks->begin_prologue (last_linenum, last_filename); diff --git a/gcc/fortran/ChangeLog b/gcc/fortran/ChangeLog index 059b7b3528c..155a6537261 100644 --- a/gcc/fortran/ChangeLog +++ b/gcc/fortran/ChangeLog @@ -1,3 +1,38 @@ +2013-11-05 Tobias Burnus <burnus@net-b.de> + + * lang.opt (-Wdate-time): New option + * cpp.c (gfc_cpp_option_data): Add warn_date_time. + (gfc_cpp_init_options, gfc_cpp_handle_option, + gfc_cpp_post_options): Handle it and pass on to libcpp. + +2013-11-05 Steven G. Kargl <kargl@gcc.gnu.org> + + PR fortran/58989 + * check.c (gfc_check_reshape): ensure that shape is a constant + expression. + +2013-11-05 Tobias Burnus <burnus@net-b.de> + + * lang.opt (fopenmp-simd): New option. 
+ * gfortran.h (gfc_option_t): Add gfc_flag_openmp_simd. + * options.c (gfc_handle_option): Handle it. + +2013-11-04 Ian Lance Taylor <iant@google.com> + + * f95-lang.c (ATTR_LEAF_LIST): Define. + +2013-11-04 Paul Thomas <pault@gcc.gnu.org> + + PR fortran/58771 + * trans-io.c (transfer_expr): If the backend_decl for a derived + type is missing, build it with gfc_typenode_for_spec. + +2013-11-04 Paul Thomas <pault@gcc.gnu.org> + + PR fortran/57445 + * trans-expr.c (gfc_conv_class_to_class): Remove spurious + assert. + 2013-10-29 Tobias Burnus <burnus@net-b.de> PR fortran/44350 diff --git a/gcc/fortran/check.c b/gcc/fortran/check.c index 758639e27af..1508c744724 100644 --- a/gcc/fortran/check.c +++ b/gcc/fortran/check.c @@ -3277,7 +3277,7 @@ gfc_check_reshape (gfc_expr *source, gfc_expr *shape, "than %d elements", &shape->where, GFC_MAX_DIMENSIONS); return false; } - else if (shape->expr_type == EXPR_ARRAY) + else if (shape->expr_type == EXPR_ARRAY && gfc_is_constant_expr (shape)) { gfc_expr *e; int i, extent; diff --git a/gcc/fortran/cpp.c b/gcc/fortran/cpp.c index ea53681af0c..8417ddca163 100644 --- a/gcc/fortran/cpp.c +++ b/gcc/fortran/cpp.c @@ -100,6 +100,7 @@ struct gfc_cpp_option_data const char *deps_filename_user; /* -MF <arg> */ int deps_missing_are_generated; /* -MG */ int deps_phony; /* -MP */ + int warn_date_time; /* -Wdate-time */ const char *multilib; /* -imultilib <dir> */ const char *prefix; /* -iprefix <dir> */ @@ -262,6 +263,7 @@ gfc_cpp_init_options (unsigned int decoded_options_count, gfc_cpp_option.no_predefined = 0; gfc_cpp_option.standard_include_paths = 1; gfc_cpp_option.verbose = 0; + gfc_cpp_option.warn_date_time = 0; gfc_cpp_option.deps = 0; gfc_cpp_option.deps_skip_system = 0; gfc_cpp_option.deps_phony = 0; @@ -359,6 +361,9 @@ gfc_cpp_handle_option (size_t scode, const char *arg, int value ATTRIBUTE_UNUSED gfc_cpp_option.verbose = value; break; + case OPT_Wdate_time: + gfc_cpp_option.warn_date_time = value; + case OPT_A: case OPT_D: case OPT_U: @@ -469,6 +474,7 @@ gfc_cpp_post_options (void) cpp_option->discard_comments_in_macro_exp = gfc_cpp_option.discard_comments_in_macro_exp; cpp_option->print_include_names = gfc_cpp_option.print_include_names; cpp_option->preprocessed = gfc_option.flag_preprocessed; + cpp_option->warn_date_time = gfc_cpp_option.warn_date_time; if (gfc_cpp_makedep ()) { diff --git a/gcc/fortran/f95-lang.c b/gcc/fortran/f95-lang.c index 873c137e581..a70d60d6882 100644 --- a/gcc/fortran/f95-lang.c +++ b/gcc/fortran/f95-lang.c @@ -531,8 +531,9 @@ gfc_builtin_function (tree decl) return decl; } -/* So far we need just these 6 attribute types. */ +/* So far we need just these 7 attribute types. 
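(The new ATTR_LEAF_LIST entry just below carries ECF_LEAF, the internal counterpart of the C-level leaf attribute, as in

    extern int do_work (int) __attribute__ ((leaf));

a leaf call promises to return to the current translation unit only by returning or by exception handling, which lets callers keep more values live across it; the declaration is an illustrative assumption, not taken from this patch.)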
*/ #define ATTR_NULL 0 +#define ATTR_LEAF_LIST (ECF_LEAF) #define ATTR_NOTHROW_LEAF_LIST (ECF_NOTHROW | ECF_LEAF) #define ATTR_NOTHROW_LEAF_MALLOC_LIST (ECF_NOTHROW | ECF_LEAF | ECF_MALLOC) #define ATTR_CONST_NOTHROW_LEAF_LIST (ECF_NOTHROW | ECF_LEAF | ECF_CONST) diff --git a/gcc/fortran/gfortran.h b/gcc/fortran/gfortran.h index b28edd80002..af5e68c13ab 100644 --- a/gcc/fortran/gfortran.h +++ b/gcc/fortran/gfortran.h @@ -2286,6 +2286,7 @@ typedef struct int flag_cray_pointer; int flag_d_lines; int gfc_flag_openmp; + int gfc_flag_openmp_simd; int flag_sign_zero; int flag_stack_arrays; int flag_module_private; diff --git a/gcc/fortran/lang.opt b/gcc/fortran/lang.opt index 4f7993433d4..5e09cbd1459 100644 --- a/gcc/fortran/lang.opt +++ b/gcc/fortran/lang.opt @@ -213,6 +213,10 @@ Wc-binding-type Fortran Warning Warn if the type of a variable might be not interoperable with C +Wdate-time +Fortran +; Documented in C + Wcharacter-truncation Fortran Warning Warn about truncated character expressions @@ -517,6 +521,10 @@ fopenmp Fortran ; Documented in C +fopenmp-simd +Fortran +; Documented in C + fpack-derived Fortran Try to lay out derived types as compactly as possible diff --git a/gcc/fortran/options.c b/gcc/fortran/options.c index 6e4e7c11696..e05528a9223 100644 --- a/gcc/fortran/options.c +++ b/gcc/fortran/options.c @@ -836,6 +836,10 @@ gfc_handle_option (size_t scode, const char *arg, int value, gfc_option.gfc_flag_openmp = value; break; + case OPT_fopenmp_simd: + gfc_option.gfc_flag_openmp_simd = value; + break; + case OPT_ffree_line_length_none: gfc_option.free_line_length = 0; break; diff --git a/gcc/fortran/trans-expr.c b/gcc/fortran/trans-expr.c index c38a79ada77..59434bdb1b2 100644 --- a/gcc/fortran/trans-expr.c +++ b/gcc/fortran/trans-expr.c @@ -737,7 +737,6 @@ gfc_conv_class_to_class (gfc_se *parmse, gfc_expr *e, gfc_typespec class_ts, gfc_add_modify (&parmse->post, vptr, fold_convert (TREE_TYPE (vptr), ctree)); - gcc_assert (!optional || (optional && !copyback)); if (optional) { tree tmp2; @@ -7771,7 +7770,7 @@ is_runtime_conformable (gfc_expr *expr1, gfc_expr *expr2) e1 = a->expr; if (e1->rank > 0 && !is_runtime_conformable (expr1, e1)) return false; - } + } return true; } else if (expr2->value.function.isym diff --git a/gcc/fortran/trans-io.c b/gcc/fortran/trans-io.c index fd5642209d2..1b500b51d31 100644 --- a/gcc/fortran/trans-io.c +++ b/gcc/fortran/trans-io.c @@ -243,16 +243,16 @@ gfc_trans_io_runtime_check (tree cond, tree var, int error_code, /* The code to generate the error. */ gfc_start_block (&block); - + arg1 = gfc_build_addr_expr (NULL_TREE, var); - + arg2 = build_int_cst (integer_type_node, error_code), - + asprintf (&message, "%s", _(msgid)); arg3 = gfc_build_addr_expr (pchar_type_node, gfc_build_localized_cstring_const (message)); free (message); - + tmp = build_call_expr_loc (input_location, gfor_fndecl_generate_error, 3, arg1, arg2, arg3); @@ -521,7 +521,7 @@ set_parameter_value (stmtblock_t *block, tree var, enum iofield type, gfc_trans_io_runtime_check (cond, var, LIBERROR_BAD_UNIT, "Unit number in I/O statement too small", &se.pre); - + /* UNIT numbers should be less than the max. 
*/ val = gfc_conv_mpz_to_tree (gfc_integer_kinds[i].huge, 4); cond = fold_build2_loc (input_location, GT_EXPR, boolean_type_node, @@ -1000,7 +1000,7 @@ gfc_trans_open (gfc_code * code) if (p->convert) mask |= set_string (&block, &post_block, var, IOPARM_open_convert, p->convert); - + if (p->newunit) mask |= set_parameter_ref (&block, &post_block, var, IOPARM_open_newunit, p->newunit); @@ -1234,7 +1234,7 @@ gfc_trans_inquire (gfc_code * code) { mask |= set_parameter_ref (&block, &post_block, var, IOPARM_inquire_exist, p->exist); - + if (p->unit && !p->iostat) { p->iostat = create_dummy_iostat (); @@ -1322,7 +1322,7 @@ gfc_trans_inquire (gfc_code * code) if (p->pad) mask |= set_string (&block, &post_block, var, IOPARM_inquire_pad, p->pad); - + if (p->convert) mask |= set_string (&block, &post_block, var, IOPARM_inquire_convert, p->convert); @@ -1547,7 +1547,7 @@ transfer_namelist_element (stmtblock_t * block, const char * var_name, tree dtype; tree dt_parm_addr; tree decl = NULL_TREE; - int n_dim; + int n_dim; int itype; int rank = 0; @@ -2029,7 +2029,7 @@ transfer_expr (gfc_se * se, gfc_typespec * ts, tree addr_expr, gfc_code * code) ts->type = BT_INTEGER; ts->kind = gfc_index_integer_kind; } - + kind = ts->kind; function = NULL; arg2 = NULL; @@ -2111,7 +2111,7 @@ transfer_expr (gfc_se * se, gfc_typespec * ts, tree addr_expr, gfc_code * code) function = iocall[IOCALL_X_CHARACTER_WIDE]; else function = iocall[IOCALL_X_CHARACTER_WIDE_WRITE]; - + tmp = gfc_build_addr_expr (NULL_TREE, dt_parm); tmp = build_call_expr_loc (input_location, function, 4, tmp, addr_expr, arg2, arg3); @@ -2146,6 +2146,12 @@ transfer_expr (gfc_se * se, gfc_typespec * ts, tree addr_expr, gfc_code * code) expr = build_fold_indirect_ref_loc (input_location, expr); + /* Make sure that the derived type has been built. An external + function, if only referenced in an io statement, requires this + check (see PR58771). */ + if (ts->u.derived->backend_decl == NULL_TREE) + tmp = gfc_typenode_for_spec (ts); + for (c = ts->u.derived->components; c; c = c->next) { field = c->backend_decl; diff --git a/gcc/gengtype-parse.c b/gcc/gengtype-parse.c index 9fb554c628e..0a588226f64 100644 --- a/gcc/gengtype-parse.c +++ b/gcc/gengtype-parse.c @@ -774,7 +774,7 @@ declarator (type_p ty, const char **namep, options_p *optsp, ( type bitfield ';' | type declarator bitfield? ( ',' declarator bitfield? )+ ';' - )+ + )* Knows that such declarations must end with a close brace (or, erroneously, at EOF). @@ -788,7 +788,7 @@ struct_field_seq (void) const char *name; bool another; - do + while (token () != '}' && token () != EOF_TOKEN) { ty = type (&opts, true); @@ -831,7 +831,6 @@ struct_field_seq (void) } while (another); } - while (token () != '}' && token () != EOF_TOKEN); return nreverse_pairs (f); } diff --git a/gcc/gimple-expr.c b/gcc/gimple-expr.c new file mode 100644 index 00000000000..c74d9295d1d --- /dev/null +++ b/gcc/gimple-expr.c @@ -0,0 +1,721 @@ +/* Gimple decl, type, and expression support functions. + + Copyright (C) 2007-2013 Free Software Foundation, Inc. + Contributed by Aldy Hernandez <aldyh@redhat.com> + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. */ + +#include "config.h" +#include "system.h" +#include "coretypes.h" +#include "tm.h" +#include "tree.h" +#include "gimple.h" +#include "demangle.h" + +/* ----- Type related ----- */ + +/* Return true if the conversion from INNER_TYPE to OUTER_TYPE is a + useless type conversion, otherwise return false. + + This function implicitly defines the middle-end type system. With + the notion of 'a < b' meaning that useless_type_conversion_p (a, b) + holds and 'a > b' meaning that useless_type_conversion_p (b, a) holds, + the following invariants shall be fulfilled: + + 1) useless_type_conversion_p is transitive. + If a < b and b < c then a < c. + + 2) useless_type_conversion_p is not symmetric. + From a < b does not follow a > b. + + 3) Types define the available set of operations applicable to values. + A type conversion is useless if the operations for the target type + is a subset of the operations for the source type. For example + casts to void* are useless, casts from void* are not (void* can't + be dereferenced or offsetted, but copied, hence its set of operations + is a strict subset of that of all other data pointer types). Casts + to const T* are useless (can't be written to), casts from const T* + to T* are not. */ + +bool +useless_type_conversion_p (tree outer_type, tree inner_type) +{ + /* Do the following before stripping toplevel qualifiers. */ + if (POINTER_TYPE_P (inner_type) + && POINTER_TYPE_P (outer_type)) + { + /* Do not lose casts between pointers to different address spaces. */ + if (TYPE_ADDR_SPACE (TREE_TYPE (outer_type)) + != TYPE_ADDR_SPACE (TREE_TYPE (inner_type))) + return false; + } + + /* From now on qualifiers on value types do not matter. */ + inner_type = TYPE_MAIN_VARIANT (inner_type); + outer_type = TYPE_MAIN_VARIANT (outer_type); + + if (inner_type == outer_type) + return true; + + /* If we know the canonical types, compare them. */ + if (TYPE_CANONICAL (inner_type) + && TYPE_CANONICAL (inner_type) == TYPE_CANONICAL (outer_type)) + return true; + + /* Changes in machine mode are never useless conversions unless we + deal with aggregate types in which case we defer to later checks. */ + if (TYPE_MODE (inner_type) != TYPE_MODE (outer_type) + && !AGGREGATE_TYPE_P (inner_type)) + return false; + + /* If both the inner and outer types are integral types, then the + conversion is not necessary if they have the same mode and + signedness and precision, and both or neither are boolean. */ + if (INTEGRAL_TYPE_P (inner_type) + && INTEGRAL_TYPE_P (outer_type)) + { + /* Preserve changes in signedness or precision. */ + if (TYPE_UNSIGNED (inner_type) != TYPE_UNSIGNED (outer_type) + || TYPE_PRECISION (inner_type) != TYPE_PRECISION (outer_type)) + return false; + + /* Preserve conversions to/from BOOLEAN_TYPE if types are not + of precision one. */ + if (((TREE_CODE (inner_type) == BOOLEAN_TYPE) + != (TREE_CODE (outer_type) == BOOLEAN_TYPE)) + && TYPE_PRECISION (outer_type) != 1) + return false; + + /* We don't need to preserve changes in the types minimum or + maximum value in general as these do not generate code + unless the types precisions are different. */ + return true; + } + + /* Scalar floating point types with the same mode are compatible. 
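(To recap the integral rules above with concrete C types, assuming a usual LP64 target:

    useless_type_conversion_p (int, int)           -> true
    useless_type_conversion_p (unsigned int, int)  -> false    signedness is preserved
    useless_type_conversion_p (long, int)          -> false    precision differs

where the first argument is the destination type and the second the source; illustrative readings of the checks above, not additional API.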
*/ + else if (SCALAR_FLOAT_TYPE_P (inner_type) + && SCALAR_FLOAT_TYPE_P (outer_type)) + return true; + + /* Fixed point types with the same mode are compatible. */ + else if (FIXED_POINT_TYPE_P (inner_type) + && FIXED_POINT_TYPE_P (outer_type)) + return true; + + /* We need to take special care recursing to pointed-to types. */ + else if (POINTER_TYPE_P (inner_type) + && POINTER_TYPE_P (outer_type)) + { + /* Do not lose casts to function pointer types. */ + if ((TREE_CODE (TREE_TYPE (outer_type)) == FUNCTION_TYPE + || TREE_CODE (TREE_TYPE (outer_type)) == METHOD_TYPE) + && !(TREE_CODE (TREE_TYPE (inner_type)) == FUNCTION_TYPE + || TREE_CODE (TREE_TYPE (inner_type)) == METHOD_TYPE)) + return false; + + /* We do not care for const qualification of the pointed-to types + as const qualification has no semantic value to the middle-end. */ + + /* Otherwise pointers/references are equivalent. */ + return true; + } + + /* Recurse for complex types. */ + else if (TREE_CODE (inner_type) == COMPLEX_TYPE + && TREE_CODE (outer_type) == COMPLEX_TYPE) + return useless_type_conversion_p (TREE_TYPE (outer_type), + TREE_TYPE (inner_type)); + + /* Recurse for vector types with the same number of subparts. */ + else if (TREE_CODE (inner_type) == VECTOR_TYPE + && TREE_CODE (outer_type) == VECTOR_TYPE + && TYPE_PRECISION (inner_type) == TYPE_PRECISION (outer_type)) + return useless_type_conversion_p (TREE_TYPE (outer_type), + TREE_TYPE (inner_type)); + + else if (TREE_CODE (inner_type) == ARRAY_TYPE + && TREE_CODE (outer_type) == ARRAY_TYPE) + { + /* Preserve string attributes. */ + if (TYPE_STRING_FLAG (inner_type) != TYPE_STRING_FLAG (outer_type)) + return false; + + /* Conversions from array types with unknown extent to + array types with known extent are not useless. */ + if (!TYPE_DOMAIN (inner_type) + && TYPE_DOMAIN (outer_type)) + return false; + + /* Nor are conversions from array types with non-constant size to + array types with constant size or to different size. */ + if (TYPE_SIZE (outer_type) + && TREE_CODE (TYPE_SIZE (outer_type)) == INTEGER_CST + && (!TYPE_SIZE (inner_type) + || TREE_CODE (TYPE_SIZE (inner_type)) != INTEGER_CST + || !tree_int_cst_equal (TYPE_SIZE (outer_type), + TYPE_SIZE (inner_type)))) + return false; + + /* Check conversions between arrays with partially known extents. + If the array min/max values are constant they have to match. + Otherwise allow conversions to unknown and variable extents. + In particular this declares conversions that may change the + mode to BLKmode as useless. */ + if (TYPE_DOMAIN (inner_type) + && TYPE_DOMAIN (outer_type) + && TYPE_DOMAIN (inner_type) != TYPE_DOMAIN (outer_type)) + { + tree inner_min = TYPE_MIN_VALUE (TYPE_DOMAIN (inner_type)); + tree outer_min = TYPE_MIN_VALUE (TYPE_DOMAIN (outer_type)); + tree inner_max = TYPE_MAX_VALUE (TYPE_DOMAIN (inner_type)); + tree outer_max = TYPE_MAX_VALUE (TYPE_DOMAIN (outer_type)); + + /* After gimplification a variable min/max value carries no + additional information compared to a NULL value. All that + matters has been lowered to be part of the IL. */ + if (inner_min && TREE_CODE (inner_min) != INTEGER_CST) + inner_min = NULL_TREE; + if (outer_min && TREE_CODE (outer_min) != INTEGER_CST) + outer_min = NULL_TREE; + if (inner_max && TREE_CODE (inner_max) != INTEGER_CST) + inner_max = NULL_TREE; + if (outer_max && TREE_CODE (outer_max) != INTEGER_CST) + outer_max = NULL_TREE; + + /* Conversions NULL / variable <- cst are useless, but not + the other way around. 
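Concretely, with the destination type listed first as in the function's signature:

    useless_type_conversion_p (int[],  int[4])  -> true     a constant extent may be dropped
    useless_type_conversion_p (int[4], int[])   -> false    a known extent cannot be invented

which is exactly the cst-to-unknown asymmetry described here; illustrative only.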
*/ + if (outer_min + && (!inner_min + || !tree_int_cst_equal (inner_min, outer_min))) + return false; + if (outer_max + && (!inner_max + || !tree_int_cst_equal (inner_max, outer_max))) + return false; + } + + /* Recurse on the element check. */ + return useless_type_conversion_p (TREE_TYPE (outer_type), + TREE_TYPE (inner_type)); + } + + else if ((TREE_CODE (inner_type) == FUNCTION_TYPE + || TREE_CODE (inner_type) == METHOD_TYPE) + && TREE_CODE (inner_type) == TREE_CODE (outer_type)) + { + tree outer_parm, inner_parm; + + /* If the return types are not compatible bail out. */ + if (!useless_type_conversion_p (TREE_TYPE (outer_type), + TREE_TYPE (inner_type))) + return false; + + /* Method types should belong to a compatible base class. */ + if (TREE_CODE (inner_type) == METHOD_TYPE + && !useless_type_conversion_p (TYPE_METHOD_BASETYPE (outer_type), + TYPE_METHOD_BASETYPE (inner_type))) + return false; + + /* A conversion to an unprototyped argument list is ok. */ + if (!prototype_p (outer_type)) + return true; + + /* If the unqualified argument types are compatible the conversion + is useless. */ + if (TYPE_ARG_TYPES (outer_type) == TYPE_ARG_TYPES (inner_type)) + return true; + + for (outer_parm = TYPE_ARG_TYPES (outer_type), + inner_parm = TYPE_ARG_TYPES (inner_type); + outer_parm && inner_parm; + outer_parm = TREE_CHAIN (outer_parm), + inner_parm = TREE_CHAIN (inner_parm)) + if (!useless_type_conversion_p + (TYPE_MAIN_VARIANT (TREE_VALUE (outer_parm)), + TYPE_MAIN_VARIANT (TREE_VALUE (inner_parm)))) + return false; + + /* If there is a mismatch in the number of arguments the functions + are not compatible. */ + if (outer_parm || inner_parm) + return false; + + /* Defer to the target if necessary. */ + if (TYPE_ATTRIBUTES (inner_type) || TYPE_ATTRIBUTES (outer_type)) + return comp_type_attributes (outer_type, inner_type) != 0; + + return true; + } + + /* For aggregates we rely on TYPE_CANONICAL exclusively and require + explicit conversions for types involving to be structurally + compared types. */ + else if (AGGREGATE_TYPE_P (inner_type) + && TREE_CODE (inner_type) == TREE_CODE (outer_type)) + return false; + + return false; +} + + +/* ----- Decl related ----- */ + +/* Set sequence SEQ to be the GIMPLE body for function FN. */ + +void +gimple_set_body (tree fndecl, gimple_seq seq) +{ + struct function *fn = DECL_STRUCT_FUNCTION (fndecl); + if (fn == NULL) + { + /* If FNDECL still does not have a function structure associated + with it, then it does not make sense for it to receive a + GIMPLE body. */ + gcc_assert (seq == NULL); + } + else + fn->gimple_body = seq; +} + + +/* Return the body of GIMPLE statements for function FN. After the + CFG pass, the function body doesn't exist anymore because it has + been split up into basic blocks. In this case, it returns + NULL. */ + +gimple_seq +gimple_body (tree fndecl) +{ + struct function *fn = DECL_STRUCT_FUNCTION (fndecl); + return fn ? fn->gimple_body : NULL; +} + +/* Return true when FNDECL has Gimple body either in unlowered + or CFG form. */ +bool +gimple_has_body_p (tree fndecl) +{ + struct function *fn = DECL_STRUCT_FUNCTION (fndecl); + return (gimple_body (fndecl) || (fn && fn->cfg)); +} + +/* Return a printable name for symbol DECL. 
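(For the verbose case this runs the libiberty demangler over DECL_ASSEMBLER_NAME; e.g.

    cplus_demangle_v3 ("_Z3addii", DMGL_PARAMS | DMGL_ANSI)   -> returns "add(int, int)"

assuming the usual Itanium mangling of int add(int, int); a sketch, not part of this patch.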
*/ + +const char * +gimple_decl_printable_name (tree decl, int verbosity) +{ + if (!DECL_NAME (decl)) + return NULL; + + if (DECL_ASSEMBLER_NAME_SET_P (decl)) + { + const char *str, *mangled_str; + int dmgl_opts = DMGL_NO_OPTS; + + if (verbosity >= 2) + { + dmgl_opts = DMGL_VERBOSE + | DMGL_ANSI + | DMGL_GNU_V3 + | DMGL_RET_POSTFIX; + if (TREE_CODE (decl) == FUNCTION_DECL) + dmgl_opts |= DMGL_PARAMS; + } + + mangled_str = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)); + str = cplus_demangle_v3 (mangled_str, dmgl_opts); + return (str) ? str : mangled_str; + } + + return IDENTIFIER_POINTER (DECL_NAME (decl)); +} + + +/* Create a new VAR_DECL and copy information from VAR to it. */ + +tree +copy_var_decl (tree var, tree name, tree type) +{ + tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type); + + TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var); + TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var); + DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var); + DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var); + DECL_IGNORED_P (copy) = DECL_IGNORED_P (var); + DECL_CONTEXT (copy) = DECL_CONTEXT (var); + TREE_NO_WARNING (copy) = TREE_NO_WARNING (var); + TREE_USED (copy) = 1; + DECL_SEEN_IN_BIND_EXPR_P (copy) = 1; + DECL_ATTRIBUTES (copy) = DECL_ATTRIBUTES (var); + + return copy; +} + +/* Given SSA_NAMEs NAME1 and NAME2, return true if they are candidates for + coalescing together, false otherwise. + + This must stay consistent with var_map_base_init in tree-ssa-live.c. */ + +bool +gimple_can_coalesce_p (tree name1, tree name2) +{ + /* First check the SSA_NAME's associated DECL. We only want to + coalesce if they have the same DECL or both have no associated DECL. */ + tree var1 = SSA_NAME_VAR (name1); + tree var2 = SSA_NAME_VAR (name2); + var1 = (var1 && (!VAR_P (var1) || !DECL_IGNORED_P (var1))) ? var1 : NULL_TREE; + var2 = (var2 && (!VAR_P (var2) || !DECL_IGNORED_P (var2))) ? var2 : NULL_TREE; + if (var1 != var2) + return false; + + /* Now check the types. If the types are the same, then we should + try to coalesce V1 and V2. */ + tree t1 = TREE_TYPE (name1); + tree t2 = TREE_TYPE (name2); + if (t1 == t2) + return true; + + /* If the types are not the same, check for a canonical type match. This + (for example) allows coalescing when the types are fundamentally the + same, but just have different names. + + Note pointer types with different address spaces may have the same + canonical type. Those are rejected for coalescing by the + types_compatible_p check. */ + if (TYPE_CANONICAL (t1) + && TYPE_CANONICAL (t1) == TYPE_CANONICAL (t2) + && types_compatible_p (t1, t2)) + return true; + + return false; +} + + +/* ----- Expression related ----- */ + +/* Extract the operands and code for expression EXPR into *SUBCODE_P, + *OP1_P, *OP2_P and *OP3_P respectively. 
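For example, as classified by get_gimple_rhs_class:

    b + c   -> GIMPLE_BINARY_RHS,  *SUBCODE_P = PLUS_EXPR,   ops b and c
    -b      -> GIMPLE_UNARY_RHS,   *SUBCODE_P = NEGATE_EXPR, op b
    b       -> GIMPLE_SINGLE_RHS,  *OP1_P is the expression itself

a sketch of the mapping the function below implements.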
*/ + +void +extract_ops_from_tree_1 (tree expr, enum tree_code *subcode_p, tree *op1_p, + tree *op2_p, tree *op3_p) +{ + enum gimple_rhs_class grhs_class; + + *subcode_p = TREE_CODE (expr); + grhs_class = get_gimple_rhs_class (*subcode_p); + + if (grhs_class == GIMPLE_TERNARY_RHS) + { + *op1_p = TREE_OPERAND (expr, 0); + *op2_p = TREE_OPERAND (expr, 1); + *op3_p = TREE_OPERAND (expr, 2); + } + else if (grhs_class == GIMPLE_BINARY_RHS) + { + *op1_p = TREE_OPERAND (expr, 0); + *op2_p = TREE_OPERAND (expr, 1); + *op3_p = NULL_TREE; + } + else if (grhs_class == GIMPLE_UNARY_RHS) + { + *op1_p = TREE_OPERAND (expr, 0); + *op2_p = NULL_TREE; + *op3_p = NULL_TREE; + } + else if (grhs_class == GIMPLE_SINGLE_RHS) + { + *op1_p = expr; + *op2_p = NULL_TREE; + *op3_p = NULL_TREE; + } + else + gcc_unreachable (); +} + +/* Extract operands for a GIMPLE_COND statement out of COND_EXPR tree COND. */ + +void +gimple_cond_get_ops_from_tree (tree cond, enum tree_code *code_p, + tree *lhs_p, tree *rhs_p) +{ + gcc_assert (TREE_CODE_CLASS (TREE_CODE (cond)) == tcc_comparison + || TREE_CODE (cond) == TRUTH_NOT_EXPR + || is_gimple_min_invariant (cond) + || SSA_VAR_P (cond)); + + extract_ops_from_tree (cond, code_p, lhs_p, rhs_p); + + /* Canonicalize conditionals of the form 'if (!VAL)'. */ + if (*code_p == TRUTH_NOT_EXPR) + { + *code_p = EQ_EXPR; + gcc_assert (*lhs_p && *rhs_p == NULL_TREE); + *rhs_p = build_zero_cst (TREE_TYPE (*lhs_p)); + } + /* Canonicalize conditionals of the form 'if (VAL)' */ + else if (TREE_CODE_CLASS (*code_p) != tcc_comparison) + { + *code_p = NE_EXPR; + gcc_assert (*lhs_p && *rhs_p == NULL_TREE); + *rhs_p = build_zero_cst (TREE_TYPE (*lhs_p)); + } +} + +/* Return true if T is a valid LHS for a GIMPLE assignment expression. */ + +bool +is_gimple_lvalue (tree t) +{ + return (is_gimple_addressable (t) + || TREE_CODE (t) == WITH_SIZE_EXPR + /* These are complex lvalues, but don't have addresses, so they + go here. */ + || TREE_CODE (t) == BIT_FIELD_REF); +} + +/* Return true if T is a GIMPLE condition. */ + +bool +is_gimple_condexpr (tree t) +{ + return (is_gimple_val (t) || (COMPARISON_CLASS_P (t) + && !tree_could_throw_p (t) + && is_gimple_val (TREE_OPERAND (t, 0)) + && is_gimple_val (TREE_OPERAND (t, 1)))); +} + +/* Return true if T is a gimple address. */ + +bool +is_gimple_address (const_tree t) +{ + tree op; + + if (TREE_CODE (t) != ADDR_EXPR) + return false; + + op = TREE_OPERAND (t, 0); + while (handled_component_p (op)) + { + if ((TREE_CODE (op) == ARRAY_REF + || TREE_CODE (op) == ARRAY_RANGE_REF) + && !is_gimple_val (TREE_OPERAND (op, 1))) + return false; + + op = TREE_OPERAND (op, 0); + } + + if (CONSTANT_CLASS_P (op) || TREE_CODE (op) == MEM_REF) + return true; + + switch (TREE_CODE (op)) + { + case PARM_DECL: + case RESULT_DECL: + case LABEL_DECL: + case FUNCTION_DECL: + case VAR_DECL: + case CONST_DECL: + return true; + + default: + return false; + } +} + +/* Return true if T is a gimple invariant address. 
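For example, given a file-scope int g and a local int l of the current function:

    &g   (file-scope decl)  -> invariant here, and also IP-invariant
    &l   (stack local)      -> invariant within the function, but not IP-invariant

since a stack slot's address differs between invocations; an illustrative reading of decl_address_invariant_p versus the IPA-level variant that follows.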
*/ + +bool +is_gimple_invariant_address (const_tree t) +{ + const_tree op; + + if (TREE_CODE (t) != ADDR_EXPR) + return false; + + op = strip_invariant_refs (TREE_OPERAND (t, 0)); + if (!op) + return false; + + if (TREE_CODE (op) == MEM_REF) + { + const_tree op0 = TREE_OPERAND (op, 0); + return (TREE_CODE (op0) == ADDR_EXPR + && (CONSTANT_CLASS_P (TREE_OPERAND (op0, 0)) + || decl_address_invariant_p (TREE_OPERAND (op0, 0)))); + } + + return CONSTANT_CLASS_P (op) || decl_address_invariant_p (op); +} + +/* Return true if T is a gimple invariant address at IPA level + (so addresses of variables on stack are not allowed). */ + +bool +is_gimple_ip_invariant_address (const_tree t) +{ + const_tree op; + + if (TREE_CODE (t) != ADDR_EXPR) + return false; + + op = strip_invariant_refs (TREE_OPERAND (t, 0)); + if (!op) + return false; + + if (TREE_CODE (op) == MEM_REF) + { + const_tree op0 = TREE_OPERAND (op, 0); + return (TREE_CODE (op0) == ADDR_EXPR + && (CONSTANT_CLASS_P (TREE_OPERAND (op0, 0)) + || decl_address_ip_invariant_p (TREE_OPERAND (op0, 0)))); + } + + return CONSTANT_CLASS_P (op) || decl_address_ip_invariant_p (op); +} + +/* Return true if T is a GIMPLE minimal invariant. It's a restricted + form of function invariant. */ + +bool +is_gimple_min_invariant (const_tree t) +{ + if (TREE_CODE (t) == ADDR_EXPR) + return is_gimple_invariant_address (t); + + return is_gimple_constant (t); +} + +/* Return true if T is a GIMPLE interprocedural invariant. It's a restricted + form of gimple minimal invariant. */ + +bool +is_gimple_ip_invariant (const_tree t) +{ + if (TREE_CODE (t) == ADDR_EXPR) + return is_gimple_ip_invariant_address (t); + + return is_gimple_constant (t); +} + +/* Return true if T is a non-aggregate register variable. */ + +bool +is_gimple_reg (tree t) +{ + if (virtual_operand_p (t)) + return false; + + if (TREE_CODE (t) == SSA_NAME) + return true; + + if (!is_gimple_variable (t)) + return false; + + if (!is_gimple_reg_type (TREE_TYPE (t))) + return false; + + /* A volatile decl is not acceptable because we can't reuse it as + needed. We need to copy it into a temp first. */ + if (TREE_THIS_VOLATILE (t)) + return false; + + /* We define "registers" as things that can be renamed as needed, + which with our infrastructure does not apply to memory. */ + if (needs_to_live_in_memory (t)) + return false; + + /* Hard register variables are an interesting case. For those that + are call-clobbered, we don't know where all the calls are, since + we don't (want to) take into account which operations will turn + into libcalls at the rtl level. For those that are call-saved, + we don't currently model the fact that calls may in fact change + global hard registers, nor do we examine ASM_CLOBBERS at the tree + level, and so miss variable changes that might imply. All around, + it seems safest to not do too much optimization with these at the + tree level at all. We'll have to rely on the rtl optimizers to + clean this up, as there we've got all the appropriate bits exposed. */ + if (TREE_CODE (t) == VAR_DECL && DECL_HARD_REGISTER (t)) + return false; + + /* Complex and vector values must have been put into SSA-like form. + That is, no assignments to the individual components. */ + if (TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE + || TREE_CODE (TREE_TYPE (t)) == VECTOR_TYPE) + return DECL_GIMPLE_REG_P (t); + + return true; +} + + +/* Return true if T is a GIMPLE rvalue, i.e. an identifier or a constant. */ + +bool +is_gimple_val (tree t) +{ + /* Make loads from volatiles and memory vars explicit. 
*/ + if (is_gimple_variable (t) + && is_gimple_reg_type (TREE_TYPE (t)) + && !is_gimple_reg (t)) + return false; + + return (is_gimple_variable (t) || is_gimple_min_invariant (t)); +} + +/* Similarly, but accept hard registers as inputs to asm statements. */ + +bool +is_gimple_asm_val (tree t) +{ + if (TREE_CODE (t) == VAR_DECL && DECL_HARD_REGISTER (t)) + return true; + + return is_gimple_val (t); +} + +/* Return true if T is a GIMPLE minimal lvalue. */ + +bool +is_gimple_min_lval (tree t) +{ + if (!(t = CONST_CAST_TREE (strip_invariant_refs (t)))) + return false; + return (is_gimple_id (t) || TREE_CODE (t) == MEM_REF); +} + +/* Return true if T is a valid function operand of a CALL_EXPR. */ + +bool +is_gimple_call_addr (tree t) +{ + return (TREE_CODE (t) == OBJ_TYPE_REF || is_gimple_val (t)); +} + +/* Return true if T is a valid address operand of a MEM_REF. */ + +bool +is_gimple_mem_ref_addr (tree t) +{ + return (is_gimple_reg (t) + || TREE_CODE (t) == INTEGER_CST + || (TREE_CODE (t) == ADDR_EXPR + && (CONSTANT_CLASS_P (TREE_OPERAND (t, 0)) + || decl_address_invariant_p (TREE_OPERAND (t, 0))))); +} diff --git a/gcc/gimple-expr.h b/gcc/gimple-expr.h new file mode 100644 index 00000000000..aad558cebb7 --- /dev/null +++ b/gcc/gimple-expr.h @@ -0,0 +1,171 @@ +/* Header file for gimple decl, type and expressions. + Copyright (C) 2013 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. */ + +#ifndef GCC_GIMPLE_EXPR_H +#define GCC_GIMPLE_EXPR_H + +extern bool useless_type_conversion_p (tree, tree); + +extern void gimple_set_body (tree, gimple_seq); +extern gimple_seq gimple_body (tree); +extern bool gimple_has_body_p (tree); +extern const char *gimple_decl_printable_name (tree, int); +extern tree copy_var_decl (tree, tree, tree); +extern bool gimple_can_coalesce_p (tree, tree); + +extern void extract_ops_from_tree_1 (tree, enum tree_code *, tree *, tree *, + tree *); +extern void gimple_cond_get_ops_from_tree (tree, enum tree_code *, tree *, + tree *); +extern bool is_gimple_lvalue (tree); +extern bool is_gimple_condexpr (tree); +extern bool is_gimple_address (const_tree); +extern bool is_gimple_invariant_address (const_tree); +extern bool is_gimple_ip_invariant_address (const_tree); +extern bool is_gimple_min_invariant (const_tree); +extern bool is_gimple_ip_invariant (const_tree); +extern bool is_gimple_reg (tree); +extern bool is_gimple_val (tree); +extern bool is_gimple_asm_val (tree); +extern bool is_gimple_min_lval (tree); +extern bool is_gimple_call_addr (tree); +extern bool is_gimple_mem_ref_addr (tree); + +/* Return true if a conversion from either type of TYPE1 and TYPE2 + to the other is not required. Otherwise return false. 
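So, for instance:

    types_compatible_p (int, const int)     -> true     qualifier variants share a main variant
    types_compatible_p (int, unsigned int)  -> false    a conversion would change signedness

a typical use is gating SSA-name coalescing, as in gimple_can_coalesce_p above; illustrative only.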
*/ + +static inline bool +types_compatible_p (tree type1, tree type2) +{ + return (type1 == type2 + || (useless_type_conversion_p (type1, type2) + && useless_type_conversion_p (type2, type1))); +} + +/* Return true if TYPE is a suitable type for a scalar register variable. */ + +static inline bool +is_gimple_reg_type (tree type) +{ + return !AGGREGATE_TYPE_P (type); +} + +/* Return true if T is a variable. */ + +static inline bool +is_gimple_variable (tree t) +{ + return (TREE_CODE (t) == VAR_DECL + || TREE_CODE (t) == PARM_DECL + || TREE_CODE (t) == RESULT_DECL + || TREE_CODE (t) == SSA_NAME); +} + +/* Return true if T is a GIMPLE identifier (something with an address). */ + +static inline bool +is_gimple_id (tree t) +{ + return (is_gimple_variable (t) + || TREE_CODE (t) == FUNCTION_DECL + || TREE_CODE (t) == LABEL_DECL + || TREE_CODE (t) == CONST_DECL + /* Allow string constants, since they are addressable. */ + || TREE_CODE (t) == STRING_CST); +} + +/* Return true if OP, an SSA name or a DECL is a virtual operand. */ + +static inline bool +virtual_operand_p (tree op) +{ + if (TREE_CODE (op) == SSA_NAME) + { + op = SSA_NAME_VAR (op); + if (!op) + return false; + } + + if (TREE_CODE (op) == VAR_DECL) + return VAR_DECL_IS_VIRTUAL_OPERAND (op); + + return false; +} + +/* Return true if T is something whose address can be taken. */ + +static inline bool +is_gimple_addressable (tree t) +{ + return (is_gimple_id (t) || handled_component_p (t) + || TREE_CODE (t) == MEM_REF); +} + +/* Return true if T is a valid gimple constant. */ + +static inline bool +is_gimple_constant (const_tree t) +{ + switch (TREE_CODE (t)) + { + case INTEGER_CST: + case REAL_CST: + case FIXED_CST: + case STRING_CST: + case COMPLEX_CST: + case VECTOR_CST: + return true; + + default: + return false; + } +} + +/* A wrapper around extract_ops_from_tree_1, for callers which expect + to see only a maximum of two operands. */ + +static inline void +extract_ops_from_tree (tree expr, enum tree_code *code, tree *op0, + tree *op1) +{ + tree op2; + extract_ops_from_tree_1 (expr, code, op0, op1, &op2); + gcc_assert (op2 == NULL_TREE); +} + +/* Given a valid GIMPLE_CALL function address return the FUNCTION_DECL + associated with the callee if known. Otherwise return NULL_TREE. */ + +static inline tree +gimple_call_addr_fndecl (const_tree fn) +{ + if (fn && TREE_CODE (fn) == ADDR_EXPR) + { + tree fndecl = TREE_OPERAND (fn, 0); + if (TREE_CODE (fndecl) == MEM_REF + && TREE_CODE (TREE_OPERAND (fndecl, 0)) == ADDR_EXPR + && integer_zerop (TREE_OPERAND (fndecl, 1))) + fndecl = TREE_OPERAND (TREE_OPERAND (fndecl, 0), 0); + if (TREE_CODE (fndecl) == FUNCTION_DECL) + return fndecl; + } + return NULL_TREE; +} + +#endif /* GCC_GIMPLE_EXPR_H */ diff --git a/gcc/gimple-ssa-isolate-paths.c b/gcc/gimple-ssa-isolate-paths.c new file mode 100644 index 00000000000..4868867308f --- /dev/null +++ b/gcc/gimple-ssa-isolate-paths.c @@ -0,0 +1,325 @@ +/* Detect paths through the CFG which can never be executed in a conforming + program and isolate them. + + Copyright (C) 2013 + Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. */ + +#include "config.h" +#include "system.h" +#include "coretypes.h" +#include "tree.h" +#include "flags.h" +#include "basic-block.h" +#include "gimple.h" +#include "tree-ssa.h" +#include "tree-ssanames.h" +#include "gimple-ssa.h" +#include "tree-ssa-operands.h" +#include "tree-phinodes.h" +#include "ssa-iterators.h" +#include "cfgloop.h" +#include "tree-pass.h" + + +static bool cfg_altered; + +/* Insert a trap before SI and remove SI and all statements after SI. */ + +static void +insert_trap_and_remove_trailing_statements (gimple_stmt_iterator *si_p) +{ + gimple_seq seq = NULL; + gimple stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0); + gimple_seq_add_stmt (&seq, stmt); + gsi_insert_before (si_p, seq, GSI_SAME_STMT); + + /* Now delete all remaining statements in this block. */ + for (; !gsi_end_p (*si_p);) + { + stmt = gsi_stmt (*si_p); + unlink_stmt_vdef (stmt); + gsi_remove (si_p, true); + release_defs (stmt); + } +} + +/* BB when reached via incoming edge E will exhibit undefined behaviour + at STMT. Isolate and optimize the path which exhibits undefined + behaviour. + + Isolation is simple. Duplicate BB and redirect E to BB'. + + Optimization is simple as well. Replace STMT in BB' with an + unconditional trap and remove all outgoing edges from BB'. + + DUPLICATE is a pre-existing duplicate, use it as BB' if it exists. + + Return BB'. */ + +basic_block +isolate_path (basic_block bb, basic_block duplicate, edge e, gimple stmt) +{ + gimple_stmt_iterator si, si2; + edge_iterator ei; + edge e2; + + + /* First duplicate BB if we have not done so already and remove all + the duplicate's outgoing edges as duplicate is going to unconditionally + trap. Removing the outgoing edges is both an optimization and ensures + we don't need to do any PHI node updates. */ + if (!duplicate) + { + duplicate = duplicate_block (bb, NULL, NULL); + for (ei = ei_start (duplicate->succs); (e2 = ei_safe_edge (ei)); ) + remove_edge (e2); + } + + /* Complete the isolation step by redirecting E to reach DUPLICATE. */ + e2 = redirect_edge_and_branch (e, duplicate); + if (e2) + flush_pending_stmts (e2); + + + /* There may be more than one statement in DUPLICATE which exhibits + undefined behaviour. Ultimately we want the first such statement in + DUPLICATE so that we're able to delete as much code as possible. + + So each time we discover undefined behaviour in DUPLICATE, search for + the statement which triggers undefined behaviour. If found, then + transform the statement into a trap and delete everything after the + statement. If not found, then this particular instance was subsumed by + an earlier instance of undefined behaviour and there's nothing to do. + + This is made more complicated by the fact that we have STMT, which is in + BB rather than in DUPLICATE. So we set up two iterators, one for each + block and walk forward looking for STMT in BB, advancing each iterator at + each step. + + When we find STMT the second iterator should point to STMT's equivalent in + duplicate. If DUPLICATE ends before STMT is found in BB, then there's + nothing to do. + + Ignore labels and debug statements.
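(The net effect on the duplicated block is schematically:

    before:   *q_4 = 1;  foo ();  return;
    after:    __builtin_trap ();

everything from the offending statement onwards is replaced by the trap built in insert_trap_and_remove_trailing_statements above; schematic GIMPLE, illustrative only.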
*/
+  si = gsi_start_nondebug_after_labels_bb (bb);
+  si2 = gsi_start_nondebug_after_labels_bb (duplicate);
+  while (!gsi_end_p (si) && !gsi_end_p (si2) && gsi_stmt (si) != stmt)
+    {
+      gsi_next_nondebug (&si);
+      gsi_next_nondebug (&si2);
+    }
+
+  /* This would be an indicator that we never found STMT in BB, which should
+     never happen.  */
+  gcc_assert (!gsi_end_p (si));
+
+  /* If we did not run to the end of DUPLICATE, then SI points to STMT and
+     SI2 points to the duplicate of STMT in DUPLICATE.  Insert a trap
+     before SI2 and remove SI2 and all trailing statements.  */
+  if (!gsi_end_p (si2))
+    insert_trap_and_remove_trailing_statements (&si2);
+
+  return duplicate;
+}
+
+/* Search the function for statements which, if executed, would cause
+   the program to fault, such as a dereference of a NULL pointer.
+
+   Such a program can't be valid if such a statement were to execute
+   according to ISO standards.
+
+   We detect explicit NULL pointer dereferences as well as those implied
+   by a PHI argument having a NULL value which unconditionally flows into
+   a dereference in the same block as the PHI.
+
+   In the former case we replace the offending statement with an
+   unconditional trap and eliminate the outgoing edges from the statement's
+   basic block.  This may expose secondary optimization opportunities.
+
+   In the latter case, we isolate the path(s) with the NULL PHI
+   feeding the dereference.  We can then replace the offending statement
+   and eliminate the outgoing edges in the duplicate.  Again, this may
+   expose secondary optimization opportunities.
+
+   A warning for both cases may be advisable as well.
+
+   Other statically detectable violations of the ISO standard could be
+   handled in a similar way, such as out-of-bounds array indexing.  */
+
+static unsigned int
+gimple_ssa_isolate_erroneous_paths (void)
+{
+  basic_block bb;
+
+  initialize_original_copy_tables ();
+
+  /* Search all the blocks for edges which, if traversed, will
+     result in undefined behaviour.  */
+  cfg_altered = false;
+  FOR_EACH_BB (bb)
+    {
+      gimple_stmt_iterator si;
+
+      /* First look for a PHI which sets a pointer to NULL and which
+         is then dereferenced within BB.  This is somewhat overly
+         conservative, but probably catches most of the interesting
+         cases.  */
+      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
+        {
+          gimple phi = gsi_stmt (si);
+          tree lhs = gimple_phi_result (phi);
+
+          /* If the result is not a pointer, then there is no need to
+             examine the arguments.  */
+          if (!POINTER_TYPE_P (TREE_TYPE (lhs)))
+            continue;
+
+          /* PHI produces a pointer result.  See if any of the PHI's
+             arguments are NULL.
+
+             When we remove an edge, we want to reprocess the current
+             index, hence the ugly way we update I for each iteration.  */
+          basic_block duplicate = NULL;
+          for (unsigned i = 0, next_i = 0;
+               i < gimple_phi_num_args (phi);
+               i = next_i)
+            {
+              tree op = gimple_phi_arg_def (phi, i);
+
+              next_i = i + 1;
+
+              if (!integer_zerop (op))
+                continue;
+
+              edge e = gimple_phi_arg_edge (phi, i);
+              imm_use_iterator iter;
+              gimple use_stmt;
+
+              /* We've got a NULL PHI argument.  Now see if the
+                 PHI's result is dereferenced within BB.  */
+              FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
+                {
+                  /* We only care about uses in BB.  Catching cases in
+                     other blocks would require more complex path
+                     isolation code.
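+                     (For instance, a use of the same pointer in some
+                     successor block reached through another conditional
+                     would mean duplicating a chain of blocks; such
+                     hypothetical cases are deliberately left alone.)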
*/
+                  if (gimple_bb (use_stmt) != bb)
+                    continue;
+
+                  if (infer_nonnull_range (use_stmt, lhs))
+                    {
+                      duplicate = isolate_path (bb, duplicate,
+                                                e, use_stmt);
+
+                      /* When we remove an incoming edge, we need to
+                         reprocess the Ith element.  */
+                      next_i = i;
+                      cfg_altered = true;
+                    }
+                }
+            }
+        }
+
+      /* Now look at the statements in the block and see if any of
+         them explicitly dereference a NULL pointer.  This happens
+         because of jump threading and constant propagation.  */
+      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
+        {
+          gimple stmt = gsi_stmt (si);
+
+          /* By passing null_pointer_node, we can use infer_nonnull_range
+             to detect explicit NULL pointer dereferences and other uses
+             where a non-NULL value is required.  */
+          if (infer_nonnull_range (stmt, null_pointer_node))
+            {
+              insert_trap_and_remove_trailing_statements (&si);
+
+              /* And finally, remove all outgoing edges from BB.  */
+              edge e;
+              for (edge_iterator ei = ei_start (bb->succs);
+                   (e = ei_safe_edge (ei)); )
+                remove_edge (e);
+
+              /* Ignore any more operands on this statement and
+                 continue the statement iterator (which should
+                 terminate its loop immediately).  */
+              cfg_altered = true;
+              break;
+            }
+        }
+    }
+  free_original_copy_tables ();
+
+  /* We scramble the CFG and loop structures a bit, so clean up
+     appropriately.  We really should incrementally update the
+     loop structures; in theory it shouldn't be that hard.  */
+  if (cfg_altered)
+    {
+      free_dominance_info (CDI_DOMINATORS);
+      free_dominance_info (CDI_POST_DOMINATORS);
+      loops_state_set (LOOPS_NEED_FIXUP);
+      return TODO_cleanup_cfg | TODO_update_ssa;
+    }
+  return 0;
+}
+
+static bool
+gate_isolate_erroneous_paths (void)
+{
+  /* If we do not have a suitable builtin function for the trap statement,
+     then do not perform the optimization.  */
+  return (flag_isolate_erroneous_paths != 0
+          && builtin_decl_explicit (BUILT_IN_TRAP) != NULL);
+}
+
+namespace {
+const pass_data pass_data_isolate_erroneous_paths =
+{
+  GIMPLE_PASS, /* type */
+  "isolate-paths", /* name */
+  OPTGROUP_NONE, /* optinfo_flags */
+  true, /* has_gate */
+  true, /* has_execute */
+  TV_ISOLATE_ERRONEOUS_PATHS, /* tv_id */
+  ( PROP_cfg | PROP_ssa ), /* properties_required */
+  0, /* properties_provided */
+  0, /* properties_destroyed */
+  0, /* todo_flags_start */
+  TODO_verify_ssa, /* todo_flags_finish */
+};
+
+class pass_isolate_erroneous_paths : public gimple_opt_pass
+{
+public:
+  pass_isolate_erroneous_paths (gcc::context *ctxt)
+    : gimple_opt_pass (pass_data_isolate_erroneous_paths, ctxt)
+  {}
+
+  /* opt_pass methods: */
+  opt_pass * clone () { return new pass_isolate_erroneous_paths (m_ctxt); }
+  bool gate () { return gate_isolate_erroneous_paths (); }
+  unsigned int execute () { return gimple_ssa_isolate_erroneous_paths (); }
+
+}; // class pass_isolate_erroneous_paths
+}
+
+gimple_opt_pass *
+make_pass_isolate_erroneous_paths (gcc::context *ctxt)
+{
+  return new pass_isolate_erroneous_paths (ctxt);
+}
diff --git a/gcc/gimple.c b/gcc/gimple.c
index da7be0c4f4e..bd69fa647ec 100644
--- a/gcc/gimple.c
+++ b/gcc/gimple.c
@@ -386,47 +386,6 @@ gimple_call_get_nobnd_arg_index (const_gimple gs, unsigned index)
 }
 
 
-/* Extract the operands and code for expression EXPR into *SUBCODE_P,
-   *OP1_P, *OP2_P and *OP3_P respectively.
*/ - -void -extract_ops_from_tree_1 (tree expr, enum tree_code *subcode_p, tree *op1_p, - tree *op2_p, tree *op3_p) -{ - enum gimple_rhs_class grhs_class; - - *subcode_p = TREE_CODE (expr); - grhs_class = get_gimple_rhs_class (*subcode_p); - - if (grhs_class == GIMPLE_TERNARY_RHS) - { - *op1_p = TREE_OPERAND (expr, 0); - *op2_p = TREE_OPERAND (expr, 1); - *op3_p = TREE_OPERAND (expr, 2); - } - else if (grhs_class == GIMPLE_BINARY_RHS) - { - *op1_p = TREE_OPERAND (expr, 0); - *op2_p = TREE_OPERAND (expr, 1); - *op3_p = NULL_TREE; - } - else if (grhs_class == GIMPLE_UNARY_RHS) - { - *op1_p = TREE_OPERAND (expr, 0); - *op2_p = NULL_TREE; - *op3_p = NULL_TREE; - } - else if (grhs_class == GIMPLE_SINGLE_RHS) - { - *op1_p = expr; - *op2_p = NULL_TREE; - *op3_p = NULL_TREE; - } - else - gcc_unreachable (); -} - - /* Build a GIMPLE_ASSIGN statement. LHS of the assignment. @@ -526,37 +485,6 @@ gimple_build_cond (enum tree_code pred_code, tree lhs, tree rhs, return p; } - -/* Extract operands for a GIMPLE_COND statement out of COND_EXPR tree COND. */ - -void -gimple_cond_get_ops_from_tree (tree cond, enum tree_code *code_p, - tree *lhs_p, tree *rhs_p) -{ - gcc_assert (TREE_CODE_CLASS (TREE_CODE (cond)) == tcc_comparison - || TREE_CODE (cond) == TRUTH_NOT_EXPR - || is_gimple_min_invariant (cond) - || SSA_VAR_P (cond)); - - extract_ops_from_tree (cond, code_p, lhs_p, rhs_p); - - /* Canonicalize conditionals of the form 'if (!VAL)'. */ - if (*code_p == TRUTH_NOT_EXPR) - { - *code_p = EQ_EXPR; - gcc_assert (*lhs_p && *rhs_p == NULL_TREE); - *rhs_p = build_zero_cst (TREE_TYPE (*lhs_p)); - } - /* Canonicalize conditionals of the form 'if (VAL)' */ - else if (TREE_CODE_CLASS (*code_p) != tcc_comparison) - { - *code_p = NE_EXPR; - gcc_assert (*lhs_p && *rhs_p == NULL_TREE); - *rhs_p = build_zero_cst (TREE_TYPE (*lhs_p)); - } -} - - /* Build a GIMPLE_COND statement from the conditional expression tree COND. T_LABEL and F_LABEL are as in gimple_build_cond. */ @@ -1906,45 +1834,6 @@ walk_gimple_stmt (gimple_stmt_iterator *gsi, walk_stmt_fn callback_stmt, } -/* Set sequence SEQ to be the GIMPLE body for function FN. */ - -void -gimple_set_body (tree fndecl, gimple_seq seq) -{ - struct function *fn = DECL_STRUCT_FUNCTION (fndecl); - if (fn == NULL) - { - /* If FNDECL still does not have a function structure associated - with it, then it does not make sense for it to receive a - GIMPLE body. */ - gcc_assert (seq == NULL); - } - else - fn->gimple_body = seq; -} - - -/* Return the body of GIMPLE statements for function FN. After the - CFG pass, the function body doesn't exist anymore because it has - been split up into basic blocks. In this case, it returns - NULL. */ - -gimple_seq -gimple_body (tree fndecl) -{ - struct function *fn = DECL_STRUCT_FUNCTION (fndecl); - return fn ? fn->gimple_body : NULL; -} - -/* Return true when FNDECL has Gimple body either in unlowered - or CFG form. */ -bool -gimple_has_body_p (tree fndecl) -{ - struct function *fn = DECL_STRUCT_FUNCTION (fndecl); - return (gimple_body (fndecl) || (fn && fn->cfg)); -} - /* Return true if calls C1 and C2 are known to go to the same function. */ bool @@ -2602,325 +2491,6 @@ const unsigned char gimple_rhs_class_table[] = { #undef DEFTREECODE #undef END_OF_BASE_TREE_CODES -/* For the definitive definition of GIMPLE, see doc/tree-ssa.texi. */ - -/* Validation of GIMPLE expressions. */ - -/* Return true if T is a valid LHS for a GIMPLE assignment expression. 
*/ - -bool -is_gimple_lvalue (tree t) -{ - return (is_gimple_addressable (t) - || TREE_CODE (t) == WITH_SIZE_EXPR - /* These are complex lvalues, but don't have addresses, so they - go here. */ - || TREE_CODE (t) == BIT_FIELD_REF); -} - -/* Return true if T is a GIMPLE condition. */ - -bool -is_gimple_condexpr (tree t) -{ - return (is_gimple_val (t) || (COMPARISON_CLASS_P (t) - && !tree_could_throw_p (t) - && is_gimple_val (TREE_OPERAND (t, 0)) - && is_gimple_val (TREE_OPERAND (t, 1)))); -} - -/* Return true if T is something whose address can be taken. */ - -bool -is_gimple_addressable (tree t) -{ - return (is_gimple_id (t) || handled_component_p (t) - || TREE_CODE (t) == MEM_REF); -} - -/* Return true if T is a valid gimple constant. */ - -bool -is_gimple_constant (const_tree t) -{ - switch (TREE_CODE (t)) - { - case INTEGER_CST: - case REAL_CST: - case FIXED_CST: - case STRING_CST: - case COMPLEX_CST: - case VECTOR_CST: - return true; - - default: - return false; - } -} - -/* Return true if T is a gimple address. */ - -bool -is_gimple_address (const_tree t) -{ - tree op; - - if (TREE_CODE (t) != ADDR_EXPR) - return false; - - op = TREE_OPERAND (t, 0); - while (handled_component_p (op)) - { - if ((TREE_CODE (op) == ARRAY_REF - || TREE_CODE (op) == ARRAY_RANGE_REF) - && !is_gimple_val (TREE_OPERAND (op, 1))) - return false; - - op = TREE_OPERAND (op, 0); - } - - if (CONSTANT_CLASS_P (op) || TREE_CODE (op) == MEM_REF) - return true; - - switch (TREE_CODE (op)) - { - case PARM_DECL: - case RESULT_DECL: - case LABEL_DECL: - case FUNCTION_DECL: - case VAR_DECL: - case CONST_DECL: - return true; - - default: - return false; - } -} - -/* Return true if T is a gimple invariant address. */ - -bool -is_gimple_invariant_address (const_tree t) -{ - const_tree op; - - if (TREE_CODE (t) != ADDR_EXPR) - return false; - - op = strip_invariant_refs (TREE_OPERAND (t, 0)); - if (!op) - return false; - - if (TREE_CODE (op) == MEM_REF) - { - const_tree op0 = TREE_OPERAND (op, 0); - return (TREE_CODE (op0) == ADDR_EXPR - && (CONSTANT_CLASS_P (TREE_OPERAND (op0, 0)) - || decl_address_invariant_p (TREE_OPERAND (op0, 0)))); - } - - return CONSTANT_CLASS_P (op) || decl_address_invariant_p (op); -} - -/* Return true if T is a gimple invariant address at IPA level - (so addresses of variables on stack are not allowed). */ - -bool -is_gimple_ip_invariant_address (const_tree t) -{ - const_tree op; - - if (TREE_CODE (t) != ADDR_EXPR) - return false; - - op = strip_invariant_refs (TREE_OPERAND (t, 0)); - if (!op) - return false; - - if (TREE_CODE (op) == MEM_REF) - { - const_tree op0 = TREE_OPERAND (op, 0); - return (TREE_CODE (op0) == ADDR_EXPR - && (CONSTANT_CLASS_P (TREE_OPERAND (op0, 0)) - || decl_address_ip_invariant_p (TREE_OPERAND (op0, 0)))); - } - - return CONSTANT_CLASS_P (op) || decl_address_ip_invariant_p (op); -} - -/* Return true if T is a GIMPLE minimal invariant. It's a restricted - form of function invariant. */ - -bool -is_gimple_min_invariant (const_tree t) -{ - if (TREE_CODE (t) == ADDR_EXPR) - return is_gimple_invariant_address (t); - - return is_gimple_constant (t); -} - -/* Return true if T is a GIMPLE interprocedural invariant. It's a restricted - form of gimple minimal invariant. */ - -bool -is_gimple_ip_invariant (const_tree t) -{ - if (TREE_CODE (t) == ADDR_EXPR) - return is_gimple_ip_invariant_address (t); - - return is_gimple_constant (t); -} - -/* Return true if T is a variable. 
*/ - -bool -is_gimple_variable (tree t) -{ - return (TREE_CODE (t) == VAR_DECL - || TREE_CODE (t) == PARM_DECL - || TREE_CODE (t) == RESULT_DECL - || TREE_CODE (t) == SSA_NAME); -} - -/* Return true if T is a GIMPLE identifier (something with an address). */ - -bool -is_gimple_id (tree t) -{ - return (is_gimple_variable (t) - || TREE_CODE (t) == FUNCTION_DECL - || TREE_CODE (t) == LABEL_DECL - || TREE_CODE (t) == CONST_DECL - /* Allow string constants, since they are addressable. */ - || TREE_CODE (t) == STRING_CST); -} - -/* Return true if OP, an SSA name or a DECL is a virtual operand. */ - -bool -virtual_operand_p (tree op) -{ - if (TREE_CODE (op) == SSA_NAME) - { - op = SSA_NAME_VAR (op); - if (!op) - return false; - } - - if (TREE_CODE (op) == VAR_DECL) - return VAR_DECL_IS_VIRTUAL_OPERAND (op); - - return false; -} - - -/* Return true if T is a non-aggregate register variable. */ - -bool -is_gimple_reg (tree t) -{ - if (virtual_operand_p (t)) - return false; - - if (TREE_CODE (t) == SSA_NAME) - return true; - - if (!is_gimple_variable (t)) - return false; - - if (!is_gimple_reg_type (TREE_TYPE (t))) - return false; - - /* A volatile decl is not acceptable because we can't reuse it as - needed. We need to copy it into a temp first. */ - if (TREE_THIS_VOLATILE (t)) - return false; - - /* We define "registers" as things that can be renamed as needed, - which with our infrastructure does not apply to memory. */ - if (needs_to_live_in_memory (t)) - return false; - - /* Hard register variables are an interesting case. For those that - are call-clobbered, we don't know where all the calls are, since - we don't (want to) take into account which operations will turn - into libcalls at the rtl level. For those that are call-saved, - we don't currently model the fact that calls may in fact change - global hard registers, nor do we examine ASM_CLOBBERS at the tree - level, and so miss variable changes that might imply. All around, - it seems safest to not do too much optimization with these at the - tree level at all. We'll have to rely on the rtl optimizers to - clean this up, as there we've got all the appropriate bits exposed. */ - if (TREE_CODE (t) == VAR_DECL && DECL_HARD_REGISTER (t)) - return false; - - /* Complex and vector values must have been put into SSA-like form. - That is, no assignments to the individual components. */ - if (TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE - || TREE_CODE (TREE_TYPE (t)) == VECTOR_TYPE) - return DECL_GIMPLE_REG_P (t); - - return true; -} - - -/* Return true if T is a GIMPLE rvalue, i.e. an identifier or a constant. */ - -bool -is_gimple_val (tree t) -{ - /* Make loads from volatiles and memory vars explicit. */ - if (is_gimple_variable (t) - && is_gimple_reg_type (TREE_TYPE (t)) - && !is_gimple_reg (t)) - return false; - - return (is_gimple_variable (t) || is_gimple_min_invariant (t)); -} - -/* Similarly, but accept hard registers as inputs to asm statements. */ - -bool -is_gimple_asm_val (tree t) -{ - if (TREE_CODE (t) == VAR_DECL && DECL_HARD_REGISTER (t)) - return true; - - return is_gimple_val (t); -} - -/* Return true if T is a GIMPLE minimal lvalue. */ - -bool -is_gimple_min_lval (tree t) -{ - if (!(t = CONST_CAST_TREE (strip_invariant_refs (t)))) - return false; - return (is_gimple_id (t) || TREE_CODE (t) == MEM_REF); -} - -/* Return true if T is a valid function operand of a CALL_EXPR. 
*/ - -bool -is_gimple_call_addr (tree t) -{ - return (TREE_CODE (t) == OBJ_TYPE_REF || is_gimple_val (t)); -} - -/* Return true if T is a valid address operand of a MEM_REF. */ - -bool -is_gimple_mem_ref_addr (tree t) -{ - return (is_gimple_reg (t) - || TREE_CODE (t) == INTEGER_CST - || (TREE_CODE (t) == ADDR_EXPR - && (CONSTANT_CLASS_P (TREE_OPERAND (t, 0)) - || decl_address_invariant_p (TREE_OPERAND (t, 0))))); -} - - /* Given a memory reference expression T, return its base address. The base address of a memory reference expression is the main object being referenced. For instance, the base address for @@ -3642,37 +3212,6 @@ gimple_ior_addresses_taken (bitmap addresses_taken, gimple stmt) } -/* Return a printable name for symbol DECL. */ - -const char * -gimple_decl_printable_name (tree decl, int verbosity) -{ - if (!DECL_NAME (decl)) - return NULL; - - if (DECL_ASSEMBLER_NAME_SET_P (decl)) - { - const char *str, *mangled_str; - int dmgl_opts = DMGL_NO_OPTS; - - if (verbosity >= 2) - { - dmgl_opts = DMGL_VERBOSE - | DMGL_ANSI - | DMGL_GNU_V3 - | DMGL_RET_POSTFIX; - if (TREE_CODE (decl) == FUNCTION_DECL) - dmgl_opts |= DMGL_PARAMS; - } - - mangled_str = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)); - str = cplus_demangle_v3 (mangled_str, dmgl_opts); - return (str) ? str : mangled_str; - } - - return IDENTIFIER_POINTER (DECL_NAME (decl)); -} - /* Return TRUE iff stmt is a call to a built-in function. */ bool @@ -3763,261 +3302,6 @@ gimple_asm_clobbers_memory_p (const_gimple stmt) return false; } - -/* Return true if the conversion from INNER_TYPE to OUTER_TYPE is a - useless type conversion, otherwise return false. - - This function implicitly defines the middle-end type system. With - the notion of 'a < b' meaning that useless_type_conversion_p (a, b) - holds and 'a > b' meaning that useless_type_conversion_p (b, a) holds, - the following invariants shall be fulfilled: - - 1) useless_type_conversion_p is transitive. - If a < b and b < c then a < c. - - 2) useless_type_conversion_p is not symmetric. - From a < b does not follow a > b. - - 3) Types define the available set of operations applicable to values. - A type conversion is useless if the operations for the target type - is a subset of the operations for the source type. For example - casts to void* are useless, casts from void* are not (void* can't - be dereferenced or offsetted, but copied, hence its set of operations - is a strict subset of that of all other data pointer types). Casts - to const T* are useless (can't be written to), casts from const T* - to T* are not. */ - -bool -useless_type_conversion_p (tree outer_type, tree inner_type) -{ - /* Do the following before stripping toplevel qualifiers. */ - if (POINTER_TYPE_P (inner_type) - && POINTER_TYPE_P (outer_type)) - { - /* Do not lose casts between pointers to different address spaces. */ - if (TYPE_ADDR_SPACE (TREE_TYPE (outer_type)) - != TYPE_ADDR_SPACE (TREE_TYPE (inner_type))) - return false; - } - - /* From now on qualifiers on value types do not matter. */ - inner_type = TYPE_MAIN_VARIANT (inner_type); - outer_type = TYPE_MAIN_VARIANT (outer_type); - - if (inner_type == outer_type) - return true; - - /* If we know the canonical types, compare them. */ - if (TYPE_CANONICAL (inner_type) - && TYPE_CANONICAL (inner_type) == TYPE_CANONICAL (outer_type)) - return true; - - /* Changes in machine mode are never useless conversions unless we - deal with aggregate types in which case we defer to later checks. 
*/ - if (TYPE_MODE (inner_type) != TYPE_MODE (outer_type) - && !AGGREGATE_TYPE_P (inner_type)) - return false; - - /* If both the inner and outer types are integral types, then the - conversion is not necessary if they have the same mode and - signedness and precision, and both or neither are boolean. */ - if (INTEGRAL_TYPE_P (inner_type) - && INTEGRAL_TYPE_P (outer_type)) - { - /* Preserve changes in signedness or precision. */ - if (TYPE_UNSIGNED (inner_type) != TYPE_UNSIGNED (outer_type) - || TYPE_PRECISION (inner_type) != TYPE_PRECISION (outer_type)) - return false; - - /* Preserve conversions to/from BOOLEAN_TYPE if types are not - of precision one. */ - if (((TREE_CODE (inner_type) == BOOLEAN_TYPE) - != (TREE_CODE (outer_type) == BOOLEAN_TYPE)) - && TYPE_PRECISION (outer_type) != 1) - return false; - - /* We don't need to preserve changes in the types minimum or - maximum value in general as these do not generate code - unless the types precisions are different. */ - return true; - } - - /* Scalar floating point types with the same mode are compatible. */ - else if (SCALAR_FLOAT_TYPE_P (inner_type) - && SCALAR_FLOAT_TYPE_P (outer_type)) - return true; - - /* Fixed point types with the same mode are compatible. */ - else if (FIXED_POINT_TYPE_P (inner_type) - && FIXED_POINT_TYPE_P (outer_type)) - return true; - - /* We need to take special care recursing to pointed-to types. */ - else if (POINTER_TYPE_P (inner_type) - && POINTER_TYPE_P (outer_type)) - { - /* Do not lose casts to function pointer types. */ - if ((TREE_CODE (TREE_TYPE (outer_type)) == FUNCTION_TYPE - || TREE_CODE (TREE_TYPE (outer_type)) == METHOD_TYPE) - && !(TREE_CODE (TREE_TYPE (inner_type)) == FUNCTION_TYPE - || TREE_CODE (TREE_TYPE (inner_type)) == METHOD_TYPE)) - return false; - - /* We do not care for const qualification of the pointed-to types - as const qualification has no semantic value to the middle-end. */ - - /* Otherwise pointers/references are equivalent. */ - return true; - } - - /* Recurse for complex types. */ - else if (TREE_CODE (inner_type) == COMPLEX_TYPE - && TREE_CODE (outer_type) == COMPLEX_TYPE) - return useless_type_conversion_p (TREE_TYPE (outer_type), - TREE_TYPE (inner_type)); - - /* Recurse for vector types with the same number of subparts. */ - else if (TREE_CODE (inner_type) == VECTOR_TYPE - && TREE_CODE (outer_type) == VECTOR_TYPE - && TYPE_PRECISION (inner_type) == TYPE_PRECISION (outer_type)) - return useless_type_conversion_p (TREE_TYPE (outer_type), - TREE_TYPE (inner_type)); - - else if (TREE_CODE (inner_type) == ARRAY_TYPE - && TREE_CODE (outer_type) == ARRAY_TYPE) - { - /* Preserve string attributes. */ - if (TYPE_STRING_FLAG (inner_type) != TYPE_STRING_FLAG (outer_type)) - return false; - - /* Conversions from array types with unknown extent to - array types with known extent are not useless. */ - if (!TYPE_DOMAIN (inner_type) - && TYPE_DOMAIN (outer_type)) - return false; - - /* Nor are conversions from array types with non-constant size to - array types with constant size or to different size. */ - if (TYPE_SIZE (outer_type) - && TREE_CODE (TYPE_SIZE (outer_type)) == INTEGER_CST - && (!TYPE_SIZE (inner_type) - || TREE_CODE (TYPE_SIZE (inner_type)) != INTEGER_CST - || !tree_int_cst_equal (TYPE_SIZE (outer_type), - TYPE_SIZE (inner_type)))) - return false; - - /* Check conversions between arrays with partially known extents. - If the array min/max values are constant they have to match. - Otherwise allow conversions to unknown and variable extents. 
- In particular this declares conversions that may change the - mode to BLKmode as useless. */ - if (TYPE_DOMAIN (inner_type) - && TYPE_DOMAIN (outer_type) - && TYPE_DOMAIN (inner_type) != TYPE_DOMAIN (outer_type)) - { - tree inner_min = TYPE_MIN_VALUE (TYPE_DOMAIN (inner_type)); - tree outer_min = TYPE_MIN_VALUE (TYPE_DOMAIN (outer_type)); - tree inner_max = TYPE_MAX_VALUE (TYPE_DOMAIN (inner_type)); - tree outer_max = TYPE_MAX_VALUE (TYPE_DOMAIN (outer_type)); - - /* After gimplification a variable min/max value carries no - additional information compared to a NULL value. All that - matters has been lowered to be part of the IL. */ - if (inner_min && TREE_CODE (inner_min) != INTEGER_CST) - inner_min = NULL_TREE; - if (outer_min && TREE_CODE (outer_min) != INTEGER_CST) - outer_min = NULL_TREE; - if (inner_max && TREE_CODE (inner_max) != INTEGER_CST) - inner_max = NULL_TREE; - if (outer_max && TREE_CODE (outer_max) != INTEGER_CST) - outer_max = NULL_TREE; - - /* Conversions NULL / variable <- cst are useless, but not - the other way around. */ - if (outer_min - && (!inner_min - || !tree_int_cst_equal (inner_min, outer_min))) - return false; - if (outer_max - && (!inner_max - || !tree_int_cst_equal (inner_max, outer_max))) - return false; - } - - /* Recurse on the element check. */ - return useless_type_conversion_p (TREE_TYPE (outer_type), - TREE_TYPE (inner_type)); - } - - else if ((TREE_CODE (inner_type) == FUNCTION_TYPE - || TREE_CODE (inner_type) == METHOD_TYPE) - && TREE_CODE (inner_type) == TREE_CODE (outer_type)) - { - tree outer_parm, inner_parm; - - /* If the return types are not compatible bail out. */ - if (!useless_type_conversion_p (TREE_TYPE (outer_type), - TREE_TYPE (inner_type))) - return false; - - /* Method types should belong to a compatible base class. */ - if (TREE_CODE (inner_type) == METHOD_TYPE - && !useless_type_conversion_p (TYPE_METHOD_BASETYPE (outer_type), - TYPE_METHOD_BASETYPE (inner_type))) - return false; - - /* A conversion to an unprototyped argument list is ok. */ - if (!prototype_p (outer_type)) - return true; - - /* If the unqualified argument types are compatible the conversion - is useless. */ - if (TYPE_ARG_TYPES (outer_type) == TYPE_ARG_TYPES (inner_type)) - return true; - - for (outer_parm = TYPE_ARG_TYPES (outer_type), - inner_parm = TYPE_ARG_TYPES (inner_type); - outer_parm && inner_parm; - outer_parm = TREE_CHAIN (outer_parm), - inner_parm = TREE_CHAIN (inner_parm)) - if (!useless_type_conversion_p - (TYPE_MAIN_VARIANT (TREE_VALUE (outer_parm)), - TYPE_MAIN_VARIANT (TREE_VALUE (inner_parm)))) - return false; - - /* If there is a mismatch in the number of arguments the functions - are not compatible. */ - if (outer_parm || inner_parm) - return false; - - /* Defer to the target if necessary. */ - if (TYPE_ATTRIBUTES (inner_type) || TYPE_ATTRIBUTES (outer_type)) - return comp_type_attributes (outer_type, inner_type) != 0; - - return true; - } - - /* For aggregates we rely on TYPE_CANONICAL exclusively and require - explicit conversions for types involving to be structurally - compared types. */ - else if (AGGREGATE_TYPE_P (inner_type) - && TREE_CODE (inner_type) == TREE_CODE (outer_type)) - return false; - - return false; -} - -/* Return true if a conversion from either type of TYPE1 and TYPE2 - to the other is not required. Otherwise return false. 
*/ - -bool -types_compatible_p (tree type1, tree type2) -{ - return (type1 == type2 - || (useless_type_conversion_p (type1, type2) - && useless_type_conversion_p (type2, type1))); -} - /* Dump bitmap SET (assumed to contain VAR_DECLs) to FILE. */ void @@ -4042,45 +3326,6 @@ dump_decl_set (FILE *file, bitmap set) fprintf (file, "NIL"); } -/* Given SSA_NAMEs NAME1 and NAME2, return true if they are candidates for - coalescing together, false otherwise. - - This must stay consistent with var_map_base_init in tree-ssa-live.c. */ - -bool -gimple_can_coalesce_p (tree name1, tree name2) -{ - /* First check the SSA_NAME's associated DECL. We only want to - coalesce if they have the same DECL or both have no associated DECL. */ - tree var1 = SSA_NAME_VAR (name1); - tree var2 = SSA_NAME_VAR (name2); - var1 = (var1 && (!VAR_P (var1) || !DECL_IGNORED_P (var1))) ? var1 : NULL_TREE; - var2 = (var2 && (!VAR_P (var2) || !DECL_IGNORED_P (var2))) ? var2 : NULL_TREE; - if (var1 != var2) - return false; - - /* Now check the types. If the types are the same, then we should - try to coalesce V1 and V2. */ - tree t1 = TREE_TYPE (name1); - tree t2 = TREE_TYPE (name2); - if (t1 == t2) - return true; - - /* If the types are not the same, check for a canonical type match. This - (for example) allows coalescing when the types are fundamentally the - same, but just have different names. - - Note pointer types with different address spaces may have the same - canonical type. Those are rejected for coalescing by the - types_compatible_p check. */ - if (TYPE_CANONICAL (t1) - && TYPE_CANONICAL (t1) == TYPE_CANONICAL (t2) - && types_compatible_p (t1, t2)) - return true; - - return false; -} - /* Return true when CALL is a call stmt that definitely doesn't free any memory or makes it unavailable otherwise. */ bool @@ -4103,23 +3348,83 @@ nonfreeing_call_p (gimple call) return false; } -/* Create a new VAR_DECL and copy information from VAR to it. */ +/* Callback for walk_stmt_load_store_ops. + + Return TRUE if OP will dereference the tree stored in DATA, FALSE + otherwise. -tree -copy_var_decl (tree var, tree name, tree type) + This routine only makes a superficial check for a dereference. Thus + it must only be used if it is safe to return a false negative. */ +static bool +check_loadstore (gimple stmt ATTRIBUTE_UNUSED, tree op, void *data) { - tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type); - - TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var); - TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var); - DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var); - DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var); - DECL_IGNORED_P (copy) = DECL_IGNORED_P (var); - DECL_CONTEXT (copy) = DECL_CONTEXT (var); - TREE_NO_WARNING (copy) = TREE_NO_WARNING (var); - TREE_USED (copy) = 1; - DECL_SEEN_IN_BIND_EXPR_P (copy) = 1; - DECL_ATTRIBUTES (copy) = DECL_ATTRIBUTES (var); + if ((TREE_CODE (op) == MEM_REF || TREE_CODE (op) == TARGET_MEM_REF) + && operand_equal_p (TREE_OPERAND (op, 0), (tree)data, 0)) + return true; + return false; +} - return copy; +/* If OP can be inferred to be non-zero after STMT executes, return true. */ + +bool +infer_nonnull_range (gimple stmt, tree op) +{ + /* We can only assume that a pointer dereference will yield + non-NULL if -fdelete-null-pointer-checks is enabled. 
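+     (As a hypothetical example, for a statement "tmp_2 = *ptr_1" the
+     load/store walk below finds a MEM_REF whose address is ptr_1, so
+     ptr_1 can be assumed non-NULL once the statement has executed.)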
*/ + if (!flag_delete_null_pointer_checks + || !POINTER_TYPE_P (TREE_TYPE (op)) + || gimple_code (stmt) == GIMPLE_ASM) + return false; + + if (walk_stmt_load_store_ops (stmt, (void *)op, + check_loadstore, check_loadstore)) + return true; + + if (is_gimple_call (stmt) && !gimple_call_internal_p (stmt)) + { + tree fntype = gimple_call_fntype (stmt); + tree attrs = TYPE_ATTRIBUTES (fntype); + for (; attrs; attrs = TREE_CHAIN (attrs)) + { + attrs = lookup_attribute ("nonnull", attrs); + + /* If "nonnull" wasn't specified, we know nothing about + the argument. */ + if (attrs == NULL_TREE) + return false; + + /* If "nonnull" applies to all the arguments, then ARG + is non-null if it's in the argument list. */ + if (TREE_VALUE (attrs) == NULL_TREE) + { + for (unsigned int i = 0; i < gimple_call_num_args (stmt); i++) + { + if (operand_equal_p (op, gimple_call_arg (stmt, i), 0) + && POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (stmt, i)))) + return true; + } + return false; + } + + /* Now see if op appears in the nonnull list. */ + for (tree t = TREE_VALUE (attrs); t; t = TREE_CHAIN (t)) + { + int idx = TREE_INT_CST_LOW (TREE_VALUE (t)) - 1; + tree arg = gimple_call_arg (stmt, idx); + if (operand_equal_p (op, arg, 0)) + return true; + } + } + } + + /* If this function is marked as returning non-null, then we can + infer OP is non-null if it is used in the return statement. */ + if (gimple_code (stmt) == GIMPLE_RETURN + && gimple_return_retval (stmt) + && operand_equal_p (gimple_return_retval (stmt), op, 0) + && lookup_attribute ("returns_nonnull", + TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl)))) + return true; + + return false; } diff --git a/gcc/gimple.h b/gcc/gimple.h index b34424c18d3..a548a5b45e8 100644 --- a/gcc/gimple.h +++ b/gcc/gimple.h @@ -31,6 +31,7 @@ along with GCC; see the file COPYING3. 
If not see #include "internal-fn.h" #include "gimple-fold.h" #include "tree-eh.h" +#include "gimple-expr.h" typedef gimple gimple_seq_node; @@ -745,8 +746,6 @@ gimple gimple_build_return (tree); gimple gimple_build_assign_stat (tree, tree MEM_STAT_DECL); #define gimple_build_assign(l,r) gimple_build_assign_stat (l, r MEM_STAT_INFO) -void extract_ops_from_tree_1 (tree, enum tree_code *, tree *, tree *, tree *); - gimple gimple_build_assign_with_ops (enum tree_code, tree, tree, tree CXX_MEM_STAT_INFO); @@ -809,9 +808,6 @@ gimple gimple_build_predict (enum br_predictor, enum prediction); enum gimple_statement_structure_enum gss_for_assign (enum tree_code); void sort_case_labels (vec<tree> ); void preprocess_case_label_vec_for_gimple (vec<tree> , tree, tree *); -void gimple_set_body (tree, gimple_seq); -gimple_seq gimple_body (tree); -bool gimple_has_body_p (tree); gimple_seq gimple_seq_alloc (void); void gimple_seq_free (gimple_seq); void gimple_seq_add_seq (gimple_seq *, gimple_seq); @@ -832,7 +828,6 @@ tree gimple_get_lhs (const_gimple); void gimple_set_lhs (gimple, tree); void gimple_replace_lhs (gimple, tree); gimple gimple_copy (gimple); -void gimple_cond_get_ops_from_tree (tree, enum tree_code *, tree *, tree *); gimple gimple_build_cond_from_tree (tree, tree, tree); void gimple_cond_set_condition_from_tree (gimple, tree); bool gimple_has_side_effects (const_gimple); @@ -844,48 +839,6 @@ bool empty_body_p (gimple_seq); unsigned get_gimple_rhs_num_ops (enum tree_code); #define gimple_alloc(c, n) gimple_alloc_stat (c, n MEM_STAT_INFO) gimple gimple_alloc_stat (enum gimple_code, unsigned MEM_STAT_DECL); -const char *gimple_decl_printable_name (tree, int); - -/* Returns true iff T is a virtual ssa name decl. */ -extern bool virtual_operand_p (tree); -/* Returns true iff T is a scalar register variable. */ -extern bool is_gimple_reg (tree); -/* Returns true iff T is any sort of variable. */ -extern bool is_gimple_variable (tree); -/* Returns true iff T is any sort of symbol. */ -extern bool is_gimple_id (tree); -/* Returns true iff T is a variable or an INDIRECT_REF (of a variable). */ -extern bool is_gimple_min_lval (tree); -/* Returns true iff T is something whose address can be taken. */ -extern bool is_gimple_addressable (tree); -/* Returns true iff T is any valid GIMPLE lvalue. */ -extern bool is_gimple_lvalue (tree); - -/* Returns true iff T is a GIMPLE address. */ -bool is_gimple_address (const_tree); -/* Returns true iff T is a GIMPLE invariant address. */ -bool is_gimple_invariant_address (const_tree); -/* Returns true iff T is a GIMPLE invariant address at interprocedural - level. */ -bool is_gimple_ip_invariant_address (const_tree); -/* Returns true iff T is a valid GIMPLE constant. */ -bool is_gimple_constant (const_tree); -/* Returns true iff T is a GIMPLE restricted function invariant. */ -extern bool is_gimple_min_invariant (const_tree); -/* Returns true iff T is a GIMPLE restricted interprecodural invariant. */ -extern bool is_gimple_ip_invariant (const_tree); -/* Returns true iff T is a GIMPLE rvalue. */ -extern bool is_gimple_val (tree); -/* Returns true iff T is a GIMPLE asm statement input. */ -extern bool is_gimple_asm_val (tree); -/* Returns true iff T is a valid address operand of a MEM_REF. */ -bool is_gimple_mem_ref_addr (tree); - -/* Returns true iff T is a valid if-statement condition. */ -extern bool is_gimple_condexpr (tree); - -/* Returns true iff T is a valid call address expression. 
*/ -extern bool is_gimple_call_addr (tree); /* Return TRUE iff stmt is a call to a built-in function. */ extern bool is_gimple_builtin_call (gimple stmt); @@ -906,8 +859,6 @@ extern bool gimple_ior_addresses_taken (bitmap, gimple); extern bool gimple_call_builtin_p (gimple, enum built_in_class); extern bool gimple_call_builtin_p (gimple, enum built_in_function); extern bool gimple_asm_clobbers_memory_p (const_gimple); -extern bool useless_type_conversion_p (tree, tree); -extern bool types_compatible_p (tree, tree); /* In gimplify.c */ extern tree create_tmp_var_raw (tree, const char *); @@ -1086,9 +1037,8 @@ extern tree gimple_boolify (tree); extern gimple_predicate rhs_predicate_for (tree); extern tree canonicalize_cond_expr_cond (tree); extern void dump_decl_set (FILE *, bitmap); -extern bool gimple_can_coalesce_p (tree, tree); extern bool nonfreeing_call_p (gimple); -extern tree copy_var_decl (tree, tree, tree); +extern bool infer_nonnull_range (gimple, tree); /* In trans-mem.c. */ extern void diagnose_tm_safe_errors (tree); @@ -2042,18 +1992,6 @@ gimple_assign_set_rhs_with_ops (gimple_stmt_iterator *gsi, enum tree_code code, gimple_assign_set_rhs_with_ops_1 (gsi, code, op1, op2, NULL); } -/* A wrapper around extract_ops_from_tree_1, for callers which expect - to see only a maximum of two operands. */ - -static inline void -extract_ops_from_tree (tree expr, enum tree_code *code, tree *op0, - tree *op1) -{ - tree op2; - extract_ops_from_tree_1 (expr, code, op0, op1, &op2); - gcc_assert (op2 == NULL_TREE); -} - /* Returns true if GS is a nontemporal move. */ static inline bool @@ -2316,25 +2254,6 @@ gimple_call_set_internal_fn (gimple gs, enum internal_fn fn) } -/* Given a valid GIMPLE_CALL function address return the FUNCTION_DECL - associated with the callee if known. Otherwise return NULL_TREE. */ - -static inline tree -gimple_call_addr_fndecl (const_tree fn) -{ - if (fn && TREE_CODE (fn) == ADDR_EXPR) - { - tree fndecl = TREE_OPERAND (fn, 0); - if (TREE_CODE (fndecl) == MEM_REF - && TREE_CODE (TREE_OPERAND (fndecl, 0)) == ADDR_EXPR - && integer_zerop (TREE_OPERAND (fndecl, 1))) - fndecl = TREE_OPERAND (TREE_OPERAND (fndecl, 0), 0); - if (TREE_CODE (fndecl) == FUNCTION_DECL) - return fndecl; - } - return NULL_TREE; -} - /* If a given GIMPLE_CALL's callee is a FUNCTION_DECL, return it. Otherwise return NULL. This function is analogous to get_callee_fndecl in tree land. */ @@ -3633,19 +3552,6 @@ gimple_phi_set_arg (gimple gs, unsigned index, struct phi_arg_d * phiarg) gs->gimple_phi.args[index] = *phiarg; } -/* PHI nodes should contain only ssa_names and invariants. A test - for ssa_name is definitely simpler; don't let invalid contents - slip in in the meantime. */ - -static inline bool -phi_ssa_name_p (const_tree t) -{ - if (TREE_CODE (t) == SSA_NAME) - return true; - gcc_checking_assert (is_gimple_min_invariant (t)); - return false; -} - /* Return the PHI nodes for basic block BB, or NULL if there are no PHI nodes. */ @@ -5398,14 +5304,6 @@ gimple_expr_type (const_gimple stmt) return void_type_node; } -/* Return true if TYPE is a suitable type for a scalar register variable. */ - -static inline bool -is_gimple_reg_type (tree type) -{ - return !AGGREGATE_TYPE_P (type); -} - /* Return a new iterator pointing to GIMPLE_SEQ's first statement. 
*/
 
 static inline gimple_stmt_iterator
diff --git a/gcc/gimplify.c b/gcc/gimplify.c
index 21dc7cad156..58cb533ab94 100644
--- a/gcc/gimplify.c
+++ b/gcc/gimplify.c
@@ -8867,7 +8867,7 @@ gimplify_body (tree fndecl, bool do_parms)
       nonlocal_vlas = NULL;
     }
 
-  if (flag_openmp && gimplify_omp_ctxp)
+  if ((flag_openmp || flag_openmp_simd) && gimplify_omp_ctxp)
     {
       delete_omp_context (gimplify_omp_ctxp);
       gimplify_omp_ctxp = NULL;
diff --git a/gcc/haifa-sched.c b/gcc/haifa-sched.c
index aab5748f67b..ab21d0d481b 100644
--- a/gcc/haifa-sched.c
+++ b/gcc/haifa-sched.c
@@ -6519,6 +6519,50 @@ setup_sched_dump (void)
 		? stderr : dump_file);
 }
 
+/* Try to group a comparison and the following conditional jump INSN if
+   they're already adjacent.  This is to prevent the scheduler from
+   scheduling them apart.  */
+
+static void
+try_group_insn (rtx insn)
+{
+  unsigned int condreg1, condreg2;
+  rtx cc_reg_1;
+  rtx prev;
+
+  if (!any_condjump_p (insn))
+    return;
+
+  targetm.fixed_condition_code_regs (&condreg1, &condreg2);
+  cc_reg_1 = gen_rtx_REG (CCmode, condreg1);
+  prev = prev_nonnote_nondebug_insn (insn);
+  if (!reg_referenced_p (cc_reg_1, PATTERN (insn))
+      || !prev
+      || !modified_in_p (cc_reg_1, prev))
+    return;
+
+  /* Different microarchitectures support macro fusions for different
+     combinations of insn pairs.  */
+  if (!targetm.sched.macro_fusion_pair_p
+      || !targetm.sched.macro_fusion_pair_p (prev, insn))
+    return;
+
+  SCHED_GROUP_P (insn) = 1;
+}
+
+/* If the last cond jump and the cond register defining insn are consecutive
+   before scheduling, we want them to be in a schedule group.  This is good
+   for performance on microarchitectures supporting macro-fusion.  */
+
+static void
+group_insns_for_macro_fusion (void)
+{
+  basic_block bb;
+
+  FOR_EACH_BB (bb)
+    try_group_insn (BB_END (bb));
+}
+
 /* Initialize some global state for the scheduler.  This function works
    with the common data shared between all the schedulers.  It is called
    from the scheduler specific initialization routine.  */
@@ -6645,6 +6689,11 @@ sched_init (void)
     }
 
   curr_state = xmalloc (dfa_state_size);
+
+  /* Group compare and branch insns for macro-fusion.
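+     On a target whose macro_fusion_pair_p hook accepts, say, a
+     compare followed by a conditional jump (a hypothetical example;
+     the fusable pairs are entirely target-defined), each such pair
+     becomes a scheduling group that later passes will not split.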
*/ + if (targetm.sched.macro_fusion_p + && targetm.sched.macro_fusion_p ()) + group_insns_for_macro_fusion (); } static void haifa_init_only_bb (basic_block, basic_block); diff --git a/gcc/ipa-prop.c b/gcc/ipa-prop.c index 9761d6e0a99..c1531589d0c 100644 --- a/gcc/ipa-prop.c +++ b/gcc/ipa-prop.c @@ -852,7 +852,7 @@ static bool ipa_load_from_parm_agg_1 (vec<ipa_param_descriptor_t> descriptors, struct param_analysis_info *parms_ainfo, gimple stmt, tree op, int *index_p, HOST_WIDE_INT *offset_p, - bool *by_ref_p) + HOST_WIDE_INT *size_p, bool *by_ref_p) { int index; HOST_WIDE_INT size, max_size; @@ -870,6 +870,8 @@ ipa_load_from_parm_agg_1 (vec<ipa_param_descriptor_t> descriptors, { *index_p = index; *by_ref_p = false; + if (size_p) + *size_p = size; return true; } return false; @@ -912,6 +914,8 @@ ipa_load_from_parm_agg_1 (vec<ipa_param_descriptor_t> descriptors, { *index_p = index; *by_ref_p = true; + if (size_p) + *size_p = size; return true; } return false; @@ -926,7 +930,7 @@ ipa_load_from_parm_agg (struct ipa_node_params *info, gimple stmt, bool *by_ref_p) { return ipa_load_from_parm_agg_1 (info->descriptors, NULL, stmt, op, index_p, - offset_p, by_ref_p); + offset_p, NULL, by_ref_p); } /* Given that an actual argument is an SSA_NAME (given in NAME) and is a result @@ -1826,7 +1830,7 @@ ipa_analyze_indirect_call_uses (struct cgraph_node *node, if (gimple_assign_single_p (def) && ipa_load_from_parm_agg_1 (info->descriptors, parms_ainfo, def, gimple_assign_rhs1 (def), &index, &offset, - &by_ref)) + NULL, &by_ref)) { struct cgraph_edge *cs = ipa_note_param_call (node, index, call); cs->indirect_info->offset = offset; @@ -4566,7 +4570,7 @@ ipcp_transform_function (struct cgraph_node *node) struct ipa_agg_replacement_value *v; gimple stmt = gsi_stmt (gsi); tree rhs, val, t; - HOST_WIDE_INT offset; + HOST_WIDE_INT offset, size; int index; bool by_ref, vce; @@ -4593,13 +4597,15 @@ ipcp_transform_function (struct cgraph_node *node) continue; if (!ipa_load_from_parm_agg_1 (descriptors, parms_ainfo, stmt, - rhs, &index, &offset, &by_ref)) + rhs, &index, &offset, &size, &by_ref)) continue; for (v = aggval; v; v = v->next) if (v->index == index && v->offset == offset) break; - if (!v || v->by_ref != by_ref) + if (!v + || v->by_ref != by_ref + || tree_low_cst (TYPE_SIZE (TREE_TYPE (v->value)), 0) != size) continue; gcc_checking_assert (is_gimple_ip_invariant (v->value)); diff --git a/gcc/loop-iv.c b/gcc/loop-iv.c index 60d5043d67a..6b5f82b16b1 100644 --- a/gcc/loop-iv.c +++ b/gcc/loop-iv.c @@ -436,7 +436,9 @@ iv_subreg (struct rtx_iv *iv, enum machine_mode mode) && !iv->first_special) { rtx val = get_iv_value (iv, const0_rtx); - val = lowpart_subreg (mode, val, iv->extend_mode); + val = lowpart_subreg (mode, val, + iv->extend == IV_UNKNOWN_EXTEND + ? iv->mode : iv->extend_mode); iv->base = val; iv->extend = IV_UNKNOWN_EXTEND; @@ -476,8 +478,14 @@ iv_extend (struct rtx_iv *iv, enum iv_extend_code extend, enum machine_mode mode && !iv->first_special) { rtx val = get_iv_value (iv, const0_rtx); + if (iv->extend_mode != iv->mode + && iv->extend != IV_UNKNOWN_EXTEND + && iv->extend != extend) + val = lowpart_subreg (iv->mode, val, iv->extend_mode); val = simplify_gen_unary (iv_extend_to_rtx_code (extend), mode, - val, iv->extend_mode); + val, + iv->extend == extend + ? 
iv->extend_mode : iv->mode); iv->base = val; iv->extend = IV_UNKNOWN_EXTEND; iv->mode = iv->extend_mode = mode; diff --git a/gcc/loop-unswitch.c b/gcc/loop-unswitch.c index 3bdb10a4373..219c943545b 100644 --- a/gcc/loop-unswitch.c +++ b/gcc/loop-unswitch.c @@ -191,6 +191,7 @@ may_unswitch_on (basic_block bb, struct loop *loop, rtx *cinsn) if (!test) return NULL_RTX; + mode = VOIDmode; for (i = 0; i < 2; i++) { op[i] = XEXP (test, i); @@ -205,11 +206,15 @@ may_unswitch_on (basic_block bb, struct loop *loop, rtx *cinsn) return NULL_RTX; op[i] = get_iv_value (&iv, const0_rtx); + if (iv.extend != IV_UNKNOWN_EXTEND + && iv.mode != iv.extend_mode) + op[i] = lowpart_subreg (iv.mode, op[i], iv.extend_mode); + if (mode == VOIDmode) + mode = iv.mode; + else + gcc_assert (mode == iv.mode); } - mode = GET_MODE (op[0]); - if (mode == VOIDmode) - mode = GET_MODE (op[1]); if (GET_MODE_CLASS (mode) == MODE_CC) { if (at != BB_END (bb)) diff --git a/gcc/lto-opts.c b/gcc/lto-opts.c index 0a63f89c882..c9d4e03d00c 100644 --- a/gcc/lto-opts.c +++ b/gcc/lto-opts.c @@ -77,7 +77,7 @@ lto_write_options (void) obstack_init (&temporary_obstack); - /* Output options that affect GIMPLE IL semantics and are implicitely + /* Output options that affect GIMPLE IL semantics and are implicitly enabled by the frontend. This for now includes an explicit set of options that we also handle explicitly in lto-wrapper.c. In the end the effects on GIMPLE IL @@ -88,8 +88,13 @@ lto_write_options (void) if (global_options.x_flag_exceptions) append_to_collect_gcc_options (&temporary_obstack, &first_p, "-fexceptions"); + /* -fnon-call-exceptions changes the generation of exception + regions. It is enabled implicitly by the Go frontend. */ + if (global_options.x_flag_non_call_exceptions) + append_to_collect_gcc_options (&temporary_obstack, &first_p, + "-fnon-call-exceptions"); - /* Output explicitely passed options. */ + /* Output explicitly passed options. */ for (i = 1; i < save_decoded_options_count; ++i) { struct cl_decoded_option *option = &save_decoded_options[i]; diff --git a/gcc/lto-wrapper.c b/gcc/lto-wrapper.c index 755993ca634..57978c883d4 100644 --- a/gcc/lto-wrapper.c +++ b/gcc/lto-wrapper.c @@ -409,6 +409,7 @@ merge_and_complain (struct cl_decoded_option **decoded_options, case OPT_fpie: case OPT_fcommon: case OPT_fexceptions: + case OPT_fnon_call_exceptions: case OPT_fgnu_tm: /* Do what the old LTO code did - collect exactly one option setting per OPT code, we pick the first we encounter. @@ -573,6 +574,7 @@ run_gcc (unsigned argc, char *argv[]) case OPT_fpie: case OPT_fcommon: case OPT_fexceptions: + case OPT_fnon_call_exceptions: case OPT_fgnu_tm: case OPT_freg_struct_return: case OPT_fpcc_struct_return: diff --git a/gcc/omp-low.c b/gcc/omp-low.c index ad0c609622b..8e4727cac75 100644 --- a/gcc/omp-low.c +++ b/gcc/omp-low.c @@ -8229,7 +8229,7 @@ execute_expand_omp (void) static bool gate_expand_omp (void) { - return (flag_openmp != 0 && !seen_error ()); + return ((flag_openmp != 0 || flag_openmp_simd != 0) && !seen_error ()); } namespace { @@ -10050,7 +10050,7 @@ execute_lower_omp (void) /* This pass always runs, to provide PROP_gimple_lomp. But there is nothing to do unless -fopenmp is given. 
*/ - if (flag_openmp == 0) + if (flag_openmp == 0 && flag_openmp_simd == 0) return 0; all_contexts = splay_tree_new (splay_tree_compare_pointers, 0, diff --git a/gcc/optabs.c b/gcc/optabs.c index d6e49c9272c..a9b0c198557 100644 --- a/gcc/optabs.c +++ b/gcc/optabs.c @@ -6670,7 +6670,7 @@ expand_vec_perm (enum machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target) } tmp = gen_rtx_CONST_VECTOR (qimode, vec); sel = gen_lowpart (qimode, sel); - sel = expand_vec_perm (qimode, gen_reg_rtx (qimode), sel, tmp, NULL); + sel = expand_vec_perm (qimode, sel, sel, tmp, NULL); gcc_assert (sel != NULL); /* Add the byte offset to each byte element. */ diff --git a/gcc/opts.c b/gcc/opts.c index 4db20f038d9..3a939ac92b9 100644 --- a/gcc/opts.c +++ b/gcc/opts.c @@ -493,6 +493,7 @@ static const struct default_options default_options_table[] = { OPT_LEVELS_2_PLUS, OPT_fvect_cost_model_, NULL, VECT_COST_MODEL_CHEAP }, { OPT_LEVELS_2_PLUS_SPEED_ONLY, OPT_foptimize_strlen, NULL, 1 }, { OPT_LEVELS_2_PLUS, OPT_fhoist_adjacent_loads, NULL, 1 }, + { OPT_LEVELS_2_PLUS, OPT_fisolate_erroneous_paths, NULL, 1 }, /* -O3 optimizations. */ { OPT_LEVELS_3_PLUS, OPT_ftree_loop_distribute_patterns, NULL, 1 }, diff --git a/gcc/passes.def b/gcc/passes.def index 31ce11381d8..1e2c4dc00ca 100644 --- a/gcc/passes.def +++ b/gcc/passes.def @@ -126,6 +126,7 @@ along with GCC; see the file COPYING3. If not see /* These passes are run after IPA passes on every function that is being output to the assembler file. */ INSERT_PASSES_AFTER (all_passes) + NEXT_PASS (pass_fixup_cfg); NEXT_PASS (pass_lower_eh_dispatch); NEXT_PASS (pass_all_optimizations); PUSH_INSERT_PASSES_WITHIN (pass_all_optimizations) @@ -166,9 +167,16 @@ along with GCC; see the file COPYING3. If not see is removed, and this place fits nicely. Remember this when trying to move or duplicate pass_dominator somewhere earlier. */ NEXT_PASS (pass_dominator); + /* At this point the majority of const/copy propagations + are exposed. Go ahead and identify paths that should never + be executed in a conforming program and isolate those paths. + + This will expose more degenerate PHIs in the main path and + expose more PRE/DOM optimization opportunities. */ + NEXT_PASS (pass_isolate_erroneous_paths); /* The only const/copy propagation opportunities left after - DOM should be due to degenerate PHI nodes. So rather than - run the full propagators, run a specialized pass which + DOM and erroneous path isolation should be due to degenerate PHI nodes. + So rather than run the full propagators, run a specialized pass which only examines PHIs to discover const/copy propagation opportunities. */ NEXT_PASS (pass_phi_only_cprop); diff --git a/gcc/rtlanal.c b/gcc/rtlanal.c index 12a5ce71553..a0a31a6c2f6 100644 --- a/gcc/rtlanal.c +++ b/gcc/rtlanal.c @@ -2742,10 +2742,9 @@ tablejump_p (const_rtx insn, rtx *labelp, rtx *tablep) label = JUMP_LABEL (insn); if (label != NULL_RTX && !ANY_RETURN_P (label) - && (table = next_active_insn (label)) != NULL_RTX + && (table = NEXT_INSN (label)) != NULL_RTX && JUMP_TABLE_DATA_P (table)) { - gcc_assert (table == NEXT_INSN (label)); if (labelp) *labelp = label; if (tablep) diff --git a/gcc/sanitizer.def b/gcc/sanitizer.def index c7c780929b8..0f45e9eead4 100644 --- a/gcc/sanitizer.def +++ b/gcc/sanitizer.def @@ -27,7 +27,7 @@ along with GCC; see the file COPYING3. If not see for other FEs by asan.c. 
*/ /* Address Sanitizer */ -DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_INIT, "__asan_init_v1", +DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_INIT, "__asan_init_v3", BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST) /* Do not reorder the BUILT_IN_ASAN_REPORT* builtins, e.g. cfgcleanup.c relies on this order. */ diff --git a/gcc/sched-rgn.c b/gcc/sched-rgn.c index bf9b10df3a9..73a236b5929 100644 --- a/gcc/sched-rgn.c +++ b/gcc/sched-rgn.c @@ -2443,6 +2443,8 @@ add_branch_dependences (rtx head, rtx tail) cc0 setters remain at the end because they can't be moved away from their cc0 user. + Predecessors of SCHED_GROUP_P instructions at the end remain at the end. + COND_EXEC insns cannot be moved past a branch (see e.g. PR17808). Insns setting TARGET_CLASS_LIKELY_SPILLED_P registers (usually return @@ -2465,7 +2467,8 @@ add_branch_dependences (rtx head, rtx tail) #endif || (!reload_completed && sets_likely_spilled (PATTERN (insn))))) - || NOTE_P (insn)) + || NOTE_P (insn) + || (last != 0 && SCHED_GROUP_P (last))) { if (!NOTE_P (insn)) { diff --git a/gcc/sync-builtins.def b/gcc/sync-builtins.def index 4f7a22f8a65..3176f9b197a 100644 --- a/gcc/sync-builtins.def +++ b/gcc/sync-builtins.def @@ -29,559 +29,565 @@ along with GCC; see the file COPYING3. If not see "_1" through "_16" versions, plus some extra casts. */ DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_ADD_N, "__sync_fetch_and_add", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_ADD_1, "__sync_fetch_and_add_1", - BT_FN_I1_VPTR_I1, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I1_VPTR_I1, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_ADD_2, "__sync_fetch_and_add_2", - BT_FN_I2_VPTR_I2, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I2_VPTR_I2, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_ADD_4, "__sync_fetch_and_add_4", - BT_FN_I4_VPTR_I4, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I4_VPTR_I4, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_ADD_8, "__sync_fetch_and_add_8", - BT_FN_I8_VPTR_I8, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I8_VPTR_I8, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_ADD_16, "__sync_fetch_and_add_16", - BT_FN_I16_VPTR_I16, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I16_VPTR_I16, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_SUB_N, "__sync_fetch_and_sub", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_SUB_1, "__sync_fetch_and_sub_1", - BT_FN_I1_VPTR_I1, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I1_VPTR_I1, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_SUB_2, "__sync_fetch_and_sub_2", - BT_FN_I2_VPTR_I2, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I2_VPTR_I2, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_SUB_4, "__sync_fetch_and_sub_4", - BT_FN_I4_VPTR_I4, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I4_VPTR_I4, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_SUB_8, "__sync_fetch_and_sub_8", - BT_FN_I8_VPTR_I8, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I8_VPTR_I8, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_SUB_16, "__sync_fetch_and_sub_16", - BT_FN_I16_VPTR_I16, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I16_VPTR_I16, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_OR_N, "__sync_fetch_and_or", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_OR_1, "__sync_fetch_and_or_1", - BT_FN_I1_VPTR_I1, 
ATTR_NOTHROW_LEAF_LIST) + BT_FN_I1_VPTR_I1, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_OR_2, "__sync_fetch_and_or_2", - BT_FN_I2_VPTR_I2, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I2_VPTR_I2, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_OR_4, "__sync_fetch_and_or_4", - BT_FN_I4_VPTR_I4, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I4_VPTR_I4, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_OR_8, "__sync_fetch_and_or_8", - BT_FN_I8_VPTR_I8, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I8_VPTR_I8, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_OR_16, "__sync_fetch_and_or_16", - BT_FN_I16_VPTR_I16, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I16_VPTR_I16, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_AND_N, "__sync_fetch_and_and", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_AND_1, "__sync_fetch_and_and_1", - BT_FN_I1_VPTR_I1, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I1_VPTR_I1, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_AND_2, "__sync_fetch_and_and_2", - BT_FN_I2_VPTR_I2, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I2_VPTR_I2, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_AND_4, "__sync_fetch_and_and_4", - BT_FN_I4_VPTR_I4, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I4_VPTR_I4, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_AND_8, "__sync_fetch_and_and_8", - BT_FN_I8_VPTR_I8, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I8_VPTR_I8, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_AND_16, "__sync_fetch_and_and_16", - BT_FN_I16_VPTR_I16, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I16_VPTR_I16, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_XOR_N, "__sync_fetch_and_xor", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_XOR_1, "__sync_fetch_and_xor_1", - BT_FN_I1_VPTR_I1, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I1_VPTR_I1, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_XOR_2, "__sync_fetch_and_xor_2", - BT_FN_I2_VPTR_I2, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I2_VPTR_I2, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_XOR_4, "__sync_fetch_and_xor_4", - BT_FN_I4_VPTR_I4, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I4_VPTR_I4, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_XOR_8, "__sync_fetch_and_xor_8", - BT_FN_I8_VPTR_I8, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I8_VPTR_I8, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_XOR_16, "__sync_fetch_and_xor_16", - BT_FN_I16_VPTR_I16, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I16_VPTR_I16, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_NAND_N, "__sync_fetch_and_nand", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_NAND_1, "__sync_fetch_and_nand_1", - BT_FN_I1_VPTR_I1, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I1_VPTR_I1, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_NAND_2, "__sync_fetch_and_nand_2", - BT_FN_I2_VPTR_I2, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I2_VPTR_I2, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_NAND_4, "__sync_fetch_and_nand_4", - BT_FN_I4_VPTR_I4, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I4_VPTR_I4, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_NAND_8, "__sync_fetch_and_nand_8", - BT_FN_I8_VPTR_I8, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I8_VPTR_I8, 
ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_FETCH_AND_NAND_16, "__sync_fetch_and_nand_16", - BT_FN_I16_VPTR_I16, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I16_VPTR_I16, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_ADD_AND_FETCH_N, "__sync_add_and_fetch", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_ADD_AND_FETCH_1, "__sync_add_and_fetch_1", - BT_FN_I1_VPTR_I1, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I1_VPTR_I1, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_ADD_AND_FETCH_2, "__sync_add_and_fetch_2", - BT_FN_I2_VPTR_I2, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I2_VPTR_I2, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_ADD_AND_FETCH_4, "__sync_add_and_fetch_4", - BT_FN_I4_VPTR_I4, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I4_VPTR_I4, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_ADD_AND_FETCH_8, "__sync_add_and_fetch_8", - BT_FN_I8_VPTR_I8, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I8_VPTR_I8, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_ADD_AND_FETCH_16, "__sync_add_and_fetch_16", - BT_FN_I16_VPTR_I16, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I16_VPTR_I16, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_SUB_AND_FETCH_N, "__sync_sub_and_fetch", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_SUB_AND_FETCH_1, "__sync_sub_and_fetch_1", - BT_FN_I1_VPTR_I1, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I1_VPTR_I1, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_SUB_AND_FETCH_2, "__sync_sub_and_fetch_2", - BT_FN_I2_VPTR_I2, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I2_VPTR_I2, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_SUB_AND_FETCH_4, "__sync_sub_and_fetch_4", - BT_FN_I4_VPTR_I4, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I4_VPTR_I4, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_SUB_AND_FETCH_8, "__sync_sub_and_fetch_8", - BT_FN_I8_VPTR_I8, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I8_VPTR_I8, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_SUB_AND_FETCH_16, "__sync_sub_and_fetch_16", - BT_FN_I16_VPTR_I16, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I16_VPTR_I16, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_OR_AND_FETCH_N, "__sync_or_and_fetch", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_OR_AND_FETCH_1, "__sync_or_and_fetch_1", - BT_FN_I1_VPTR_I1, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I1_VPTR_I1, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_OR_AND_FETCH_2, "__sync_or_and_fetch_2", - BT_FN_I2_VPTR_I2, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I2_VPTR_I2, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_OR_AND_FETCH_4, "__sync_or_and_fetch_4", - BT_FN_I4_VPTR_I4, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I4_VPTR_I4, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_OR_AND_FETCH_8, "__sync_or_and_fetch_8", - BT_FN_I8_VPTR_I8, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I8_VPTR_I8, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_OR_AND_FETCH_16, "__sync_or_and_fetch_16", - BT_FN_I16_VPTR_I16, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I16_VPTR_I16, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_AND_AND_FETCH_N, "__sync_and_and_fetch", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_AND_AND_FETCH_1, "__sync_and_and_fetch_1", - BT_FN_I1_VPTR_I1, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I1_VPTR_I1, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN 
(BUILT_IN_SYNC_AND_AND_FETCH_2, "__sync_and_and_fetch_2", - BT_FN_I2_VPTR_I2, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I2_VPTR_I2, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_AND_AND_FETCH_4, "__sync_and_and_fetch_4", - BT_FN_I4_VPTR_I4, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I4_VPTR_I4, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_AND_AND_FETCH_8, "__sync_and_and_fetch_8", - BT_FN_I8_VPTR_I8, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I8_VPTR_I8, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_AND_AND_FETCH_16, "__sync_and_and_fetch_16", - BT_FN_I16_VPTR_I16, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I16_VPTR_I16, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_XOR_AND_FETCH_N, "__sync_xor_and_fetch", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_XOR_AND_FETCH_1, "__sync_xor_and_fetch_1", - BT_FN_I1_VPTR_I1, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I1_VPTR_I1, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_XOR_AND_FETCH_2, "__sync_xor_and_fetch_2", - BT_FN_I2_VPTR_I2, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I2_VPTR_I2, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_XOR_AND_FETCH_4, "__sync_xor_and_fetch_4", - BT_FN_I4_VPTR_I4, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I4_VPTR_I4, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_XOR_AND_FETCH_8, "__sync_xor_and_fetch_8", - BT_FN_I8_VPTR_I8, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I8_VPTR_I8, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_XOR_AND_FETCH_16, "__sync_xor_and_fetch_16", - BT_FN_I16_VPTR_I16, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I16_VPTR_I16, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_NAND_AND_FETCH_N, "__sync_nand_and_fetch", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_NAND_AND_FETCH_1, "__sync_nand_and_fetch_1", - BT_FN_I1_VPTR_I1, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I1_VPTR_I1, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_NAND_AND_FETCH_2, "__sync_nand_and_fetch_2", - BT_FN_I2_VPTR_I2, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I2_VPTR_I2, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_NAND_AND_FETCH_4, "__sync_nand_and_fetch_4", - BT_FN_I4_VPTR_I4, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I4_VPTR_I4, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_NAND_AND_FETCH_8, "__sync_nand_and_fetch_8", - BT_FN_I8_VPTR_I8, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I8_VPTR_I8, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_NAND_AND_FETCH_16, "__sync_nand_and_fetch_16", - BT_FN_I16_VPTR_I16, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I16_VPTR_I16, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_N, "__sync_bool_compare_and_swap", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_1, "__sync_bool_compare_and_swap_1", - BT_FN_BOOL_VPTR_I1_I1, ATTR_NOTHROW_LEAF_LIST) + BT_FN_BOOL_VPTR_I1_I1, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_2, "__sync_bool_compare_and_swap_2", - BT_FN_BOOL_VPTR_I2_I2, ATTR_NOTHROW_LEAF_LIST) + BT_FN_BOOL_VPTR_I2_I2, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_4, "__sync_bool_compare_and_swap_4", - BT_FN_BOOL_VPTR_I4_I4, ATTR_NOTHROW_LEAF_LIST) + BT_FN_BOOL_VPTR_I4_I4, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_8, "__sync_bool_compare_and_swap_8", - BT_FN_BOOL_VPTR_I8_I8, 
ATTR_NOTHROW_LEAF_LIST) + BT_FN_BOOL_VPTR_I8_I8, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_16, "__sync_bool_compare_and_swap_16", - BT_FN_BOOL_VPTR_I16_I16, ATTR_NOTHROW_LEAF_LIST) + BT_FN_BOOL_VPTR_I16_I16, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N, "__sync_val_compare_and_swap", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_1, "__sync_val_compare_and_swap_1", - BT_FN_I1_VPTR_I1_I1, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I1_VPTR_I1_I1, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_2, "__sync_val_compare_and_swap_2", - BT_FN_I2_VPTR_I2_I2, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I2_VPTR_I2_I2, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_4, "__sync_val_compare_and_swap_4", - BT_FN_I4_VPTR_I4_I4, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I4_VPTR_I4_I4, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_8, "__sync_val_compare_and_swap_8", - BT_FN_I8_VPTR_I8_I8, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I8_VPTR_I8_I8, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_16, "__sync_val_compare_and_swap_16", - BT_FN_I16_VPTR_I16_I16, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I16_VPTR_I16_I16, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_LOCK_TEST_AND_SET_N, "__sync_lock_test_and_set", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_LOCK_TEST_AND_SET_1, "__sync_lock_test_and_set_1", - BT_FN_I1_VPTR_I1, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I1_VPTR_I1, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_LOCK_TEST_AND_SET_2, "__sync_lock_test_and_set_2", - BT_FN_I2_VPTR_I2, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I2_VPTR_I2, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_LOCK_TEST_AND_SET_4, "__sync_lock_test_and_set_4", - BT_FN_I4_VPTR_I4, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I4_VPTR_I4, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_LOCK_TEST_AND_SET_8, "__sync_lock_test_and_set_8", - BT_FN_I8_VPTR_I8, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I8_VPTR_I8, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_LOCK_TEST_AND_SET_16, "__sync_lock_test_and_set_16", - BT_FN_I16_VPTR_I16, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I16_VPTR_I16, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_LOCK_RELEASE_N, "__sync_lock_release", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_LOCK_RELEASE_1, "__sync_lock_release_1", - BT_FN_VOID_VPTR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VPTR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_LOCK_RELEASE_2, "__sync_lock_release_2", - BT_FN_VOID_VPTR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VPTR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_LOCK_RELEASE_4, "__sync_lock_release_4", - BT_FN_VOID_VPTR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VPTR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_LOCK_RELEASE_8, "__sync_lock_release_8", - BT_FN_VOID_VPTR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VPTR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_LOCK_RELEASE_16, "__sync_lock_release_16", - BT_FN_VOID_VPTR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VPTR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_SYNC_SYNCHRONIZE, "__sync_synchronize", - BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID, 
ATTR_NOTHROWCALL_LEAF_LIST) /* __sync* builtins for the C++ memory model. */ DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_TEST_AND_SET, "__atomic_test_and_set", - BT_FN_BOOL_VPTR_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_BOOL_VPTR_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_CLEAR, "__atomic_clear", BT_FN_VOID_VPTR_INT, - ATTR_NOTHROW_LEAF_LIST) + ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_EXCHANGE, "__atomic_exchange", - BT_FN_VOID_SIZE_VPTR_PTR_PTR_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_SIZE_VPTR_PTR_PTR_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_EXCHANGE_N, "__atomic_exchange_n", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_EXCHANGE_1, "__atomic_exchange_1", - BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_EXCHANGE_2, "__atomic_exchange_2", - BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_EXCHANGE_4, "__atomic_exchange_4", - BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_EXCHANGE_8, "__atomic_exchange_8", - BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_EXCHANGE_16, "__atomic_exchange_16", - BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_LOAD, "__atomic_load", - BT_FN_VOID_SIZE_CONST_VPTR_PTR_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_SIZE_CONST_VPTR_PTR_INT, + ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_LOAD_N, "__atomic_load_n", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_LOAD_1, "__atomic_load_1", - BT_FN_I1_CONST_VPTR_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I1_CONST_VPTR_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_LOAD_2, "__atomic_load_2", - BT_FN_I2_CONST_VPTR_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I2_CONST_VPTR_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_LOAD_4, "__atomic_load_4", - BT_FN_I4_CONST_VPTR_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I4_CONST_VPTR_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_LOAD_8, "__atomic_load_8", - BT_FN_I8_CONST_VPTR_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I8_CONST_VPTR_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_LOAD_16, "__atomic_load_16", - BT_FN_I16_CONST_VPTR_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I16_CONST_VPTR_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_COMPARE_EXCHANGE, "__atomic_compare_exchange", BT_FN_BOOL_SIZE_VPTR_PTR_PTR_INT_INT, - ATTR_NOTHROW_LEAF_LIST) + ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_COMPARE_EXCHANGE_N, "__atomic_compare_exchange_n", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_COMPARE_EXCHANGE_1, "__atomic_compare_exchange_1", - BT_FN_BOOL_VPTR_PTR_I1_BOOL_INT_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_BOOL_VPTR_PTR_I1_BOOL_INT_INT, + ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_COMPARE_EXCHANGE_2, "__atomic_compare_exchange_2", - BT_FN_BOOL_VPTR_PTR_I2_BOOL_INT_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_BOOL_VPTR_PTR_I2_BOOL_INT_INT, + ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN 
(BUILT_IN_ATOMIC_COMPARE_EXCHANGE_4, "__atomic_compare_exchange_4", - BT_FN_BOOL_VPTR_PTR_I4_BOOL_INT_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_BOOL_VPTR_PTR_I4_BOOL_INT_INT, + ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_COMPARE_EXCHANGE_8, "__atomic_compare_exchange_8", - BT_FN_BOOL_VPTR_PTR_I8_BOOL_INT_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_BOOL_VPTR_PTR_I8_BOOL_INT_INT, + ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_COMPARE_EXCHANGE_16, "__atomic_compare_exchange_16", - BT_FN_BOOL_VPTR_PTR_I16_BOOL_INT_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_BOOL_VPTR_PTR_I16_BOOL_INT_INT, + ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_STORE, "__atomic_store", - BT_FN_VOID_SIZE_VPTR_PTR_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_SIZE_VPTR_PTR_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_STORE_N, "__atomic_store_n", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_STORE_1, "__atomic_store_1", - BT_FN_VOID_VPTR_I1_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VPTR_I1_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_STORE_2, "__atomic_store_2", - BT_FN_VOID_VPTR_I2_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VPTR_I2_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_STORE_4, "__atomic_store_4", - BT_FN_VOID_VPTR_I4_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VPTR_I4_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_STORE_8, "__atomic_store_8", - BT_FN_VOID_VPTR_I8_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VPTR_I8_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_STORE_16, "__atomic_store_16", - BT_FN_VOID_VPTR_I16_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VPTR_I16_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_ADD_FETCH_N, "__atomic_add_fetch", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_ADD_FETCH_1, "__atomic_add_fetch_1", - BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_ADD_FETCH_2, "__atomic_add_fetch_2", - BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_ADD_FETCH_4, "__atomic_add_fetch_4", - BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_ADD_FETCH_8, "__atomic_add_fetch_8", - BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_ADD_FETCH_16, "__atomic_add_fetch_16", - BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_SUB_FETCH_N, "__atomic_sub_fetch", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_SUB_FETCH_1, "__atomic_sub_fetch_1", - BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_SUB_FETCH_2, "__atomic_sub_fetch_2", - BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_SUB_FETCH_4, "__atomic_sub_fetch_4", - BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_SUB_FETCH_8, 
"__atomic_sub_fetch_8", - BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_SUB_FETCH_16, "__atomic_sub_fetch_16", - BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_AND_FETCH_N, "__atomic_and_fetch", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_AND_FETCH_1, "__atomic_and_fetch_1", - BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_AND_FETCH_2, "__atomic_and_fetch_2", - BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_AND_FETCH_4, "__atomic_and_fetch_4", - BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_AND_FETCH_8, "__atomic_and_fetch_8", - BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_AND_FETCH_16, "__atomic_and_fetch_16", - BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_NAND_FETCH_N, "__atomic_nand_fetch", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_NAND_FETCH_1, "__atomic_nand_fetch_1", - BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_NAND_FETCH_2, "__atomic_nand_fetch_2", - BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_NAND_FETCH_4, "__atomic_nand_fetch_4", - BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_NAND_FETCH_8, "__atomic_nand_fetch_8", - BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_NAND_FETCH_16, "__atomic_nand_fetch_16", - BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_XOR_FETCH_N, "__atomic_xor_fetch", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_XOR_FETCH_1, "__atomic_xor_fetch_1", - BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_XOR_FETCH_2, "__atomic_xor_fetch_2", - BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_XOR_FETCH_4, "__atomic_xor_fetch_4", - BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_XOR_FETCH_8, "__atomic_xor_fetch_8", - BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_XOR_FETCH_16, "__atomic_xor_fetch_16", - BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_OR_FETCH_N, "__atomic_or_fetch", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN 
(BUILT_IN_ATOMIC_OR_FETCH_1, "__atomic_or_fetch_1", - BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_OR_FETCH_2, "__atomic_or_fetch_2", - BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_OR_FETCH_4, "__atomic_or_fetch_4", - BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_OR_FETCH_8, "__atomic_or_fetch_8", - BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_OR_FETCH_16, "__atomic_or_fetch_16", - BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_ADD_N, "__atomic_fetch_add", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_ADD_1, "__atomic_fetch_add_1", - BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_ADD_2, "__atomic_fetch_add_2", - BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_ADD_4, "__atomic_fetch_add_4", - BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_ADD_8, "__atomic_fetch_add_8", - BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_ADD_16, "__atomic_fetch_add_16", - BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_SUB_N, "__atomic_fetch_sub", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_SUB_1, "__atomic_fetch_sub_1", - BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_SUB_2, "__atomic_fetch_sub_2", - BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_SUB_4, "__atomic_fetch_sub_4", - BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_SUB_8, "__atomic_fetch_sub_8", - BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_SUB_16, "__atomic_fetch_sub_16", - BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_AND_N, "__atomic_fetch_and", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_AND_1, "__atomic_fetch_and_1", - BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_AND_2, "__atomic_fetch_and_2", - BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_AND_4, "__atomic_fetch_and_4", - BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROWCALL_LEAF_LIST) 
DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_AND_8, "__atomic_fetch_and_8", - BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_AND_16, "__atomic_fetch_and_16", - BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_NAND_N, "__atomic_fetch_nand", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_NAND_1, "__atomic_fetch_nand_1", - BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_NAND_2, "__atomic_fetch_nand_2", - BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_NAND_4, "__atomic_fetch_nand_4", - BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_NAND_8, "__atomic_fetch_nand_8", - BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_NAND_16, "__atomic_fetch_nand_16", - BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_XOR_N, "__atomic_fetch_xor", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_XOR_1, "__atomic_fetch_xor_1", - BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_XOR_2, "__atomic_fetch_xor_2", - BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_XOR_4, "__atomic_fetch_xor_4", - BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_XOR_8, "__atomic_fetch_xor_8", - BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_XOR_16, "__atomic_fetch_xor_16", - BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_OR_N, "__atomic_fetch_or", - BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST) + BT_FN_VOID_VAR, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_OR_1, "__atomic_fetch_or_1", - BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I1_VPTR_I1_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_OR_2, "__atomic_fetch_or_2", - BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I2_VPTR_I2_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_OR_4, "__atomic_fetch_or_4", - BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I4_VPTR_I4_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_OR_8, "__atomic_fetch_or_8", - BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I8_VPTR_I8_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_FETCH_OR_16, "__atomic_fetch_or_16", - BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROW_LEAF_LIST) + BT_FN_I16_VPTR_I16_INT, ATTR_NOTHROWCALL_LEAF_LIST) DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_ALWAYS_LOCK_FREE, "__atomic_always_lock_free", diff --git a/gcc/target.def b/gcc/target.def index 
dda75374ec7..23aec383953 100644 --- a/gcc/target.def +++ b/gcc/target.def @@ -1041,6 +1041,19 @@ scheduling one insn causes other insns to become ready in the same\n\ cycle. These other insns can then be taken into account properly.", int, (FILE *file, int verbose, rtx *ready, int *n_readyp, int clock), NULL) +DEFHOOK +(macro_fusion_p, + "This hook is used to check whether the target platform supports macro fusion.", + bool, (void), NULL) + +DEFHOOK +(macro_fusion_pair_p, + "This hook is used to check whether two insns could be macro fused for\n\ +the target microarchitecture. If this hook returns true for the given insn pair\n\ +(@var{condgen} and @var{condjmp}), the scheduler will put them into a sched\n\ +group, and they will not be scheduled apart.", + bool, (rtx condgen, rtx condjmp), NULL) + /* The following member value is a pointer to a function called after evaluation forward dependencies of insns in chain given by two parameter values (head and tail correspondingly). */ diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog index 1f487d7f822..9b5a538e6f7 100644 --- a/gcc/testsuite/ChangeLog +++ b/gcc/testsuite/ChangeLog @@ -1,3 +1,107 @@ +2013-11-05 Tobias Burnus <burnus@net-b.de> + + * g++.dg/warn/wdate-time.C: New. + * gcc.dg/wdate-time.c: New. + * gfortran.dg/wdate-time.F90: New. + +2013-11-05 Steven G. Kargl <kargl@gcc.gnu.org> + + PR fortran/58989 + * gfortran.dg/reshape_6.f90: New test. + +2013-11-05 Jeff Law <law@redhat.com> + + * gcc.dg/pr38984.c: Add -fno-isolate-erroneous-paths. + * gcc.dg/tree-ssa/isolate-1.c: New test. + * gcc.dg/tree-ssa/isolate-2.c: New test. + * gcc.dg/tree-ssa/isolate-3.c: New test. + * gcc.dg/tree-ssa/isolate-4.c: New test. + +2013-11-05 Jakub Jelinek <jakub@redhat.com> + + PR rtl-optimization/58997 + * gcc.c-torture/compile/pr58997.c: New test. + +2013-11-05 Paolo Carlini <paolo.carlini@oracle.com> + + PR c++/58724 + * g++.dg/cpp0x/gen-attrs-56.C: New. + +2013-11-05 Richard Biener <rguenther@suse.de> + + PR ipa/58492 + * gcc.dg/ipa/pr58492.c: New testcase. + +2013-11-05 Richard Biener <rguenther@suse.de> + + PR tree-optimization/58955 + * gcc.dg/torture/pr58955-1.c: New testcase. + * gcc.dg/torture/pr58955-2.c: Likewise. + +2013-11-05 H.J. Lu <hongjiu.lu@intel.com> + + PR middle-end/58981 + * gcc.dg/pr58981.c: New test. + +2013-11-05 Richard Biener <rguenther@suse.de> + + PR middle-end/58941 + * gcc.dg/torture/pr58941.c: New testcase. + +2013-11-05 Marc Glisse <marc.glisse@inria.fr> + + PR tree-optimization/58958 + * gcc.dg/tree-ssa/pr58958.c: New file. + +2013-11-05 Marc Glisse <marc.glisse@inria.fr> + + * gcc.dg/tree-ssa/alias-26.c: New file. + +2013-11-05 Jakub Jelinek <jakub@redhat.com> + + PR tree-optimization/58984 + * gcc.c-torture/execute/pr58984.c: New test. + +2013-11-05 Andreas Schwab <schwab@suse.de> + + * g++.dg/ext/sync-4.C: Require sync_long_long_runtime support. + +2013-11-05 Tobias Burnus <burnus@net-b.de> + + * g++.dg/gomp/openmp-simd-1.C: New. + * g++.dg/gomp/openmp-simd-2.C: New. + * gcc.dg/gomp/openmp-simd-1.c: New. + * gcc.dg/gomp/openmp-simd-2.c: New. + +2013-11-04 Senthil Kumar Selvaraj <senthil_kumar.selvaraj@atmel.com> + + * gcc.dg/superblock.c: Require scheduling support. + +2013-11-04 Kostya Serebryany <kcc@google.com> + + * g++.dg/asan/asan_test.cc: Update the test + to match the fresh asan run-time. + * c-c++-common/asan/stack-overflow-1.c: Ditto. + +2013-11-04 Ian Lance Taylor <iant@google.com> + + * g++.dg/ext/sync-4.C: New test.
+ +2013-11-04 Paul Thomas <pault@gcc.gnu.org> + + PR fortran/58771 + * gfortran.dg/derived_external_function_1.f90: New test. + +2013-11-04 Jakub Jelinek <jakub@redhat.com> + + PR tree-optimization/58978 + * gcc.c-torture/compile/pr58978.c: New test. + +2013-11-04 Paul Thomas <pault@gcc.gnu.org> + + PR fortran/57445 + * gfortran.dg/optional_class_1.f90: New test. + 2013-11-04 Vladimir Makarov <vmakarov@redhat.com> PR rtl-optimization/58968 diff --git a/gcc/testsuite/c-c++-common/asan/stack-overflow-1.c b/gcc/testsuite/c-c++-common/asan/stack-overflow-1.c index 5f563561935..c7176509710 100644 --- a/gcc/testsuite/c-c++-common/asan/stack-overflow-1.c +++ b/gcc/testsuite/c-c++-common/asan/stack-overflow-1.c @@ -19,4 +19,5 @@ int main() { /* { dg-output "READ of size 1 at 0x\[0-9a-f\]+ thread T0\[^\n\r]*(\n|\r\n|\r)" } */ /* { dg-output " #0 0x\[0-9a-f\]+ (in _*main (\[^\n\r]*stack-overflow-1.c:16|\[^\n\r]*:0)|\[(\]).*(\n|\r\n|\r)" } */ -/* { dg-output "\[^\n\r]*Address 0x\[0-9a-f\]+ is\[^\n\r]*frame <main>" } */ +/* { dg-output "\[^\n\r]*Address 0x\[0-9a-f\]+ is located in stack of thread T0.*(\n|\r\n|\r)" } */ +/* { dg-output "\[^\n\r]*in main.*stack-overflow-1.c.*(\n|\r\n|\r)" } */ diff --git a/gcc/testsuite/g++.dg/asan/asan_test.cc b/gcc/testsuite/g++.dg/asan/asan_test.cc index 76b6e693498..2df8c62cbb1 100644 --- a/gcc/testsuite/g++.dg/asan/asan_test.cc +++ b/gcc/testsuite/g++.dg/asan/asan_test.cc @@ -204,16 +204,6 @@ TEST(AddressSanitizer, BitFieldNegativeTest) { delete Ident(x); } -TEST(AddressSanitizer, OutOfMemoryTest) { - size_t size = SANITIZER_WORDSIZE == 64 ? (size_t)(1ULL << 48) : (0xf0000000); - EXPECT_EQ(0, realloc(0, size)); - EXPECT_EQ(0, realloc(0, ~Ident(0))); - EXPECT_EQ(0, malloc(size)); - EXPECT_EQ(0, malloc(~Ident(0))); - EXPECT_EQ(0, calloc(1, size)); - EXPECT_EQ(0, calloc(1, ~Ident(0))); -} - #if ASAN_NEEDS_SEGV namespace { @@ -497,42 +487,6 @@ TEST(AddressSanitizer, ManyStackObjectsTest) { EXPECT_DEATH(Ident(ZZZ)[-1] = 0, ASAN_PCRE_DOTALL "XXX.*YYY.*ZZZ"); } -NOINLINE static void Frame0(int frame, char *a, char *b, char *c) { - char d[4] = {0}; - char *D = Ident(d); - switch (frame) { - case 3: a[5]++; break; - case 2: b[5]++; break; - case 1: c[5]++; break; - case 0: D[5]++; break; - } -} -NOINLINE static void Frame1(int frame, char *a, char *b) { - char c[4] = {0}; Frame0(frame, a, b, c); - break_optimization(0); -} -NOINLINE static void Frame2(int frame, char *a) { - char b[4] = {0}; Frame1(frame, a, b); - break_optimization(0); -} -NOINLINE static void Frame3(int frame) { - char a[4] = {0}; Frame2(frame, a); - break_optimization(0); -} - -TEST(AddressSanitizer, GuiltyStackFrame0Test) { - EXPECT_DEATH(Frame3(0), "located .*in frame <.*Frame0"); -} -TEST(AddressSanitizer, GuiltyStackFrame1Test) { - EXPECT_DEATH(Frame3(1), "located .*in frame <.*Frame1"); -} -TEST(AddressSanitizer, GuiltyStackFrame2Test) { - EXPECT_DEATH(Frame3(2), "located .*in frame <.*Frame2"); -} -TEST(AddressSanitizer, GuiltyStackFrame3Test) { - EXPECT_DEATH(Frame3(3), "located .*in frame <.*Frame3"); -} - NOINLINE void LongJmpFunc1(jmp_buf buf) { // create three red zones for these two stack objects.
int a; diff --git a/gcc/testsuite/g++.dg/cpp0x/gen-attrs-56.C b/gcc/testsuite/g++.dg/cpp0x/gen-attrs-56.C new file mode 100644 index 00000000000..f331ed369ac --- /dev/null +++ b/gcc/testsuite/g++.dg/cpp0x/gen-attrs-56.C @@ -0,0 +1,5 @@ +// PR c++/58724 +// { dg-do compile { target c++11 } } + +namespace foo __attribute__((visibility("default"))) {} +namespace bar [[gnu::visibility("default")]] {} diff --git a/gcc/testsuite/g++.dg/ext/sync-4.C b/gcc/testsuite/g++.dg/ext/sync-4.C new file mode 100644 index 00000000000..14ed27337fb --- /dev/null +++ b/gcc/testsuite/g++.dg/ext/sync-4.C @@ -0,0 +1,122 @@ +/* { dg-do run { target hppa*-*-hpux* *-*-linux* *-*-gnu* powerpc*-*-darwin* *-*-darwin[912]* } } */ +/* { dg-require-effective-target sync_long_long_runtime } */ +/* { dg-options "-fexceptions -fnon-call-exceptions -O2" } */ + +/* Verify that the builtin functions are correctly marked as trapping + when using -fnon-call-exceptions. */ + +#include <stdlib.h> +#include <signal.h> + +typedef int int32_t __attribute__ ((mode (SI))); +typedef int int64_t __attribute__ ((mode (DI))); + +#define FN(IDX, RET, CALL) \ +static RET f ## IDX (void *p) __attribute__ ((noinline)); \ +static RET \ +f ## IDX (void *p) \ +{ \ + return CALL; \ +} \ +static void \ +t ## IDX () \ +{ \ + try \ + { \ + f ## IDX(0); \ + } \ + catch (...) \ + { \ + return; \ + } \ + abort(); \ +} + +FN(1, int64_t, (__sync_fetch_and_add((int64_t*)p, 1))) +FN(2, int64_t, (__sync_fetch_and_sub((int64_t*)p, 1))) +FN(3, int64_t, (__sync_fetch_and_or((int64_t*)p, 1))) +FN(4, int64_t, (__sync_fetch_and_and((int64_t*)p, 1))) +FN(5, int64_t, (__sync_fetch_and_xor((int64_t*)p, 1))) +FN(6, int64_t, (__sync_fetch_and_nand((int64_t*)p, 1))) + +FN( 7, int64_t, (__sync_add_and_fetch((int64_t*)p, 1))) +FN( 8, int64_t, (__sync_sub_and_fetch((int64_t*)p, 1))) +FN( 9, int64_t, (__sync_or_and_fetch((int64_t*)p, 1))) +FN(10, int64_t, (__sync_and_and_fetch((int64_t*)p, 1))) +FN(11, int64_t, (__sync_xor_and_fetch((int64_t*)p, 1))) +FN(12, int64_t, (__sync_nand_and_fetch((int64_t*)p, 1))) + +FN(13, bool, (__sync_bool_compare_and_swap((int64_t*)p, 1, 2))) +FN(14, int64_t, (__sync_val_compare_and_swap((int64_t*)p, 1, 2))) + +FN(15, int64_t, (__sync_lock_test_and_set((int64_t*)p, 1))) +FN(16, void, (__sync_lock_release((int64_t*)p))) + +FN(17, bool, (__atomic_test_and_set((int64_t*)p, __ATOMIC_SEQ_CST))) +FN(18, void, (__atomic_clear((int64_t*)p, __ATOMIC_SEQ_CST))) + +FN(19, void, (__atomic_exchange((int64_t*)p, (int64_t*)0, (int64_t*)0, __ATOMIC_SEQ_CST))) +FN(20, int64_t, (__atomic_exchange_n((int64_t*)p, 1, 2))) + +FN(21, void, (__atomic_load((int64_t*)p, (int64_t*)0, __ATOMIC_SEQ_CST))) +FN(22, int64_t, (__atomic_load_n((int64_t*)p, __ATOMIC_SEQ_CST))) + +FN(23, bool, (__atomic_compare_exchange((int64_t*)p, (int64_t*)0, (int64_t*)0, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))) +FN(24, bool, (__atomic_compare_exchange_n((int64_t*)p, (int64_t*)0, 1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))) + +FN(25, void, (__atomic_store((int64_t*)p, (int64_t*)0, __ATOMIC_SEQ_CST))) +FN(26, void, (__atomic_store_n((int64_t*)p, 1, __ATOMIC_SEQ_CST))) + +FN(27, int64_t, (__atomic_add_fetch((int64_t*)p, 1, __ATOMIC_SEQ_CST))) +FN(28, int64_t, (__atomic_sub_fetch((int64_t*)p, 1, __ATOMIC_SEQ_CST))) +FN(29, int64_t, (__atomic_and_fetch((int64_t*)p, 1, __ATOMIC_SEQ_CST))) +FN(30, int64_t, (__atomic_nand_fetch((int64_t*)p, 1, __ATOMIC_SEQ_CST))) +FN(31, int64_t, (__atomic_xor_fetch((int64_t*)p, 1, __ATOMIC_SEQ_CST))) +FN(32, int64_t, (__atomic_or_fetch((int64_t*)p, 1, 
__ATOMIC_SEQ_CST))) + +FN(33, int64_t, (__atomic_fetch_add((int64_t*)p, 1, __ATOMIC_SEQ_CST))) +FN(34, int64_t, (__atomic_fetch_sub((int64_t*)p, 1, __ATOMIC_SEQ_CST))) +FN(35, int64_t, (__atomic_fetch_and((int64_t*)p, 1, __ATOMIC_SEQ_CST))) +FN(36, int64_t, (__atomic_fetch_nand((int64_t*)p, 1, __ATOMIC_SEQ_CST))) +FN(37, int64_t, (__atomic_fetch_xor((int64_t*)p, 1, __ATOMIC_SEQ_CST))) +FN(38, int64_t, (__atomic_fetch_or((int64_t*)p, 1, __ATOMIC_SEQ_CST))) + +static void +handler(int) +{ + sigset_t clear; + + sigfillset (&clear); + sigprocmask (SIG_UNBLOCK, &clear, NULL); + throw 0; +} + +int +main () +{ + signal (SIGSEGV, handler); + signal (SIGBUS, handler); + + t1(); + t2(); + t3(); + t4(); + t5(); + t6(); + t7(); + t8(); + t9(); + t10(); + t11(); + t12(); + t13(); + t14(); + t15(); + t16(); + t17(); + t18(); + t19(); + t20(); + + exit(0); +} diff --git a/gcc/testsuite/g++.dg/gomp/openmp-simd-1.C b/gcc/testsuite/g++.dg/gomp/openmp-simd-1.C new file mode 100644 index 00000000000..fedb186fedf --- /dev/null +++ b/gcc/testsuite/g++.dg/gomp/openmp-simd-1.C @@ -0,0 +1,46 @@ +/* { dg-do compile } */ +/* { dg-options "-fopenmp-simd -fdump-tree-original" } */ + +#pragma omp declare simd +float bar(float b) { + return b*b; +} + +void foo(int n, float *a, float *b) +{ + int i; +#pragma omp simd + for (i = 0; i < n ; i++) + a[i] = b[i]; +#pragma omp for simd + for (i = 0; i < n ; i++) + a[i] = b[i]; +#pragma omp distribute simd + for (i = 0; i < n ; i++) + a[i] = b[i]; +#pragma omp distribute parallel for simd + for (i = 0; i < n ; i++) + a[i] = b[i]; +#pragma omp parallel for simd + for (i = 0; i < n ; i++) + a[i] = b[i]; +#pragma omp teams distribute simd + for (i = 0; i < n ; i++) + a[i] = b[i]; +#pragma omp target teams distribute simd + for (i = 0; i < n ; i++) + a[i] = b[i]; +#pragma omp teams distribute parallel for simd + for (i = 0; i < n ; i++) + a[i] = b[i]; +#pragma omp target teams distribute parallel for simd + for (i = 0; i < n ; i++) + a[i] = b[i]; +} + +/* { dg-final { scan-tree-dump-times "pragma omp simd" 9 "original" } } */ +/* { dg-final { scan-tree-dump-not "omp for" "original" } } */ +/* { dg-final { scan-tree-dump-not "omp distribute" "original" } } */ +/* { dg-final { scan-tree-dump-not "omp teams" "original" } } */ +/* { dg-final { scan-tree-dump-not "omp target" "original" } } */ +/* { dg-final { scan-tree-dump-not "omp parallel" "original" } } */ diff --git a/gcc/testsuite/g++.dg/gomp/openmp-simd-2.C b/gcc/testsuite/g++.dg/gomp/openmp-simd-2.C new file mode 100644 index 00000000000..e31c1ebecf9 --- /dev/null +++ b/gcc/testsuite/g++.dg/gomp/openmp-simd-2.C @@ -0,0 +1,44 @@ +/* { dg-do compile } */ +/* { dg-options "-fopenmp-simd -fdump-tree-original" } */ + +extern void abort (); +int a[1024] __attribute__((aligned (32))) = { 1 }; +struct S { int s; }; +#pragma omp declare reduction (+:struct S:omp_out.s += omp_in.s) +#pragma omp declare reduction (foo:struct S:omp_out.s += omp_in.s) +#pragma omp declare reduction (foo:int:omp_out += omp_in) + +__attribute__((noinline, noclone)) int +foo (void) +{ + int i, u = 0; + struct S s, t; + s.s = 0; t.s = 0; + #pragma omp simd aligned(a : 32) reduction(+:s) reduction(foo:t, u) + for (i = 0; i < 1024; i++) + { + int x = a[i]; + s.s += x; + t.s += x; + u += x; + } + if (t.s != s.s || u != s.s) + abort (); + return s.s; +} + + +void bar(int n, float *a, float *b) +{ + int i; +#pragma omp parallel for simd num_threads(4) safelen(64) + for (i = 0; i < n ; i++) + a[i] = b[i]; +} + +/* { dg-final { scan-tree-dump-times "Function void omp 
declare reduction operator\\+" 1 "original" } } */ +/* { dg-final { scan-tree-dump-times "Function void omp declare reduction foo" 2 "original" } } */ +/* { dg-final { scan-tree-dump-times "pragma omp simd reduction\\(u\\) reduction\\(t\\) reduction\\(\\+:s\\) aligned\\(a:32\\)" 1 "original" } } */ +/* { dg-final { scan-tree-dump-times "pragma omp simd safelen\\(64\\)" 1 "original" } } */ +/* { dg-final { scan-tree-dump-not "omp parallel" "original" } } */ +/* { dg-final { scan-tree-dump-not "omp for" "original" } } */ diff --git a/gcc/testsuite/g++.dg/init/array35.C b/gcc/testsuite/g++.dg/init/array35.C new file mode 100644 index 00000000000..6ce6d5cc956 --- /dev/null +++ b/gcc/testsuite/g++.dg/init/array35.C @@ -0,0 +1,3 @@ +// PR c++/58868 + +static struct { const int i; } a[] = { 1 }; diff --git a/gcc/testsuite/g++.dg/warn/wdate-time.C b/gcc/testsuite/g++.dg/warn/wdate-time.C new file mode 100644 index 00000000000..040dd995c2e --- /dev/null +++ b/gcc/testsuite/g++.dg/warn/wdate-time.C @@ -0,0 +1,6 @@ +/* { dg-do compile } */ +/* { dg-options "-Wdate-time" } */ + +const char time[] = __TIME__; /* { dg-warning "might prevent reproduce builds" } */ +const char date[] = __DATE__; /* { dg-warning "might prevent reproduce builds" } */ +const char timestamp[] = __TIMESTAMP__; /* { dg-warning "might prevent reproduce builds" } */ diff --git a/gcc/testsuite/gcc.c-torture/compile/pr58978.c b/gcc/testsuite/gcc.c-torture/compile/pr58978.c new file mode 100644 index 00000000000..721801da16b --- /dev/null +++ b/gcc/testsuite/gcc.c-torture/compile/pr58978.c @@ -0,0 +1,16 @@ +/* PR tree-optimization/58978 */ + +int +foo (int x) +{ + switch (x) + { + case 0: + case 1: + case 9: + break; + default: + __builtin_unreachable (); + } + return x; +} diff --git a/gcc/testsuite/gcc.c-torture/compile/pr58997.c b/gcc/testsuite/gcc.c-torture/compile/pr58997.c new file mode 100644 index 00000000000..2c7a0f82c8a --- /dev/null +++ b/gcc/testsuite/gcc.c-torture/compile/pr58997.c @@ -0,0 +1,19 @@ +/* PR rtl-optimization/58997 */ + +int a, b, c, e; +short d; +char h; + +void +foo () +{ + while (b) + { + d = a ? 
c : 1 % a; + c = d; + h = d; + if (!h) + while (e) + ; + } +} diff --git a/gcc/testsuite/gcc.c-torture/execute/pr58984.c b/gcc/testsuite/gcc.c-torture/execute/pr58984.c new file mode 100644 index 00000000000..e0f7669c78d --- /dev/null +++ b/gcc/testsuite/gcc.c-torture/execute/pr58984.c @@ -0,0 +1,57 @@ +/* PR tree-optimization/58984 */ + +struct S { int f0 : 8; int : 6; int f1 : 5; }; +struct T { char f0; int : 6; int f1 : 5; }; + +int a, *c = &a, e, n, b, m; + +static int +foo (struct S p) +{ + const unsigned short *f[36]; + for (; e < 2; e++) + { + const unsigned short **i = &f[0]; + *c ^= 1; + if (p.f1) + { + *i = 0; + return b; + } + } + return 0; +} + +static int +bar (struct T p) +{ + const unsigned short *f[36]; + for (; e < 2; e++) + { + const unsigned short **i = &f[0]; + *c ^= 1; + if (p.f1) + { + *i = 0; + return b; + } + } + return 0; +} + +int +main () +{ + struct S o = { 1, 1 }; + foo (o); + m = n || o.f0; + if (a != 1) + __builtin_abort (); + e = 0; + struct T p = { 1, 1 }; + bar (p); + m |= n || p.f0; + if (a != 0) + __builtin_abort (); + return 0; +} diff --git a/gcc/testsuite/gcc.dg/gomp/openmp-simd-1.c b/gcc/testsuite/gcc.dg/gomp/openmp-simd-1.c new file mode 100644 index 00000000000..fedb186fedf --- /dev/null +++ b/gcc/testsuite/gcc.dg/gomp/openmp-simd-1.c @@ -0,0 +1,46 @@ +/* { dg-do compile } */ +/* { dg-options "-fopenmp-simd -fdump-tree-original" } */ + +#pragma omp declare simd +float bar(float b) { + return b*b; +} + +void foo(int n, float *a, float *b) +{ + int i; +#pragma omp simd + for (i = 0; i < n ; i++) + a[i] = b[i]; +#pragma omp for simd + for (i = 0; i < n ; i++) + a[i] = b[i]; +#pragma omp distribute simd + for (i = 0; i < n ; i++) + a[i] = b[i]; +#pragma omp distribute parallel for simd + for (i = 0; i < n ; i++) + a[i] = b[i]; +#pragma omp parallel for simd + for (i = 0; i < n ; i++) + a[i] = b[i]; +#pragma omp teams distribute simd + for (i = 0; i < n ; i++) + a[i] = b[i]; +#pragma omp target teams distribute simd + for (i = 0; i < n ; i++) + a[i] = b[i]; +#pragma omp teams distribute parallel for simd + for (i = 0; i < n ; i++) + a[i] = b[i]; +#pragma omp target teams distribute parallel for simd + for (i = 0; i < n ; i++) + a[i] = b[i]; +} + +/* { dg-final { scan-tree-dump-times "pragma omp simd" 9 "original" } } */ +/* { dg-final { scan-tree-dump-not "omp for" "original" } } */ +/* { dg-final { scan-tree-dump-not "omp distribute" "original" } } */ +/* { dg-final { scan-tree-dump-not "omp teams" "original" } } */ +/* { dg-final { scan-tree-dump-not "omp target" "original" } } */ +/* { dg-final { scan-tree-dump-not "omp parallel" "original" } } */ diff --git a/gcc/testsuite/gcc.dg/gomp/openmp-simd-2.c b/gcc/testsuite/gcc.dg/gomp/openmp-simd-2.c new file mode 100644 index 00000000000..e66806845b7 --- /dev/null +++ b/gcc/testsuite/gcc.dg/gomp/openmp-simd-2.c @@ -0,0 +1,42 @@ +/* { dg-do compile } */ +/* { dg-options "-fopenmp-simd -fdump-tree-original" } */ + +extern void abort (); +int a[1024] __attribute__((aligned (32))) = { 1 }; +struct S { int s; }; +#pragma omp declare reduction (+:struct S:omp_out.s += omp_in.s) +#pragma omp declare reduction (foo:struct S:omp_out.s += omp_in.s) +#pragma omp declare reduction (foo:int:omp_out += omp_in) + +__attribute__((noinline, noclone)) int +foo (void) +{ + int i, u = 0; + struct S s, t; + s.s = 0; t.s = 0; + #pragma omp simd aligned(a : 32) reduction(+:s) reduction(foo:t, u) + for (i = 0; i < 1024; i++) + { + int x = a[i]; + s.s += x; + t.s += x; + u += x; + } + if (t.s != s.s || u != s.s) + abort (); + 
return s.s; +} + + +void bar(int n, float *a, float *b) +{ + int i; +#pragma omp parallel for simd num_threads(4) safelen(64) + for (i = 0; i < n ; i++) + a[i] = b[i]; +} + +/* { dg-final { scan-tree-dump-times "pragma omp simd reduction\\(u\\) reduction\\(t\\) reduction\\(\\+:s\\) aligned\\(a:32\\)" 1 "original" } } */ +/* { dg-final { scan-tree-dump-times "pragma omp simd safelen\\(64\\)" 1 "original" } } */ +/* { dg-final { scan-tree-dump-not "omp parallel" "original" } } */ +/* { dg-final { scan-tree-dump-not "omp for" "original" } } */ diff --git a/gcc/testsuite/gcc.dg/ipa/pr58492.c b/gcc/testsuite/gcc.dg/ipa/pr58492.c new file mode 100644 index 00000000000..79958d5640a --- /dev/null +++ b/gcc/testsuite/gcc.dg/ipa/pr58492.c @@ -0,0 +1,7 @@ +/* { dg-do compile } */ +/* { dg-options "-O3 -fipa-pta" } */ + +void f(int p, short q) +{ + f(0, 0); +} diff --git a/gcc/testsuite/gcc.dg/pr38984.c b/gcc/testsuite/gcc.dg/pr38984.c index 11f1e7f211a..0c031805ea8 100644 --- a/gcc/testsuite/gcc.dg/pr38984.c +++ b/gcc/testsuite/gcc.dg/pr38984.c @@ -1,5 +1,5 @@ /* { dg-do compile } */ -/* { dg-options "-O2 -fno-delete-null-pointer-checks -fdump-tree-optimized" } +/* { dg-options "-O2 -fno-delete-null-pointer-checks -fdump-tree-optimized -fno-isolate-erroneous-paths" } * */ int f(int *p) diff --git a/gcc/testsuite/gcc.dg/pr58981.c b/gcc/testsuite/gcc.dg/pr58981.c new file mode 100644 index 00000000000..1c8293e4985 --- /dev/null +++ b/gcc/testsuite/gcc.dg/pr58981.c @@ -0,0 +1,55 @@ +/* { dg-do run } */ +/* { dg-options "-O2" } */ +/* { dg-additional-options "-minline-all-stringops" { target { i?86-*-* x86_64-*-* } } } */ + +extern void abort (void); + +#define MAX_OFFSET (sizeof (long long)) +#define MAX_COPY (8 * sizeof (long long)) +#define MAX_EXTRA (sizeof (long long)) + +#define MAX_LENGTH (MAX_OFFSET + MAX_COPY + MAX_EXTRA) + +static union { + char buf[MAX_LENGTH]; + long long align_int; + long double align_fp; +} u; + +char A[MAX_LENGTH]; + +int +main () +{ + int off, len, i; + char *p, *q; + + for (i = 0; i < MAX_LENGTH; i++) + A[i] = 'A'; + + for (off = 0; off < MAX_OFFSET; off++) + for (len = 1; len < MAX_COPY; len++) + { + for (i = 0; i < MAX_LENGTH; i++) + u.buf[i] = 'a'; + + p = __builtin_memcpy (u.buf + off, A, len); + if (p != u.buf + off) + abort (); + + q = u.buf; + for (i = 0; i < off; i++, q++) + if (*q != 'a') + abort (); + + for (i = 0; i < len; i++, q++) + if (*q != 'A') + abort (); + + for (i = 0; i < MAX_EXTRA; i++, q++) + if (*q != 'a') + abort (); + } + + return 0; +} diff --git a/gcc/testsuite/gcc.dg/superblock.c b/gcc/testsuite/gcc.dg/superblock.c index 2b9aedfdac4..272d161f058 100644 --- a/gcc/testsuite/gcc.dg/superblock.c +++ b/gcc/testsuite/gcc.dg/superblock.c @@ -1,5 +1,6 @@ /* { dg-do compile } */ /* { dg-options "-O2 -fno-asynchronous-unwind-tables -fsched2-use-superblocks -fdump-rtl-sched2 -fdump-rtl-bbro" } */ +/* { dg-require-effective-target scheduling } */ typedef int aligned __attribute__ ((aligned (64))); extern void abort (void); diff --git a/gcc/testsuite/gcc.dg/torture/pr58941.c b/gcc/testsuite/gcc.dg/torture/pr58941.c new file mode 100644 index 00000000000..c0eea073165 --- /dev/null +++ b/gcc/testsuite/gcc.dg/torture/pr58941.c @@ -0,0 +1,33 @@ +/* { dg-do run } */ + +extern void abort (void); + +typedef struct { + int msgLength; + unsigned char data[1000]; +} SMsg; + +typedef struct { + int dummy; + int d[0]; +} SData; + +int condition = 3; + +int main() +{ + SMsg msg; + SData *pData = (SData*)(msg.data); + unsigned int i = 0; + for (i = 0; i < 1; i++) + { + 
pData->d[i] = 0; + if(condition & 1) + pData->d[i] |= 0x55; + if(condition & 2) + pData->d[i] |= 0xaa; + } + if (pData->d[0] != 0xff) + abort (); + return 0; +} diff --git a/gcc/testsuite/gcc.dg/torture/pr58955-1.c b/gcc/testsuite/gcc.dg/torture/pr58955-1.c new file mode 100644 index 00000000000..a79f42f0b38 --- /dev/null +++ b/gcc/testsuite/gcc.dg/torture/pr58955-1.c @@ -0,0 +1,20 @@ +/* { dg-do run } */ + +extern void abort (void); + +int a, b, c, d[4] = { 0, 0, 0, 1 }; + +int +main () +{ + for (; a < 4; a++) + { + int e = d[a]; + for (c = 1; c < 1; c++); + b = e; + d[a] = 0; + } + if (b != 1) + abort (); + return 0; +} diff --git a/gcc/testsuite/gcc.dg/torture/pr58955-2.c b/gcc/testsuite/gcc.dg/torture/pr58955-2.c new file mode 100644 index 00000000000..a43860e9b02 --- /dev/null +++ b/gcc/testsuite/gcc.dg/torture/pr58955-2.c @@ -0,0 +1,18 @@ +/* { dg-do run } */ + +extern void abort (void); + +int a, b[10]; + +int +main () +{ + for (; a < 2; a++) + { + b[a] = 1; + b[a + 1] = 0; + } + if (b[1] != 1) + abort (); + return 0; +} diff --git a/gcc/testsuite/gcc.dg/tree-ssa/alias-26.c b/gcc/testsuite/gcc.dg/tree-ssa/alias-26.c new file mode 100644 index 00000000000..a1eb8f7d0a8 --- /dev/null +++ b/gcc/testsuite/gcc.dg/tree-ssa/alias-26.c @@ -0,0 +1,13 @@ +/* { dg-do compile } */ +/* { dg-options "-O2 -fdump-tree-optimized" } */ + +void f (const char *c, int *i) +{ + *i = 42; + __builtin_memcpy (i - 1, c, sizeof (int)); + if (*i != 42) __builtin_abort(); +} + +/* { dg-final { scan-tree-dump-not "abort" "optimized" } } */ +/* { dg-final { cleanup-tree-dump "optimized" } } */ + diff --git a/gcc/testsuite/gcc.dg/tree-ssa/isolate-1.c b/gcc/testsuite/gcc.dg/tree-ssa/isolate-1.c new file mode 100644 index 00000000000..6b779b4a4bc --- /dev/null +++ b/gcc/testsuite/gcc.dg/tree-ssa/isolate-1.c @@ -0,0 +1,58 @@ + +/* { dg-do compile } */ +/* { dg-options "-O2 -fdump-tree-isolate-paths" } */ + + +struct demangle_component +{ + + int type; + int zzz; + +}; + + +struct d_info +{ + struct demangle_component *comps; + int next_comp; + int num_comps; +}; + + +static struct demangle_component * +d_make_empty (struct d_info *di) +{ + struct demangle_component *p; + + if (di->next_comp >= di->num_comps) + return ((void *)0); + p = &di->comps[di->next_comp]; + return p; +} + + + +struct demangle_component * +d_type (struct d_info *di) +{ + struct demangle_component *ret; + ret = d_make_empty (di); + ret->type = 42; + ret->zzz = -1; + return ret; +} + +/* We're testing two aspects of isolation here. First that isolation + occurs, second that if we have two null dereferences in a block + that we delete everything from the first dereference to the end of the + block, regardless of which comes first in the immediate use iterator.
*/ /* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "isolate-paths"} } */ /* { dg-final { scan-tree-dump-times "->type" 1 "isolate-paths"} } */ /* { dg-final { scan-tree-dump-times "->zzz" 1 "isolate-paths"} } */ /* { dg-final { cleanup-tree-dump "isolate-paths" } } */ + + + + + diff --git a/gcc/testsuite/gcc.dg/tree-ssa/isolate-2.c b/gcc/testsuite/gcc.dg/tree-ssa/isolate-2.c new file mode 100644 index 00000000000..290b44c5bd4 --- /dev/null +++ b/gcc/testsuite/gcc.dg/tree-ssa/isolate-2.c @@ -0,0 +1,43 @@ +/* { dg-do compile } */ +/* { dg-options "-O2 -fdump-tree-isolate-paths -fdump-tree-phicprop1" } */ + + +int z; +int y; + +int * foo(int a) __attribute__((returns_nonnull)); +int * bar(void) __attribute__((returns_nonnull)); + +int * +foo(int a) + +{ + switch (a) + { + case 0: + return &z; + default: + return (int *)0; + } +} + + +int * +bar (void) +{ + return 0; +} + +/* We're testing that the path isolation code can take advantage of the + returns non-null attribute to isolate a path where NULL flows into + a return statement. We test this twice, once where the NULL flows + from a PHI, the second with an explicit return 0 in the IL. + + We also verify that after isolation phi-cprop simplifies the + return statement so that it returns &z directly.  */ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 2 "isolate-paths"} } */ +/* { dg-final { scan-tree-dump-times "return &z;" 1 "phicprop1"} } */ +/* { dg-final { cleanup-tree-dump "isolate-paths" } } */ +/* { dg-final { cleanup-tree-dump "phicprop1" } } */ + + diff --git a/gcc/testsuite/gcc.dg/tree-ssa/isolate-3.c b/gcc/testsuite/gcc.dg/tree-ssa/isolate-3.c new file mode 100644 index 00000000000..7dddd8062c0 --- /dev/null +++ b/gcc/testsuite/gcc.dg/tree-ssa/isolate-3.c @@ -0,0 +1,65 @@ +/* { dg-do compile } */ +/* { dg-options "-O2 -fdump-tree-isolate-paths" } */ + + +typedef long unsigned int size_t; +extern void *memset (void *__s, int __c, size_t __n) + __attribute__ ((__nothrow__, __leaf__)) __attribute__ ((__nonnull__ (1))); +struct rtx_def; +typedef struct rtx_def *rtx; +typedef struct VEC_rtx_base + +{ + unsigned num; + unsigned alloc; + rtx vec[1]; +} VEC_rtx_base; +static __inline__ rtx * +VEC_rtx_base_address (VEC_rtx_base * vec_) +{ + return vec_ ? vec_->vec : 0; +} +typedef struct VEC_rtx_gc +{ + VEC_rtx_base base; +} VEC_rtx_gc; + +static __inline__ void +VEC_rtx_gc_safe_grow (VEC_rtx_gc ** vec_, int size_, const char *file_, + unsigned line_, const char *function_) +{ + ((*vec_) ? &(*vec_)->base : 0)->num = size_; +} + +static __inline__ void +VEC_rtx_gc_safe_grow_cleared (VEC_rtx_gc ** vec_, int size_, + const char *file_, unsigned line_, + const char *function_, int oldsize) +{ + VEC_rtx_gc_safe_grow (vec_, size_, file_, line_, function_); + memset (&(VEC_rtx_base_address ((*vec_) ? &(*vec_)->base : 0))[oldsize], 0, + sizeof (rtx) * (size_ - oldsize)); +} + +static VEC_rtx_gc *reg_base_value; +void +init_alias_analysis (void) +{ + unsigned int maxreg = max_reg_num (); + (VEC_rtx_gc_safe_grow_cleared + (&(reg_base_value), maxreg, "../../../gcc-4.6.0/gcc/alias.c", 2755, + __FUNCTION__, arf ())); +} + + + +/* This is an example of how a NULL pointer dereference can show up + without a PHI. Note VEC_rtx_gc_safe_grow. If an earlier pass + (such as VRP) isolates the NULL path for some reason or another + we end up with an explicit NULL dereference in the IL. Yes, it + started with a PHI, but by the time the path isolation code runs + it's explicit in the IL.
*/ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "isolate-paths"} } */ +/* { dg-final { cleanup-tree-dump "isolate-paths" } } */ + + diff --git a/gcc/testsuite/gcc.dg/tree-ssa/isolate-4.c b/gcc/testsuite/gcc.dg/tree-ssa/isolate-4.c new file mode 100644 index 00000000000..6937d25580a --- /dev/null +++ b/gcc/testsuite/gcc.dg/tree-ssa/isolate-4.c @@ -0,0 +1,32 @@ +/* { dg-do compile } */ +/* { dg-options "-O2 -fdump-tree-isolate-paths -fdump-tree-phicprop1" } */ + + +extern void foo(void *) __attribute__ ((__nonnull__ (1))); + +int z; + +void +com (int a) +{ + foo (a == 42 ? &z : (void *) 0); +} + +void +bar (void) +{ + foo ((void *)0); +} + +/* We're testing that the path isolation code can take advantage of the + nonnull attribute on function arguments to isolate a path where NULL + flows into a function call. We test this twice, once where the NULL + flows from a PHI and once where it is passed explicitly. + + We also verify that after isolation phi-cprop simplifies the + call so that it passes &z directly. */ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 2 "isolate-paths"} } */ +/* { dg-final { scan-tree-dump-times "foo .&z.;" 1 "phicprop1"} } */ +/* { dg-final { cleanup-tree-dump "isolate-paths" } } */ +/* { dg-final { cleanup-tree-dump "phicprop1" } } */ + + diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr58958.c b/gcc/testsuite/gcc.dg/tree-ssa/pr58958.c new file mode 100644 index 00000000000..faf377fb87e --- /dev/null +++ b/gcc/testsuite/gcc.dg/tree-ssa/pr58958.c @@ -0,0 +1,12 @@ +/* { dg-do compile } */ +/* { dg-options "-O2 -fdump-tree-optimized" } */ + +double a[10]; +int f(int n){ + a[3]=9; + __builtin_memset(&a[n],3,sizeof(double)); + return a[3]==9; +} + +/* { dg-final { scan-tree-dump " == 9" "optimized" } } */ +/* { dg-final { cleanup-tree-dump "optimized" } } */ diff --git a/gcc/testsuite/gcc.dg/wdate-time.c b/gcc/testsuite/gcc.dg/wdate-time.c new file mode 100644 index 00000000000..040dd995c2e --- /dev/null +++ b/gcc/testsuite/gcc.dg/wdate-time.c @@ -0,0 +1,6 @@ +/* { dg-do compile } */ +/* { dg-options "-Wdate-time" } */ + +const char time[] = __TIME__; /* { dg-warning "might prevent reproducible builds" } */ +const char date[] = __DATE__; /* { dg-warning "might prevent reproducible builds" } */ +const char timestamp[] = __TIMESTAMP__; /* { dg-warning "might prevent reproducible builds" } */ diff --git a/gcc/testsuite/gfortran.dg/derived_external_function_1.f90 b/gcc/testsuite/gfortran.dg/derived_external_function_1.f90 new file mode 100644 index 00000000000..7421c4c0f22 --- /dev/null +++ b/gcc/testsuite/gfortran.dg/derived_external_function_1.f90 @@ -0,0 +1,27 @@ +! { dg-do run } +! +! PR fortran/58771 +! +! Contributed by Vittorio Secca <zeccav@gmail.com> +! +! ICEd on the write statement with f() because the derived type backend +! declaration was not built. +! +module m + type t + integer(4) g + end type +end + +type(t) function f() result(ff) + use m + ff%g = 42 +end + + use m + character (20) :: line1, line2 + type(t) f + write (line1, *) f() + write (line2, *) 42_4 + if (line1 .ne. line2) call abort +end diff --git a/gcc/testsuite/gfortran.dg/optional_class_1.f90 b/gcc/testsuite/gfortran.dg/optional_class_1.f90 new file mode 100644 index 00000000000..589fc6023e7 --- /dev/null +++ b/gcc/testsuite/gfortran.dg/optional_class_1.f90 @@ -0,0 +1,45 @@ +! { dg-do run } +! +! PR fortran/57445 +! +! Contributed by Tobias Burnus <burnus@gcc.gnu.org> +! +! Spurious assert was added at revision 192495 +! 
+module m + implicit none + type t + integer :: i + end type t +contains + subroutine opt(xa, xc, xaa, xca) + type(t), allocatable, intent(out), optional :: xa + class(t), allocatable, intent(out), optional :: xc + type(t), allocatable, intent(out), optional :: xaa(:) + class(t), allocatable, intent(out), optional :: xca(:) + if (present (xca)) call foo_opt(xca=xca) + end subroutine opt + subroutine foo_opt(xa, xc, xaa, xca) + type(t), allocatable, intent(out), optional :: xa + class(t), allocatable, intent(out), optional :: xc + type(t), allocatable, intent(out), optional :: xaa(:) + class(t), allocatable, intent(out), optional :: xca(:) + if (present (xca)) then + if (allocated (xca)) deallocate (xca) + allocate (xca(3), source = [t(9),t(99),t(999)]) + end if + end subroutine foo_opt +end module m + use m + class(t), allocatable :: xca(:) + allocate (xca(1), source = t(42)) + select type (xca) + type is (t) + if (any (xca%i .ne. [42])) call abort + end select + call opt (xca = xca) + select type (xca) + type is (t) + if (any (xca%i .ne. [9,99,999])) call abort + end select +end diff --git a/gcc/testsuite/gfortran.dg/reshape_6.f90 b/gcc/testsuite/gfortran.dg/reshape_6.f90 new file mode 100644 index 00000000000..149f31efe7a --- /dev/null +++ b/gcc/testsuite/gfortran.dg/reshape_6.f90 @@ -0,0 +1,19 @@ +! { dg-do compile } +! PR fortran/58989 +! +program test + + real(8), dimension(4,4) :: fluxes + real(8), dimension(2,2,2,2) :: f + integer, dimension(3) :: dmmy + integer, parameter :: indx(4)=(/2,2,2,2/) + + fluxes = 1 + + dmmy = (/2,2,2/) + + f = reshape(fluxes,(/dmmy,2/)) ! Caused an ICE + f = reshape(fluxes,(/2,2,2,2/)) ! Works as expected + f = reshape(fluxes,indx) ! Works as expected + +end program test diff --git a/gcc/testsuite/gfortran.dg/wdate-time.F90 b/gcc/testsuite/gfortran.dg/wdate-time.F90 new file mode 100644 index 00000000000..f3a4f46c0b2 --- /dev/null +++ b/gcc/testsuite/gfortran.dg/wdate-time.F90 @@ -0,0 +1,6 @@ +! { dg-do compile } +! { dg-options "-Wdate-time" } +print *, __TIMESTAMP__ ! { dg-warning "might prevent reproducible builds" } +print *, __TIME__ ! { dg-warning "might prevent reproducible builds" } +print *, __DATE__ ! { dg-warning "might prevent reproducible builds" } +end diff --git a/gcc/timevar.def b/gcc/timevar.def index 66d61aecc4d..afdadb878a7 100644 --- a/gcc/timevar.def +++ b/gcc/timevar.def @@ -144,6 +144,7 @@ DEFTIMEVAR (TV_TREE_SSA_INCREMENTAL , "tree SSA incremental") DEFTIMEVAR (TV_TREE_OPS , "tree operand scan") DEFTIMEVAR (TV_TREE_SSA_DOMINATOR_OPTS , "dominator optimization") DEFTIMEVAR (TV_TREE_SRA , "tree SRA") +DEFTIMEVAR (TV_ISOLATE_ERRONEOUS_PATHS , "isolate erroneous paths") DEFTIMEVAR (TV_TREE_CCP , "tree CCP") DEFTIMEVAR (TV_TREE_PHI_CPROP , "tree PHI const/copy prop") DEFTIMEVAR (TV_TREE_SPLIT_EDGES , "tree split crit edges") diff --git a/gcc/tree-dfa.c b/gcc/tree-dfa.c index 13b27599aea..47413f2e2fe 100644 --- a/gcc/tree-dfa.c +++ b/gcc/tree-dfa.c @@ -389,7 +389,6 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, offset_int bit_offset = 0; HOST_WIDE_INT hbit_offset; bool seen_variable_array_ref = false; - tree base_type; /* First get the final access size from just the outermost expression. */ if (TREE_CODE (exp) == COMPONENT_REF) @@ -420,8 +419,6 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, and find the ultimate containing object. 
*/ while (1) { - base_type = TREE_TYPE (exp); - switch (TREE_CODE (exp)) { case BIT_FIELD_REF: @@ -545,7 +542,38 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, case VIEW_CONVERT_EXPR: break; + case TARGET_MEM_REF: + /* Via the variable index or index2 we can reach the + whole object. Still hand back the decl here. */ + if (TREE_CODE (TMR_BASE (exp)) == ADDR_EXPR + && (TMR_INDEX (exp) || TMR_INDEX2 (exp))) + { + exp = TREE_OPERAND (TMR_BASE (exp), 0); + bit_offset = 0; + maxsize = -1; + goto done; + } + /* Fallthru. */ case MEM_REF: + /* We need to deal with variable arrays ending structures such as + struct { int length; int a[1]; } x; x.a[d] + struct { struct { int a; int b; } a[1]; } x; x.a[d].a + struct { struct { int a[1]; } a[1]; } x; x.a[0][d], x.a[d][0] + struct { int len; union { int a[1]; struct X x; } u; } x; x.u.a[d] + where we do not know maxsize for variable index accesses to + the array. The simplest way to conservatively deal with this + is to punt in the case that offset + maxsize reaches the + base type boundary. This needs to include possible trailing + padding that is there for alignment purposes. */ + if (seen_variable_array_ref + && maxsize != -1 + && (!bit_offset.fits_shwi () + || !tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (exp))) + || (bit_offset.to_shwi () + maxsize + == (signed) tree_to_uhwi + (TYPE_SIZE (TREE_TYPE (exp)))))) + maxsize = -1; + /* Hand back the decl for MEM[&decl, off]. */ if (TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR) { @@ -566,44 +594,23 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, } goto done; - case TARGET_MEM_REF: - /* Hand back the decl for MEM[&decl, off]. */ - if (TREE_CODE (TMR_BASE (exp)) == ADDR_EXPR) - { - /* Via the variable index or index2 we can reach the - whole object. */ - if (TMR_INDEX (exp) || TMR_INDEX2 (exp)) - { - exp = TREE_OPERAND (TMR_BASE (exp), 0); - bit_offset = 0; - maxsize = -1; - goto done; - } - if (integer_zerop (TMR_OFFSET (exp))) - exp = TREE_OPERAND (TMR_BASE (exp), 0); - else - { - offset_int off = mem_ref_offset (exp); - off = wi::lshift (off, (BITS_PER_UNIT == 8 - ? 3 : exact_log2 (BITS_PER_UNIT))); - off += bit_offset; - if (wi::fits_shwi_p (off)) - { - bit_offset = off; - exp = TREE_OPERAND (TMR_BASE (exp), 0); - } - } - } - goto done; - default: goto done; } exp = TREE_OPERAND (exp, 0); } - done: + /* We need to deal with variable arrays ending structures. */ + if (seen_variable_array_ref + && maxsize != -1 + && (!bit_offset.fits_shwi () + || !tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (exp))) + || (bit_offset.to_shwi () + maxsize + == (signed) tree_to_uhwi (TYPE_SIZE (TREE_TYPE (exp)))))) + maxsize = -1; + + done: if (!wi::fits_shwi_p (bit_offset)) { *poffset = 0; @@ -615,24 +622,6 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, hbit_offset = bit_offset.to_shwi (); - /* We need to deal with variable arrays ending structures such as - struct { int length; int a[1]; } x; x.a[d] - struct { struct { int a; int b; } a[1]; } x; x.a[d].a - struct { struct { int a[1]; } a[1]; } x; x.a[0][d], x.a[d][0] - struct { int len; union { int a[1]; struct X x; } u; } x; x.u.a[d] - where we do not know maxsize for variable index accesses to - the array. The simplest way to conservatively deal with this - is to punt in the case that offset + maxsize reaches the - base type boundary. This needs to include possible trailing padding - that is there for alignment purposes. 
*/ - - if (seen_variable_array_ref - && maxsize != -1 - && (!tree_fits_uhwi_p (TYPE_SIZE (base_type)) - || (hbit_offset + maxsize - == (signed) tree_to_uhwi (TYPE_SIZE (base_type))))) - maxsize = -1; - /* In case of a decl or constant base object we can do better. */ if (DECL_P (exp)) diff --git a/gcc/tree-loop-distribution.c b/gcc/tree-loop-distribution.c index 79884bf4272..4f9b848847a 100644 --- a/gcc/tree-loop-distribution.c +++ b/gcc/tree-loop-distribution.c @@ -1324,7 +1324,7 @@ pg_add_dependence_edges (struct graph *rdg, vec<loop_p> loops, int dir, for (int ii = 0; drs1.iterate (ii, &dr1); ++ii) for (int jj = 0; drs2.iterate (jj, &dr2); ++jj) { - int this_dir = 1; + int this_dir = -1; ddr_p ddr; /* Re-shuffle data-refs to be in dominator order. */ if (rdg_vertex_for_stmt (rdg, DR_STMT (dr1)) diff --git a/gcc/tree-outof-ssa.c b/gcc/tree-outof-ssa.c index 1e982857e14..4dc3f9e4a8d 100644 --- a/gcc/tree-outof-ssa.c +++ b/gcc/tree-outof-ssa.c @@ -548,6 +548,23 @@ eliminate_name (elim_graph g, int T) elim_graph_add_node (g, T); } +/* Return true if this phi argument T should have a copy queued when using + var_map MAP. PHI nodes should contain only ssa_names and invariants. A + test for ssa_name is definitely simpler, but don't let invalid contents + slip through in the meantime. */ + +static inline bool +queue_phi_copy_p (var_map map, tree t) +{ + if (TREE_CODE (t) == SSA_NAME) + { + if (var_to_partition (map, t) == NO_PARTITION) + return true; + return false; + } + gcc_checking_assert (is_gimple_min_invariant (t)); + return true; +} /* Build elimination graph G for basic block BB on incoming PHI edge G->e. */ @@ -577,9 +594,7 @@ eliminate_build (elim_graph g) /* If this argument is a constant, or a SSA_NAME which is being left in SSA form, just queue a copy to be emitted on this edge. */ - if (!phi_ssa_name_p (Ti) - || (TREE_CODE (Ti) == SSA_NAME - && var_to_partition (g->map, Ti) == NO_PARTITION)) + if (queue_phi_copy_p (g->map, Ti)) { /* Save constant copies until all other copies have been emitted on this edge. */ diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h index c4d09fe0821..3aeaeeb114b 100644 --- a/gcc/tree-pass.h +++ b/gcc/tree-pass.h @@ -425,6 +425,7 @@ extern gimple_opt_pass *make_pass_sink_code (gcc::context *ctxt); extern gimple_opt_pass *make_pass_fre (gcc::context *ctxt); extern gimple_opt_pass *make_pass_check_data_deps (gcc::context *ctxt); extern gimple_opt_pass *make_pass_copy_prop (gcc::context *ctxt); +extern gimple_opt_pass *make_pass_isolate_erroneous_paths (gcc::context *ctxt); extern gimple_opt_pass *make_pass_vrp (gcc::context *ctxt); extern gimple_opt_pass *make_pass_uncprop (gcc::context *ctxt); extern gimple_opt_pass *make_pass_return_slot (gcc::context *ctxt); diff --git a/gcc/tree-ssa-alias.c b/gcc/tree-ssa-alias.c index 1a58ce722ed..a0c4ce3cb57 100644 --- a/gcc/tree-ssa-alias.c +++ b/gcc/tree-ssa-alias.c @@ -559,14 +559,14 @@ ao_ref_alias_set (ao_ref *ref) } /* Init an alias-oracle reference representation from a gimple pointer - PTR and a gimple size SIZE in bytes. If SIZE is NULL_TREE the the + PTR and a gimple size SIZE in bytes. If SIZE is NULL_TREE then the size is assumed to be unknown. The access is assumed to be only to or after of the pointer target, not before it. 
*/ void ao_ref_init_from_ptr_and_size (ao_ref *ref, tree ptr, tree size) { - HOST_WIDE_INT t1, t2, extra_offset = 0; + HOST_WIDE_INT t, extra_offset = 0; ref->ref = NULL_TREE; if (TREE_CODE (ptr) == SSA_NAME) { @@ -576,17 +576,26 @@ ao_ref_init_from_ptr_and_size (ao_ref *ref, tree ptr, tree size) ptr = gimple_assign_rhs1 (stmt); else if (is_gimple_assign (stmt) && gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR - && tree_fits_shwi_p (gimple_assign_rhs2 (stmt)) - && (t1 = int_cst_value (gimple_assign_rhs2 (stmt))) >= 0) + && TREE_CODE (gimple_assign_rhs2 (stmt)) == INTEGER_CST) { ptr = gimple_assign_rhs1 (stmt); - extra_offset = BITS_PER_UNIT * t1; + extra_offset = BITS_PER_UNIT + * int_cst_value (gimple_assign_rhs2 (stmt)); } } if (TREE_CODE (ptr) == ADDR_EXPR) - ref->base = get_ref_base_and_extent (TREE_OPERAND (ptr, 0), - &ref->offset, &t1, &t2); + { + ref->base = get_addr_base_and_unit_offset (TREE_OPERAND (ptr, 0), &t); + if (ref->base) + ref->offset = BITS_PER_UNIT * t; + else + { + size = NULL_TREE; + ref->offset = 0; + ref->base = get_base_address (TREE_OPERAND (ptr, 0)); + } + } else { ref->base = build2 (MEM_REF, char_type_node, diff --git a/gcc/tree-ssa-alias.h b/gcc/tree-ssa-alias.h index 831cffebecb..581cd82a5f3 100644 --- a/gcc/tree-ssa-alias.h +++ b/gcc/tree-ssa-alias.h @@ -146,18 +146,18 @@ extern GTY(()) struct pt_solution ipa_escaped_pt; range is open-ended. Otherwise return false. */ static inline bool -ranges_overlap_p (unsigned HOST_WIDE_INT pos1, +ranges_overlap_p (HOST_WIDE_INT pos1, unsigned HOST_WIDE_INT size1, - unsigned HOST_WIDE_INT pos2, + HOST_WIDE_INT pos2, unsigned HOST_WIDE_INT size2) { if (pos1 >= pos2 && (size2 == (unsigned HOST_WIDE_INT)-1 - || pos1 < (pos2 + size2))) + || pos1 < (pos2 + (HOST_WIDE_INT) size2))) return true; if (pos2 >= pos1 && (size1 == (unsigned HOST_WIDE_INT)-1 - || pos2 < (pos1 + size1))) + || pos2 < (pos1 + (HOST_WIDE_INT) size1))) return true; return false; diff --git a/gcc/tree-ssa.c b/gcc/tree-ssa.c index 9072ce11673..585df718221 100644 --- a/gcc/tree-ssa.c +++ b/gcc/tree-ssa.c @@ -236,100 +236,6 @@ flush_pending_stmts (edge e) redirect_edge_var_map_clear (e); } - -/* Data structure used to count the number of dereferences to PTR - inside an expression. */ -struct count_ptr_d -{ - tree ptr; - unsigned num_stores; - unsigned num_loads; -}; - - -/* Helper for count_uses_and_derefs. Called by walk_tree to look for - (ALIGN/MISALIGNED_)INDIRECT_REF nodes for the pointer passed in DATA. */ - -static tree -count_ptr_derefs (tree *tp, int *walk_subtrees, void *data) -{ - struct walk_stmt_info *wi_p = (struct walk_stmt_info *) data; - struct count_ptr_d *count_p = (struct count_ptr_d *) wi_p->info; - - /* Do not walk inside ADDR_EXPR nodes. In the expression &ptr->fld, - pointer 'ptr' is *not* dereferenced, it is simply used to compute - the address of 'fld' as 'ptr + offsetof(fld)'. */ - if (TREE_CODE (*tp) == ADDR_EXPR) - { - *walk_subtrees = 0; - return NULL_TREE; - } - - if (TREE_CODE (*tp) == MEM_REF && TREE_OPERAND (*tp, 0) == count_p->ptr) - { - if (wi_p->is_lhs) - count_p->num_stores++; - else - count_p->num_loads++; - } - - return NULL_TREE; -} - - -/* Count the number of direct and indirect uses for pointer PTR in - statement STMT. The number of direct uses is stored in - *NUM_USES_P. Indirect references are counted separately depending - on whether they are store or load operations. The counts are - stored in *NUM_STORES_P and *NUM_LOADS_P. 
*/ - -void -count_uses_and_derefs (tree ptr, gimple stmt, unsigned *num_uses_p, - unsigned *num_loads_p, unsigned *num_stores_p) -{ - ssa_op_iter i; - tree use; - - *num_uses_p = 0; - *num_loads_p = 0; - *num_stores_p = 0; - - /* Find out the total number of uses of PTR in STMT. */ - FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE) - if (use == ptr) - (*num_uses_p)++; - - /* Now count the number of indirect references to PTR. This is - truly awful, but we don't have much choice. There are no parent - pointers inside INDIRECT_REFs, so an expression like - '*x_1 = foo (x_1, *x_1)' needs to be traversed piece by piece to - find all the indirect and direct uses of x_1 inside. The only - shortcut we can take is the fact that GIMPLE only allows - INDIRECT_REFs inside the expressions below. */ - if (is_gimple_assign (stmt) - || gimple_code (stmt) == GIMPLE_RETURN - || gimple_code (stmt) == GIMPLE_ASM - || is_gimple_call (stmt)) - { - struct walk_stmt_info wi; - struct count_ptr_d count; - - count.ptr = ptr; - count.num_stores = 0; - count.num_loads = 0; - - memset (&wi, 0, sizeof (wi)); - wi.info = &count; - walk_gimple_op (stmt, count_ptr_derefs, &wi); - - *num_stores_p = count.num_stores; - *num_loads_p = count.num_loads; - } - - gcc_assert (*num_uses_p >= *num_loads_p + *num_stores_p); -} - - /* Replace the LHS of STMT, an assignment, either a GIMPLE_ASSIGN or a GIMPLE_CALL, with NLHS, in preparation for modifying the RHS to an expression with a different value. diff --git a/gcc/tree-ssa.h b/gcc/tree-ssa.h index ab1c920ff83..89ea5c64c76 100644 --- a/gcc/tree-ssa.h +++ b/gcc/tree-ssa.h @@ -39,8 +39,6 @@ extern edge_var_map_vector *redirect_edge_var_map_vector (edge); extern void redirect_edge_var_map_destroy (void); extern edge ssa_redirect_edge (edge, basic_block); extern void flush_pending_stmts (edge); -extern void count_uses_and_derefs (tree, gimple, unsigned *, unsigned *, - unsigned *); extern void gimple_replace_ssa_lhs (gimple, tree); extern tree target_for_debug_bind (tree); extern void insert_debug_temp_for_var_def (gimple_stmt_iterator *, tree); diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c index 03fb075193d..6c3f988b991 100644 --- a/gcc/tree-vrp.c +++ b/gcc/tree-vrp.c @@ -4402,57 +4402,6 @@ fp_predicate (gimple stmt) return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt))); } - -/* If OP can be inferred to be non-zero after STMT executes, return true. */ - -static bool -infer_nonnull_range (gimple stmt, tree op) -{ - /* We can only assume that a pointer dereference will yield - non-NULL if -fdelete-null-pointer-checks is enabled. */ - if (!flag_delete_null_pointer_checks - || !POINTER_TYPE_P (TREE_TYPE (op)) - || gimple_code (stmt) == GIMPLE_ASM) - return false; - - unsigned num_uses, num_loads, num_stores; - - count_uses_and_derefs (op, stmt, &num_uses, &num_loads, &num_stores); - if (num_loads + num_stores > 0) - return true; - - if (is_gimple_call (stmt) && !gimple_call_internal_p (stmt)) - { - tree fntype = gimple_call_fntype (stmt); - tree attrs = TYPE_ATTRIBUTES (fntype); - for (; attrs; attrs = TREE_CHAIN (attrs)) - { - attrs = lookup_attribute ("nonnull", attrs); - - /* If "nonnull" wasn't specified, we know nothing about - the argument. */ - if (attrs == NULL_TREE) - return false; - - /* If "nonnull" applies to all the arguments, then ARG - is non-null. */ - if (TREE_VALUE (attrs) == NULL_TREE) - return true; - - /* Now see if op appears in the nonnull list. 
*/ - for (tree t = TREE_VALUE (attrs); t; t = TREE_CHAIN (t)) - { - int idx = tree_to_shwi (TREE_VALUE (t)) - 1; - tree arg = gimple_call_arg (stmt, idx); - if (op == arg) - return true; - } - } - } - - return false; -} - /* If the range of values taken by OP can be inferred after STMT executes, return the comparison code (COMP_CODE_P) and value (VAL_P) that describes the inferred range. Return true if a range could be @@ -6399,13 +6348,14 @@ all_imm_uses_in_stmt_or_feed_cond (tree var, gimple stmt, basic_block cond_bb) FOR_EACH_IMM_USE_FAST (use_p, iter, var) if (USE_STMT (use_p) != stmt) { - gimple use_stmt = USE_STMT (use_p); + gimple use_stmt = USE_STMT (use_p), use_stmt2; if (is_gimple_debug (use_stmt)) continue; while (is_gimple_assign (use_stmt) + && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME && single_imm_use (gimple_assign_lhs (use_stmt), - &use2_p, &use_stmt)) - ; + &use2_p, &use_stmt2)) + use_stmt = use_stmt2; if (gimple_code (use_stmt) != GIMPLE_COND || gimple_bb (use_stmt) != cond_bb) return false; diff --git a/gcc/tree.c b/gcc/tree.c index 133dbb16da9..562c0107c75 100644 --- a/gcc/tree.c +++ b/gcc/tree.c @@ -4825,7 +4825,7 @@ attribute_value_equal (const_tree attr1, const_tree attr2) return (simple_cst_list_equal (TREE_VALUE (attr1), TREE_VALUE (attr2)) == 1); - if (flag_openmp + if ((flag_openmp || flag_openmp_simd) && TREE_VALUE (attr1) && TREE_VALUE (attr2) && TREE_CODE (TREE_VALUE (attr1)) == OMP_CLAUSE && TREE_CODE (TREE_VALUE (attr2)) == OMP_CLAUSE) diff --git a/gcc/vec.c b/gcc/vec.c index f3c331507d5..78252e0d088 100644 --- a/gcc/vec.c +++ b/gcc/vec.c @@ -187,9 +187,7 @@ vec_prefix::calculate_allocation (vec_prefix *pfx, unsigned reserve, num = pfx->m_num; } else if (!reserve) - /* If there's no vector, and we've not requested anything, then we - will create a NULL vector. */ - return 0; + gcc_unreachable (); /* We must have run out of room. */ gcc_assert (alloc - num < reserve); diff --git a/gcc/vec.h b/gcc/vec.h index f97e022f24a..b1ebda44f5e 100644 --- a/gcc/vec.h +++ b/gcc/vec.h @@ -283,11 +283,7 @@ va_heap::reserve (vec<T, va_heap, vl_embed> *&v, unsigned reserve, bool exact { unsigned alloc = vec_prefix::calculate_allocation (v ? &v->m_vecpfx : 0, reserve, exact); - if (!alloc) - { - release (v); - return; - } + gcc_assert (alloc); if (GATHER_STATISTICS && v) v->m_vecpfx.release_overhead ();
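
A rough source-level sketch of the transformation the isolate-paths dump scans above are checking for. This is hypothetical code, not the pass's actual GIMPLE output, and get_ptr is a stand-in for any function that may return NULL on one path:

/* Input: NULL reaches the dereference when a == 0.  */
int *get_ptr (int a);

int
before (int a)
{
  int *p = a ? get_ptr (a) : (int *) 0;
  return *p;
}

/* Conceptual result: the erroneous path is split off and its
   dereference replaced by a trap, which is what the
   scan-tree-dump-times "__builtin_trap" directives count.  */
int
after (int a)
{
  if (!a)
    __builtin_trap ();
  return *get_ptr (a);
}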
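The tree-ssa-alias.h hunk above makes the range positions signed while the sizes stay unsigned. A minimal standalone sketch of why that matters once reference offsets can be negative; the typedefs and driver below are illustrative stand-ins, not GCC's actual types or build setup:

#include <stdio.h>

typedef long HOST_WIDE_INT;            /* stand-in for GCC's type */
typedef unsigned long UHOST_WIDE_INT;

/* Mirrors the patched ranges_overlap_p: positions are signed,
   a size of (unsigned)-1 means "unknown/open-ended".  */
static int
ranges_overlap_p (HOST_WIDE_INT pos1, UHOST_WIDE_INT size1,
                  HOST_WIDE_INT pos2, UHOST_WIDE_INT size2)
{
  if (pos1 >= pos2
      && (size2 == (UHOST_WIDE_INT) -1
          || pos1 < (pos2 + (HOST_WIDE_INT) size2)))
    return 1;
  if (pos2 >= pos1
      && (size1 == (UHOST_WIDE_INT) -1
          || pos2 < (pos1 + (HOST_WIDE_INT) size1)))
    return 1;
  return 0;
}

int
main (void)
{
  /* An 8-byte access at offset -4 overlaps a 4-byte access at 0.
     With unsigned positions, -4 would wrap to a huge value and the
     pair would compare as disjoint, missing a real dependence.  */
  printf ("%d\n", ranges_overlap_p (-4, 8, 0, 4));   /* prints 1 */
  printf ("%d\n", ranges_overlap_p (-4, 4, 0, 4));   /* prints 0 */
  return 0;
}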
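The "variable arrays ending structures" cases listed in the relocated get_ref_base_and_extent comment are the classic trailing-array idiom. A small illustrative example (not GCC code) of an access whose extent cannot be bounded by the declared type:

/* Callers over-allocate so that a[] really has 'length' elements.  */
struct vec
{
  int length;
  int a[1];
};

int
read_elt (struct vec *v, int d)
{
  /* The bit offset of v->a[d] is 32 + 32*d.  For variable d the
     conservative range ends exactly at the size of struct vec, so
     the access may continue into the over-allocated tail and the
     maximum size must be punted to -1 (unknown).  */
  return v->a[d];
}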