author     bviyer <bviyer@138bc75d-0d04-0410-961f-82ee72b054a4>  2012-05-31 18:46:05 +0000
committer  bviyer <bviyer@138bc75d-0d04-0410-961f-82ee72b054a4>  2012-05-31 18:46:05 +0000
commit     3e76c73d74408b80d7e33a9589021ab05ae0f94d (patch)
tree       eec692b17e05c30eceb8a803d7f44094c82ca16c /gcc/config
parent     1dbe1bb19c674745941ca05ed462363916ae8705 (diff)
parent     9635c0c3aa804dd411dfdcaa3322bd6096dbaf0c (diff)
Merged with trunk at revision 188063.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/branches/cilkplus@188074 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'gcc/config')
-rw-r--r--  gcc/config/alpha/alpha.c | 1
-rw-r--r--  gcc/config/alpha/ev6.md | 4
-rw-r--r--  gcc/config/alpha/vms.h | 2
-rw-r--r--  gcc/config/arm/README-interworking | 2
-rw-r--r--  gcc/config/arm/arm.c | 23
-rw-r--r--  gcc/config/arm/iterators.md | 2
-rw-r--r--  gcc/config/arm/t-arm | 2
-rw-r--r--  gcc/config/arm/vxworks.h | 2
-rw-r--r--  gcc/config/avr/avr.c | 6
-rw-r--r--  gcc/config/avr/avr.h | 10
-rw-r--r--  gcc/config/avr/avr.md | 2
-rw-r--r--  gcc/config/avr/builtins.def | 2
-rw-r--r--  gcc/config/avr/elf.h | 2
-rw-r--r--  gcc/config/avr/t-avr | 2
-rw-r--r--  gcc/config/bfin/bfin.c | 1
-rw-r--r--  gcc/config/c6x/c6x.c | 4
-rw-r--r--  gcc/config/cr16/cr16.c | 2
-rw-r--r--  gcc/config/cr16/cr16.md | 2
-rw-r--r--  gcc/config/cris/cris.h | 13
-rw-r--r--  gcc/config/cris/cris.md | 10
-rw-r--r--  gcc/config/cris/cris.opt | 4
-rw-r--r--  gcc/config/darwin.c | 2
-rw-r--r--  gcc/config/darwin.h | 8
-rw-r--r--  gcc/config/darwin.opt | 2
-rw-r--r--  gcc/config/epiphany/epiphany.c | 1
-rw-r--r--  gcc/config/frv/frv.c | 1
-rw-r--r--  gcc/config/i386/i386-c.c | 2
-rw-r--r--  gcc/config/i386/i386.c | 54
-rw-r--r--  gcc/config/ia64/ia64.c | 8
-rw-r--r--  gcc/config/m32r/m32r.c | 1
-rw-r--r--  gcc/config/m68k/cf.md | 2
-rw-r--r--  gcc/config/mep/mep.c | 5
-rw-r--r--  gcc/config/microblaze/microblaze.c | 3
-rw-r--r--  gcc/config/microblaze/microblaze.h | 2
-rw-r--r--  gcc/config/mips/mips.c | 3
-rw-r--r--  gcc/config/mmix/mmix.c | 1
-rw-r--r--  gcc/config/mmix/mmix.md | 8
-rw-r--r--  gcc/config/mn10300/mn10300.c | 4
-rw-r--r--  gcc/config/mn10300/mn10300.md | 4
-rw-r--r--  gcc/config/pa/pa.c | 28
-rw-r--r--  gcc/config/picochip/picochip.c | 1
-rw-r--r--  gcc/config/picochip/picochip.h | 2
-rw-r--r--  gcc/config/rs6000/a2.md | 2
-rw-r--r--  gcc/config/rs6000/rs6000.c | 240
-rw-r--r--  gcc/config/rs6000/rs6000.md | 13
-rw-r--r--  gcc/config/rs6000/t-linux64 | 11
-rw-r--r--  gcc/config/rs6000/t-rs6000 | 2
-rw-r--r--  gcc/config/rs6000/vector.md | 2
-rw-r--r--  gcc/config/rx/rx.md | 2
-rw-r--r--  gcc/config/rx/rx.opt | 2
-rw-r--r--  gcc/config/s390/2097.md | 4
-rw-r--r--  gcc/config/s390/s390.c | 4
-rw-r--r--  gcc/config/s390/s390.h | 2
-rw-r--r--  gcc/config/score/score.c | 1
-rw-r--r--  gcc/config/sh/predicates.md | 19
-rw-r--r--  gcc/config/sh/sh.c | 20
-rw-r--r--  gcc/config/sh/sh.h | 18
-rw-r--r--  gcc/config/sh/sh.md | 6
-rw-r--r--  gcc/config/sh/sh.opt | 6
-rw-r--r--  gcc/config/sh/sync.md | 537
-rw-r--r--  gcc/config/sparc/sparc.c | 8
-rw-r--r--  gcc/config/sparc/sync.md | 2
-rw-r--r--  gcc/config/spu/spu.c | 7
-rw-r--r--  gcc/config/spu/spu.md | 2
-rw-r--r--  gcc/config/spu/t-spu-elf | 2
-rw-r--r--  gcc/config/tilegx/tilegx.c | 2
-rw-r--r--  gcc/config/tilepro/tilepro.c | 2
-rw-r--r--  gcc/config/v850/v850.c | 1
-rw-r--r--  gcc/config/vms/vms.c | 4
-rw-r--r--  gcc/config/vxworks-dummy.h | 2
-rw-r--r--  gcc/config/vxworks.h | 2
71 files changed, 861 insertions, 304 deletions
diff --git a/gcc/config/alpha/alpha.c b/gcc/config/alpha/alpha.c
index 225c9ab3c54..6d15bf70cff 100644
--- a/gcc/config/alpha/alpha.c
+++ b/gcc/config/alpha/alpha.c
@@ -43,7 +43,6 @@ along with GCC; see the file COPYING3. If not see
#include "function.h"
#include "diagnostic-core.h"
#include "ggc.h"
-#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
diff --git a/gcc/config/alpha/ev6.md b/gcc/config/alpha/ev6.md
index adfe504bf8b..a16535a6450 100644
--- a/gcc/config/alpha/ev6.md
+++ b/gcc/config/alpha/ev6.md
@@ -147,11 +147,15 @@
(eq_attr "type" "fadd,fcpys,fbr"))
"ev6_fa")
+(define_bypass 6 "ev6_fmul,ev6_fadd" "ev6_fst,ev6_ftoi")
+
(define_insn_reservation "ev6_fcmov" 8
(and (eq_attr "tune" "ev6")
(eq_attr "type" "fcmov"))
"ev6_fa,nothing*3,ev6_fa")
+(define_bypass 10 "ev6_fcmov" "ev6_fst,ev6_ftoi")
+
(define_insn_reservation "ev6_fdivsf" 12
(and (eq_attr "tune" "ev6")
(and (eq_attr "type" "fdiv")
diff --git a/gcc/config/alpha/vms.h b/gcc/config/alpha/vms.h
index 6f90122fef3..03d9b9b229a 100644
--- a/gcc/config/alpha/vms.h
+++ b/gcc/config/alpha/vms.h
@@ -153,7 +153,7 @@ typedef struct {int num_args; enum avms_arg_type atypes[6];} avms_arg_info;
#define DEFAULT_PCC_STRUCT_RETURN 0
-/* Eventhough pointers are 64bits, only 32bit ever remain significant in code
+/* Even though pointers are 64bits, only 32bit ever remain significant in code
addresses. */
#define MASK_RETURN_ADDR \
(flag_vms_pointer_size == VMS_POINTER_SIZE_NONE \
diff --git a/gcc/config/arm/README-interworking b/gcc/config/arm/README-interworking
index 7f2eda83b49..cfa7f66e294 100644
--- a/gcc/config/arm/README-interworking
+++ b/gcc/config/arm/README-interworking
@@ -227,7 +227,7 @@ considerations when building programs and DLLs:
Switching between the ARM and Thumb instruction sets is accomplished
via the BX instruction which takes as an argument a register name.
-Control is transfered to the address held in this register (with the
+Control is transferred to the address held in this register (with the
bottom bit masked out), and if the bottom bit is set, then Thumb
instruction processing is enabled, otherwise ARM instruction
processing is enabled.
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index 7a9819705e5..8a862275b5b 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -46,7 +46,6 @@
#include "ggc.h"
#include "except.h"
#include "c-family/c-pragma.h" /* ??? */
-#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
@@ -2588,7 +2587,7 @@ optimal_immediate_sequence (enum rtx_code code, unsigned HOST_WIDE_INT val,
int insns1, insns2;
struct four_ints tmp_sequence;
- /* If we aren't targetting ARM, the best place to start is always at
+ /* If we aren't targeting ARM, the best place to start is always at
the bottom, otherwise look more closely. */
if (TARGET_ARM)
{
@@ -8473,7 +8472,7 @@ cortex_a9_sched_adjust_cost (rtx insn, rtx link, rtx dep, int * cost)
&& reg_overlap_mentioned_p (SET_DEST (PATTERN (insn)),
SET_DEST (PATTERN (dep))))
{
- /* FMACS is a special case where the dependant
+ /* FMACS is a special case where the dependent
instruction can be issued 3 cycles before
the normal latency in case of an output
dependency. */
@@ -9459,7 +9458,7 @@ neon_expand_vector_init (rtx target, rtx vals)
/* Construct the vector in memory one field at a time
and load the whole vector. */
- mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
+ mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
for (i = 0; i < n_elts; i++)
emit_move_insn (adjust_address_nv (mem, inner_mode,
i * GET_MODE_SIZE (inner_mode)),
@@ -16187,7 +16186,7 @@ arm_output_epilogue (rtx sibling)
now we have to use add/sub in those cases. However, the value
of that would be marginal, as both mov and add/sub are 32-bit
in ARM mode, and it would require extra conditionals
- in arm_expand_prologue to distingish ARM-apcs-frame case
+ in arm_expand_prologue to distinguish ARM-apcs-frame case
(where frame pointer is required to point at first register)
and ARM-non-apcs-frame. Therefore, such change is postponed
until real need arise. */
@@ -25638,10 +25637,18 @@ arm_evpc_neon_vrev (struct expand_vec_perm_d *d)
return false;
}
- for (i = 0; i < nelt; i += diff)
+ for (i = 0; i < nelt ; i += diff + 1)
for (j = 0; j <= diff; j += 1)
- if (d->perm[i + j] != i + diff - j)
- return false;
+ {
+ /* This is guaranteed to be true as the value of diff
+ is 7, 3, 1 and we should have enough elements in the
+ queue to generate this. Getting a vector mask with a
+ value of diff other than these values implies that
+ something is wrong by the time we get here. */
+ gcc_assert (i + j < nelt);
+ if (d->perm[i + j] != i + diff - j)
+ return false;
+ }
/* Success! */
if (d->testing_p)
diff --git a/gcc/config/arm/iterators.md b/gcc/config/arm/iterators.md
index 15672647e51..bb0d44e75e0 100644
--- a/gcc/config/arm/iterators.md
+++ b/gcc/config/arm/iterators.md
@@ -36,7 +36,7 @@
;; A list of integer modes that are less than a word
(define_mode_iterator NARROW [QI HI])
-;; A list of all the integer modes upto 64bit
+;; A list of all the integer modes up to 64bit
(define_mode_iterator QHSD [QI HI SI DI])
;; A list of the 32bit and 64bit integer modes
diff --git a/gcc/config/arm/t-arm b/gcc/config/arm/t-arm
index 1128d1904b0..2bc97a65dac 100644
--- a/gcc/config/arm/t-arm
+++ b/gcc/config/arm/t-arm
@@ -78,7 +78,7 @@ arm.o: $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
insn-config.h conditions.h output.h \
$(INSN_ATTR_H) $(FLAGS_H) reload.h $(FUNCTION_H) \
$(EXPR_H) $(OPTABS_H) $(RECOG_H) $(CGRAPH_H) \
- $(GGC_H) except.h $(C_PRAGMA_H) $(INTEGRATE_H) $(TM_P_H) \
+ $(GGC_H) except.h $(C_PRAGMA_H) $(TM_P_H) \
$(TARGET_H) $(TARGET_DEF_H) debug.h langhooks.h $(DF_H) \
intl.h libfuncs.h $(PARAMS_H) $(OPTS_H) $(srcdir)/config/arm/arm-cores.def \
$(srcdir)/config/arm/arm-arches.def $(srcdir)/config/arm/arm-fpus.def
diff --git a/gcc/config/arm/vxworks.h b/gcc/config/arm/vxworks.h
index 887691326e5..391c166336b 100644
--- a/gcc/config/arm/vxworks.h
+++ b/gcc/config/arm/vxworks.h
@@ -1,5 +1,5 @@
/* Definitions of target machine for GCC,
- for ARM with targetting the VXWorks run time environment.
+ for ARM with targeting the VXWorks run time environment.
Copyright (C) 1999, 2000, 2003, 2004, 2007, 2008, 2009, 2010, 2011
Free Software Foundation, Inc.
diff --git a/gcc/config/avr/avr.c b/gcc/config/avr/avr.c
index 38afc7abf93..208f650c9af 100644
--- a/gcc/config/avr/avr.c
+++ b/gcc/config/avr/avr.c
@@ -6840,7 +6840,7 @@ avr_progmem_p (tree decl, tree attributes)
/* Scan type TYP for pointer references to address space ASn.
Return ADDR_SPACE_GENERIC (i.e. 0) if all pointers targeting
the AS are also declared to be CONST.
- Otherwise, return the respective addres space, i.e. a value != 0. */
+ Otherwise, return the respective address space, i.e. a value != 0. */
static addr_space_t
avr_nonconst_pointer_addrspace (tree typ)
@@ -6884,7 +6884,7 @@ avr_nonconst_pointer_addrspace (tree typ)
}
-/* Sanity check NODE so that all pointers targeting non-generic addres spaces
+/* Sanity check NODE so that all pointers targeting non-generic address spaces
go along with CONST qualifier. Writing to these address spaces should
be detected and complained about as early as possible. */
@@ -9727,7 +9727,7 @@ avr_emit_movmemhi (rtx *xop)
/* FIXME: Register allocator does a bad job and might spill address
register(s) inside the loop leading to additional move instruction
to/from stack which could clobber tmp_reg. Thus, do *not* emit
- load and store as seperate insns. Instead, we perform the copy
+ load and store as separate insns. Instead, we perform the copy
by means of one monolithic insn. */
gcc_assert (TMP_REGNO == LPM_REGNO);
diff --git a/gcc/config/avr/avr.h b/gcc/config/avr/avr.h
index dfbd071d192..54c127469e1 100644
--- a/gcc/config/avr/avr.h
+++ b/gcc/config/avr/avr.h
@@ -557,10 +557,10 @@ typedef struct avr_args {
#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \
avr_output_addr_vec_elt(STREAM, VALUE)
-#define ASM_OUTPUT_ALIGN(STREAM, POWER) \
- do { \
- if ((POWER) > 1) \
- fprintf (STREAM, "\t.p2align\t%d\n", POWER); \
+#define ASM_OUTPUT_ALIGN(STREAM, POWER) \
+ do { \
+ if ((POWER) > 0) \
+ fprintf (STREAM, "\t.p2align\t%d\n", POWER); \
} while (0)
#define CASE_VECTOR_MODE HImode
@@ -713,7 +713,7 @@ struct GTY(()) machine_function
int attributes_checked_p;
};
-/* AVR does not round pushes, but the existance of this macro is
+/* AVR does not round pushes, but the existence of this macro is
required in order for pushes to be generated. */
#define PUSH_ROUNDING(X) (X)
diff --git a/gcc/config/avr/avr.md b/gcc/config/avr/avr.md
index 3fe06da661c..2b1a83c607a 100644
--- a/gcc/config/avr/avr.md
+++ b/gcc/config/avr/avr.md
@@ -29,7 +29,7 @@
;; k Reverse branch condition.
;;..m..Constant Direct Data memory address.
;; i Print the SFR address quivalent of a CONST_INT or a CONST_INT
-;; RAM address. The resulting addres is suitable to be used in IN/OUT.
+;; RAM address. The resulting address is suitable to be used in IN/OUT.
;; o Displacement for (mem (plus (reg) (const_int))) operands.
;; p POST_INC or PRE_DEC address as a pointer (X, Y, Z)
;; r POST_INC or PRE_DEC address as a register (r26, r28, r30)
diff --git a/gcc/config/avr/builtins.def b/gcc/config/avr/builtins.def
index 24537052eb0..4b04ff1b367 100644
--- a/gcc/config/avr/builtins.def
+++ b/gcc/config/avr/builtins.def
@@ -38,7 +38,7 @@ DEF_BUILTIN ("__builtin_avr_cli", 0, AVR_BUILTIN_CLI, void_ftype_void, CODE_FO
DEF_BUILTIN ("__builtin_avr_wdr", 0, AVR_BUILTIN_WDR, void_ftype_void, CODE_FOR_wdr)
DEF_BUILTIN ("__builtin_avr_sleep", 0, AVR_BUILTIN_SLEEP, void_ftype_void, CODE_FOR_sleep)
-/* Mapped to respective instruction but might alse be folded away
+/* Mapped to respective instruction but might also be folded away
or emit as libgcc call if ISA does not provide the instruction. */
DEF_BUILTIN ("__builtin_avr_swap", 1, AVR_BUILTIN_SWAP, uchar_ftype_uchar, CODE_FOR_rotlqi3_4)
DEF_BUILTIN ("__builtin_avr_fmul", 2, AVR_BUILTIN_FMUL, uint_ftype_uchar_uchar, CODE_FOR_fmul)
diff --git a/gcc/config/avr/elf.h b/gcc/config/avr/elf.h
index 6d79dc38cb8..82a0969d3b0 100644
--- a/gcc/config/avr/elf.h
+++ b/gcc/config/avr/elf.h
@@ -35,7 +35,7 @@
/* Output alignment 2**1 for jump tables. */
#undef ASM_OUTPUT_BEFORE_CASE_LABEL
#define ASM_OUTPUT_BEFORE_CASE_LABEL(FILE, PREFIX, NUM, TABLE) \
- fprintf (FILE, "\t.p2align\t1\n");
+ ASM_OUTPUT_ALIGN (FILE, 1);
/* Be conservative in crtstuff.c. */
#undef INIT_SECTION_ASM_OP
diff --git a/gcc/config/avr/t-avr b/gcc/config/avr/t-avr
index 24cdd92590a..20dc63a18f3 100644
--- a/gcc/config/avr/t-avr
+++ b/gcc/config/avr/t-avr
@@ -50,7 +50,7 @@ gen-avr-mmcu-texi$(build_exeext): $(srcdir)/config/avr/gen-avr-mmcu-texi.c \
avr-devices.o: s-avr-mmcu-texi
s-avr-mmcu-texi: gen-avr-mmcu-texi$(build_exeext)
- $(RUN_GEN) $< | sed -e 's:\r::g' > avr-mmcu.texi
+ $(RUN_GEN) ./$< | sed -e 's:\r::g' > avr-mmcu.texi
@if cmp -s $(srcdir)/doc/avr-mmcu.texi avr-mmcu.texi; then \
$(STAMP) $@; \
else \
diff --git a/gcc/config/bfin/bfin.c b/gcc/config/bfin/bfin.c
index 1342c568fd0..3cef847c952 100644
--- a/gcc/config/bfin/bfin.c
+++ b/gcc/config/bfin/bfin.c
@@ -44,7 +44,6 @@
#include "recog.h"
#include "optabs.h"
#include "ggc.h"
-#include "integrate.h"
#include "cgraph.h"
#include "langhooks.h"
#include "bfin-protos.h"
diff --git a/gcc/config/c6x/c6x.c b/gcc/config/c6x/c6x.c
index 8a368892bb2..978d0cba82b 100644
--- a/gcc/config/c6x/c6x.c
+++ b/gcc/config/c6x/c6x.c
@@ -40,7 +40,7 @@
#include "tm-preds.h"
#include "tm-constrs.h"
#include "df.h"
-#include "integrate.h"
+#include "function.h"
#include "diagnostic-core.h"
#include "cgraph.h"
#include "cfglayout.h"
@@ -3630,7 +3630,7 @@ typedef struct c6x_sched_context
/* The current scheduling state. */
static struct c6x_sched_context ss;
-/* The following variable value is DFA state before issueing the first insn
+/* The following variable value is DFA state before issuing the first insn
in the current clock cycle. This is used in c6x_variable_issue for
comparison with the state after issuing the last insn in a cycle. */
static state_t prev_cycle_state;
diff --git a/gcc/config/cr16/cr16.c b/gcc/config/cr16/cr16.c
index 852c808f571..df272600c8b 100644
--- a/gcc/config/cr16/cr16.c
+++ b/gcc/config/cr16/cr16.c
@@ -61,7 +61,7 @@
#define FUNC_IS_NORETURN_P(decl) (TREE_THIS_VOLATILE (decl))
/* Predicate that holds when we need to save registers even for 'noreturn'
- functions, to accomodate for unwinding. */
+ functions, to accommodate for unwinding. */
#define MUST_SAVE_REGS_P() \
(flag_unwind_tables || (flag_exceptions && !UI_SJLJ))
diff --git a/gcc/config/cr16/cr16.md b/gcc/config/cr16/cr16.md
index 5e4530c32ce..12072b46f0c 100644
--- a/gcc/config/cr16/cr16.md
+++ b/gcc/config/cr16/cr16.md
@@ -144,7 +144,7 @@
[(set_attr "length" "2")]
)
-;; Arithmetic Instuction Patterns
+;; Arithmetic Instruction Patterns
;; Addition-Subtraction "adddi3/subdi3" insns.
(define_insn "<plusminus_insn>di3"
diff --git a/gcc/config/cris/cris.h b/gcc/config/cris/cris.h
index ff0be0041f0..78fbe684078 100644
--- a/gcc/config/cris/cris.h
+++ b/gcc/config/cris/cris.h
@@ -156,11 +156,13 @@ extern int cris_cpu_version;
" -D__CRIS_arch_tune=" CRIS_DEFAULT_TUNE "}}}}}"\
CRIS_ARCH_CPP_DEFAULT
-/* Override previous definitions (linux.h). */
+/* Override previous definitions (../linux.h). */
#undef CC1_SPEC
#define CC1_SPEC \
"%{metrax4:-march=v3}\
%{metrax100:-march=v8}\
+ %{march=*:-march=%*}\
+ %{mcpu=*:-mcpu=%*}\
%(cc1_subtarget)"
/* For the cris-*-elf subtarget. */
@@ -190,7 +192,9 @@ extern int cris_cpu_version;
MAYBE_AS_NO_MUL_BUG_ABORT \
"%(asm_subtarget)\
%{march=*:%{mcpu=*:%edo not specify both -march=... and -mcpu=...}}\
- %{march=v32:--march=v32} %{mcpu=v32:--march=v32}"
+ %{march=v0|mcpu=v0|march=v3|mcpu=v3|march=v8|mcpu=v8:--march=v0_v10}\
+ %{march=v10|mcpu=v10:--march=v10}\
+ %{march=v32|mcpu=v32:--march=v32}"
/* For the cris-*-elf subtarget. */
#define CRIS_ASM_SUBTARGET_SPEC \
@@ -302,9 +306,14 @@ extern int cris_cpu_version;
#define TARGET_HAS_MUL_INSNS (cris_cpu_version >= CRIS_CPU_NG)
#define TARGET_HAS_LZ (cris_cpu_version >= CRIS_CPU_ETRAX4)
+#define TARGET_HAS_BREAK (cris_cpu_version >= CRIS_CPU_ETRAX4)
#define TARGET_HAS_SWAP (cris_cpu_version >= CRIS_CPU_SVINTO)
#define TARGET_V32 (cris_cpu_version >= CRIS_CPU_V32)
+/* The "break" instruction was introduced with ETRAX 4. */
+#define TARGET_TRAP_USING_BREAK8 \
+ (cris_trap_using_break8 == 2 ? TARGET_HAS_BREAK : cris_trap_using_break8)
+
/* Node: Storage Layout */
#define BITS_BIG_ENDIAN 0
diff --git a/gcc/config/cris/cris.md b/gcc/config/cris/cris.md
index b4ead76f38b..7d691f5a0b5 100644
--- a/gcc/config/cris/cris.md
+++ b/gcc/config/cris/cris.md
@@ -1530,7 +1530,7 @@
"movs<m> %1,%0"
[(set_attr "slottable" "yes,yes,no")])
-;; To do a byte->word extension, extend to dword, exept that the top half
+;; To do a byte->word extension, extend to dword, except that the top half
;; of the register will be clobbered. FIXME: Perhaps this is not needed.
(define_insn "extendqihi2"
@@ -3825,6 +3825,14 @@
""
"nop"
[(set_attr "cc" "none")])
+
+;; Same as the gdb trap breakpoint, will cause a SIGTRAP for
+;; cris-linux* and crisv32-linux*, as intended. Will work in
+;; freestanding environments with sufficient framework.
+(define_insn "trap"
+ [(trap_if (const_int 1) (const_int 8))]
+ "TARGET_TRAP_USING_BREAK8"
+ "break 8")
;; We need to stop accesses to the stack after the memory is
;; deallocated. Unfortunately, reorg doesn't look at naked clobbers,
diff --git a/gcc/config/cris/cris.opt b/gcc/config/cris/cris.opt
index dc4ab57f010..3c2e338ffee 100644
--- a/gcc/config/cris/cris.opt
+++ b/gcc/config/cris/cris.opt
@@ -175,6 +175,10 @@ Target Report RejectNegative Joined Var(cris_max_stackframe_str)
max-stackframe=
Target Report RejectNegative Joined Undocumented Var(cris_max_stackframe_str)
+mtrap-using-break8
+Target Report Var(cris_trap_using_break8) Init(2)
+Emit traps as \"break 8\", default for CRIS v3 and up. If disabled, calls to abort() are used.
+
; TARGET_SVINTO: Currently this just affects alignment. FIXME:
; Redundant with TARGET_ALIGN_BY_32, or put machine stuff here?
; This and the others below could just as well be variables and
diff --git a/gcc/config/darwin.c b/gcc/config/darwin.c
index 10cbdc39a3f..6805cf1264e 100644
--- a/gcc/config/darwin.c
+++ b/gcc/config/darwin.c
@@ -3461,7 +3461,7 @@ darwin_function_section (tree decl, enum node_frequency freq,
/* Startup code should go to startup subsection unless it is
unlikely executed (this happens especially with function splitting
- where we can split away unnecesary parts of static constructors). */
+ where we can split away unnecessary parts of static constructors). */
if (startup && freq != NODE_FREQUENCY_UNLIKELY_EXECUTED)
return (weak)
? darwin_sections[text_startup_coal_section]
diff --git a/gcc/config/darwin.h b/gcc/config/darwin.h
index 3e6efd79061..5855778109d 100644
--- a/gcc/config/darwin.h
+++ b/gcc/config/darwin.h
@@ -356,7 +356,9 @@ extern GTY(()) int darwin_ms_struct;
%{!Zbundle:%{pg:%{static:-lgcrt0.o} \
%{!static:%{object:-lgcrt0.o} \
%{!object:%{preload:-lgcrt0.o} \
- %{!preload:-lgcrt1.o %(darwin_crt2)}}}} \
+ %{!preload:-lgcrt1.o \
+ %:version-compare(>= 10.8 mmacosx-version-min= -no_new_main) \
+ %(darwin_crt2)}}}} \
%{!pg:%{static:-lcrt0.o} \
%{!static:%{object:-lcrt0.o} \
%{!object:%{preload:-lcrt0.o} \
@@ -379,7 +381,7 @@ extern GTY(()) int darwin_ms_struct;
#define DARWIN_CRT1_SPEC \
"%:version-compare(!> 10.5 mmacosx-version-min= -lcrt1.o) \
%:version-compare(>< 10.5 10.6 mmacosx-version-min= -lcrt1.10.5.o) \
- %:version-compare(>= 10.6 mmacosx-version-min= -lcrt1.10.6.o) \
+ %:version-compare(>< 10.6 10.8 mmacosx-version-min= -lcrt1.10.6.o) \
%{fgnu-tm: -lcrttms.o}"
/* Default Darwin ASM_SPEC, very simple. */
@@ -414,6 +416,8 @@ extern GTY(()) int darwin_ms_struct;
#define TARGET_WANT_DEBUG_PUB_SECTIONS true
+#define TARGET_FORCE_AT_COMP_DIR true
+
/* When generating stabs debugging, use N_BINCL entries. */
#define DBX_USE_BINCL
diff --git a/gcc/config/darwin.opt b/gcc/config/darwin.opt
index 3fcd35f090d..23419f9b0b1 100644
--- a/gcc/config/darwin.opt
+++ b/gcc/config/darwin.opt
@@ -224,7 +224,7 @@ Generate code suitable for fast turn around debugging
; and cc1plus don't crash if no -mmacosx-version-min is passed. The
; driver will always pass a -mmacosx-version-min, so in normal use the
; Init is never used. Useful for setting the OS on which people
-; ususally debug.
+; usually debug.
mmacosx-version-min=
Target Joined Report Var(darwin_macosx_version_min) Init("10.6")
The earliest MacOS X version on which this program will run
diff --git a/gcc/config/epiphany/epiphany.c b/gcc/config/epiphany/epiphany.c
index f1a8db76353..aca296f0152 100644
--- a/gcc/config/epiphany/epiphany.c
+++ b/gcc/config/epiphany/epiphany.c
@@ -46,7 +46,6 @@ along with GCC; see the file COPYING3. If not see
#include "ggc.h"
#include "tm-constrs.h"
#include "tree-pass.h"
-#include "integrate.h"
/* Which cpu we're compiling for. */
int epiphany_cpu_type;
diff --git a/gcc/config/frv/frv.c b/gcc/config/frv/frv.c
index ace9e437118..d7a111acbbb 100644
--- a/gcc/config/frv/frv.c
+++ b/gcc/config/frv/frv.c
@@ -46,7 +46,6 @@ along with GCC; see the file COPYING3. If not see
#include "target.h"
#include "target-def.h"
#include "targhooks.h"
-#include "integrate.h"
#include "langhooks.h"
#include "df.h"
diff --git a/gcc/config/i386/i386-c.c b/gcc/config/i386/i386-c.c
index 23427bf034f..0f78d8928ed 100644
--- a/gcc/config/i386/i386-c.c
+++ b/gcc/config/i386/i386-c.c
@@ -48,7 +48,7 @@ ix86_target_macros_internal (HOST_WIDE_INT isa_flag,
void (*def_or_undef) (cpp_reader *,
const char *))
{
- /* For some of the k6/pentium varients there weren't seperate ISA bits to
+ /* For some of the k6/pentium varients there weren't separate ISA bits to
identify which tune/arch flag was passed, so figure it out here. */
size_t arch_len = strlen (ix86_arch_string);
size_t tune_len = strlen (ix86_tune_string);
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 313b4ab2ebd..8de299216c5 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -2408,7 +2408,6 @@ struct ix86_frame
int va_arg_size;
int red_zone_size;
int outgoing_arguments_size;
- HOST_WIDE_INT frame;
/* The offsets relative to ARG_POINTER. */
HOST_WIDE_INT frame_pointer_offset;
@@ -8963,9 +8962,9 @@ ix86_builtin_setjmp_frame_value (void)
static void
ix86_compute_frame_layout (struct ix86_frame *frame)
{
- unsigned int stack_alignment_needed;
+ unsigned HOST_WIDE_INT stack_alignment_needed;
HOST_WIDE_INT offset;
- unsigned int preferred_alignment;
+ unsigned HOST_WIDE_INT preferred_alignment;
HOST_WIDE_INT size = get_frame_size ();
HOST_WIDE_INT to_allocate;
@@ -9198,7 +9197,7 @@ choose_baseaddr (HOST_WIDE_INT cfa_offset)
if (m->use_fast_prologue_epilogue)
{
/* Choose the base register most likely to allow the most scheduling
- opportunities. Generally FP is valid througout the function,
+ opportunities. Generally FP is valid throughout the function,
while DRAP must be reloaded within the epilogue. But choose either
over the SP due to increased encoding size. */
@@ -12823,13 +12822,13 @@ legitimize_tls_address (rtx x, enum tls_model model, bool for_mov)
case TLS_MODEL_INITIAL_EXEC:
if (TARGET_64BIT)
{
- if (TARGET_SUN_TLS)
+ if (TARGET_SUN_TLS && !TARGET_X32)
{
/* The Sun linker took the AMD64 TLS spec literally
and can only handle %rax as destination of the
initial executable code sequence. */
- dest = gen_reg_rtx (Pmode);
+ dest = gen_reg_rtx (DImode);
emit_insn (gen_tls_initial_exec_64_sun (dest, x));
return dest;
}
@@ -13944,8 +13943,8 @@ get_some_local_dynamic_name (void)
C -- print opcode suffix for set/cmov insn.
c -- like C, but print reversed condition
F,f -- likewise, but for floating-point.
- O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, print the opcode suffix for
- the size of the current operand, otherwise nothing.
+ O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
+ otherwise nothing
R -- print the prefix for register names.
z -- print the opcode suffix for the size of the current operand.
Z -- likewise, with special suffixes for x87 instructions.
@@ -14074,6 +14073,8 @@ ix86_print_operand (FILE *file, rtx x, int code)
("invalid operand size for operand code 'O'");
return;
}
+
+ putc ('.', file);
#endif
return;
@@ -14333,20 +14334,21 @@ ix86_print_operand (FILE *file, rtx x, int code)
}
return;
- case 'C':
- case 'c':
case 'F':
case 'f':
+#ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
+ if (ASSEMBLER_DIALECT == ASM_ATT)
+ putc ('.', file);
+#endif
+
+ case 'C':
+ case 'c':
if (!COMPARISON_P (x))
{
output_operand_lossage ("operand is not a condition code, "
"invalid operand code '%c'", code);
return;
}
-#ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
- if (ASSEMBLER_DIALECT == ASM_ATT)
- putc ('.', file);
-#endif
put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)),
code == 'c' || code == 'f',
code == 'F' || code == 'f',
@@ -19934,7 +19936,7 @@ ix86_expand_vec_perm (rtx operands[])
t1 = gen_reg_rtx (V8SImode);
t2 = gen_reg_rtx (V8SImode);
emit_insn (gen_avx2_permvarv8si (t1, op0, mask));
- emit_insn (gen_avx2_permvarv8si (t2, op0, mask));
+ emit_insn (gen_avx2_permvarv8si (t2, op1, mask));
goto merge_two;
}
return;
@@ -19967,10 +19969,10 @@ ix86_expand_vec_perm (rtx operands[])
case V4SFmode:
t1 = gen_reg_rtx (V8SFmode);
- t2 = gen_reg_rtx (V8SFmode);
- mask = gen_lowpart (V4SFmode, mask);
+ t2 = gen_reg_rtx (V8SImode);
+ mask = gen_lowpart (V4SImode, mask);
emit_insn (gen_avx_vec_concatv8sf (t1, op0, op1));
- emit_insn (gen_avx_vec_concatv8sf (t2, mask, mask));
+ emit_insn (gen_avx_vec_concatv8si (t2, mask, mask));
emit_insn (gen_avx2_permvarv8sf (t1, t1, t2));
emit_insn (gen_avx_vextractf128v8sf (target, t1, const0_rtx));
return;
@@ -33122,7 +33124,7 @@ ix86_count_insn (basic_block bb)
return min_prev_count;
}
-/* Pad short funtion to 4 instructions. */
+/* Pad short function to 4 instructions. */
static void
ix86_pad_short_function (void)
@@ -34420,7 +34422,7 @@ half:
}
else
{
- rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
+ rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
emit_move_insn (mem, target);
@@ -34637,7 +34639,7 @@ ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
}
else
{
- rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
+ rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
emit_move_insn (mem, vec);
@@ -36487,12 +36489,6 @@ expand_vec_perm_pshufb (struct expand_vec_perm_d *d)
gen_rtvec_v (GET_MODE_NUNITS (vmode), rperm));
vperm = force_reg (vmode, vperm);
- if (vmode == V8SImode && d->vmode == V8SFmode)
- {
- vmode = V8SFmode;
- vperm = gen_lowpart (vmode, vperm);
- }
-
target = gen_lowpart (vmode, d->target);
op0 = gen_lowpart (vmode, d->op0);
if (d->one_operand_p)
@@ -36925,7 +36921,7 @@ expand_vec_perm_interleave2 (struct expand_vec_perm_d *d)
{
if (d->perm[0] / nelt2 == nonzero_halves[1])
{
- /* Attempt to increase the likelyhood that dfinal
+ /* Attempt to increase the likelihood that dfinal
shuffle will be intra-lane. */
char tmph = nonzero_halves[0];
nonzero_halves[0] = nonzero_halves[1];
@@ -39001,7 +38997,7 @@ fits_dispatch_window (rtx insn)
/* Make disp_cmp and disp_jcc get scheduled at the latest. These
instructions should be given the lowest priority in the
scheduling process in Haifa scheduler to make sure they will be
- scheduled in the same dispatch window as the refrence to them. */
+ scheduled in the same dispatch window as the reference to them. */
if (group == disp_jcc || group == disp_cmp)
return false;
diff --git a/gcc/config/ia64/ia64.c b/gcc/config/ia64/ia64.c
index 8fb5b40da73..e49ee2c74da 100644
--- a/gcc/config/ia64/ia64.c
+++ b/gcc/config/ia64/ia64.c
@@ -1585,13 +1585,13 @@ spill_xfmode_rfmode_operand (rtx in, int force, enum machine_mode mode)
&& GET_MODE (SUBREG_REG (in)) == TImode
&& GET_CODE (SUBREG_REG (in)) == REG)
{
- rtx memt = assign_stack_temp (TImode, 16, 0);
+ rtx memt = assign_stack_temp (TImode, 16);
emit_move_insn (memt, SUBREG_REG (in));
return adjust_address (memt, mode, 0);
}
else if (force && GET_CODE (in) == REG)
{
- rtx memx = assign_stack_temp (mode, 16, 0);
+ rtx memx = assign_stack_temp (mode, 16);
emit_move_insn (memx, in);
return memx;
}
@@ -1716,7 +1716,7 @@ ia64_expand_movxf_movrf (enum machine_mode mode, rtx operands[])
memt = adjust_address (in, TImode, 0);
else
{
- memt = assign_stack_temp (TImode, 16, 0);
+ memt = assign_stack_temp (TImode, 16);
memx = adjust_address (memt, mode, 0);
emit_move_insn (memx, in);
}
@@ -3454,7 +3454,7 @@ output_probe_stack_range (rtx reg1, rtx reg2)
Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
so that the debug info generation code can handle them properly.
- The register save area is layed out like so:
+ The register save area is laid out like so:
cfa+16
[ varargs spill area ]
[ fr register spill area ]
diff --git a/gcc/config/m32r/m32r.c b/gcc/config/m32r/m32r.c
index b27a3a5bb89..a2ae35ae75e 100644
--- a/gcc/config/m32r/m32r.c
+++ b/gcc/config/m32r/m32r.c
@@ -36,7 +36,6 @@
#include "recog.h"
#include "diagnostic-core.h"
#include "ggc.h"
-#include "integrate.h"
#include "df.h"
#include "tm_p.h"
#include "target.h"
diff --git a/gcc/config/m68k/cf.md b/gcc/config/m68k/cf.md
index d6f1e92c3c9..96519dc9e4d 100644
--- a/gcc/config/m68k/cf.md
+++ b/gcc/config/m68k/cf.md
@@ -52,7 +52,7 @@
(define_cpu_unit "cf_dsoc,cf_agex" "cfv123_oep")
-;; A memory unit that is reffered to as 'certain hardware resources' in
+;; A memory unit that is referred to as 'certain hardware resources' in
;; ColdFire reference manuals. This unit remains occupied for two cycles
;; after last dsoc cycle of a store - hence there is a 2 cycle delay between
;; two consecutive stores.
diff --git a/gcc/config/mep/mep.c b/gcc/config/mep/mep.c
index edfff549e2a..8e6cc4d1a8c 100644
--- a/gcc/config/mep/mep.c
+++ b/gcc/config/mep/mep.c
@@ -45,7 +45,6 @@ along with GCC; see the file COPYING3. If not see
#include "tm_p.h"
#include "ggc.h"
#include "diagnostic-core.h"
-#include "integrate.h"
#include "target.h"
#include "target-def.h"
#include "langhooks.h"
@@ -3869,7 +3868,7 @@ static int prev_opcode = 0;
/* This isn't as optimal as it could be, because we don't know what
control register the STC opcode is storing in. We only need to add
- the nop if it's the relevent register, but we add it for irrelevent
+ the nop if it's the relevant register, but we add it for irrelevant
registers also. */
void
@@ -6993,7 +6992,7 @@ core_insn_p (rtx insn)
}
/* Mark coprocessor instructions that can be bundled together with
- the immediately preceeding core instruction. This is later used
+ the immediately preceding core instruction. This is later used
to emit the "+" that tells the assembler to create a VLIW insn.
For unbundled insns, the assembler will automatically add coprocessor
diff --git a/gcc/config/microblaze/microblaze.c b/gcc/config/microblaze/microblaze.c
index b170606bc75..8d08bc282ff 100644
--- a/gcc/config/microblaze/microblaze.c
+++ b/gcc/config/microblaze/microblaze.c
@@ -31,7 +31,6 @@
#include "conditions.h"
#include "insn-flags.h"
#include "insn-attr.h"
-#include "integrate.h"
#include "recog.h"
#include "tree.h"
#include "function.h"
@@ -190,7 +189,7 @@ enum reg_class microblaze_regno_to_class[] =
/* MicroBlaze specific machine attributes.
interrupt_handler - Interrupt handler attribute to add interrupt prologue
and epilogue and use appropriate interrupt return.
- save_volatiles - Similiar to interrupt handler, but use normal return. */
+ save_volatiles - Similar to interrupt handler, but use normal return. */
int interrupt_handler;
int save_volatiles;
diff --git a/gcc/config/microblaze/microblaze.h b/gcc/config/microblaze/microblaze.h
index 92f0f60f1ff..d17d8948335 100644
--- a/gcc/config/microblaze/microblaze.h
+++ b/gcc/config/microblaze/microblaze.h
@@ -546,7 +546,7 @@ typedef struct microblaze_args
#define FUNCTION_MODE SImode
-/* Mode should alwasy be SImode */
+/* Mode should always be SImode */
#define REGISTER_MOVE_COST(MODE, FROM, TO) \
( GR_REG_CLASS_P (FROM) && GR_REG_CLASS_P (TO) ? 2 \
: (FROM) == ST_REGS && GR_REG_CLASS_P (TO) ? 4 \
diff --git a/gcc/config/mips/mips.c b/gcc/config/mips/mips.c
index 2e6c3001178..122bc98efa0 100644
--- a/gcc/config/mips/mips.c
+++ b/gcc/config/mips/mips.c
@@ -50,7 +50,6 @@ along with GCC; see the file COPYING3. If not see
#include "debug.h"
#include "target.h"
#include "target-def.h"
-#include "integrate.h"
#include "langhooks.h"
#include "cfglayout.h"
#include "sched-int.h"
@@ -17201,7 +17200,7 @@ static void
mips_expand_vi_general (enum machine_mode vmode, enum machine_mode imode,
unsigned nelt, unsigned nvar, rtx target, rtx vals)
{
- rtx mem = assign_stack_temp (vmode, GET_MODE_SIZE (vmode), 0);
+ rtx mem = assign_stack_temp (vmode, GET_MODE_SIZE (vmode));
unsigned int i, isize = GET_MODE_SIZE (imode);
if (nvar < nelt)
diff --git a/gcc/config/mmix/mmix.c b/gcc/config/mmix/mmix.c
index 8f801e6c7ca..3a99cb626af 100644
--- a/gcc/config/mmix/mmix.c
+++ b/gcc/config/mmix/mmix.c
@@ -41,7 +41,6 @@ along with GCC; see the file COPYING3. If not see
#include "dwarf2.h"
#include "debug.h"
#include "tm_p.h"
-#include "integrate.h"
#include "target.h"
#include "target-def.h"
#include "df.h"
diff --git a/gcc/config/mmix/mmix.md b/gcc/config/mmix/mmix.md
index dbd4c0f778d..1cd397a8a14 100644
--- a/gcc/config/mmix/mmix.md
+++ b/gcc/config/mmix/mmix.md
@@ -529,7 +529,7 @@ DIVU %1,%1,%2\;GET %0,:rR\;NEGU %2,0,%0\;CSNN %0,$255,%2")
better way. */
stack_slot
= validize_mem (assign_stack_temp (SFmode,
- GET_MODE_SIZE (SFmode), 0));
+ GET_MODE_SIZE (SFmode)));
emit_insn (gen_floatdisf2 (stack_slot, operands[1]));
emit_move_insn (operands[0], stack_slot);
DONE;
@@ -563,7 +563,7 @@ DIVU %1,%1,%2\;GET %0,:rR\;NEGU %2,0,%0\;CSNN %0,$255,%2")
way. */
stack_slot
= validize_mem (assign_stack_temp (SFmode,
- GET_MODE_SIZE (SFmode), 0));
+ GET_MODE_SIZE (SFmode)));
emit_insn (gen_floatunsdisf2 (stack_slot, operands[1]));
emit_move_insn (operands[0], stack_slot);
DONE;
@@ -645,7 +645,7 @@ DIVU %1,%1,%2\;GET %0,:rR\;NEGU %2,0,%0\;CSNN %0,$255,%2")
way. */
stack_slot
= validize_mem (assign_stack_temp (SFmode,
- GET_MODE_SIZE (SFmode), 0));
+ GET_MODE_SIZE (SFmode)));
emit_insn (gen_truncdfsf2 (stack_slot, operands[1]));
emit_move_insn (operands[0], stack_slot);
DONE;
@@ -678,7 +678,7 @@ DIVU %1,%1,%2\;GET %0,:rR\;NEGU %2,0,%0\;CSNN %0,$255,%2")
better way. */
stack_slot
= validize_mem (assign_stack_temp (SFmode,
- GET_MODE_SIZE (SFmode), 0));
+ GET_MODE_SIZE (SFmode)));
emit_move_insn (stack_slot, operands[1]);
emit_insn (gen_extendsfdf2 (operands[0], stack_slot));
DONE;
diff --git a/gcc/config/mn10300/mn10300.c b/gcc/config/mn10300/mn10300.c
index 1554f94644c..5b9f0699469 100644
--- a/gcc/config/mn10300/mn10300.c
+++ b/gcc/config/mn10300/mn10300.c
@@ -2762,7 +2762,7 @@ mn10300_adjust_sched_cost (rtx insn, rtx link, rtx dep, int cost)
Chapter 3 of the MN103E Series Instruction Manual
where it says:
- "When the preceeding instruction is a CPU load or
+ "When the preceding instruction is a CPU load or
store instruction, a following FPU instruction
cannot be executed until the CPU completes the
latency period even though there are no register
@@ -2788,7 +2788,7 @@ mn10300_adjust_sched_cost (rtx insn, rtx link, rtx dep, int cost)
return cost;
/* XXX: Verify: The text of 1-7-4 implies that the restriction
- only applies when an INTEGER load/store preceeds an FPU
+ only applies when an INTEGER load/store precedes an FPU
instruction, but is this true ? For now we assume that it is. */
if (GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (insn)))) != MODE_INT)
return cost;
diff --git a/gcc/config/mn10300/mn10300.md b/gcc/config/mn10300/mn10300.md
index 91378a79345..a1cbc7a9fd4 100644
--- a/gcc/config/mn10300/mn10300.md
+++ b/gcc/config/mn10300/mn10300.md
@@ -999,7 +999,7 @@
;; ??? Note that AM33 has a third multiply variant that puts the high part
;; into the MDRQ register, however this variant also constrains the inputs
;; to be in DATA_REGS and thus isn't as helpful as it might be considering
-;; the existance of the 4-operand multiply. Nor is there a set of divide
+;; the existence of the 4-operand multiply. Nor is there a set of divide
;; insns that use MDRQ. Given that there is an IMM->MDRQ insn, this would
;; have been very handy for starting udivmodsi4...
@@ -1808,7 +1808,7 @@
)
;; ----------------------------------------------------------------------
-;; MISCELANEOUS
+;; MISCELLANEOUS
;; ----------------------------------------------------------------------
;; Note the use of the (const_int 0) when generating the insn that matches
diff --git a/gcc/config/pa/pa.c b/gcc/config/pa/pa.c
index 56c889db88c..02c00ba6a78 100644
--- a/gcc/config/pa/pa.c
+++ b/gcc/config/pa/pa.c
@@ -37,7 +37,6 @@ along with GCC; see the file COPYING3. If not see
#include "expr.h"
#include "optabs.h"
#include "reload.h"
-#include "integrate.h"
#include "function.h"
#include "diagnostic-core.h"
#include "ggc.h"
@@ -188,6 +187,7 @@ static enum machine_mode pa_c_mode_for_suffix (char);
static section *pa_function_section (tree, enum node_frequency, bool, bool);
static bool pa_cannot_force_const_mem (enum machine_mode, rtx);
static bool pa_legitimate_constant_p (enum machine_mode, rtx);
+static unsigned int pa_section_type_flags (tree, const char *, int);
/* The following extra sections are only used for SOM. */
static GTY(()) section *som_readonly_data_section;
@@ -383,6 +383,8 @@ static size_t n_deferred_plabels = 0;
#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P pa_legitimate_constant_p
+#undef TARGET_SECTION_TYPE_FLAGS
+#define TARGET_SECTION_TYPE_FLAGS pa_section_type_flags
struct gcc_target targetm = TARGET_INITIALIZER;
@@ -5939,7 +5941,7 @@ pa_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
}
/* Request a secondary reload with a general scratch register
- for everthing else. ??? Could symbolic operands be handled
+ for everything else. ??? Could symbolic operands be handled
directly when generating non-pic PA 2.0 code? */
sri->icode = (in_p
? direct_optab_handler (reload_in_optab, mode)
@@ -10340,7 +10342,29 @@ pa_legitimate_constant_p (enum machine_mode mode, rtx x)
&& !pa_cint_ok_for_move (INTVAL (x)))
return false;
+ if (function_label_operand (x, mode))
+ return false;
+
return true;
}
+/* Implement TARGET_SECTION_TYPE_FLAGS. */
+
+static unsigned int
+pa_section_type_flags (tree decl, const char *name, int reloc)
+{
+ unsigned int flags;
+
+ flags = default_section_type_flags (decl, name, reloc);
+
+ /* Function labels are placed in the constant pool. This can
+ cause a section conflict if decls are put in ".data.rel.ro"
+ or ".data.rel.ro.local" using the __attribute__ construct. */
+ if (strcmp (name, ".data.rel.ro") == 0
+ || strcmp (name, ".data.rel.ro.local") == 0)
+ flags |= SECTION_WRITE | SECTION_RELRO;
+
+ return flags;
+}
+
#include "gt-pa.h"
diff --git a/gcc/config/picochip/picochip.c b/gcc/config/picochip/picochip.c
index 57cbd157f41..2beddce8cc9 100644
--- a/gcc/config/picochip/picochip.c
+++ b/gcc/config/picochip/picochip.c
@@ -40,7 +40,6 @@ along with GCC; see the file COPYING3. If not, see
#include "function.h"
#include "output.h"
#include "basic-block.h"
-#include "integrate.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "hashtab.h"
diff --git a/gcc/config/picochip/picochip.h b/gcc/config/picochip/picochip.h
index abe6d6432b5..9eb7df94c01 100644
--- a/gcc/config/picochip/picochip.h
+++ b/gcc/config/picochip/picochip.h
@@ -221,7 +221,7 @@ extern enum picochip_dfa_type picochip_schedule_type;
#define CALL_USED_REGISTERS {1,1,1,1,1,1,0,0, 0,0,0,0,1,1,0,1, 1,1,1,1}
#define CALL_REALLY_USED_REGISTERS {1,1,1,1,1,1,0,0, 0,0,0,0,1,1,0,0, 0,1,0,0}
-/* Define the number of the picoChip link and condition psuedo registers. */
+/* Define the number of the picoChip link and condition pseudo registers. */
#define LINK_REGNUM 12
#define CC_REGNUM 17
#define ACC_REGNUM 16
diff --git a/gcc/config/rs6000/a2.md b/gcc/config/rs6000/a2.md
index 851d8949ff7..79fdf913de1 100644
--- a/gcc/config/rs6000/a2.md
+++ b/gcc/config/rs6000/a2.md
@@ -25,7 +25,7 @@
;; The multiplier pipeline.
(define_cpu_unit "mult" "ppca2")
-;; The auxillary processor unit (FP/vector unit).
+;; The auxiliary processor unit (FP/vector unit).
(define_cpu_unit "axu" "ppca2")
;; D.4.6
diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c
index db6597edd36..f6cef090f1e 100644
--- a/gcc/config/rs6000/rs6000.c
+++ b/gcc/config/rs6000/rs6000.c
@@ -41,7 +41,6 @@
#include "function.h"
#include "output.h"
#include "basic-block.h"
-#include "integrate.h"
#include "diagnostic-core.h"
#include "toplev.h"
#include "ggc.h"
@@ -936,9 +935,8 @@ static int rs6000_debug_adjust_cost (rtx, rtx, rtx, int);
static bool is_microcoded_insn (rtx);
static bool is_nonpipeline_insn (rtx);
static bool is_cracked_insn (rtx);
-static bool is_load_insn (rtx);
-static rtx get_store_dest (rtx pat);
-static bool is_store_insn (rtx);
+static bool is_load_insn (rtx, rtx *);
+static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx,rtx);
static bool insn_terminates_group_p (rtx , enum group_termination);
static bool insn_must_be_first_in_group (rtx);
@@ -2078,7 +2076,7 @@ rs6000_init_hard_regno_mode_ok (bool global_init_p)
/* TODO add SPE and paired floating point vector support. */
- /* Register class constaints for the constraints that depend on compile
+ /* Register class constraints for the constraints that depend on compile
switches. */
if (TARGET_HARD_FLOAT && TARGET_FPRS)
rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS;
@@ -2329,7 +2327,7 @@ darwin_rs6000_override_options (void)
/* Unless the user (not the configurer) has explicitly overridden
it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
- G4 unless targetting the kernel. */
+ G4 unless targeting the kernel. */
if (!flag_mkernel
&& !flag_apple_kext
&& strverscmp (darwin_macosx_version_min, "10.5") >= 0
@@ -2831,7 +2829,7 @@ rs6000_option_override_internal (bool global_init_p)
/* Handle -msched-costly-dep option. */
rs6000_sched_costly_dep
- = (rs6000_sched_groups ? store_to_load_dep_costly : no_dep_costly);
+ = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
if (rs6000_sched_costly_dep_str)
{
@@ -4577,7 +4575,7 @@ rs6000_expand_vector_init (rtx target, rtx vals)
of 64-bit items is not supported on Altivec. */
if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
{
- mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode), 0);
+ mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
XVECEXP (vals, 0, 0));
x = gen_rtx_UNSPEC (VOIDmode,
@@ -4613,7 +4611,7 @@ rs6000_expand_vector_init (rtx target, rtx vals)
/* Construct the vector in memory one field at a time
and load the whole vector. */
- mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
+ mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
for (i = 0; i < n_elts; i++)
emit_move_insn (adjust_address_nv (mem, inner_mode,
i * GET_MODE_SIZE (inner_mode)),
@@ -4642,7 +4640,7 @@ rs6000_expand_vector_set (rtx target, rtx val, int elt)
}
/* Load single variable value. */
- mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode), 0);
+ mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
x = gen_rtx_UNSPEC (VOIDmode,
gen_rtvec (1, const0_rtx), UNSPEC_LVE);
@@ -4697,7 +4695,7 @@ rs6000_expand_vector_extract (rtx target, rtx vec, int elt)
}
/* Allocate mode-sized buffer. */
- mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
+ mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
emit_move_insn (mem, vec);
@@ -22786,49 +22784,78 @@ set_to_load_agen (rtx out_insn, rtx in_insn)
return false;
}
-/* The function returns true if the target storage location of
- out_insn is adjacent to the target storage location of in_insn */
-/* Return 1 if memory locations are adjacent. */
+/* Try to determine base/offset/size parts of the given MEM.
+ Return true if successful, false if all the values couldn't
+ be determined.
+
+ This function only looks for REG or REG+CONST address forms.
+ REG+REG address form will return false. */
static bool
-adjacent_mem_locations (rtx insn1, rtx insn2)
+get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
+ HOST_WIDE_INT *size)
{
+ rtx addr_rtx;
+ if MEM_SIZE_KNOWN_P (mem)
+ *size = MEM_SIZE (mem);
+ else
+ return false;
- rtx a = get_store_dest (PATTERN (insn1));
- rtx b = get_store_dest (PATTERN (insn2));
+ if (GET_CODE (XEXP (mem, 0)) == PRE_MODIFY)
+ addr_rtx = XEXP (XEXP (mem, 0), 1);
+ else
+ addr_rtx = (XEXP (mem, 0));
- if ((GET_CODE (XEXP (a, 0)) == REG
- || (GET_CODE (XEXP (a, 0)) == PLUS
- && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
- && (GET_CODE (XEXP (b, 0)) == REG
- || (GET_CODE (XEXP (b, 0)) == PLUS
- && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
+ if (GET_CODE (addr_rtx) == REG)
+ {
+ *base = addr_rtx;
+ *offset = 0;
+ }
+ else if (GET_CODE (addr_rtx) == PLUS
+ && CONST_INT_P (XEXP (addr_rtx, 1)))
{
- HOST_WIDE_INT val0 = 0, val1 = 0, val_diff;
- rtx reg0, reg1;
+ *base = XEXP (addr_rtx, 0);
+ *offset = INTVAL (XEXP (addr_rtx, 1));
+ }
+ else
+ return false;
- if (GET_CODE (XEXP (a, 0)) == PLUS)
- {
- reg0 = XEXP (XEXP (a, 0), 0);
- val0 = INTVAL (XEXP (XEXP (a, 0), 1));
- }
- else
- reg0 = XEXP (a, 0);
+ return true;
+}
- if (GET_CODE (XEXP (b, 0)) == PLUS)
- {
- reg1 = XEXP (XEXP (b, 0), 0);
- val1 = INTVAL (XEXP (XEXP (b, 0), 1));
- }
- else
- reg1 = XEXP (b, 0);
+/* The function returns true if the target storage location of
+ mem1 is adjacent to the target storage location of mem2 */
+/* Return 1 if memory locations are adjacent. */
- val_diff = val1 - val0;
+static bool
+adjacent_mem_locations (rtx mem1, rtx mem2)
+{
+ rtx reg1, reg2;
+ HOST_WIDE_INT off1, size1, off2, size2;
- return ((REGNO (reg0) == REGNO (reg1))
- && ((MEM_SIZE_KNOWN_P (a) && val_diff == MEM_SIZE (a))
- || (MEM_SIZE_KNOWN_P (b) && val_diff == -MEM_SIZE (b))));
- }
+ if (get_memref_parts (mem1, &reg1, &off1, &size1)
+ && get_memref_parts (mem2, &reg2, &off2, &size2))
+ return ((REGNO (reg1) == REGNO (reg2))
+ && ((off1 + size1 == off2)
+ || (off2 + size2 == off1)));
+
+ return false;
+}
+
+/* This function returns true if it can be determined that the two MEM
+ locations overlap by at least 1 byte based on base reg/offset/size. */
+
+static bool
+mem_locations_overlap (rtx mem1, rtx mem2)
+{
+ rtx reg1, reg2;
+ HOST_WIDE_INT off1, size1, off2, size2;
+
+ if (get_memref_parts (mem1, &reg1, &off1, &size1)
+ && get_memref_parts (mem2, &reg2, &off2, &size2))
+ return ((REGNO (reg1) == REGNO (reg2))
+ && (((off1 <= off2) && (off1 + size1 > off2))
+ || ((off2 <= off1) && (off2 + size2 > off1))));
return false;
}
@@ -22842,6 +22869,7 @@ adjacent_mem_locations (rtx insn1, rtx insn2)
static int
rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
{
+ rtx load_mem, str_mem;
/* On machines (like the 750) which have asymmetric integer units,
where one integer unit can do multiply and divides and the other
can't, reduce the priority of multiply/divide so it is scheduled
@@ -22893,8 +22921,8 @@ rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
}
if (rs6000_cpu == PROCESSOR_POWER6
- && ((load_store_pendulum == -2 && is_load_insn (insn))
- || (load_store_pendulum == 2 && is_store_insn (insn))))
+ && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
+ || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
/* Attach highest priority to insn if the scheduler has just issued two
stores and this instruction is a load, or two loads and this instruction
is a store. Power6 wants loads and stores scheduled alternately
@@ -23019,54 +23047,63 @@ rs6000_use_sched_lookahead_guard (rtx insn)
return 1;
}
-/* Determine is PAT refers to memory. */
+/* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
+ and return true. */
static bool
-is_mem_ref (rtx pat)
+find_mem_ref (rtx pat, rtx *mem_ref)
{
const char * fmt;
int i, j;
- bool ret = false;
/* stack_tie does not produce any real memory traffic. */
if (tie_operand (pat, VOIDmode))
return false;
if (GET_CODE (pat) == MEM)
- return true;
+ {
+ *mem_ref = pat;
+ return true;
+ }
/* Recursively process the pattern. */
fmt = GET_RTX_FORMAT (GET_CODE (pat));
- for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0 && !ret; i--)
+ for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
{
if (fmt[i] == 'e')
- ret |= is_mem_ref (XEXP (pat, i));
+ {
+ if (find_mem_ref (XEXP (pat, i), mem_ref))
+ return true;
+ }
else if (fmt[i] == 'E')
for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
- ret |= is_mem_ref (XVECEXP (pat, i, j));
+ {
+ if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
+ return true;
+ }
}
- return ret;
+ return false;
}
/* Determine if PAT is a PATTERN of a load insn. */
static bool
-is_load_insn1 (rtx pat)
+is_load_insn1 (rtx pat, rtx *load_mem)
{
if (!pat || pat == NULL_RTX)
return false;
if (GET_CODE (pat) == SET)
- return is_mem_ref (SET_SRC (pat));
+ return find_mem_ref (SET_SRC (pat), load_mem);
if (GET_CODE (pat) == PARALLEL)
{
int i;
for (i = 0; i < XVECLEN (pat, 0); i++)
- if (is_load_insn1 (XVECEXP (pat, 0, i)))
+ if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
return true;
}
@@ -23076,7 +23113,7 @@ is_load_insn1 (rtx pat)
/* Determine if INSN loads from memory. */
static bool
-is_load_insn (rtx insn)
+is_load_insn (rtx insn, rtx *load_mem)
{
if (!insn || !INSN_P (insn))
return false;
@@ -23084,26 +23121,26 @@ is_load_insn (rtx insn)
if (GET_CODE (insn) == CALL_INSN)
return false;
- return is_load_insn1 (PATTERN (insn));
+ return is_load_insn1 (PATTERN (insn), load_mem);
}
/* Determine if PAT is a PATTERN of a store insn. */
static bool
-is_store_insn1 (rtx pat)
+is_store_insn1 (rtx pat, rtx *str_mem)
{
if (!pat || pat == NULL_RTX)
return false;
if (GET_CODE (pat) == SET)
- return is_mem_ref (SET_DEST (pat));
+ return find_mem_ref (SET_DEST (pat), str_mem);
if (GET_CODE (pat) == PARALLEL)
{
int i;
for (i = 0; i < XVECLEN (pat, 0); i++)
- if (is_store_insn1 (XVECEXP (pat, 0, i)))
+ if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
return true;
}
@@ -23113,38 +23150,12 @@ is_store_insn1 (rtx pat)
/* Determine if INSN stores to memory. */
static bool
-is_store_insn (rtx insn)
+is_store_insn (rtx insn, rtx *str_mem)
{
if (!insn || !INSN_P (insn))
return false;
- return is_store_insn1 (PATTERN (insn));
-}
-
-/* Return the dest of a store insn. */
-
-static rtx
-get_store_dest (rtx pat)
-{
- gcc_assert (is_store_insn1 (pat));
-
- if (GET_CODE (pat) == SET)
- return SET_DEST (pat);
- else if (GET_CODE (pat) == PARALLEL)
- {
- int i;
-
- for (i = 0; i < XVECLEN (pat, 0); i++)
- {
- rtx inner_pat = XVECEXP (pat, 0, i);
- if (GET_CODE (inner_pat) == SET
- && is_mem_ref (SET_DEST (inner_pat)))
- return inner_pat;
- }
- }
- /* We shouldn't get here, because we should have either a simple
- store insn or a store with update which are covered above. */
- gcc_unreachable();
+ return is_store_insn1 (PATTERN (insn), str_mem);
}
/* Returns whether the dependence between INSN and NEXT is considered
@@ -23155,6 +23166,7 @@ rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
{
rtx insn;
rtx next;
+ rtx load_mem, str_mem;
/* If the flag is not enabled - no dependence is considered costly;
allow all dependent insns in the same group.
@@ -23172,15 +23184,16 @@ rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
next = DEP_CON (dep);
if (rs6000_sched_costly_dep == store_to_load_dep_costly
- && is_load_insn (next)
- && is_store_insn (insn))
+ && is_load_insn (next, &load_mem)
+ && is_store_insn (insn, &str_mem))
/* Prevent load after store in the same group. */
return true;
if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
- && is_load_insn (next)
- && is_store_insn (insn)
- && DEP_TYPE (dep) == REG_DEP_TRUE)
+ && is_load_insn (next, &load_mem)
+ && is_store_insn (insn, &str_mem)
+ && DEP_TYPE (dep) == REG_DEP_TRUE
+ && mem_locations_overlap(str_mem, load_mem))
/* Prevent load after store in the same group if it is a true
dependence. */
return true;
@@ -23307,12 +23320,12 @@ rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx *ready,
{
int pos;
int i;
- rtx tmp;
+ rtx tmp, load_mem, str_mem;
- if (is_store_insn (last_scheduled_insn))
+ if (is_store_insn (last_scheduled_insn, &str_mem))
/* Issuing a store, swing the load_store_pendulum to the left */
load_store_pendulum--;
- else if (is_load_insn (last_scheduled_insn))
+ else if (is_load_insn (last_scheduled_insn, &load_mem))
/* Issuing a load, swing the load_store_pendulum to the right */
load_store_pendulum++;
else
@@ -23331,7 +23344,7 @@ rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx *ready,
while (pos >= 0)
{
- if (is_load_insn (ready[pos]))
+ if (is_load_insn (ready[pos], &load_mem))
{
/* Found a load. Move it to the head of the ready list,
and adjust it's priority so that it is more likely to
@@ -23357,7 +23370,7 @@ rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx *ready,
while (pos >= 0)
{
- if (is_load_insn (ready[pos])
+ if (is_load_insn (ready[pos], &load_mem)
&& !sel_sched_p ()
&& INSN_PRIORITY_KNOWN (ready[pos]))
{
@@ -23384,15 +23397,16 @@ rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx *ready,
while (pos >= 0)
{
- if (is_store_insn (ready[pos]))
+ if (is_store_insn (ready[pos], &str_mem))
{
+ rtx str_mem2;
/* Maintain the index of the first store found on the
list */
if (first_store_pos == -1)
first_store_pos = pos;
- if (is_store_insn (last_scheduled_insn)
- && adjacent_mem_locations (last_scheduled_insn,ready[pos]))
+ if (is_store_insn (last_scheduled_insn, &str_mem2)
+ && adjacent_mem_locations (str_mem, str_mem2))
{
/* Found an adjacent store. Move it to the head of the
ready list, and adjust it's priority so that it is
@@ -23436,7 +23450,7 @@ rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx *ready,
while (pos >= 0)
{
- if (is_store_insn (ready[pos])
+ if (is_store_insn (ready[pos], &str_mem)
&& !sel_sched_p ()
&& INSN_PRIORITY_KNOWN (ready[pos]))
{
@@ -23720,7 +23734,7 @@ is_costly_group (rtx *group_insns, rtx next_insn)
if (!insn)
continue;
- FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
+ FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
{
rtx next = DEP_CON (dep);
@@ -23784,12 +23798,20 @@ force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
if (can_issue_more && !is_branch_slot_insn (next_insn))
can_issue_more--;
- while (can_issue_more > 0)
+ /* Power6 and Power7 have special group ending nop. */
+ if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7)
{
- nop = gen_nop ();
+ nop = gen_group_ending_nop ();
emit_insn_before (nop, next_insn);
- can_issue_more--;
+ can_issue_more = 0;
}
+ else
+ while (can_issue_more > 0)
+ {
+ nop = gen_nop ();
+ emit_insn_before (nop, next_insn);
+ can_issue_more--;
+ }
*group_end = true;
return 0;
@@ -27789,7 +27811,7 @@ rs6000_allocate_stack_temp (enum machine_mode mode,
bool offsettable_p,
bool reg_reg_p)
{
- rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
+ rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
rtx addr = XEXP (stack, 0);
int strict_p = (reload_in_progress || reload_completed);
diff --git a/gcc/config/rs6000/rs6000.md b/gcc/config/rs6000/rs6000.md
index e852c3e5306..ba4acb69de7 100644
--- a/gcc/config/rs6000/rs6000.md
+++ b/gcc/config/rs6000/rs6000.md
@@ -126,6 +126,7 @@
UNSPEC_LFIWAX
UNSPEC_LFIWZX
UNSPEC_FCTIWUZ
+ UNSPEC_GRP_END_NOP
])
;;
@@ -10037,7 +10038,7 @@
operands[2] = gen_reg_rtx (DFmode);
operands[3] = gen_reg_rtx (DFmode);
operands[4] = gen_reg_rtx (DImode);
- operands[5] = assign_stack_temp (DImode, GET_MODE_SIZE (DImode), 0);
+ operands[5] = assign_stack_temp (DImode, GET_MODE_SIZE (DImode));
})
(define_insn_and_split "*fix_trunctfsi2_internal"
@@ -15594,6 +15595,16 @@
[(const_int 0)]
""
"{cror 0,0,0|nop}")
+
+(define_insn "group_ending_nop"
+ [(unspec [(const_int 0)] UNSPEC_GRP_END_NOP)]
+ ""
+ "*
+{
+ if (rs6000_cpu_attr == CPU_POWER6)
+ return \"ori 1,1,0\";
+ return \"ori 2,2,0\";
+}")
;; Define the subtract-one-and-jump insns, starting with the template
;; so loop.c knows what to generate.
diff --git a/gcc/config/rs6000/t-linux64 b/gcc/config/rs6000/t-linux64
index 6420431214d..fb1af5d8c46 100644
--- a/gcc/config/rs6000/t-linux64
+++ b/gcc/config/rs6000/t-linux64
@@ -26,10 +26,7 @@
# it doesn't tell anything about the 32bit libraries on those systems. Set
# MULTILIB_OSDIRNAMES according to what is found on the target.
-MULTILIB_OPTIONS = m64/m32 msoft-float
-MULTILIB_DIRNAMES = 64 32 nof
-MULTILIB_EXTRA_OPTS = fPIC mstrict-align
-MULTILIB_EXCEPTIONS = m64/msoft-float
-MULTILIB_EXCLUSIONS = m64/!m32/msoft-float
-MULTILIB_OSDIRNAMES = ../lib64 $(if $(wildcard $(shell echo $(SYSTEM_HEADER_DIR))/../../usr/lib32),../lib32,../lib) nof
-MULTILIB_MATCHES = $(MULTILIB_MATCHES_FLOAT)
+MULTILIB_OPTIONS = m64/m32
+MULTILIB_DIRNAMES = 64 32
+MULTILIB_EXTRA_OPTS = fPIC
+MULTILIB_OSDIRNAMES = ../lib64 $(if $(wildcard $(shell echo $(SYSTEM_HEADER_DIR))/../../usr/lib32),../lib32,../lib)
diff --git a/gcc/config/rs6000/t-rs6000 b/gcc/config/rs6000/t-rs6000
index 5204f589d5d..3ca2732fabc 100644
--- a/gcc/config/rs6000/t-rs6000
+++ b/gcc/config/rs6000/t-rs6000
@@ -25,7 +25,7 @@ rs6000.o: $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
$(RTL_H) $(REGS_H) hard-reg-set.h \
real.h insn-config.h conditions.h insn-attr.h flags.h $(RECOG_H) \
$(OBSTACK_H) $(TREE_H) $(EXPR_H) $(OPTABS_H) except.h function.h \
- output.h $(BASIC_BLOCK_H) $(INTEGRATE_H) toplev.h $(GGC_H) $(HASHTAB_H) \
+ output.h $(BASIC_BLOCK_H) toplev.h $(GGC_H) $(HASHTAB_H) \
$(TM_P_H) $(TARGET_H) $(TARGET_DEF_H) langhooks.h reload.h gt-rs6000.h \
cfglayout.h cfgloop.h $(OPTS_H) $(COMMON_TARGET_H)
diff --git a/gcc/config/rs6000/vector.md b/gcc/config/rs6000/vector.md
index 6674054223b..87a52762a4d 100644
--- a/gcc/config/rs6000/vector.md
+++ b/gcc/config/rs6000/vector.md
@@ -172,7 +172,7 @@
-;; Reload patterns for vector operations. We may need an addtional base
+;; Reload patterns for vector operations. We may need an additional base
;; register to convert the reg+offset addressing to reg+reg for vector
;; registers and reg+reg or (reg+reg)&(-16) addressing to just an index
;; register for gpr registers.
diff --git a/gcc/config/rx/rx.md b/gcc/config/rx/rx.md
index 1ba603f4a6e..95ba051a486 100644
--- a/gcc/config/rx/rx.md
+++ b/gcc/config/rx/rx.md
@@ -408,7 +408,7 @@
;; Note - the following set of patterns do not use the "memory_operand"
;; predicate or an "m" constraint because we do not allow symbol_refs
-;; or label_refs as legitmate memory addresses. This matches the
+;; or label_refs as legitimate memory addresses. This matches the
;; behaviour of most of the RX instructions. Only the call/branch
;; instructions are allowed to refer to symbols/labels directly.
;; The call operands are in QImode because that is the value of
diff --git a/gcc/config/rx/rx.opt b/gcc/config/rx/rx.opt
index 308bf0c8ada..76c2f61c79b 100644
--- a/gcc/config/rx/rx.opt
+++ b/gcc/config/rx/rx.opt
@@ -87,7 +87,7 @@ Use the simulator runtime.
mas100-syntax
Target Mask(AS100_SYNTAX) Report
-Generate assembler output that is compatible with the Renesas AS100 assembler. This may restrict some of the compiler's capabilities. The default is to generate GAS compatable syntax.
+Generate assembler output that is compatible with the Renesas AS100 assembler. This may restrict some of the compiler's capabilities. The default is to generate GAS compatible syntax.
;---------------------------------------------------
diff --git a/gcc/config/s390/2097.md b/gcc/config/s390/2097.md
index 77c206ecdbc..333e1b26ff4 100644
--- a/gcc/config/s390/2097.md
+++ b/gcc/config/s390/2097.md
@@ -703,11 +703,11 @@
; Declaration for some pseudo-pipeline stages that reflect the
-; dispatch gap when issueing an INT/FXU/BFU-executed instruction after
+; dispatch gap when issuing an INT/FXU/BFU-executed instruction after
; an instruction executed by a different unit has been executed. The
; approach is that we pretend a pipelined execution of BFU operations
; with as many stages as the gap is long and request that none of
-; these stages is busy when issueing a FXU- or DFU-executed
+; these stages is busy when issuing a FXU- or DFU-executed
; instruction. Similar for FXU- and DFU-executed instructions.
; Declaration for FPU stages.
diff --git a/gcc/config/s390/s390.c b/gcc/config/s390/s390.c
index b338cd96136..bc0bf8a991a 100644
--- a/gcc/config/s390/s390.c
+++ b/gcc/config/s390/s390.c
@@ -42,7 +42,6 @@ along with GCC; see the file COPYING3. If not see
#include "reload.h"
#include "diagnostic-core.h"
#include "basic-block.h"
-#include "integrate.h"
#include "ggc.h"
#include "target.h"
#include "target-def.h"
@@ -9044,6 +9043,7 @@ s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
lab_false = create_artificial_label (UNKNOWN_LOCATION);
lab_over = create_artificial_label (UNKNOWN_LOCATION);
addr = create_tmp_var (ptr_type_node, "addr");
+ mark_sym_for_renaming (addr);
t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
t = build2 (GT_EXPR, boolean_type_node, reg, t);
@@ -10533,7 +10533,7 @@ s390_z10_prevent_earlyload_conflicts (rtx *ready, int *nready_p)
}
/* This function is called via hook TARGET_SCHED_REORDER before
- issueing one insn from list READY which contains *NREADYP entries.
+ issuing one insn from list READY which contains *NREADYP entries.
For target z10 it reorders load instructions to avoid early load
conflicts in the floating point pipeline */
static int
diff --git a/gcc/config/s390/s390.h b/gcc/config/s390/s390.h
index 99c09e8860e..f69b3174b00 100644
--- a/gcc/config/s390/s390.h
+++ b/gcc/config/s390/s390.h
@@ -762,7 +762,7 @@ do { \
/* This value is used in tree-sra to decide whether it might benefical
to split a struct move into several word-size moves. For S/390
only small values make sense here since struct moves are relatively
- cheap thanks to mvc so the small default value choosen for archs
+ cheap thanks to mvc so the small default value chosen for archs
with memmove patterns should be ok. But this value is multiplied
in tree-sra with UNITS_PER_WORD to make a decision so we adjust it
here to compensate for that factor since mvc costs exactly the same
diff --git a/gcc/config/score/score.c b/gcc/config/score/score.c
index 0af0fd50b82..9c68e19ed30 100644
--- a/gcc/config/score/score.c
+++ b/gcc/config/score/score.c
@@ -45,7 +45,6 @@
#include "debug.h"
#include "target.h"
#include "target-def.h"
-#include "integrate.h"
#include "langhooks.h"
#include "cfglayout.h"
#include "df.h"
diff --git a/gcc/config/sh/predicates.md b/gcc/config/sh/predicates.md
index c6d0d464f08..f75675ec096 100644
--- a/gcc/config/sh/predicates.md
+++ b/gcc/config/sh/predicates.md
@@ -879,3 +879,22 @@
}
return 0;
})
+
+;; The atomic_* operand predicates are used for the atomic patterns.
+;; Depending on the particular pattern some operands can be immediate
+;; values. Using these predicates avoids the usage of 'force_reg' in the
+;; expanders.
+(define_predicate "atomic_arith_operand"
+ (ior (match_code "subreg,reg")
+ (and (match_test "satisfies_constraint_I08 (op)")
+ (match_test "mode != QImode")
+ (match_test "mode != HImode")
+ (match_test "TARGET_SH4A_ARCH"))))
+
+(define_predicate "atomic_logical_operand"
+ (ior (match_code "subreg,reg")
+ (and (match_test "satisfies_constraint_K08 (op)")
+ (match_test "mode != QImode")
+ (match_test "mode != HImode")
+ (match_test "TARGET_SH4A_ARCH"))))
+
diff --git a/gcc/config/sh/sh.c b/gcc/config/sh/sh.c
index 08ee5b436f5..20e67c63628 100644
--- a/gcc/config/sh/sh.c
+++ b/gcc/config/sh/sh.c
@@ -39,7 +39,6 @@ along with GCC; see the file COPYING3. If not see
#include "insn-attr.h"
#include "diagnostic-core.h"
#include "recog.h"
-#include "integrate.h"
#include "dwarf2.h"
#include "tm_p.h"
#include "target.h"
@@ -393,7 +392,7 @@ static const struct attribute_spec sh_attribute_table[] =
The insn that frees registers is most likely to be the insn with lowest
LUID (original insn order); but such an insn might be there in the stalled
queue (Q) instead of the ready queue (R). To solve this, we skip cycles
- upto a max of 8 cycles so that such insns may move from Q -> R.
+ up to a max of 8 cycles so that such insns may move from Q -> R.
The description of the hooks are as below:
@@ -877,12 +876,27 @@ sh_option_override (void)
align_functions = min_align;
}
+ /* Enable fmac insn for "a * b + c" SFmode calculations when -ffast-math
+ is enabled and -mno-fused-madd is not specified by the user.
+ The fmac insn can't be enabled by default due to the implied
+ FMA semantics. See also PR target/29100. */
+ if (global_options_set.x_TARGET_FMAC == 0 && flag_unsafe_math_optimizations)
+ TARGET_FMAC = 1;
+
if (sh_fixed_range_str)
sh_fix_range (sh_fixed_range_str);
/* This target defaults to strict volatile bitfields. */
if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2))
flag_strict_volatile_bitfields = 1;
+
+ /* Make sure that only one atomic mode is selected and that the selection
+ is valid for the current target CPU. */
+ if (TARGET_SOFT_ATOMIC && TARGET_HARD_ATOMIC)
+ error ("-msoft-atomic and -mhard-atomic cannot be used at the same time");
+ if (TARGET_HARD_ATOMIC && ! TARGET_SH4A_ARCH)
+ error ("-mhard-atomic is only available for SH4A targets");
+
}
/* Print the operand address in x to the stream. */
@@ -11478,7 +11492,7 @@ sh_expand_binop_v2sf (enum rtx_code code, rtx op0, rtx op1, rtx op2)
We could hold SFmode / SCmode values in XD registers, but that
would require a tertiary reload when reloading from / to memory,
and a secondary reload to reload from / to general regs; that
- seems to be a loosing proposition.
+ seems to be a losing proposition.
We want to allow TImode FP regs so that when V4SFmode is loaded as TImode,
it won't be ferried through GP registers first. */
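
Editorial note: the sh_option_override hunk above turns on fmac for SFmode "a * b + c" when -ffast-math is given and -mno-fused-madd is not, and also rejects combining -msoft-atomic with -mhard-atomic. A tiny illustration (not part of the patch) of the kind of function the fmac change affects, assuming an SH target compiled with -ffast-math:

/* With -ffast-math (and without -mno-fused-madd) the SH backend may now
   contract this into a single fmac, with the fused-multiply-add rounding
   behaviour discussed in PR target/29100.  */
float
madd (float a, float b, float c)
{
  return a * b + c;
}
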
diff --git a/gcc/config/sh/sh.h b/gcc/config/sh/sh.h
index a6fac9484c2..b5f20f90558 100644
--- a/gcc/config/sh/sh.h
+++ b/gcc/config/sh/sh.h
@@ -172,6 +172,9 @@ do { \
(TARGET_SH1 && ! TARGET_SH2E && ! TARGET_SH5 \
&& ! (TARGET_HITACHI || sh_attr_renesas_p (FUN_DECL)))
+/* Nonzero if either soft or hard atomics are enabled. */
+#define TARGET_ANY_ATOMIC (TARGET_SOFT_ATOMIC | TARGET_HARD_ATOMIC)
+
#ifndef TARGET_CPU_DEFAULT
#define TARGET_CPU_DEFAULT SELECT_SH1
#define SUPPORT_SH1 1
@@ -433,7 +436,20 @@ do { \
"%{m2a*:%eSH2a does not support little-endian}}"
#endif
-#define DRIVER_SELF_SPECS UNSUPPORTED_SH2A
+#define UNSUPPORTED_ATOMIC_OPTIONS \
+"%{msoft-atomic:%{mhard-atomic:%e-msoft-atomic and -mhard-atomic cannot be \
+used at the same time}}"
+
+#if TARGET_CPU_DEFAULT & MASK_SH4A
+#define UNSUPPORTED_HARD_ATOMIC_CPU ""
+#else
+#define UNSUPPORTED_HARD_ATOMIC_CPU \
+"%{!m4a*:%{mhard-atomic:%e-mhard-atomic is only available for SH4A targets}}"
+#endif
+
+#undef DRIVER_SELF_SPECS
+#define DRIVER_SELF_SPECS UNSUPPORTED_SH2A, UNSUPPORTED_ATOMIC_OPTIONS,\
+ UNSUPPORTED_HARD_ATOMIC_CPU
#define ASSEMBLER_DIALECT assembler_dialect
diff --git a/gcc/config/sh/sh.md b/gcc/config/sh/sh.md
index 7167b920641..99d4c625f23 100644
--- a/gcc/config/sh/sh.md
+++ b/gcc/config/sh/sh.md
@@ -577,7 +577,7 @@
(and (eq_attr "type" "cbranch")
(match_test "TARGET_SH2"))
;; SH2e has a hardware bug that pretty much prohibits the use of
- ;; annuled delay slots.
+ ;; annulled delay slots.
[(eq_attr "cond_delay_slot" "yes") (and (eq_attr "cond_delay_slot" "yes")
(not (eq_attr "cpu" "sh2e"))) (nil)])
@@ -631,7 +631,7 @@
[(set_attr "type" "mt_group")])
;; Test low QI subreg against zero.
-;; This avoids unecessary zero extension before the test.
+;; This avoids unnecessary zero extension before the test.
(define_insn "tstqi_t_zero"
[(set (reg:SI T_REG)
@@ -5470,7 +5470,7 @@ label:
;; selected to copy QImode regs. If one of them happens to be allocated
;; on the stack, reload will stick to movqi insn and generate wrong
;; displacement addressing because of the generic m alternatives.
-;; With the movqi_reg_reg being specified before movqi it will be intially
+;; With the movqi_reg_reg being specified before movqi it will be initially
;; picked to load/store regs. If the regs regs are on the stack reload will
;; try other insns and not stick to movqi_reg_reg.
;; The same applies to the movhi variants.
diff --git a/gcc/config/sh/sh.opt b/gcc/config/sh/sh.opt
index 3ab2c51be4a..6a78d04ed10 100644
--- a/gcc/config/sh/sh.opt
+++ b/gcc/config/sh/sh.opt
@@ -321,7 +321,11 @@ Follow Renesas (formerly Hitachi) / SuperH calling conventions
msoft-atomic
Target Report Var(TARGET_SOFT_ATOMIC)
-Use software atomic sequences supported by kernel
+Use gUSA software atomic sequences
+
+mhard-atomic
+Target Report Var(TARGET_HARD_ATOMIC)
+Use hardware atomic sequences
menable-tas
Target Report RejectNegative Var(TARGET_ENABLE_TAS)
diff --git a/gcc/config/sh/sync.md b/gcc/config/sh/sync.md
index 258e048f3c7..79cd765d87f 100644
--- a/gcc/config/sh/sync.md
+++ b/gcc/config/sh/sync.md
@@ -21,10 +21,42 @@
;;
;; Atomic integer operations for the Renesas / SuperH SH CPUs.
;;
+;; On SH CPUs atomic integer operations can be done either in 'software' or
+;; in 'hardware', where true hardware support was introduced with the SH4A.
+;; In addition to that all SH CPUs support the 'tas.b' instruction, which
+;; can be optionally used to implement the 'atomic_test_and_set' builtin.
+;;
+;; tas.b atomic_test_and_set (-menable-tas)
+;;
+;; Depending on the particular hardware configuration, usage of the 'tas.b'
+;; instruction might be undesired or even unsafe. Thus, it has to be
+;; enabled by the user explicitly.  If it is not enabled, the
+;; 'atomic_test_and_set' builtin is implemented either with hardware or with
+;; software atomics, depending on which is enabled. It is also possible to
+;; enable the 'tas.b' instruction only, without enabling support for the
+;; other atomic operations.
+;;
+;;
+;; Hardware Atomics (-mhard-atomic, SH4A only)
+;;
+;; Hardware atomics implement all atomic operations using the 'movli.l' and
+;; 'movco.l' instructions that are available on SH4A.  On multi-core hardware
+;; configurations, hardware atomics is the only safe mode.
+;; However, it can also be safely used on single-core configurations.
+;; Since these instructions operate on SImode memory only, QImode and HImode
+;; have to be emulated with SImode and subreg masking, which results in
+;; larger code.
+;;
+;;
+;; Software Atomics (-msoft-atomic)
+;;
;; On single-core systems there can only be one execution context running
;; at a given point in time. This allows the usage of rewindable atomic
;; sequences, which effectively emulate locked-load / conditional-store
-;; operations.
+;; operations. This requires complementary support in the interrupt /
+;; exception handling code (e.g. kernel) and does not work safely on multi-
+;; core configurations.
+;;
;; When an execution context is interrupted while it is an atomic
;; sequence, the interrupted context's PC is rewound to the beginning of
;; the atomic sequence by the interrupt / exception handling code, before
@@ -79,15 +111,16 @@
;; For correct operation the atomic sequences must not be rewound after
;; they have passed the write-back instruction.
;;
-;; The current implementation is limited to QImode, HImode and SImode
+;; The current atomic support is limited to QImode, HImode and SImode
;; atomic operations. DImode operations could also be implemented but
;; would require some ABI modifications to support multiple-instruction
;; write-back. This is because SH1/SH2/SH3/SH4 does not have a DImode
;; store instruction. DImode stores must be split into two SImode stores.
;;
-;; For some operations it would be possible to use insns with an immediate
-;; operand such as add #imm,Rn. However, since the original value before
-;; the operation also needs to be available, this is not so handy.
+;; On single-core SH4A CPUs software atomic aware interrupt / exception code
+;; is actually compatible with user code that utilizes hardware atomics.
+;; Since SImode hardware atomic sequences are more compact on SH4A they are
+;; always used, regardless of the selected atomic mode.
(define_c_enum "unspec" [
UNSPEC_ATOMIC
@@ -100,6 +133,7 @@
])
(define_mode_iterator I124 [QI HI SI])
+(define_mode_iterator I12 [QI HI])
(define_mode_attr i124suffix [(QI "b") (HI "w") (SI "l")])
(define_mode_attr i124extend_insn [(QI "exts.b") (HI "exts.w") (SI "mov")])
@@ -108,23 +142,42 @@
(define_code_attr fetchop_name
[(plus "add") (minus "sub") (ior "or") (xor "xor") (and "and")])
+(define_code_attr fetchop_predicate
+ [(plus "atomic_arith_operand") (minus "register_operand")
+ (ior "atomic_logical_operand") (xor "atomic_logical_operand")
+ (and "atomic_logical_operand")])
+
+(define_code_attr fetchop_constraint
+ [(plus "rI08") (minus "r") (ior "rK08") (xor "rK08") (and "rK08")])
+
+;;------------------------------------------------------------------------------
+;; compare and swap
+
(define_expand "atomic_compare_and_swap<mode>"
[(match_operand:SI 0 "register_operand" "") ;; bool success output
(match_operand:I124 1 "register_operand" "") ;; oldval output
(match_operand:I124 2 "memory_operand" "") ;; memory
- (match_operand:I124 3 "register_operand" "") ;; expected input
- (match_operand:I124 4 "register_operand" "") ;; newval input
+ (match_operand:I124 3 "atomic_arith_operand" "") ;; expected input
+ (match_operand:I124 4 "atomic_arith_operand" "") ;; newval input
(match_operand:SI 5 "const_int_operand" "") ;; is_weak
(match_operand:SI 6 "const_int_operand" "") ;; success model
(match_operand:SI 7 "const_int_operand" "")] ;; failure model
- "TARGET_SOFT_ATOMIC && !TARGET_SHMEDIA"
+ "TARGET_ANY_ATOMIC && !TARGET_SHMEDIA"
{
- rtx addr;
+ rtx addr = force_reg (Pmode, XEXP (operands[2], 0));
+ rtx old_val = gen_lowpart (SImode, operands[1]);
+ rtx exp_val = operands[3];
+ rtx new_val = operands[4];
+ rtx atomic_insn;
+
+ if (TARGET_HARD_ATOMIC || (TARGET_SH4A_ARCH && <MODE>mode == SImode))
+ atomic_insn = gen_atomic_compare_and_swap<mode>_hard (old_val, addr,
+ exp_val, new_val);
+ else
+ atomic_insn = gen_atomic_compare_and_swap<mode>_soft (old_val, addr,
+ exp_val, new_val);
+ emit_insn (atomic_insn);
- addr = force_reg (Pmode, XEXP (operands[2], 0));
- emit_insn (gen_atomic_compare_and_swap<mode>_soft
- (gen_lowpart (SImode, operands[1]), addr, operands[3],
- operands[4]));
if (<MODE>mode == QImode)
emit_insn (gen_zero_extendqisi2 (gen_lowpart (SImode, operands[1]),
operands[1]));
@@ -135,6 +188,67 @@
DONE;
})
+(define_insn "atomic_compare_and_swapsi_hard"
+ [(set (match_operand:SI 0 "register_operand" "=&r")
+ (unspec_volatile:SI
+ [(mem:SI (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:SI 2 "arith_operand" "rI08")
+ (match_operand:SI 3 "arith_operand" "rI08")]
+ UNSPECV_CMPXCHG_1))
+ (set (mem:SI (match_dup 1))
+ (unspec_volatile:SI [(const_int 0)] UNSPECV_CMPXCHG_2))
+ (set (reg:SI T_REG)
+ (unspec_volatile:SI [(const_int 0)] UNSPECV_CMPXCHG_3))
+ (clobber (reg:SI R0_REG))]
+ "TARGET_ANY_ATOMIC && TARGET_SH4A_ARCH"
+{
+ return "\r0: movli.l @%1,r0" "\n"
+ " cmp/eq %2,r0" "\n"
+ " bf{.|/}s 0f" "\n"
+ " mov r0,%0" "\n"
+ " mov %3,r0" "\n"
+ " movco.l r0,@%1" "\n"
+ " bf 0b" "\n"
+ "0:";
+}
+ [(set_attr "length" "14")])
+
+(define_insn "atomic_compare_and_swap<mode>_hard"
+ [(set (match_operand:SI 0 "register_operand" "=&r")
+ (unspec_volatile:SI
+ [(mem:I12 (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:I12 2 "register_operand" "r")
+ (match_operand:I12 3 "register_operand" "r")]
+ UNSPECV_CMPXCHG_1))
+ (set (mem:I12 (match_dup 1))
+ (unspec_volatile:I12 [(const_int 0)] UNSPECV_CMPXCHG_2))
+ (set (reg:SI T_REG)
+ (unspec_volatile:SI [(const_int 0)] UNSPECV_CMPXCHG_3))
+ (clobber (reg:SI R0_REG))
+ (clobber (match_scratch:SI 4 "=&r"))
+ (clobber (match_scratch:SI 5 "=&r"))
+ (clobber (match_scratch:SI 6 "=1"))]
+ "TARGET_HARD_ATOMIC && TARGET_SH4A_ARCH"
+{
+ return "\r mov #-4,%5" "\n"
+ " <i124extend_insn> %2,%4" "\n"
+ " and %1,%5" "\n"
+ " xor %5,%1" "\n"
+ " add r15,%1" "\n"
+ " add #-4,%1" "\n"
+ "0: movli.l @%5,r0" "\n"
+ " mov.l r0,@-r15" "\n"
+ " mov.<i124suffix> @%1,%0" "\n"
+ " mov.<i124suffix> %3,@%1" "\n"
+ " cmp/eq %4,%0" "\n"
+ " bf{.|/}s 0f" "\n"
+ " mov.l @r15+,r0" "\n"
+ " movco.l r0,@%5" "\n"
+ " bf 0b" "\n"
+ "0:";
+}
+ [(set_attr "length" "30")])
+
(define_insn "atomic_compare_and_swap<mode>_soft"
[(set (match_operand:SI 0 "register_operand" "=&u")
(unspec_volatile:SI
@@ -151,7 +265,7 @@
(clobber (reg:SI R1_REG))]
"TARGET_SOFT_ATOMIC && !TARGET_SHMEDIA"
{
- return "mova 1f,r0" "\n"
+ return "\r mova 1f,r0" "\n"
" <i124extend_insn> %2,%4" "\n"
" .align 2" "\n"
" mov r15,r1" "\n"
@@ -164,16 +278,27 @@
}
[(set_attr "length" "20")])
+;;------------------------------------------------------------------------------
+;; read - write - return old value
+
(define_expand "atomic_exchange<mode>"
[(match_operand:I124 0 "register_operand" "") ;; oldval output
(match_operand:I124 1 "memory_operand" "") ;; memory
- (match_operand:I124 2 "register_operand" "") ;; newval input
+ (match_operand:I124 2 "atomic_arith_operand" "") ;; newval input
(match_operand:SI 3 "const_int_operand" "")] ;; memory model
- "TARGET_SOFT_ATOMIC && !TARGET_SHMEDIA"
+ "TARGET_ANY_ATOMIC && !TARGET_SHMEDIA"
{
rtx addr = force_reg (Pmode, XEXP (operands[1], 0));
- emit_insn (gen_atomic_exchange<mode>_soft
- (operands[0], addr, operands[2]));
+ rtx val = operands[2];
+ rtx atomic_insn;
+
+ if (TARGET_HARD_ATOMIC || (TARGET_SH4A_ARCH && <MODE>mode == SImode))
+ atomic_insn = gen_atomic_exchange<mode>_hard (operands[0], addr, val);
+ else
+ atomic_insn = gen_atomic_exchange<mode>_soft (operands[0], addr, val);
+
+ emit_insn (atomic_insn);
+
if (<MODE>mode == QImode)
emit_insn (gen_zero_extendqisi2 (gen_lowpart (SImode, operands[0]),
operands[0]));
@@ -183,6 +308,49 @@
DONE;
})
+(define_insn "atomic_exchangesi_hard"
+ [(set (match_operand:SI 0 "register_operand" "=&r")
+ (mem:SI (match_operand:SI 1 "register_operand" "r")))
+ (set (mem:SI (match_dup 1))
+ (unspec:SI
+ [(match_operand:SI 2 "arith_operand" "rI08")] UNSPEC_ATOMIC))
+ (clobber (reg:SI R0_REG))]
+ "TARGET_ANY_ATOMIC && TARGET_SH4A_ARCH"
+{
+ return "\r0: movli.l @%1,r0" "\n"
+ " mov r0,%0" "\n"
+ " mov %2,r0" "\n"
+ " movco.l r0,@%1" "\n"
+ " bf 0b";
+}
+ [(set_attr "length" "10")])
+
+(define_insn "atomic_exchange<mode>_hard"
+ [(set (match_operand:I12 0 "register_operand" "=&r")
+ (mem:I12 (match_operand:SI 1 "register_operand" "r")))
+ (set (mem:I12 (match_dup 1))
+ (unspec:I12
+ [(match_operand:I12 2 "register_operand" "r")] UNSPEC_ATOMIC))
+ (clobber (reg:SI R0_REG))
+ (clobber (match_scratch:SI 3 "=&r"))
+ (clobber (match_scratch:SI 4 "=1"))]
+ "TARGET_HARD_ATOMIC && TARGET_SH4A_ARCH"
+{
+ return "\r mov #-4,%3" "\n"
+ " and %1,%3" "\n"
+ " xor %3,%1" "\n"
+ " add r15,%1" "\n"
+ " add #-4,%1" "\n"
+ "0: movli.l @%3,r0" "\n"
+ " mov.l r0,@-r15" "\n"
+ " mov.<i124suffix> @%1,%0" "\n"
+ " mov.<i124suffix> %2,@%1" "\n"
+ " mov.l @r15+,r0" "\n"
+ " movco.l r0,@%3" "\n"
+ " bf 0b";
+}
+ [(set_attr "length" "24")])
+
(define_insn "atomic_exchange<mode>_soft"
[(set (match_operand:I124 0 "register_operand" "=&u")
(mem:I124 (match_operand:SI 1 "register_operand" "u")))
@@ -193,7 +361,7 @@
(clobber (reg:SI R1_REG))]
"TARGET_SOFT_ATOMIC && !TARGET_SHMEDIA"
{
- return "mova 1f,r0" "\n"
+ return "\r mova 1f,r0" "\n"
" .align 2" "\n"
" mov r15,r1" "\n"
" mov #(0f-1f),r15" "\n"
@@ -203,22 +371,32 @@
}
[(set_attr "length" "14")])
+;;------------------------------------------------------------------------------
+;; read - add|sub|or|and|xor|nand - write - return old value
+
(define_expand "atomic_fetch_<fetchop_name><mode>"
[(set (match_operand:I124 0 "register_operand" "")
(match_operand:I124 1 "memory_operand" ""))
(set (match_dup 1)
(unspec:I124
[(FETCHOP:I124 (match_dup 1)
- (match_operand:I124 2 "register_operand" ""))]
+ (match_operand:I124 2 "<fetchop_predicate>" ""))]
UNSPEC_ATOMIC))
(match_operand:SI 3 "const_int_operand" "")]
- "TARGET_SOFT_ATOMIC && !TARGET_SHMEDIA"
+ "TARGET_ANY_ATOMIC && !TARGET_SHMEDIA"
{
- rtx addr;
+ rtx addr = force_reg (Pmode, XEXP (operands[1], 0));
+ rtx atomic_insn;
+
+ if (TARGET_HARD_ATOMIC || (TARGET_SH4A_ARCH && <MODE>mode == SImode))
+ atomic_insn = gen_atomic_fetch_<fetchop_name><mode>_hard (operands[0], addr,
+ operands[2]);
+ else
+ atomic_insn = gen_atomic_fetch_<fetchop_name><mode>_soft (operands[0],
+ addr,
+ operands[2]);
+ emit_insn (atomic_insn);
- addr = force_reg (Pmode, XEXP (operands[1], 0));
- emit_insn (gen_atomic_fetch_<fetchop_name><mode>_soft
- (operands[0], addr, operands[2]));
if (<MODE>mode == QImode)
emit_insn (gen_zero_extendqisi2 (gen_lowpart (SImode, operands[0]),
operands[0]));
@@ -228,6 +406,55 @@
DONE;
})
+(define_insn "atomic_fetch_<fetchop_name>si_hard"
+ [(set (match_operand:SI 0 "register_operand" "=&r")
+ (mem:SI (match_operand:SI 1 "register_operand" "r")))
+ (set (mem:SI (match_dup 1))
+ (unspec:SI
+ [(FETCHOP:SI (mem:SI (match_dup 1))
+ (match_operand:SI 2 "<fetchop_predicate>" "<fetchop_constraint>"))]
+ UNSPEC_ATOMIC))
+ (clobber (reg:SI R0_REG))]
+ "TARGET_ANY_ATOMIC && TARGET_SH4A_ARCH"
+{
+ return "\r0: movli.l @%1,r0" "\n"
+ " mov r0,%0" "\n"
+ " <fetchop_name> %2,r0" "\n"
+ " movco.l r0,@%1" "\n"
+ " bf 0b";
+}
+ [(set_attr "length" "10")])
+
+(define_insn "atomic_fetch_<fetchop_name><mode>_hard"
+ [(set (match_operand:I12 0 "register_operand" "=&r")
+ (mem:I12 (match_operand:SI 1 "register_operand" "r")))
+ (set (mem:I12 (match_dup 1))
+ (unspec:I12
+ [(FETCHOP:I12 (mem:I12 (match_dup 1))
+ (match_operand:I12 2 "<fetchop_predicate>" "<fetchop_constraint>"))]
+ UNSPEC_ATOMIC))
+ (clobber (reg:SI R0_REG))
+ (clobber (match_scratch:SI 3 "=&r"))
+ (clobber (match_scratch:SI 4 "=1"))]
+ "TARGET_HARD_ATOMIC && TARGET_SH4A_ARCH"
+{
+ return "\r mov #-4,%3" "\n"
+ " and %1,%3" "\n"
+ " xor %3,%1" "\n"
+ " add r15,%1" "\n"
+ " add #-4,%1" "\n"
+ "0: movli.l @%3,r0" "\n"
+ " mov.l r0,@-r15" "\n"
+ " mov.<i124suffix> @%1,r0" "\n"
+ " mov r0,%0" "\n"
+ " <fetchop_name> %2,r0" "\n"
+ " mov.<i124suffix> r0,@%1" "\n"
+ " mov.l @r15+,r0" "\n"
+ " movco.l r0,@%3" "\n"
+ " bf 0b";
+}
+ [(set_attr "length" "28")])
+
(define_insn "atomic_fetch_<fetchop_name><mode>_soft"
[(set (match_operand:I124 0 "register_operand" "=&u")
(mem:I124 (match_operand:SI 1 "register_operand" "u")))
@@ -241,7 +468,7 @@
(clobber (reg:SI R1_REG))]
"TARGET_SOFT_ATOMIC && !TARGET_SHMEDIA"
{
- return "mova 1f,r0" "\n"
+ return "\r mova 1f,r0" "\n"
" .align 2" "\n"
" mov r15,r1" "\n"
" mov #(0f-1f),r15" "\n"
@@ -259,16 +486,23 @@
(set (match_dup 1)
(unspec:I124
[(not:I124 (and:I124 (match_dup 1)
- (match_operand:I124 2 "register_operand" "")))]
+ (match_operand:I124 2 "atomic_logical_operand" "")))]
UNSPEC_ATOMIC))
(match_operand:SI 3 "const_int_operand" "")]
- "TARGET_SOFT_ATOMIC && !TARGET_SHMEDIA"
+ "TARGET_ANY_ATOMIC && !TARGET_SHMEDIA"
{
- rtx addr;
+ rtx addr = force_reg (Pmode, XEXP (operands[1], 0));
+ rtx atomic_insn;
+
+ if (TARGET_HARD_ATOMIC || (TARGET_SH4A_ARCH && <MODE>mode == SImode))
+ atomic_insn = gen_atomic_fetch_nand<mode>_hard (operands[0], addr,
+ operands[2]);
+ else
+ atomic_insn = gen_atomic_fetch_nand<mode>_soft (operands[0], addr,
+ operands[2]);
+
+ emit_insn (atomic_insn);
- addr = force_reg (Pmode, XEXP (operands[1], 0));
- emit_insn (gen_atomic_fetch_nand<mode>_soft
- (operands[0], addr, operands[2]));
if (<MODE>mode == QImode)
emit_insn (gen_zero_extendqisi2 (gen_lowpart (SImode, operands[0]),
operands[0]));
@@ -278,6 +512,57 @@
DONE;
})
+(define_insn "atomic_fetch_nandsi_hard"
+ [(set (match_operand:SI 0 "register_operand" "=&r")
+ (mem:SI (match_operand:SI 1 "register_operand" "r")))
+ (set (mem:SI (match_dup 1))
+ (unspec:SI
+ [(not:SI (and:SI (mem:SI (match_dup 1))
+ (match_operand:SI 2 "logical_operand" "rK08")))]
+ UNSPEC_ATOMIC))
+ (clobber (reg:SI R0_REG))]
+ "TARGET_ANY_ATOMIC && TARGET_SH4A_ARCH"
+{
+ return "\r0: movli.l @%1,r0" "\n"
+ " mov r0,%0" "\n"
+ " and %2,r0" "\n"
+ " not r0,r0" "\n"
+ " movco.l r0,@%1" "\n"
+ " bf 0b";
+}
+ [(set_attr "length" "12")])
+
+(define_insn "atomic_fetch_nand<mode>_hard"
+ [(set (match_operand:I12 0 "register_operand" "=&r")
+ (mem:I12 (match_operand:SI 1 "register_operand" "r")))
+ (set (mem:I12 (match_dup 1))
+ (unspec:I12
+ [(not:I12 (and:I12 (mem:I12 (match_dup 1))
+ (match_operand:I12 2 "logical_operand" "rK08")))]
+ UNSPEC_ATOMIC))
+ (clobber (reg:SI R0_REG))
+ (clobber (match_scratch:SI 3 "=&r"))
+ (clobber (match_scratch:SI 4 "=1"))]
+ "TARGET_HARD_ATOMIC && TARGET_SH4A_ARCH"
+{
+ return "\r mov #-4,%3" "\n"
+ " and %1,%3" "\n"
+ " xor %3,%1" "\n"
+ " add r15,%1" "\n"
+ " add #-4,%1" "\n"
+ "0: movli.l @%3,r0" "\n"
+ " mov.l r0,@-r15" "\n"
+ " mov.<i124suffix> @%1,r0" "\n"
+ " mov r0,%0" "\n"
+ " and %2,r0" "\n"
+ " not r0,r0" "\n"
+ " mov.<i124suffix> r0,@%1" "\n"
+ " mov.l @r15+,r0" "\n"
+ " movco.l r0,@%3" "\n"
+ " bf 0b";
+}
+ [(set_attr "length" "30")])
+
(define_insn "atomic_fetch_nand<mode>_soft"
[(set (match_operand:I124 0 "register_operand" "=&u")
(mem:I124 (match_operand:SI 1 "register_operand" "u")))
@@ -291,7 +576,7 @@
(clobber (reg:SI R1_REG))]
"TARGET_SOFT_ATOMIC && !TARGET_SHMEDIA"
{
- return "mova 1f,r0" "\n"
+ return "\r mova 1f,r0" "\n"
" mov r15,r1" "\n"
" .align 2" "\n"
" mov #(0f-1f),r15" "\n"
@@ -304,23 +589,32 @@
}
[(set_attr "length" "20")])
+;;------------------------------------------------------------------------------
+;; read - add|sub|or|and|xor|nand - write - return new value
+
(define_expand "atomic_<fetchop_name>_fetch<mode>"
[(set (match_operand:I124 0 "register_operand" "")
(FETCHOP:I124
(match_operand:I124 1 "memory_operand" "")
- (match_operand:I124 2 "register_operand" "")))
+ (match_operand:I124 2 "<fetchop_predicate>" "")))
(set (match_dup 1)
(unspec:I124
[(FETCHOP:I124 (match_dup 1) (match_dup 2))]
UNSPEC_ATOMIC))
(match_operand:SI 3 "const_int_operand" "")]
- "TARGET_SOFT_ATOMIC && !TARGET_SHMEDIA"
+ "TARGET_ANY_ATOMIC && !TARGET_SHMEDIA"
{
- rtx addr;
+ rtx addr = force_reg (Pmode, XEXP (operands[1], 0));
+ rtx atomic_insn;
+
+ if (TARGET_HARD_ATOMIC || (TARGET_SH4A_ARCH && <MODE>mode == SImode))
+ atomic_insn = gen_atomic_<fetchop_name>_fetch<mode>_hard (operands[0], addr,
+ operands[2]);
+ else
+ atomic_insn = gen_atomic_<fetchop_name>_fetch<mode>_soft (operands[0], addr,
+ operands[2]);
+ emit_insn (atomic_insn);
- addr = force_reg (Pmode, XEXP (operands[1], 0));
- emit_insn (gen_atomic_<fetchop_name>_fetch<mode>_soft
- (operands[0], addr, operands[2]));
if (<MODE>mode == QImode)
emit_insn (gen_zero_extendqisi2 (gen_lowpart (SImode, operands[0]),
operands[0]));
@@ -330,6 +624,56 @@
DONE;
})
+(define_insn "atomic_<fetchop_name>_fetchsi_hard"
+ [(set (match_operand:SI 0 "register_operand" "=&z")
+ (FETCHOP:SI
+ (mem:SI (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:SI 2 "<fetchop_predicate>" "<fetchop_constraint>")))
+ (set (mem:SI (match_dup 1))
+ (unspec:SI
+ [(FETCHOP:SI (mem:SI (match_dup 1)) (match_dup 2))]
+ UNSPEC_ATOMIC))]
+ "TARGET_ANY_ATOMIC && TARGET_SH4A_ARCH"
+{
+ return "\r0: movli.l @%1,%0" "\n"
+ " <fetchop_name> %2,%0" "\n"
+ " movco.l %0,@%1" "\n"
+ " bf 0b";
+}
+ [(set_attr "length" "8")])
+
+(define_insn "atomic_<fetchop_name>_fetch<mode>_hard"
+ [(set (match_operand:I12 0 "register_operand" "=&r")
+ (FETCHOP:I12
+ (mem:I12 (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:I12 2 "<fetchop_predicate>" "<fetchop_constraint>")))
+ (set (mem:I12 (match_dup 1))
+ (unspec:I12
+ [(FETCHOP:I12 (mem:I12 (match_dup 1)) (match_dup 2))]
+ UNSPEC_ATOMIC))
+
+ (clobber (reg:SI R0_REG))
+ (clobber (match_scratch:SI 3 "=&r"))
+ (clobber (match_scratch:SI 4 "=1"))]
+ "TARGET_HARD_ATOMIC && TARGET_SH4A_ARCH"
+{
+ return "\r mov #-4,%3" "\n"
+ " and %1,%3" "\n"
+ " xor %3,%1" "\n"
+ " add r15,%1" "\n"
+ " add #-4,%1" "\n"
+ "0: movli.l @%3,r0" "\n"
+ " mov.l r0,@-r15" "\n"
+ " mov.<i124suffix> @%1,r0" "\n"
+ " <fetchop_name> %2,r0" "\n"
+ " mov.<i124suffix> r0,@%1" "\n"
+ " mov r0,%0" "\n"
+ " mov.l @r15+,r0" "\n"
+ " movco.l r0,@%3" "\n"
+ " bf 0b";
+}
+ [(set_attr "length" "28")])
+
(define_insn "atomic_<fetchop_name>_fetch<mode>_soft"
[(set (match_operand:I124 0 "register_operand" "=&u")
(FETCHOP:I124
@@ -343,7 +687,7 @@
(clobber (reg:SI R1_REG))]
"TARGET_SOFT_ATOMIC && !TARGET_SHMEDIA"
{
- return "mova 1f,r0" "\n"
+ return "\r mova 1f,r0" "\n"
" mov r15,r1" "\n"
" .align 2" "\n"
" mov #(0f-1f),r15" "\n"
@@ -358,19 +702,25 @@
[(set (match_operand:I124 0 "register_operand" "")
(not:I124 (and:I124
(match_operand:I124 1 "memory_operand" "")
- (match_operand:I124 2 "register_operand" ""))))
+ (match_operand:I124 2 "atomic_logical_operand" ""))))
(set (match_dup 1)
(unspec:I124
[(not:I124 (and:I124 (match_dup 1) (match_dup 2)))]
UNSPEC_ATOMIC))
(match_operand:SI 3 "const_int_operand" "")]
- "TARGET_SOFT_ATOMIC && !TARGET_SHMEDIA"
+ "TARGET_ANY_ATOMIC && !TARGET_SHMEDIA"
{
- rtx addr;
+ rtx addr = force_reg (Pmode, XEXP (operands[1], 0));
+ rtx atomic_insn;
+
+ if (TARGET_HARD_ATOMIC || (TARGET_SH4A_ARCH && <MODE>mode == SImode))
+ atomic_insn = gen_atomic_nand_fetch<mode>_hard (operands[0], addr,
+ operands[2]);
+ else
+ atomic_insn = gen_atomic_nand_fetch<mode>_soft (operands[0], addr,
+ operands[2]);
+ emit_insn (atomic_insn);
- addr = force_reg (Pmode, XEXP (operands[1], 0));
- emit_insn (gen_atomic_nand_fetch<mode>_soft
- (operands[0], addr, operands[2]));
if (<MODE>mode == QImode)
emit_insn (gen_zero_extendqisi2 (gen_lowpart (SImode, operands[0]),
operands[0]));
@@ -380,6 +730,54 @@
DONE;
})
+(define_insn "atomic_nand_fetchsi_hard"
+ [(set (match_operand:SI 0 "register_operand" "=&z")
+ (not:SI (and:SI (mem:SI (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:SI 2 "logical_operand" "rK08"))))
+ (set (mem:SI (match_dup 1))
+ (unspec:SI
+ [(not:SI (and:SI (mem:SI (match_dup 1)) (match_dup 2)))]
+ UNSPEC_ATOMIC))]
+ "TARGET_ANY_ATOMIC && TARGET_SH4A_ARCH"
+{
+ return "\r0: movli.l @%1,%0" "\n"
+ " and %2,%0" "\n"
+ " not %0,%0" "\n"
+ " movco.l %0,@%1" "\n"
+ " bf 0b";
+}
+ [(set_attr "length" "10")])
+
+(define_insn "atomic_nand_fetch<mode>_hard"
+ [(set (match_operand:I12 0 "register_operand" "=&r")
+ (not:I12 (and:I12 (mem:I12 (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:I12 2 "logical_operand" "rK08"))))
+ (set (mem:I12 (match_dup 1))
+ (unspec:I12
+ [(not:I12 (and:I12 (mem:I12 (match_dup 1)) (match_dup 2)))]
+ UNSPEC_ATOMIC))
+ (clobber (reg:SI R0_REG))
+ (clobber (match_scratch:SI 3 "=&r"))
+ (clobber (match_scratch:SI 4 "=1"))]
+ "TARGET_HARD_ATOMIC && TARGET_SH4A_ARCH"
+{
+ return "\r mov #-4,%3" "\n"
+ " and %1,%3" "\n"
+ " xor %3,%1" "\n"
+ " add r15,%1" "\n"
+ " add #-4,%1" "\n"
+ "0: movli.l @%3,r0" "\n"
+ " mov.l r0,@-r15" "\n"
+ " mov.<i124suffix> @%1,r0" "\n"
+ " and %2,r0" "\n"
+ " not r0,%0" "\n"
+ " mov.<i124suffix> %0,@%1" "\n"
+ " mov.l @r15+,r0" "\n"
+ " movco.l r0,@%3" "\n"
+ " bf 0b";
+}
+ [(set_attr "length" "28")])
+
(define_insn "atomic_nand_fetch<mode>_soft"
[(set (match_operand:I124 0 "register_operand" "=&u")
(not:I124 (and:I124
@@ -393,7 +791,7 @@
(clobber (reg:SI R1_REG))]
"TARGET_SOFT_ATOMIC && !TARGET_SHMEDIA"
{
- return "mova 1f,r0" "\n"
+ return "\r mova 1f,r0" "\n"
" .align 2" "\n"
" mov r15,r1" "\n"
" mov #(0f-1f),r15" "\n"
@@ -405,11 +803,14 @@
}
[(set_attr "length" "18")])
+;;------------------------------------------------------------------------------
+;; read - test against zero - or with 0x80 - write - return test result
+
(define_expand "atomic_test_and_set"
[(match_operand:SI 0 "register_operand" "") ;; bool result output
(match_operand:QI 1 "memory_operand" "") ;; memory
(match_operand:SI 2 "const_int_operand" "")] ;; model
- "(TARGET_SOFT_ATOMIC || TARGET_ENABLE_TAS) && !TARGET_SHMEDIA"
+ "(TARGET_ANY_ATOMIC || TARGET_ENABLE_TAS) && !TARGET_SHMEDIA"
{
rtx addr = force_reg (Pmode, XEXP (operands[1], 0));
@@ -417,11 +818,13 @@
emit_insn (gen_tasb (addr));
else
{
- rtx val;
-
- val = gen_int_mode (targetm.atomic_test_and_set_trueval, QImode);
+ rtx val = gen_int_mode (targetm.atomic_test_and_set_trueval, QImode);
val = force_reg (QImode, val);
- emit_insn (gen_atomic_test_and_set_soft (addr, val));
+
+ if (TARGET_HARD_ATOMIC)
+ emit_insn (gen_atomic_test_and_set_hard (addr, val));
+ else
+ emit_insn (gen_atomic_test_and_set_soft (addr, val));
}
/* The result of the test op is the inverse of what we are
@@ -452,7 +855,7 @@
(clobber (reg:SI R1_REG))]
"TARGET_SOFT_ATOMIC && !TARGET_ENABLE_TAS && !TARGET_SHMEDIA"
{
- return "mova 1f,r0" "\n"
+ return "\r mova 1f,r0" "\n"
" .align 2" "\n"
" mov r15,r1" "\n"
" mov #(0f-1f),r15" "\n"
@@ -463,3 +866,31 @@
}
[(set_attr "length" "16")])
+(define_insn "atomic_test_and_set_hard"
+ [(set (reg:SI T_REG)
+ (eq:SI (mem:QI (match_operand:SI 0 "register_operand" "r"))
+ (const_int 0)))
+ (set (mem:QI (match_dup 0))
+ (unspec:QI [(match_operand:QI 1 "register_operand" "r")] UNSPEC_ATOMIC))
+ (clobber (reg:SI R0_REG))
+ (clobber (match_scratch:SI 2 "=&r"))
+ (clobber (match_scratch:SI 3 "=&r"))
+ (clobber (match_scratch:SI 4 "=0"))]
+ "TARGET_HARD_ATOMIC && !TARGET_ENABLE_TAS && TARGET_SH4A_ARCH"
+{
+ return "\r mov #-4,%2" "\n"
+ " and %0,%2" "\n"
+ " xor %2,%0" "\n"
+ " add r15,%0" "\n"
+ " add #-4,%0" "\n"
+ "0: movli.l @%2,r0" "\n"
+ " mov.l r0,@-r15" "\n"
+ " mov.b @%0,%3" "\n"
+ " mov.b %1,@%0" "\n"
+ " mov.l @r15+,r0" "\n"
+ " movco.l r0,@%2" "\n"
+ " bf 0b" "\n"
+ " tst %3,%3";
+}
+ [(set_attr "length" "26")])
+
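
Editorial note: the expanders added above (atomic_compare_and_swap<mode>, atomic_exchange<mode>, atomic_fetch_<op><mode>, atomic_<op>_fetch<mode>, the nand variants and atomic_test_and_set) are the named patterns GCC's generic __atomic built-ins lower to. A minimal user-level sketch — not part of the patch — of calls that exercise them; which of the new _hard (movli.l/movco.l), _soft (gUSA) or tas.b variants is emitted follows the -mhard-atomic / -msoft-atomic / -menable-tas options described in the sync.md comment:

#include <stdbool.h>

int fetch_add (int *p, int v)
{
  return __atomic_fetch_add (p, v, __ATOMIC_SEQ_CST);
}

int exchange (int *p, int v)
{
  return __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST);
}

bool compare_swap (int *p, int *expected, int desired)
{
  return __atomic_compare_exchange_n (p, expected, desired, false,
                                      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

bool test_and_set (char *p)
{
  return __atomic_test_and_set (p, __ATOMIC_SEQ_CST);
}
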
diff --git a/gcc/config/sparc/sparc.c b/gcc/config/sparc/sparc.c
index 4cb381e60e9..54fce8f6ddb 100644
--- a/gcc/config/sparc/sparc.c
+++ b/gcc/config/sparc/sparc.c
@@ -2738,7 +2738,7 @@ emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
}
else
{
- this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
+ this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode));
/* Operand 0 is the return value. We'll copy it out later. */
if (i > 0)
@@ -7431,7 +7431,7 @@ sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
}
else
{
- slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode));
emit_move_insn (slot0, x);
}
@@ -7444,7 +7444,7 @@ sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
}
else
{
- slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
+ slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode));
emit_move_insn (slot1, y);
}
@@ -11631,7 +11631,7 @@ sparc_expand_vector_init (rtx target, rtx vals)
}
}
- mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
+ mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
for (i = 0; i < n_elts; i++)
emit_move_insn (adjust_address_nv (mem, inner_mode,
i * GET_MODE_SIZE (inner_mode)),
diff --git a/gcc/config/sparc/sync.md b/gcc/config/sparc/sync.md
index d07d572c614..d11f6636490 100644
--- a/gcc/config/sparc/sync.md
+++ b/gcc/config/sparc/sync.md
@@ -45,7 +45,7 @@
})
;; A compiler-only memory barrier. Generic code, when checking for the
-;; existance of various named patterns, uses asm("":::"memory") when we
+;; existence of various named patterns, uses asm("":::"memory") when we
;; don't need an actual instruction. Here, it's easiest to pretend that
;; membar 0 is such a barrier. Further, this gives us a nice hook to
;; ignore all such barriers on Sparc V7.
diff --git a/gcc/config/spu/spu.c b/gcc/config/spu/spu.c
index dc5ca45dd57..b81bf5e8f2d 100644
--- a/gcc/config/spu/spu.c
+++ b/gcc/config/spu/spu.c
@@ -35,7 +35,6 @@
#include "function.h"
#include "output.h"
#include "basic-block.h"
-#include "integrate.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "hashtab.h"
@@ -2870,7 +2869,7 @@ spu_machine_dependent_reorg (void)
prop = prev;
/* If this is the JOIN block of a simple IF-THEN then
- propogate the hint to the HEADER block. */
+ propagate the hint to the HEADER block. */
else if (prev && prev2
&& EDGE_COUNT (bb->preds) == 2
&& EDGE_COUNT (prev->preds) == 1
@@ -3124,7 +3123,7 @@ spu_sched_variable_issue (FILE *file ATTRIBUTE_UNUSED,
prev_priority = INSN_PRIORITY (insn);
}
- /* Always try issueing more insns. spu_sched_reorder will decide
+ /* Always try issuing more insns. spu_sched_reorder will decide
when the cycle should be advanced. */
return 1;
}
@@ -3231,7 +3230,7 @@ spu_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
used to effect it. */
if (in_spu_reorg && spu_dual_nops < 10)
{
- /* When we are at an even address and we are not issueing nops to
+ /* When we are at an even address and we are not issuing nops to
improve scheduling then we need to advance the cycle. */
if ((spu_sched_length & 7) == 0 && prev_clock_var == clock
&& (spu_dual_nops == 0
diff --git a/gcc/config/spu/spu.md b/gcc/config/spu/spu.md
index 3178a6df593..03ed4575591 100644
--- a/gcc/config/spu/spu.md
+++ b/gcc/config/spu/spu.md
@@ -4209,7 +4209,7 @@ selb\t%0,%4,%0,%3"
""
{ spu_expand_prologue (); DONE; })
-;; "blockage" is only emited in epilogue. This is what it took to
+;; "blockage" is only emitted in epilogue. This is what it took to
;; make "basic block reordering" work with the insns sequence
;; generated by the spu_expand_epilogue (taken from mips.md)
diff --git a/gcc/config/spu/t-spu-elf b/gcc/config/spu/t-spu-elf
index 50c8d0353f5..84fbbb28ad2 100644
--- a/gcc/config/spu/t-spu-elf
+++ b/gcc/config/spu/t-spu-elf
@@ -22,7 +22,7 @@ spu.o: $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
$(RTL_H) $(REGS_H) hard-reg-set.h \
real.h insn-config.h conditions.h insn-attr.h flags.h $(RECOG_H) \
$(OBSTACK_H) $(TREE_H) $(EXPR_H) $(OPTABS_H) except.h function.h \
- output.h $(BASIC_BLOCK_H) $(INTEGRATE_H) $(GGC_H) $(HASHTAB_H) \
+ output.h $(BASIC_BLOCK_H) $(GGC_H) $(HASHTAB_H) \
$(TM_P_H) $(TARGET_H) $(TARGET_DEF_H) langhooks.h reload.h cfglayout.h \
$(srcdir)/config/spu/spu-protos.h \
$(srcdir)/config/spu/spu-builtins.def
diff --git a/gcc/config/tilegx/tilegx.c b/gcc/config/tilegx/tilegx.c
index a23e193ee3a..7ca4eb68be4 100644
--- a/gcc/config/tilegx/tilegx.c
+++ b/gcc/config/tilegx/tilegx.c
@@ -37,7 +37,7 @@
#include "tm-constrs.h"
#include "target.h"
#include "target-def.h"
-#include "integrate.h"
+#include "function.h"
#include "dwarf2.h"
#include "timevar.h"
#include "gimple.h"
diff --git a/gcc/config/tilepro/tilepro.c b/gcc/config/tilepro/tilepro.c
index 2b18b4f6622..ce28d9e8221 100644
--- a/gcc/config/tilepro/tilepro.c
+++ b/gcc/config/tilepro/tilepro.c
@@ -38,7 +38,7 @@
#include "tm-constrs.h"
#include "target.h"
#include "target-def.h"
-#include "integrate.h"
+#include "function.h"
#include "dwarf2.h"
#include "timevar.h"
#include "gimple.h"
diff --git a/gcc/config/v850/v850.c b/gcc/config/v850/v850.c
index e2a72b0b3d5..186327da7ac 100644
--- a/gcc/config/v850/v850.c
+++ b/gcc/config/v850/v850.c
@@ -37,7 +37,6 @@
#include "function.h"
#include "diagnostic-core.h"
#include "ggc.h"
-#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
diff --git a/gcc/config/vms/vms.c b/gcc/config/vms/vms.c
index d4ebd18730b..d23e8a8456a 100644
--- a/gcc/config/vms/vms.c
+++ b/gcc/config/vms/vms.c
@@ -99,12 +99,12 @@ static const struct vms_crtl_name vms_crtl_names[] =
#define NBR_CRTL_NAMES (sizeof (vms_crtl_names) / sizeof (*vms_crtl_names))
-/* List of aliased identifiers. They must be persistant accross gc. */
+/* List of aliased identifiers. They must be persistent across gc. */
static GTY(()) VEC(tree,gc) *aliases_id;
/* Add a CRTL translation. This simply use the transparent alias
- mechanism, which is platform independant and works with the
+ mechanism, which is platform independent and works with the
#pragma extern_prefix (which set the assembler name). */
static void
diff --git a/gcc/config/vxworks-dummy.h b/gcc/config/vxworks-dummy.h
index e3ea6ad6a98..e2ea7fa4d64 100644
--- a/gcc/config/vxworks-dummy.h
+++ b/gcc/config/vxworks-dummy.h
@@ -22,7 +22,7 @@ a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
-/* True if we're targetting VxWorks. */
+/* True if we're targeting VxWorks. */
#ifndef TARGET_VXWORKS
#define TARGET_VXWORKS 0
#endif
diff --git a/gcc/config/vxworks.h b/gcc/config/vxworks.h
index 04ee945d650..000de3604f6 100644
--- a/gcc/config/vxworks.h
+++ b/gcc/config/vxworks.h
@@ -20,7 +20,7 @@ You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
-/* Assert that we are targetting VxWorks. */
+/* Assert that we are targeting VxWorks. */
#undef TARGET_VXWORKS
#define TARGET_VXWORKS 1