author    Richard Sandiford <rsandifo@nildram.co.uk>  2007-10-24 17:46:39 +0000
committer Richard Sandiford <rsandifo@gcc.gnu.org>  2007-10-24 17:46:39 +0000
commit    a1c6b246b55b9c50aaf5d15e180d13f01b903cf5
tree      a8ba7bae7f0a9ef3a148c5177da7c152b6e2e57c /gcc/config/mips/mips.h
parent    32e520abfcfb27d1d0949b1fe8fc8217678ecea3
download  gcc-a1c6b246b55b9c50aaf5d15e180d13f01b903cf5.tar.gz
mips.h (MOVE_MAX): Use UNITS_PER_WORD and describe MIPS-specific implementation details.
gcc/
	* config/mips/mips.h (MOVE_MAX): Use UNITS_PER_WORD and describe
	MIPS-specific implementation details.
	(MIPS_MAX_MOVE_BYTES_PER_LOOP_ITER): New macro.
	(MIPS_MAX_MOVE_BYTES_STRAIGHT): Likewise.
	(MOVE_RATIO): Define to MIPS_MAX_MOVE_BYTES_STRAIGHT / UNITS_PER_WORD
	for targets with movmemsi.
	(MOVE_BY_PIECES_P): Define.
	* config/mips/mips.c (MAX_MOVE_REGS, MAX_MOVE_BYTES): Delete.
	(mips_block_move_loop): Add a bytes_per_iter argument.
	(mips_expand_block_move): Use MIPS_MAX_MOVE_BYTES_STRAIGHT.
	Update call to mips_block_move_loop.

From-SVN: r129605
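For concreteness, the new size limits work out as follows. This is a
standalone illustration, not part of the patch; UNITS_PER_WORD is passed
as a parameter here purely so both MIPS word sizes can be shown:

#include <stdio.h>

/* Restatement of the new macros with UNITS_PER_WORD as a parameter
   instead of a target macro.  Purely illustrative.  */
static void
show_limits (int units_per_word)
{
  /* MIPS_MAX_MOVE_BYTES_PER_LOOP_ITER: four words per loop iteration.  */
  int bytes_per_iter = units_per_word * 4;
  /* MIPS_MAX_MOVE_BYTES_STRAIGHT: sized so a loop iterates at least twice.  */
  int bytes_straight = bytes_per_iter * 2;
  /* MOVE_RATIO with movmemsi available (MOVE_MAX == UNITS_PER_WORD).  */
  int move_ratio = bytes_straight / units_per_word;

  printf ("UNITS_PER_WORD=%d: %d bytes/iteration, "
	  "%d bytes straight-line, MOVE_RATIO=%d\n",
	  units_per_word, bytes_per_iter, bytes_straight, move_ratio);
}

int
main (void)
{
  show_limits (4);	/* 32-bit target: 16, 32, 8 */
  show_limits (8);	/* 64-bit target: 32, 64, 8 */
  return 0;
}

Note that MOVE_RATIO comes out as 8 for both word sizes: the straight-line
limit scales with the word size, so the ratio of moves to words is constant.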
Diffstat (limited to 'gcc/config/mips/mips.h')
-rw-r--r--	gcc/config/mips/mips.h	69
1 file changed, 49 insertions(+), 20 deletions(-)
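One piece of arithmetic in the diff below is worth spelling out: when
movmemsi is unavailable, MOVE_RATIO falls back to MIPS_CALL_RATIO / 2
because move_by_pieces_ninsns counts memory-to-memory moves, and each
such move expands to a load/store pair. A purely illustrative sketch of
that accounting (the function and local constant are stand-ins, not GCC
code):

#include <stdbool.h>

/* Rough instruction-count model behind the non-movmemsi MOVE_RATIO:
   each of the N memory-to-memory moves costs a load plus a store,
   so inlining wins while 2 * N < MIPS_CALL_RATIO, i.e. while
   N < MIPS_CALL_RATIO / 2.  Purely illustrative.  */
static bool
inline_beats_memcpy (unsigned int n_moves)
{
  const unsigned int mips_call_ratio = 8;  /* value from mips.h */
  return 2 * n_moves < mips_call_ratio;
}

With MIPS_CALL_RATIO at 8, inline expansion is preferred for copies
needing at most three load/store pairs; anything longer calls memcpy.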
diff --git a/gcc/config/mips/mips.h b/gcc/config/mips/mips.h
index e1794c2830d..b4778a8577d 100644
--- a/gcc/config/mips/mips.h
+++ b/gcc/config/mips/mips.h
@@ -2338,9 +2338,10 @@ typedef struct mips_args {
#define DEFAULT_SIGNED_CHAR 1
#endif
-/* Max number of bytes we can move from memory to memory
- in one reasonably fast instruction. */
-#define MOVE_MAX (TARGET_64BIT ? 8 : 4)
+/* Although LDC1 and SDC1 provide 64-bit moves on 32-bit targets,
+ we generally don't want to use them for copying arbitrary data.
+ A single N-word move is usually the same cost as N single-word moves. */
+#define MOVE_MAX UNITS_PER_WORD
#define MAX_MOVE_MAX 8
/* Define this macro as a C expression which is nonzero if
@@ -2769,6 +2770,18 @@ while (0)
#undef PTRDIFF_TYPE
#define PTRDIFF_TYPE (POINTER_SIZE == 64 ? "long int" : "int")
+/* The maximum number of bytes that can be copied by one iteration of
+ a movmemsi loop; see mips_block_move_loop. */
+#define MIPS_MAX_MOVE_BYTES_PER_LOOP_ITER \
+ (UNITS_PER_WORD * 4)
+
+/* The maximum number of bytes that can be copied by a straight-line
+ implementation of movmemsi; see mips_block_move_straight. We want
+ to make sure that any loop-based implementation will iterate at
+ least twice. */
+#define MIPS_MAX_MOVE_BYTES_STRAIGHT \
+ (MIPS_MAX_MOVE_BYTES_PER_LOOP_ITER * 2)
+
/* The base cost of a memcpy call, for MOVE_RATIO and friends. These
values were determined experimentally by benchmarking with CSiBE.
In theory, the call overhead is higher for TARGET_ABICALLS (especially
@@ -2778,23 +2791,39 @@ while (0)
#define MIPS_CALL_RATIO 8
-/* Define MOVE_RATIO to encourage use of movmemsi when enabled,
- since it should always generate code at least as good as
- move_by_pieces(). But when inline movmemsi pattern is disabled
- (i.e., with -mips16 or -mmemcpy), instead use a value approximating
- the length of a memcpy call sequence, so that move_by_pieces will
- generate inline code if it is shorter than a function call.
- Since move_by_pieces_ninsns() counts memory-to-memory moves, but
- we'll have to generate a load/store pair for each, halve the value of
- MIPS_CALL_RATIO to take that into account.
- The default value for MOVE_RATIO when HAVE_movmemsi is true is 2.
- There is no point to setting it to less than this to try to disable
- move_by_pieces entirely, because that also disables some desirable
- tree-level optimizations, specifically related to optimizing a
- one-byte string copy into a simple move byte operation. */
-
-#define MOVE_RATIO \
- ((TARGET_MIPS16 || TARGET_MEMCPY) ? MIPS_CALL_RATIO / 2 : 2)
+/* Any loop-based implementation of movmemsi will have at least
+ MIPS_MAX_MOVE_BYTES_STRAIGHT / UNITS_PER_WORD memory-to-memory
+ moves, so allow individual copies of fewer elements.
+
+ When movmemsi is not available, use a value approximating
+ the length of a memcpy call sequence, so that move_by_pieces
+ will generate inline code if it is shorter than a function call.
+ Since move_by_pieces_ninsns counts memory-to-memory moves, but
+ we'll have to generate a load/store pair for each, halve the
+ value of MIPS_CALL_RATIO to take that into account. */
+
+#define MOVE_RATIO \
+ (HAVE_movmemsi \
+ ? MIPS_MAX_MOVE_BYTES_STRAIGHT / MOVE_MAX \
+ : MIPS_CALL_RATIO / 2)
+
+/* movmemsi is meant to generate code that is at least as good as
+ move_by_pieces. However, movmemsi effectively uses a by-pieces
+ implementation both for moves smaller than a word and for word-aligned
+ moves of no more than MIPS_MAX_MOVE_BYTES_STRAIGHT bytes. We should
+ allow the tree-level optimisers to do such moves by pieces, as it
+ often exposes other optimization opportunities. We might as well
+ continue to use movmemsi at the rtl level though, as it produces
+ better code when scheduling is disabled (such as at -O). */
+
+#define MOVE_BY_PIECES_P(SIZE, ALIGN) \
+ (HAVE_movmemsi \
+ ? (!currently_expanding_to_rtl \
+ && ((ALIGN) < BITS_PER_WORD \
+ ? (SIZE) < UNITS_PER_WORD \
+ : (SIZE) <= MIPS_MAX_MOVE_BYTES_STRAIGHT)) \
+ : (move_by_pieces_ninsns (SIZE, ALIGN, MOVE_MAX_PIECES + 1) \
+ < (unsigned int) MOVE_RATIO))
/* For CLEAR_RATIO, when optimizing for size, give a better estimate
of the length of a memset call, but use the default otherwise. */
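The shape of the new MOVE_BY_PIECES_P predicate is easier to see outside
macro syntax. The sketch below restates the HAVE_movmemsi arm of its
decision tree as an ordinary function, with the target macros and the
currently_expanding_to_rtl flag turned into parameters; the names are
illustrative stand-ins, not GCC interfaces:

#include <stdbool.h>

/* Illustrative restatement of the movmemsi arm of MOVE_BY_PIECES_P.
   units_per_word, bits_per_word and max_straight stand in for
   UNITS_PER_WORD, BITS_PER_WORD and MIPS_MAX_MOVE_BYTES_STRAIGHT.  */
static bool
mips_move_by_pieces_p (unsigned int size, unsigned int align,
		       bool expanding_to_rtl, unsigned int units_per_word,
		       unsigned int bits_per_word, unsigned int max_straight)
{
  /* At the RTL level, always prefer movmemsi: it produces better code
     when scheduling is disabled, such as at -O.  */
  if (expanding_to_rtl)
    return false;

  /* At the tree level, move by pieces exactly where movmemsi would
     fall back to a by-pieces implementation anyway.  */
  if (align < bits_per_word)
    /* Unaligned case: only sub-word copies.  */
    return size < units_per_word;

  /* Word-aligned case: anything the straight-line expansion handles.  */
  return size <= max_straight;
}

On a 32-bit target this leaves word-aligned copies of up to 32 bytes to
the tree-level by-pieces code, where they can expose further
optimization opportunities; larger copies go through movmemsi when the
tree is expanded to RTL.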