path: root/gcc/stor-layout.c
author     Lorry Tar Creator <lorry-tar-importer@baserock.org>   2013-04-11 09:13:11 +0000
committer  <>                                                    2014-04-23 12:05:38 +0000
commit     6af3fdec2262dd94954acc5e426ef71cbd4521d3 (patch)
tree       9be02de9a80f7935892a2d03741adee44723e65d /gcc/stor-layout.c
parent     19be2b4342ac32e9edc78ce6fed8f61b63ae98d1 (diff)
download   gcc-tarball-6af3fdec2262dd94954acc5e426ef71cbd4521d3.tar.gz
Imported from /home/lorry/working-area/delta_gcc-tarball/gcc-4.7.3.tar.bz2.gcc-4.7.3
Diffstat (limited to 'gcc/stor-layout.c')
-rw-r--r--  gcc/stor-layout.c  574
1 file changed, 383 insertions, 191 deletions
diff --git a/gcc/stor-layout.c b/gcc/stor-layout.c
index a225dbfbd8..944f2d2cef 100644
--- a/gcc/stor-layout.c
+++ b/gcc/stor-layout.c
@@ -1,7 +1,7 @@
/* C-compiler utilities for types and variables storage layout
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1996, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
- Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+ 2011 Free Software Foundation, Inc.
This file is part of GCC.
@@ -65,10 +65,6 @@ static int excess_unit_span (HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT,
#endif
extern void debug_rli (record_layout_info);
-/* SAVE_EXPRs for sizes of types and decls, waiting to be expanded. */
-
-static GTY(()) VEC(tree,gc) *pending_sizes;
-
/* Show that REFERENCE_TYPES are internal and should use address_mode.
Called only by front end. */
@@ -78,48 +74,12 @@ internal_reference_types (void)
reference_types_internal = 1;
}
-/* Get a VEC of all the objects put on the pending sizes list. */
-
-VEC(tree,gc) *
-get_pending_sizes (void)
-{
- VEC(tree,gc) *chain = pending_sizes;
-
- pending_sizes = 0;
- return chain;
-}
-
-/* Add EXPR to the pending sizes list. */
-
-void
-put_pending_size (tree expr)
-{
- /* Strip any simple arithmetic from EXPR to see if it has an underlying
- SAVE_EXPR. */
- expr = skip_simple_arithmetic (expr);
-
- if (TREE_CODE (expr) == SAVE_EXPR)
- VEC_safe_push (tree, gc, pending_sizes, expr);
-}
-
-/* Put a chain of objects into the pending sizes list, which must be
- empty. */
-
-void
-put_pending_sizes (VEC(tree,gc) *chain)
-{
- gcc_assert (!pending_sizes);
- pending_sizes = chain;
-}
-
/* Given a size SIZE that may not be a constant, return a SAVE_EXPR
to serve as the actual size-expression for a type or decl. */
tree
variable_size (tree size)
{
- tree save;
-
/* Obviously. */
if (TREE_CONSTANT (size))
return size;
@@ -129,42 +89,13 @@ variable_size (tree size)
if (CONTAINS_PLACEHOLDER_P (size))
return self_referential_size (size);
- /* If the language-processor is to take responsibility for variable-sized
- items (e.g., languages which have elaboration procedures like Ada),
- just return SIZE unchanged. */
- if (lang_hooks.decls.global_bindings_p () < 0)
- return size;
-
- size = save_expr (size);
-
- /* If an array with a variable number of elements is declared, and
- the elements require destruction, we will emit a cleanup for the
- array. That cleanup is run both on normal exit from the block
- and in the exception-handler for the block. Normally, when code
- is used in both ordinary code and in an exception handler it is
- `unsaved', i.e., all SAVE_EXPRs are recalculated. However, we do
- not wish to do that here; the array-size is the same in both
- places. */
- save = skip_simple_arithmetic (size);
-
- if (cfun && cfun->dont_save_pending_sizes_p)
- /* The front-end doesn't want us to keep a list of the expressions
- that determine sizes for variable size objects. Trust it. */
- return size;
-
+ /* If we are in the global binding level, we can't make a SAVE_EXPR
+ since it may end up being shared across functions, so it is up
+ to the front-end to deal with this case. */
if (lang_hooks.decls.global_bindings_p ())
- {
- if (TREE_CONSTANT (size))
- error ("type size can%'t be explicitly evaluated");
- else
- error ("variable-size type declared outside of any function");
-
- return size_one_node;
- }
-
- put_pending_size (save);
+ return size;
- return size;
+ return save_expr (size);
}
/* An array of functions used for self-referential size computation. */
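[Annotation, not part of the commit: a minimal C sketch of what the simplified variable_size now does, assuming a C front end and a variable-length array. Inside a function the size expression is wrapped in a single SAVE_EXPR; at the global binding level it is returned unchanged and it is up to the front end to deal with it.]

/* Hypothetical example, not from the patch.  The array size "n * 2"
   reaches variable_size (), which now simply returns save_expr (size)
   when not at the global binding level, so the expression is evaluated
   once even if the size is needed in several places.  */
unsigned long
vla_size (int n)
{
  char buf[n * 2];      /* size expression: SAVE_EXPR <n * 2>      */
  return sizeof (buf);  /* reuses the same SAVE_EXPR at run time   */
}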
@@ -246,6 +177,9 @@ copy_self_referential_tree_r (tree *tp, int *walk_subtrees, void *data)
else if (code == SAVE_EXPR)
return error_mark_node;
+ else if (code == STATEMENT_LIST)
+ gcc_unreachable ();
+
return copy_tree_r (tp, walk_subtrees, data);
}
@@ -546,6 +480,34 @@ get_mode_alignment (enum machine_mode mode)
return MIN (BIGGEST_ALIGNMENT, MAX (1, mode_base_align[mode]*BITS_PER_UNIT));
}
+/* Return the natural mode of an array, given that it is SIZE bytes in
+ total and has elements of type ELEM_TYPE. */
+
+static enum machine_mode
+mode_for_array (tree elem_type, tree size)
+{
+ tree elem_size;
+ unsigned HOST_WIDE_INT int_size, int_elem_size;
+ bool limit_p;
+
+ /* One-element arrays get the component type's mode. */
+ elem_size = TYPE_SIZE (elem_type);
+ if (simple_cst_equal (size, elem_size))
+ return TYPE_MODE (elem_type);
+
+ limit_p = true;
+ if (host_integerp (size, 1) && host_integerp (elem_size, 1))
+ {
+ int_size = tree_low_cst (size, 1);
+ int_elem_size = tree_low_cst (elem_size, 1);
+ if (int_elem_size > 0
+ && int_size % int_elem_size == 0
+ && targetm.array_mode_supported_p (TYPE_MODE (elem_type),
+ int_size / int_elem_size))
+ limit_p = false;
+ }
+ return mode_for_size_tree (size, MODE_INT, limit_p);
+}
/* Subroutine of layout_decl: Force alignment required for the data type.
But if the decl itself wants greater alignment, don't override that. */
@@ -660,12 +622,13 @@ layout_decl (tree decl, unsigned int known_align)
/* See if we can use an ordinary integer mode for a bit-field.
Conditions are: a fixed size that is correct for another mode,
occupying a complete byte or bytes on proper boundary,
- and not volatile or not -fstrict-volatile-bitfields. */
+ and not -fstrict-volatile-bitfields. If the latter is set,
+ we unfortunately can't check TREE_THIS_VOLATILE, as a cast
+ may make a volatile object later. */
if (TYPE_SIZE (type) != 0
&& TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
&& GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
- && !(TREE_THIS_VOLATILE (decl)
- && flag_strict_volatile_bitfields > 0))
+ && flag_strict_volatile_bitfields <= 0)
{
enum machine_mode xmode
= mode_for_size_tree (DECL_SIZE (decl), MODE_INT, 1);
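[Annotation, not part of the commit: an illustration of why the new comment says TREE_THIS_VOLATILE cannot be trusted here; the field need not be declared volatile for a later access to be volatile.]

/* Hypothetical example.  The bit-field "status" is not itself
   volatile, but the access goes through a cast-added volatile
   qualifier, so with -fstrict-volatile-bitfields the field must keep
   its declared mode rather than an integer mode picked at layout
   time.  */
struct dev { unsigned int ctrl : 16; unsigned int status : 16; };

unsigned int
read_status (struct dev *d)
{
  return ((volatile struct dev *) d)->status;
}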
@@ -973,7 +936,8 @@ update_alignment_for_field (record_layout_info rli, tree field,
applies if there was an immediately prior, nonzero-size
bitfield. (That's the way it is, experimentally.) */
if ((!is_bitfield && !DECL_PACKED (field))
- || (!integer_zerop (DECL_SIZE (field))
+ || ((DECL_SIZE (field) == NULL_TREE
+ || !integer_zerop (DECL_SIZE (field)))
? !DECL_PACKED (field)
: (rli->prev_field
&& DECL_BIT_FIELD_TYPE (rli->prev_field)
@@ -1177,15 +1141,14 @@ place_field (record_layout_info rli, tree field)
}
/* Does this field automatically have alignment it needs by virtue
- of the fields that precede it and the record's own alignment?
- We already align ms_struct fields, so don't re-align them. */
- if (known_align < desired_align
- && !targetm.ms_bitfield_layout_p (rli->t))
+ of the fields that precede it and the record's own alignment? */
+ if (known_align < desired_align)
{
/* No, we need to skip space before this field.
Bump the cumulative size to multiple of field alignment. */
- if (DECL_SOURCE_LOCATION (field) != BUILTINS_LOCATION)
+ if (!targetm.ms_bitfield_layout_p (rli->t)
+ && DECL_SOURCE_LOCATION (field) != BUILTINS_LOCATION)
warning (OPT_Wpadded, "padding struct to align %q+D", field);
/* If the alignment is still within offset_align, just align
@@ -1207,7 +1170,8 @@ place_field (record_layout_info rli, tree field)
if (! TREE_CONSTANT (rli->offset))
rli->offset_align = desired_align;
-
+ if (targetm.ms_bitfield_layout_p (rli->t))
+ rli->prev_field = NULL;
}
/* Handle compatibility with PCC. Note that if the record has any
@@ -1758,6 +1722,221 @@ finalize_type_size (tree type)
}
}
+/* Return a new underlying object for a bitfield started with FIELD. */
+
+static tree
+start_bitfield_representative (tree field)
+{
+ tree repr = make_node (FIELD_DECL);
+ DECL_FIELD_OFFSET (repr) = DECL_FIELD_OFFSET (field);
+ /* Force the representative to begin at a BITS_PER_UNIT aligned
+ boundary - C++ may use tail-padding of a base object to
+ continue packing bits so the bitfield region does not start
+ at bit zero (see g++.dg/abi/bitfield5.C for example).
+ Unallocated bits may happen for other reasons as well,
+ for example Ada which allows explicit bit-granular structure layout. */
+ DECL_FIELD_BIT_OFFSET (repr)
+ = size_binop (BIT_AND_EXPR,
+ DECL_FIELD_BIT_OFFSET (field),
+ bitsize_int (~(BITS_PER_UNIT - 1)));
+ SET_DECL_OFFSET_ALIGN (repr, DECL_OFFSET_ALIGN (field));
+ DECL_SIZE (repr) = DECL_SIZE (field);
+ DECL_SIZE_UNIT (repr) = DECL_SIZE_UNIT (field);
+ DECL_PACKED (repr) = DECL_PACKED (field);
+ DECL_CONTEXT (repr) = DECL_CONTEXT (field);
+ return repr;
+}
+
+/* Finish up a bitfield group that was started by creating the underlying
+ object REPR with the last field in the bitfield group FIELD. */
+
+static void
+finish_bitfield_representative (tree repr, tree field)
+{
+ unsigned HOST_WIDE_INT bitsize, maxbitsize;
+ enum machine_mode mode;
+ tree nextf, size;
+
+ size = size_diffop (DECL_FIELD_OFFSET (field),
+ DECL_FIELD_OFFSET (repr));
+ gcc_assert (host_integerp (size, 1));
+ bitsize = (tree_low_cst (size, 1) * BITS_PER_UNIT
+ + tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
+ - tree_low_cst (DECL_FIELD_BIT_OFFSET (repr), 1)
+ + tree_low_cst (DECL_SIZE (field), 1));
+
+ /* Round up bitsize to multiples of BITS_PER_UNIT. */
+ bitsize = (bitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1);
+
+ /* Now nothing tells us how to pad out bitsize ... */
+ nextf = DECL_CHAIN (field);
+ while (nextf && TREE_CODE (nextf) != FIELD_DECL)
+ nextf = DECL_CHAIN (nextf);
+ if (nextf)
+ {
+ tree maxsize;
+ /* If there was an error, the field may be not laid out
+ correctly. Don't bother to do anything. */
+ if (TREE_TYPE (nextf) == error_mark_node)
+ return;
+ maxsize = size_diffop (DECL_FIELD_OFFSET (nextf),
+ DECL_FIELD_OFFSET (repr));
+ if (host_integerp (maxsize, 1))
+ {
+ maxbitsize = (tree_low_cst (maxsize, 1) * BITS_PER_UNIT
+ + tree_low_cst (DECL_FIELD_BIT_OFFSET (nextf), 1)
+ - tree_low_cst (DECL_FIELD_BIT_OFFSET (repr), 1));
+ /* If the group ends within a bitfield nextf does not need to be
+ aligned to BITS_PER_UNIT. Thus round up. */
+ maxbitsize = (maxbitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1);
+ }
+ else
+ maxbitsize = bitsize;
+ }
+ else
+ {
+ /* ??? If you consider that tail-padding of this struct might be
+ re-used when deriving from it we cannot really do the following
+ and thus need to set maxsize to bitsize? Also we cannot
+ generally rely on maxsize to fold to an integer constant, so
+ use bitsize as fallback for this case. */
+ tree maxsize = size_diffop (TYPE_SIZE_UNIT (DECL_CONTEXT (field)),
+ DECL_FIELD_OFFSET (repr));
+ if (host_integerp (maxsize, 1))
+ maxbitsize = (tree_low_cst (maxsize, 1) * BITS_PER_UNIT
+ - tree_low_cst (DECL_FIELD_BIT_OFFSET (repr), 1));
+ else
+ maxbitsize = bitsize;
+ }
+
+ /* Only if we don't artificially break up the representative in
+ the middle of a large bitfield with different possibly
+ overlapping representatives. And all representatives start
+ at byte offset. */
+ gcc_assert (maxbitsize % BITS_PER_UNIT == 0);
+
+ /* Find the smallest nice mode to use. */
+ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ if (GET_MODE_BITSIZE (mode) >= bitsize)
+ break;
+ if (mode != VOIDmode
+ && (GET_MODE_BITSIZE (mode) > maxbitsize
+ || GET_MODE_BITSIZE (mode) > MAX_FIXED_MODE_SIZE))
+ mode = VOIDmode;
+
+ if (mode == VOIDmode)
+ {
+ /* We really want a BLKmode representative only as a last resort,
+ considering the member b in
+ struct { int a : 7; int b : 17; int c; } __attribute__((packed));
+ Otherwise we simply want to split the representative up
+ allowing for overlaps within the bitfield region as required for
+ struct { int a : 7; int b : 7;
+ int c : 10; int d; } __attribute__((packed));
+ [0, 15] HImode for a and b, [8, 23] HImode for c. */
+ DECL_SIZE (repr) = bitsize_int (bitsize);
+ DECL_SIZE_UNIT (repr) = size_int (bitsize / BITS_PER_UNIT);
+ DECL_MODE (repr) = BLKmode;
+ TREE_TYPE (repr) = build_array_type_nelts (unsigned_char_type_node,
+ bitsize / BITS_PER_UNIT);
+ }
+ else
+ {
+ unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (mode);
+ DECL_SIZE (repr) = bitsize_int (modesize);
+ DECL_SIZE_UNIT (repr) = size_int (modesize / BITS_PER_UNIT);
+ DECL_MODE (repr) = mode;
+ TREE_TYPE (repr) = lang_hooks.types.type_for_mode (mode, 1);
+ }
+
+ /* Remember whether the bitfield group is at the end of the
+ structure or not. */
+ DECL_CHAIN (repr) = nextf;
+}
+
+/* Compute and set FIELD_DECLs for the underlying objects we should
+ use for bitfield access for the structure laid out with RLI. */
+
+static void
+finish_bitfield_layout (record_layout_info rli)
+{
+ tree field, prev;
+ tree repr = NULL_TREE;
+
+ /* Unions would be special, for the ease of type-punning optimizations
+ we could use the underlying type as hint for the representative
+ if the bitfield would fit and the representative would not exceed
+ the union in size. */
+ if (TREE_CODE (rli->t) != RECORD_TYPE)
+ return;
+
+ for (prev = NULL_TREE, field = TYPE_FIELDS (rli->t);
+ field; field = DECL_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ /* In the C++ memory model, consecutive bit fields in a structure are
+ considered one memory location and updating a memory location
+ may not store into adjacent memory locations. */
+ if (!repr
+ && DECL_BIT_FIELD_TYPE (field))
+ {
+ /* Start new representative. */
+ repr = start_bitfield_representative (field);
+ }
+ else if (repr
+ && ! DECL_BIT_FIELD_TYPE (field))
+ {
+ /* Finish off new representative. */
+ finish_bitfield_representative (repr, prev);
+ repr = NULL_TREE;
+ }
+ else if (DECL_BIT_FIELD_TYPE (field))
+ {
+ gcc_assert (repr != NULL_TREE);
+
+ /* Zero-size bitfields finish off a representative and
+ do not have a representative themselves. This is
+ required by the C++ memory model. */
+ if (integer_zerop (DECL_SIZE (field)))
+ {
+ finish_bitfield_representative (repr, prev);
+ repr = NULL_TREE;
+ }
+
+ /* We assume that either DECL_FIELD_OFFSET of the representative
+ and each bitfield member is a constant or they are equal.
+ This is because we need to be able to compute the bit-offset
+ of each field relative to the representative in get_bit_range
+ during RTL expansion.
+ If these constraints are not met, simply force a new
+ representative to be generated. That will at most
+ generate worse code but still maintain correctness with
+ respect to the C++ memory model. */
+ else if (!((host_integerp (DECL_FIELD_OFFSET (repr), 1)
+ && host_integerp (DECL_FIELD_OFFSET (field), 1))
+ || operand_equal_p (DECL_FIELD_OFFSET (repr),
+ DECL_FIELD_OFFSET (field), 0)))
+ {
+ finish_bitfield_representative (repr, prev);
+ repr = start_bitfield_representative (field);
+ }
+ }
+ else
+ continue;
+
+ if (repr)
+ DECL_BIT_FIELD_REPRESENTATIVE (field) = repr;
+
+ prev = field;
+ }
+
+ if (repr)
+ finish_bitfield_representative (repr, prev);
+}
+
/* Do all of the work required to layout the type indicated by RLI,
once the fields have been laid out. This function will call `free'
for RLI, unless FREE_P is false. Passing a value other than false
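[Annotation, not part of the commit: a sketch of what the bitfield-representative grouping above means at the source level, following the C++ memory model rationale given in the comments.]

/* Hypothetical example.  The consecutive bit-fields a and b form one
   memory location; with this change they share one representative
   FIELD_DECL (a 16-bit, HImode-sized one on a typical 8-bit-byte
   target), while the non-bit-field c lies outside the group, so a
   store to b may rewrite bits of a but must never touch c.  */
struct s
{
  int a : 7;
  int b : 7;
  int c;
};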
@@ -1778,6 +1957,9 @@ finish_record_layout (record_layout_info rli, int free_p)
/* Perform any last tweaks to the TYPE_SIZE, etc. */
finalize_type_size (rli->t);
+ /* Compute bitfield representatives. */
+ finish_bitfield_layout (rli);
+
/* Propagate TYPE_PACKED to variants. With C++ templates,
handle_packed_attribute is too early to do this. */
for (variant = TYPE_NEXT_VARIANT (rli->t); variant;
@@ -1922,13 +2104,21 @@ layout_type (tree type)
TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
TYPE_SIZE_UNIT (type) = int_const_binop (MULT_EXPR,
TYPE_SIZE_UNIT (innertype),
- size_int (nunits), 0);
+ size_int (nunits));
TYPE_SIZE (type) = int_const_binop (MULT_EXPR, TYPE_SIZE (innertype),
- bitsize_int (nunits), 0);
-
- /* Always naturally align vectors. This prevents ABI changes
- depending on whether or not native vector modes are supported. */
- TYPE_ALIGN (type) = tree_low_cst (TYPE_SIZE (type), 0);
+ bitsize_int (nunits));
+
+ /* For vector types, we do not default to the mode's alignment.
+ Instead, query a target hook, defaulting to natural alignment.
+ This prevents ABI changes depending on whether or not native
+ vector modes are supported. */
+ TYPE_ALIGN (type) = targetm.vector_alignment (type);
+
+ /* However, if the underlying mode requires a bigger alignment than
+ what the target hook provides, we cannot use the mode. For now,
+ simply reject that case. */
+ gcc_assert (TYPE_ALIGN (type)
+ >= GET_MODE_ALIGNMENT (TYPE_MODE (type)));
break;
}
@@ -1996,15 +2186,15 @@ layout_type (tree type)
if (integer_zerop (element_size))
length = size_zero_node;
- /* The initial subtraction should happen in the original type so
- that (possible) negative values are handled appropriately. */
+ /* The computation should happen in the original signedness so
+ that (possible) negative values are handled appropriately
+ when determining overflow. */
else
length
- = size_binop (PLUS_EXPR, size_one_node,
- fold_convert (sizetype,
- fold_build2 (MINUS_EXPR,
- TREE_TYPE (lb),
- ub, lb)));
+ = fold_convert (sizetype,
+ size_binop (PLUS_EXPR,
+ build_int_cst (TREE_TYPE (lb), 1),
+ size_binop (MINUS_EXPR, ub, lb)));
TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size,
fold_convert (bitsizetype,
@@ -2039,14 +2229,8 @@ layout_type (tree type)
&& (TYPE_MODE (TREE_TYPE (type)) != BLKmode
|| TYPE_NO_FORCE_BLK (TREE_TYPE (type))))
{
- /* One-element arrays get the component type's mode. */
- if (simple_cst_equal (TYPE_SIZE (type),
- TYPE_SIZE (TREE_TYPE (type))))
- SET_TYPE_MODE (type, TYPE_MODE (TREE_TYPE (type)));
- else
- SET_TYPE_MODE (type, mode_for_size_tree (TYPE_SIZE (type),
- MODE_INT, 1));
-
+ SET_TYPE_MODE (type, mode_for_array (TREE_TYPE (type),
+ TYPE_SIZE (type)));
if (TYPE_MODE (type) != BLKmode
&& STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT
&& TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type)))
@@ -2131,12 +2315,12 @@ vector_type_mode (const_tree t)
gcc_assert (TREE_CODE (t) == VECTOR_TYPE);
- mode = t->type.mode;
+ mode = t->type_common.mode;
if (VECTOR_MODE_P (mode)
&& (!targetm.vector_mode_supported_p (mode)
|| !have_regs_of_mode[mode]))
{
- enum machine_mode innermode = TREE_TYPE (t)->type.mode;
+ enum machine_mode innermode = TREE_TYPE (t)->type_common.mode;
/* For integers, try mapping it to a same-sized scalar mode. */
if (GET_MODE_CLASS (innermode) == MODE_INT)
@@ -2232,93 +2416,76 @@ make_accum_type (int precision, int unsignedp, int satp)
return type;
}
-/* Initialize sizetype and bitsizetype to a reasonable and temporary
- value to enable integer types to be created. */
+/* Initialize sizetypes so layout_type can use them. */
void
initialize_sizetypes (void)
{
- tree t = make_node (INTEGER_TYPE);
- int precision = GET_MODE_BITSIZE (SImode);
-
- SET_TYPE_MODE (t, SImode);
- TYPE_ALIGN (t) = GET_MODE_ALIGNMENT (SImode);
- TYPE_IS_SIZETYPE (t) = 1;
- TYPE_UNSIGNED (t) = 1;
- TYPE_SIZE (t) = build_int_cst (t, precision);
- TYPE_SIZE_UNIT (t) = build_int_cst (t, GET_MODE_SIZE (SImode));
- TYPE_PRECISION (t) = precision;
-
- set_min_and_max_values_for_integral_type (t, precision, true);
-
- sizetype = t;
- bitsizetype = build_distinct_type_copy (t);
-}
-
-/* Make sizetype a version of TYPE, and initialize *sizetype accordingly.
- We do this by overwriting the stub sizetype and bitsizetype nodes created
- by initialize_sizetypes. This makes sure that (a) anything stubby about
- them no longer exists and (b) any INTEGER_CSTs created with such a type,
- remain valid. */
-
-void
-set_sizetype (tree type)
-{
- tree t, max;
- int oprecision = TYPE_PRECISION (type);
- /* The *bitsizetype types use a precision that avoids overflows when
- calculating signed sizes / offsets in bits. However, when
- cross-compiling from a 32 bit to a 64 bit host, we are limited to 64 bit
- precision. */
- int precision
- = MIN (oprecision + BITS_PER_UNIT_LOG + 1, MAX_FIXED_MODE_SIZE);
- precision
- = GET_MODE_PRECISION (smallest_mode_for_size (precision, MODE_INT));
- if (precision > HOST_BITS_PER_WIDE_INT * 2)
- precision = HOST_BITS_PER_WIDE_INT * 2;
-
- /* sizetype must be an unsigned type. */
- gcc_assert (TYPE_UNSIGNED (type));
-
- t = build_distinct_type_copy (type);
- /* We want to use sizetype's cache, as we will be replacing that type. */
- TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (sizetype);
- TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (sizetype);
- TREE_TYPE (TYPE_CACHED_VALUES (t)) = type;
- TYPE_UID (t) = TYPE_UID (sizetype);
- TYPE_IS_SIZETYPE (t) = 1;
-
- /* Replace our original stub sizetype. */
- memcpy (sizetype, t, tree_size (sizetype));
- TYPE_MAIN_VARIANT (sizetype) = sizetype;
- TYPE_CANONICAL (sizetype) = sizetype;
-
+ int precision, bprecision;
+
+ /* Get sizetypes precision from the SIZE_TYPE target macro. */
+ if (strcmp (SIZE_TYPE, "unsigned int") == 0)
+ precision = INT_TYPE_SIZE;
+ else if (strcmp (SIZE_TYPE, "long unsigned int") == 0)
+ precision = LONG_TYPE_SIZE;
+ else if (strcmp (SIZE_TYPE, "long long unsigned int") == 0)
+ precision = LONG_LONG_TYPE_SIZE;
+ else if (strcmp (SIZE_TYPE, "short unsigned int") == 0)
+ precision = SHORT_TYPE_SIZE;
+ else
+ gcc_unreachable ();
+
+ bprecision
+ = MIN (precision + BITS_PER_UNIT_LOG + 1, MAX_FIXED_MODE_SIZE);
+ bprecision
+ = GET_MODE_PRECISION (smallest_mode_for_size (bprecision, MODE_INT));
+ if (bprecision > HOST_BITS_PER_WIDE_INT * 2)
+ bprecision = HOST_BITS_PER_WIDE_INT * 2;
+
+ /* Create stubs for sizetype and bitsizetype so we can create constants. */
+ sizetype = make_node (INTEGER_TYPE);
+ TYPE_NAME (sizetype) = get_identifier ("sizetype");
+ TYPE_PRECISION (sizetype) = precision;
+ TYPE_UNSIGNED (sizetype) = 1;
+ TYPE_IS_SIZETYPE (sizetype) = 1;
+ bitsizetype = make_node (INTEGER_TYPE);
+ TYPE_NAME (bitsizetype) = get_identifier ("bitsizetype");
+ TYPE_PRECISION (bitsizetype) = bprecision;
+ TYPE_UNSIGNED (bitsizetype) = 1;
+ TYPE_IS_SIZETYPE (bitsizetype) = 1;
+
+ /* Now layout both types manually. */
+ SET_TYPE_MODE (sizetype, smallest_mode_for_size (precision, MODE_INT));
+ TYPE_ALIGN (sizetype) = GET_MODE_ALIGNMENT (TYPE_MODE (sizetype));
+ TYPE_SIZE (sizetype) = bitsize_int (precision);
+ TYPE_SIZE_UNIT (sizetype) = size_int (GET_MODE_SIZE (TYPE_MODE (sizetype)));
+ set_min_and_max_values_for_integral_type (sizetype, precision,
+ /*is_unsigned=*/true);
/* sizetype is unsigned but we need to fix TYPE_MAX_VALUE so that it is
sign-extended in a way consistent with force_fit_type. */
- max = TYPE_MAX_VALUE (sizetype);
TYPE_MAX_VALUE (sizetype)
- = double_int_to_tree (sizetype, tree_to_double_int (max));
-
- t = make_node (INTEGER_TYPE);
- TYPE_NAME (t) = get_identifier ("bit_size_type");
- /* We want to use bitsizetype's cache, as we will be replacing that type. */
- TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (bitsizetype);
- TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (bitsizetype);
- TYPE_PRECISION (t) = precision;
- TYPE_UID (t) = TYPE_UID (bitsizetype);
- TYPE_IS_SIZETYPE (t) = 1;
-
- /* Replace our original stub bitsizetype. */
- memcpy (bitsizetype, t, tree_size (bitsizetype));
- TYPE_MAIN_VARIANT (bitsizetype) = bitsizetype;
- TYPE_CANONICAL (bitsizetype) = bitsizetype;
-
- fixup_unsigned_type (bitsizetype);
+ = double_int_to_tree (sizetype,
+ tree_to_double_int (TYPE_MAX_VALUE (sizetype)));
+
+ SET_TYPE_MODE (bitsizetype, smallest_mode_for_size (bprecision, MODE_INT));
+ TYPE_ALIGN (bitsizetype) = GET_MODE_ALIGNMENT (TYPE_MODE (bitsizetype));
+ TYPE_SIZE (bitsizetype) = bitsize_int (bprecision);
+ TYPE_SIZE_UNIT (bitsizetype)
+ = size_int (GET_MODE_SIZE (TYPE_MODE (bitsizetype)));
+ set_min_and_max_values_for_integral_type (bitsizetype, bprecision,
+ /*is_unsigned=*/true);
+ /* bitsizetype is unsigned but we need to fix TYPE_MAX_VALUE so that it is
+ sign-extended in a way consistent with force_fit_type. */
+ TYPE_MAX_VALUE (bitsizetype)
+ = double_int_to_tree (bitsizetype,
+ tree_to_double_int (TYPE_MAX_VALUE (bitsizetype)));
/* Create the signed variants of *sizetype. */
- ssizetype = make_signed_type (oprecision);
+ ssizetype = make_signed_type (TYPE_PRECISION (sizetype));
+ TYPE_NAME (ssizetype) = get_identifier ("ssizetype");
TYPE_IS_SIZETYPE (ssizetype) = 1;
- sbitsizetype = make_signed_type (precision);
+ sbitsizetype = make_signed_type (TYPE_PRECISION (bitsizetype));
+ TYPE_NAME (sbitsizetype) = get_identifier ("sbitsizetype");
TYPE_IS_SIZETYPE (sbitsizetype) = 1;
}
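[Annotation, not part of the commit: a worked example under assumed configuration values, taking an LP64 target and 64-bit host where SIZE_TYPE is "long unsigned int" and LONG_TYPE_SIZE is 64.]

  precision  = 64
  bprecision = MIN (64 + BITS_PER_UNIT_LOG + 1, MAX_FIXED_MODE_SIZE)
             = MIN (68, 128) = 68
             -> widened to the precision of the smallest MODE_INT mode
                holding 68 bits (TImode, 128 bits)
             -> capped at HOST_BITS_PER_WIDE_INT * 2 = 128

  so sizetype becomes a 64-bit unsigned type and bitsizetype a 128-bit
  one, wide enough that byte sizes converted to bits cannot overflow.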
@@ -2427,6 +2594,13 @@ fixup_unsigned_type (tree type)
/* Find the best machine mode to use when referencing a bit field of length
BITSIZE bits starting at BITPOS.
+ BITREGION_START is the bit position of the first bit in this
+ sequence of bit fields. BITREGION_END is the last bit in this
+ sequence. If these two fields are non-zero, we should restrict the
+ memory access to a maximum sized chunk of
+ BITREGION_END - BITREGION_START + 1. Otherwise, we are allowed to touch
+ any adjacent non bit-fields.
+
The underlying object is known to be aligned to a boundary of ALIGN bits.
If LARGEST_MODE is not VOIDmode, it means that we should not use a mode
larger than LARGEST_MODE (usually SImode).
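[Annotation, not part of the commit: a small illustration of how the new BITREGION_START/BITREGION_END arguments restrict the access, assuming the representatives computed by finish_bitfield_layout above.]

/* Hypothetical example.  Bit-fields a and b share a 16-bit region
   (BITREGION_START = 0, BITREGION_END = 15); the non-bit-field c
   starts at byte offset 2.  When expanding a store to b,
   get_best_mode may now return HImode, but must reject SImode: a
   32-bit access would spill past bit 15 and also write c.  */
struct s
{
  unsigned short a : 4;
  unsigned short b : 9;
  unsigned char  c;
};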
@@ -2444,18 +2618,29 @@ fixup_unsigned_type (tree type)
decide which of the above modes should be used. */
enum machine_mode
-get_best_mode (int bitsize, int bitpos, unsigned int align,
+get_best_mode (int bitsize, int bitpos,
+ unsigned HOST_WIDE_INT bitregion_start,
+ unsigned HOST_WIDE_INT bitregion_end,
+ unsigned int align,
enum machine_mode largest_mode, int volatilep)
{
enum machine_mode mode;
unsigned int unit = 0;
+ unsigned HOST_WIDE_INT maxbits;
+
+ /* If unset, no restriction. */
+ if (!bitregion_end)
+ maxbits = MAX_FIXED_MODE_SIZE;
+ else
+ maxbits = bitregion_end - bitregion_start + 1;
/* Find the narrowest integer mode that contains the bit field. */
for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
mode = GET_MODE_WIDER_MODE (mode))
{
unit = GET_MODE_BITSIZE (mode);
- if ((bitpos % unit) + bitsize <= unit)
+ if (unit == GET_MODE_PRECISION (mode)
+ && (bitpos % unit) + bitsize <= unit)
break;
}
@@ -2468,7 +2653,10 @@ get_best_mode (int bitsize, int bitpos, unsigned int align,
(Though at least one Unix compiler ignores this problem:
that on the Sequent 386 machine. */
|| MIN (unit, BIGGEST_ALIGNMENT) > align
- || (largest_mode != VOIDmode && unit > GET_MODE_BITSIZE (largest_mode)))
+ || (largest_mode != VOIDmode && unit > GET_MODE_BITSIZE (largest_mode))
+ || unit > maxbits
+ || (bitregion_end
+ && bitpos - (bitpos % unit) + unit > bitregion_end + 1))
return VOIDmode;
if ((SLOW_BYTE_ACCESS && ! volatilep)
@@ -2480,11 +2668,15 @@ get_best_mode (int bitsize, int bitpos, unsigned int align,
tmode = GET_MODE_WIDER_MODE (tmode))
{
unit = GET_MODE_BITSIZE (tmode);
- if (bitpos / unit == (bitpos + bitsize - 1) / unit
+ if (unit == GET_MODE_PRECISION (tmode)
+ && bitpos / unit == (bitpos + bitsize - 1) / unit
&& unit <= BITS_PER_WORD
&& unit <= MIN (align, BIGGEST_ALIGNMENT)
+ && unit <= maxbits
&& (largest_mode == VOIDmode
- || unit <= GET_MODE_BITSIZE (largest_mode)))
+ || unit <= GET_MODE_BITSIZE (largest_mode))
+ && (bitregion_end == 0
+ || bitpos - (bitpos % unit) + unit <= bitregion_end + 1))
wide_mode = tmode;
}