annotate gcc/stor-layout.c @ 143:76e1cf5455ef

add cbc_gc test

author    Shinji KONO <kono@ie.u-ryukyu.ac.jp>
date      Sun, 23 Dec 2018 19:24:05 +0900
parents   84e7813d76e9
children  1830386684a0
/* C-compiler utilities for types and variables storage layout
   Copyright (C) 1987-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "function.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "tm_p.h"
#include "stringpool.h"
#include "regs.h"
#include "emit-rtl.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "varasm.h"
#include "print-tree.h"
#include "langhooks.h"
#include "tree-inline.h"
#include "dumpfile.h"
#include "gimplify.h"
#include "attribs.h"
#include "debug.h"

/* Data type for the expressions representing sizes of data types.
   It is the first integer type laid out.  */
tree sizetype_tab[(int) stk_type_kind_last];

/* If nonzero, this is an upper limit on alignment of structure fields.
   The value is measured in bits.  */
unsigned int maximum_field_alignment = TARGET_DEFAULT_PACK_STRUCT * BITS_PER_UNIT;

static tree self_referential_size (tree);
static void finalize_record_size (record_layout_info);
static void finalize_type_size (tree);
static void place_union_field (record_layout_info, tree);
static int excess_unit_span (HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT,
                             HOST_WIDE_INT, tree);
extern void debug_rli (record_layout_info);

/* Given a size SIZE that may not be a constant, return a SAVE_EXPR
   to serve as the actual size-expression for a type or decl.  */

tree
variable_size (tree size)
{
  /* Obviously.  */
  if (TREE_CONSTANT (size))
    return size;

  /* If the size is self-referential, we can't make a SAVE_EXPR (see
     save_expr for the rationale).  But we can do something else.  */
  if (CONTAINS_PLACEHOLDER_P (size))
    return self_referential_size (size);

  /* If we are in the global binding level, we can't make a SAVE_EXPR
     since it may end up being shared across functions, so it is up
     to the front-end to deal with this case.  */
  if (lang_hooks.decls.global_bindings_p ())
    return size;

  return save_expr (size);
}
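
/* Illustrative sketch (not part of the original file): a front end
   laying out a variable-length array type can route the computed
   size through variable_size so it is evaluated exactly once:

     tree size = size_binop (MULT_EXPR, nelts, TYPE_SIZE_UNIT (elt_type));
     TYPE_SIZE_UNIT (array_type) = variable_size (size);

   where NELTS is a hypothetical tree holding the array bound.  The
   SAVE_EXPR returned here is what lets every later use of the type's
   size share a single evaluation of the product.  */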

/* An array of functions used for self-referential size computation.  */
static GTY(()) vec<tree, va_gc> *size_functions;

/* Return true if T is a self-referential component reference.  */

static bool
self_referential_component_ref_p (tree t)
{
  if (TREE_CODE (t) != COMPONENT_REF)
    return false;

  while (REFERENCE_CLASS_P (t))
    t = TREE_OPERAND (t, 0);

  return (TREE_CODE (t) == PLACEHOLDER_EXPR);
}

/* Similar to copy_tree_r but do not copy component references involving
   PLACEHOLDER_EXPRs.  These nodes are spotted in find_placeholder_in_expr
   and substituted in substitute_in_expr.  */

static tree
copy_self_referential_tree_r (tree *tp, int *walk_subtrees, void *data)
{
  enum tree_code code = TREE_CODE (*tp);

  /* Stop at types, decls, constants like copy_tree_r.  */
  if (TREE_CODE_CLASS (code) == tcc_type
      || TREE_CODE_CLASS (code) == tcc_declaration
      || TREE_CODE_CLASS (code) == tcc_constant)
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* This is the pattern built in ada/make_aligning_type.  */
  else if (code == ADDR_EXPR
           && TREE_CODE (TREE_OPERAND (*tp, 0)) == PLACEHOLDER_EXPR)
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* Default case: the component reference.  */
  else if (self_referential_component_ref_p (*tp))
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* We're not supposed to have them in self-referential size trees
     because we wouldn't properly control when they are evaluated.
     However, not creating superfluous SAVE_EXPRs requires accurate
     tracking of readonly-ness all the way down to here, which we
     cannot always guarantee in practice.  So punt in this case.  */
  else if (code == SAVE_EXPR)
    return error_mark_node;

  else if (code == STATEMENT_LIST)
    gcc_unreachable ();

  return copy_tree_r (tp, walk_subtrees, data);
}

/* Given a SIZE expression that is self-referential, return an equivalent
   expression to serve as the actual size expression for a type.  */

static tree
self_referential_size (tree size)
{
  static unsigned HOST_WIDE_INT fnno = 0;
  vec<tree> self_refs = vNULL;
  tree param_type_list = NULL, param_decl_list = NULL;
  tree t, ref, return_type, fntype, fnname, fndecl;
  unsigned int i;
  char buf[128];
  vec<tree, va_gc> *args = NULL;

  /* Do not factor out simple operations.  */
  t = skip_simple_constant_arithmetic (size);
  if (TREE_CODE (t) == CALL_EXPR || self_referential_component_ref_p (t))
    return size;

  /* Collect the list of self-references in the expression.  */
  find_placeholder_in_expr (size, &self_refs);
  gcc_assert (self_refs.length () > 0);

  /* Obtain a private copy of the expression.  */
  t = size;
  if (walk_tree (&t, copy_self_referential_tree_r, NULL, NULL) != NULL_TREE)
    return size;
  size = t;

  /* Build the parameter and argument lists in parallel; also
     substitute the former for the latter in the expression.  */
  vec_alloc (args, self_refs.length ());
  FOR_EACH_VEC_ELT (self_refs, i, ref)
    {
      tree subst, param_name, param_type, param_decl;

      if (DECL_P (ref))
        {
          /* We shouldn't have true variables here.  */
          gcc_assert (TREE_READONLY (ref));
          subst = ref;
        }
      /* This is the pattern built in ada/make_aligning_type.  */
      else if (TREE_CODE (ref) == ADDR_EXPR)
        subst = ref;
      /* Default case: the component reference.  */
      else
        subst = TREE_OPERAND (ref, 1);

      sprintf (buf, "p%d", i);
      param_name = get_identifier (buf);
      param_type = TREE_TYPE (ref);
      param_decl
        = build_decl (input_location, PARM_DECL, param_name, param_type);
      DECL_ARG_TYPE (param_decl) = param_type;
      DECL_ARTIFICIAL (param_decl) = 1;
      TREE_READONLY (param_decl) = 1;

      size = substitute_in_expr (size, subst, param_decl);

      param_type_list = tree_cons (NULL_TREE, param_type, param_type_list);
      param_decl_list = chainon (param_decl, param_decl_list);
      args->quick_push (ref);
    }

  self_refs.release ();

  /* Append 'void' to indicate that the number of parameters is fixed.  */
  param_type_list = tree_cons (NULL_TREE, void_type_node, param_type_list);

  /* The 3 lists have been created in reverse order.  */
  param_type_list = nreverse (param_type_list);
  param_decl_list = nreverse (param_decl_list);

  /* Build the function type.  */
  return_type = TREE_TYPE (size);
  fntype = build_function_type (return_type, param_type_list);

  /* Build the function declaration.  */
  sprintf (buf, "SZ" HOST_WIDE_INT_PRINT_UNSIGNED, fnno++);
  fnname = get_file_function_name (buf);
  fndecl = build_decl (input_location, FUNCTION_DECL, fnname, fntype);
  for (t = param_decl_list; t; t = DECL_CHAIN (t))
    DECL_CONTEXT (t) = fndecl;
  DECL_ARGUMENTS (fndecl) = param_decl_list;
  DECL_RESULT (fndecl)
    = build_decl (input_location, RESULT_DECL, 0, return_type);
  DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;

  /* The function has been created by the compiler and we don't
     want to emit debug info for it.  */
  DECL_ARTIFICIAL (fndecl) = 1;
  DECL_IGNORED_P (fndecl) = 1;

  /* It is supposed to be "const" and never throw.  */
  TREE_READONLY (fndecl) = 1;
  TREE_NOTHROW (fndecl) = 1;

  /* We want it to be inlined when this is deemed profitable, as
     well as discarded if every call has been integrated.  */
  DECL_DECLARED_INLINE_P (fndecl) = 1;

  /* It is made up of a unique return statement.  */
  DECL_INITIAL (fndecl) = make_node (BLOCK);
  BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;
  t = build2 (MODIFY_EXPR, return_type, DECL_RESULT (fndecl), size);
  DECL_SAVED_TREE (fndecl) = build1 (RETURN_EXPR, void_type_node, t);
  TREE_STATIC (fndecl) = 1;

  /* Put it onto the list of size functions.  */
  vec_safe_push (size_functions, fndecl);

  /* Replace the original expression with a call to the size function.  */
  return build_call_expr_loc_vec (UNKNOWN_LOCATION, fndecl, args);
}
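
/* Sketch of the transformation (illustration, not in the original
   source): for a record whose size refers to one of its own fields
   through a PLACEHOLDER_EXPR, say

     size = <PLACEHOLDER>.n * 8

   the code above conceptually emits a fresh "size function"

     static inline sizetype SZ0 (ntype p0) { return p0 * 8; }

   and returns the call SZ0 (<PLACEHOLDER>.n) in place of the original
   expression.  The names SZ0 and p0 follow the sprintf patterns used
   above; the field N and its type NTYPE are hypothetical.  */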

/* Take, queue and compile all the size functions.  It is essential that
   the size functions be gimplified at the very end of the compilation
   in order to guarantee transparent handling of self-referential sizes.
   Otherwise the GENERIC inliner would not be able to inline them back
   at each of their call sites, thus creating artificial non-constant
   size expressions which would trigger nasty problems later on.  */

void
finalize_size_functions (void)
{
  unsigned int i;
  tree fndecl;

  for (i = 0; size_functions && size_functions->iterate (i, &fndecl); i++)
    {
      allocate_struct_function (fndecl, false);
      set_cfun (NULL);
      dump_function (TDI_original, fndecl);

      /* As these functions are used to describe the layout of variable-length
         structures, debug info generation needs their implementation.  */
      debug_hooks->size_function (fndecl);
      gimplify_function_tree (fndecl);
      cgraph_node::finalize_function (fndecl, false);
    }

  vec_free (size_functions);
}

/* Return a machine mode of class MCLASS with SIZE bits of precision,
   if one exists.  The mode may have padding bits as well as the SIZE
   value bits.  If LIMIT is nonzero, disregard modes wider than
   MAX_FIXED_MODE_SIZE.  */

opt_machine_mode
mode_for_size (poly_uint64 size, enum mode_class mclass, int limit)
{
  machine_mode mode;
  int i;

  if (limit && maybe_gt (size, (unsigned int) MAX_FIXED_MODE_SIZE))
    return opt_machine_mode ();

  /* Get the first mode which has this size, in the specified class.  */
  FOR_EACH_MODE_IN_CLASS (mode, mclass)
    if (known_eq (GET_MODE_PRECISION (mode), size))
      return mode;

  if (mclass == MODE_INT || mclass == MODE_PARTIAL_INT)
    for (i = 0; i < NUM_INT_N_ENTS; i ++)
      if (known_eq (int_n_data[i].bitsize, size)
          && int_n_enabled_p[i])
        return int_n_data[i].m;

  return opt_machine_mode ();
}
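
/* For instance, on a typical target providing 8-bit QImode through
   64-bit DImode (an assumption; mode tables are target-specific):

     mode_for_size (32, MODE_INT, 0)  selects SImode
     mode_for_size (24, MODE_INT, 0)  finds nothing (no 24-bit mode)

   and with LIMIT nonzero any request above MAX_FIXED_MODE_SIZE fails
   even when a wider mode exists.  */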

/* Similar, except passed a tree node.  */

opt_machine_mode
mode_for_size_tree (const_tree size, enum mode_class mclass, int limit)
{
  unsigned HOST_WIDE_INT uhwi;
  unsigned int ui;

  if (!tree_fits_uhwi_p (size))
    return opt_machine_mode ();
  uhwi = tree_to_uhwi (size);
  ui = uhwi;
  if (uhwi != ui)
    return opt_machine_mode ();
  return mode_for_size (ui, mclass, limit);
}

/* Return the narrowest mode of class MCLASS that contains at least
   SIZE bits.  Abort if no such mode exists.  */

machine_mode
smallest_mode_for_size (poly_uint64 size, enum mode_class mclass)
{
  machine_mode mode = VOIDmode;
  int i;

  /* Get the first mode which has at least this size, in the
     specified class.  */
  FOR_EACH_MODE_IN_CLASS (mode, mclass)
    if (known_ge (GET_MODE_PRECISION (mode), size))
      break;

  gcc_assert (mode != VOIDmode);

  if (mclass == MODE_INT || mclass == MODE_PARTIAL_INT)
    for (i = 0; i < NUM_INT_N_ENTS; i ++)
      if (known_ge (int_n_data[i].bitsize, size)
          && known_lt (int_n_data[i].bitsize, GET_MODE_PRECISION (mode))
          && int_n_enabled_p[i])
        mode = int_n_data[i].m;

  return mode;
}
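
/* Unlike mode_for_size, this rounds up: on the hypothetical target
   sketched above, smallest_mode_for_size (17, MODE_INT) yields SImode,
   the narrowest integer mode with at least 17 bits.  */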

/* Return an integer mode of exactly the same size as MODE, if one exists.  */

opt_scalar_int_mode
int_mode_for_mode (machine_mode mode)
{
  switch (GET_MODE_CLASS (mode))
    {
    case MODE_INT:
    case MODE_PARTIAL_INT:
      return as_a <scalar_int_mode> (mode);

    case MODE_COMPLEX_INT:
    case MODE_COMPLEX_FLOAT:
    case MODE_FLOAT:
    case MODE_DECIMAL_FLOAT:
    case MODE_FRACT:
    case MODE_ACCUM:
    case MODE_UFRACT:
    case MODE_UACCUM:
    case MODE_VECTOR_BOOL:
    case MODE_VECTOR_INT:
    case MODE_VECTOR_FLOAT:
    case MODE_VECTOR_FRACT:
    case MODE_VECTOR_ACCUM:
    case MODE_VECTOR_UFRACT:
    case MODE_VECTOR_UACCUM:
      return int_mode_for_size (GET_MODE_BITSIZE (mode), 0);

    case MODE_RANDOM:
      if (mode == BLKmode)
        return opt_scalar_int_mode ();

      /* fall through */

    case MODE_CC:
    default:
      gcc_unreachable ();
    }
}
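
/* A typical use is type punning: where SFmode is 32 bits wide (true
   on most targets, though not guaranteed), int_mode_for_mode (SFmode)
   returns SImode, letting a float value travel through an integer
   register of the same width.  */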

/* Find a mode that can be used for efficient bitwise operations on MODE,
   if one exists.  */

opt_machine_mode
bitwise_mode_for_mode (machine_mode mode)
{
  /* Quick exit if we already have a suitable mode.  */
  scalar_int_mode int_mode;
  if (is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_BITSIZE (int_mode) <= MAX_FIXED_MODE_SIZE)
    return int_mode;

  /* Reuse the sanity checks from int_mode_for_mode.  */
  gcc_checking_assert ((int_mode_for_mode (mode), true));

  poly_int64 bitsize = GET_MODE_BITSIZE (mode);

  /* Try to replace complex modes with complex modes.  In general we
     expect both components to be processed independently, so we only
     care whether there is a register for the inner mode.  */
  if (COMPLEX_MODE_P (mode))
    {
      machine_mode trial = mode;
      if ((GET_MODE_CLASS (trial) == MODE_COMPLEX_INT
           || mode_for_size (bitsize, MODE_COMPLEX_INT, false).exists (&trial))
          && have_regs_of_mode[GET_MODE_INNER (trial)])
        return trial;
    }

  /* Try to replace vector modes with vector modes.  Also try using vector
     modes if an integer mode would be too big.  */
  if (VECTOR_MODE_P (mode)
      || maybe_gt (bitsize, MAX_FIXED_MODE_SIZE))
    {
      machine_mode trial = mode;
      if ((GET_MODE_CLASS (trial) == MODE_VECTOR_INT
           || mode_for_size (bitsize, MODE_VECTOR_INT, 0).exists (&trial))
          && have_regs_of_mode[trial]
          && targetm.vector_mode_supported_p (trial))
        return trial;
    }

  /* Otherwise fall back on integers while honoring MAX_FIXED_MODE_SIZE.  */
  return mode_for_size (bitsize, MODE_INT, true);
}

/* Find a type that can be used for efficient bitwise operations on MODE.
   Return null if no such mode exists.  */

tree
bitwise_type_for_mode (machine_mode mode)
{
  if (!bitwise_mode_for_mode (mode).exists (&mode))
    return NULL_TREE;

  unsigned int inner_size = GET_MODE_UNIT_BITSIZE (mode);
  tree inner_type = build_nonstandard_integer_type (inner_size, true);

  if (VECTOR_MODE_P (mode))
    return build_vector_type_for_mode (inner_type, mode);

  if (COMPLEX_MODE_P (mode))
    return build_complex_type (inner_type);

  gcc_checking_assert (GET_MODE_INNER (mode) == mode);
  return inner_type;
}
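
/* For example, on a target with V4SF vectors (assuming 32-bit floats
   and a supported V4SImode), bitwise_mode_for_mode (V4SFmode) would
   pick V4SImode, and bitwise_type_for_mode would wrap it as a vector
   of four 32-bit unsigned integers, suitable for masking and shifting
   the float payload bit by bit.  */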

/* Find a mode that is suitable for representing a vector with NUNITS
   elements of mode INNERMODE, if one exists.  The returned mode can be
   either an integer mode or a vector mode.  */

opt_machine_mode
mode_for_vector (scalar_mode innermode, poly_uint64 nunits)
{
  machine_mode mode;

  /* First, look for a supported vector type.  */
  if (SCALAR_FLOAT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_FLOAT;
  else if (SCALAR_FRACT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_FRACT;
  else if (SCALAR_UFRACT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_UFRACT;
  else if (SCALAR_ACCUM_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_ACCUM;
  else if (SCALAR_UACCUM_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_UACCUM;
  else
    mode = MIN_MODE_VECTOR_INT;

  /* Do not check vector_mode_supported_p here.  We'll do that
     later in vector_type_mode.  */
  FOR_EACH_MODE_FROM (mode, mode)
    if (known_eq (GET_MODE_NUNITS (mode), nunits)
        && GET_MODE_INNER (mode) == innermode)
      return mode;

  /* For integers, try mapping it to a same-sized scalar mode.  */
  if (GET_MODE_CLASS (innermode) == MODE_INT)
    {
      poly_uint64 nbits = nunits * GET_MODE_BITSIZE (innermode);
      if (int_mode_for_size (nbits, 0).exists (&mode)
          && have_regs_of_mode[mode])
        return mode;
    }

  return opt_machine_mode ();
}
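
/* E.g. mode_for_vector (SImode, 4) returns V4SImode when the target
   defines such a mode; failing that, a 128-bit integer mode backed by
   registers would be used, and otherwise the search fails.  Which of
   these cases applies is entirely target-dependent.  */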

/* Return the mode for a vector that has NUNITS integer elements of
   INT_BITS bits each, if such a mode exists.  The mode can be either
   an integer mode or a vector mode.  */

opt_machine_mode
mode_for_int_vector (unsigned int int_bits, poly_uint64 nunits)
{
  scalar_int_mode int_mode;
  machine_mode vec_mode;
  if (int_mode_for_size (int_bits, 0).exists (&int_mode)
      && mode_for_vector (int_mode, nunits).exists (&vec_mode))
    return vec_mode;
  return opt_machine_mode ();
}

/* Return the alignment of MODE.  This will be bounded by 1 and
   BIGGEST_ALIGNMENT.  */

unsigned int
get_mode_alignment (machine_mode mode)
{
  return MIN (BIGGEST_ALIGNMENT, MAX (1, mode_base_align[mode]*BITS_PER_UNIT));
}

/* Return the natural mode of an array, given that it is SIZE bytes in
   total and has elements of type ELEM_TYPE.  */

static machine_mode
mode_for_array (tree elem_type, tree size)
{
  tree elem_size;
  poly_uint64 int_size, int_elem_size;
  unsigned HOST_WIDE_INT num_elems;
  bool limit_p;

  /* One-element arrays get the component type's mode.  */
  elem_size = TYPE_SIZE (elem_type);
  if (simple_cst_equal (size, elem_size))
    return TYPE_MODE (elem_type);

  limit_p = true;
  if (poly_int_tree_p (size, &int_size)
      && poly_int_tree_p (elem_size, &int_elem_size)
      && maybe_ne (int_elem_size, 0U)
      && constant_multiple_p (int_size, int_elem_size, &num_elems))
    {
      machine_mode elem_mode = TYPE_MODE (elem_type);
      machine_mode mode;
      if (targetm.array_mode (elem_mode, num_elems).exists (&mode))
        return mode;
      if (targetm.array_mode_supported_p (elem_mode, num_elems))
        limit_p = false;
    }
  return mode_for_size_tree (size, MODE_INT, limit_p).else_blk ();
}
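
/* Sketching the common case: an array of four 8-bit chars (32 bits
   total) typically maps to SImode on targets with such a mode, while
   an array too large for any integer mode falls back to BLKmode via
   else_blk ().  */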

/* Subroutine of layout_decl: Force alignment required for the data type.
   But if the decl itself wants greater alignment, don't override that.  */

static inline void
do_type_align (tree type, tree decl)
{
  if (TYPE_ALIGN (type) > DECL_ALIGN (decl))
    {
      SET_DECL_ALIGN (decl, TYPE_ALIGN (type));
      if (TREE_CODE (decl) == FIELD_DECL)
        DECL_USER_ALIGN (decl) = TYPE_USER_ALIGN (type);
    }
  if (TYPE_WARN_IF_NOT_ALIGN (type) > DECL_WARN_IF_NOT_ALIGN (decl))
    SET_DECL_WARN_IF_NOT_ALIGN (decl, TYPE_WARN_IF_NOT_ALIGN (type));
}

/* Set the size, mode and alignment of a ..._DECL node.
   TYPE_DECL does need this for C++.
   Note that LABEL_DECL and CONST_DECL nodes do not need this,
   and FUNCTION_DECL nodes have them set up in a special (and simple) way.
   Don't call layout_decl for them.

   KNOWN_ALIGN is the amount of alignment we can assume this
   decl has with no special effort.  It is relevant only for FIELD_DECLs
   and depends on the previous fields.
   All that matters about KNOWN_ALIGN is which powers of 2 divide it.
   If KNOWN_ALIGN is 0, it means, "as much alignment as you like":
   the record will be aligned to suit.  */

void
layout_decl (tree decl, unsigned int known_align)
{
  tree type = TREE_TYPE (decl);
  enum tree_code code = TREE_CODE (decl);
  rtx rtl = NULL_RTX;
  location_t loc = DECL_SOURCE_LOCATION (decl);

  if (code == CONST_DECL)
    return;

  gcc_assert (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL
              || code == TYPE_DECL || code == FIELD_DECL);

  rtl = DECL_RTL_IF_SET (decl);

  if (type == error_mark_node)
    type = void_type_node;

  /* Usually the size and mode come from the data type without change;
     however, the front-end may set the explicit width of the field, so its
     size may not be the same as the size of its type.  This happens with
     bitfields, of course (an `int' bitfield may be only 2 bits, say), but it
     also happens with other fields.  For example, the C++ front-end creates
     zero-sized fields corresponding to empty base classes, and depends on
     layout_type setting DECL_FIELD_BITPOS correctly for the field.  Set the
     size in bytes from the size in bits.  If we have already set the mode,
     don't set it again since we can be called twice for FIELD_DECLs.  */

  DECL_UNSIGNED (decl) = TYPE_UNSIGNED (type);
  if (DECL_MODE (decl) == VOIDmode)
    SET_DECL_MODE (decl, TYPE_MODE (type));

  if (DECL_SIZE (decl) == 0)
    {
      DECL_SIZE (decl) = TYPE_SIZE (type);
      DECL_SIZE_UNIT (decl) = TYPE_SIZE_UNIT (type);
    }
  else if (DECL_SIZE_UNIT (decl) == 0)
    DECL_SIZE_UNIT (decl)
      = fold_convert_loc (loc, sizetype,
                          size_binop_loc (loc, CEIL_DIV_EXPR, DECL_SIZE (decl),
                                          bitsize_unit_node));

  if (code != FIELD_DECL)
    /* For non-fields, update the alignment from the type.  */
    do_type_align (type, decl);
  else
    /* For fields, it's a bit more complicated...  */
    {
      bool old_user_align = DECL_USER_ALIGN (decl);
      bool zero_bitfield = false;
      bool packed_p = DECL_PACKED (decl);
      unsigned int mfa;

      if (DECL_BIT_FIELD (decl))
        {
          DECL_BIT_FIELD_TYPE (decl) = type;

          /* A zero-length bit-field affects the alignment of the next
             field.  In essence such bit-fields are not influenced by
             any packing due to #pragma pack or attribute packed.  */
          if (integer_zerop (DECL_SIZE (decl))
              && ! targetm.ms_bitfield_layout_p (DECL_FIELD_CONTEXT (decl)))
            {
              zero_bitfield = true;
              packed_p = false;
              if (PCC_BITFIELD_TYPE_MATTERS)
                do_type_align (type, decl);
              else
                {
#ifdef EMPTY_FIELD_BOUNDARY
                  if (EMPTY_FIELD_BOUNDARY > DECL_ALIGN (decl))
                    {
                      SET_DECL_ALIGN (decl, EMPTY_FIELD_BOUNDARY);
                      DECL_USER_ALIGN (decl) = 0;
                    }
#endif
                }
            }

          /* See if we can use an ordinary integer mode for a bit-field.
             Conditions are: a fixed size that is correct for another mode,
             occupying a complete byte or bytes on proper boundary.  */
          if (TYPE_SIZE (type) != 0
              && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
              && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT)
            {
              machine_mode xmode;
              if (mode_for_size_tree (DECL_SIZE (decl),
                                      MODE_INT, 1).exists (&xmode))
                {
                  unsigned int xalign = GET_MODE_ALIGNMENT (xmode);
                  if (!(xalign > BITS_PER_UNIT && DECL_PACKED (decl))
                      && (known_align == 0 || known_align >= xalign))
                    {
                      SET_DECL_ALIGN (decl, MAX (xalign, DECL_ALIGN (decl)));
                      SET_DECL_MODE (decl, xmode);
                      DECL_BIT_FIELD (decl) = 0;
                    }
                }
            }

          /* Turn off DECL_BIT_FIELD if we won't need it set.  */
          if (TYPE_MODE (type) == BLKmode && DECL_MODE (decl) == BLKmode
              && known_align >= TYPE_ALIGN (type)
              && DECL_ALIGN (decl) >= TYPE_ALIGN (type))
            DECL_BIT_FIELD (decl) = 0;
        }
      else if (packed_p && DECL_USER_ALIGN (decl))
        /* Don't touch DECL_ALIGN.  For other packed fields, go ahead and
           round up; we'll reduce it again below.  We want packing to
           supersede USER_ALIGN inherited from the type, but defer to
           alignment explicitly specified on the field decl.  */;
      else
        do_type_align (type, decl);

      /* If the field is packed and not explicitly aligned, give it the
         minimum alignment.  Note that do_type_align may set
         DECL_USER_ALIGN, so we need to check old_user_align instead.  */
      if (packed_p
          && !old_user_align)
        SET_DECL_ALIGN (decl, MIN (DECL_ALIGN (decl), BITS_PER_UNIT));

      if (! packed_p && ! DECL_USER_ALIGN (decl))
        {
          /* Some targets (e.g. i386, VMS) limit struct field alignment
             to a lower boundary than alignment of variables unless
             it was overridden by attribute aligned.  */
#ifdef BIGGEST_FIELD_ALIGNMENT
          SET_DECL_ALIGN (decl, MIN (DECL_ALIGN (decl),
                                     (unsigned) BIGGEST_FIELD_ALIGNMENT));
#endif
#ifdef ADJUST_FIELD_ALIGN
          SET_DECL_ALIGN (decl, ADJUST_FIELD_ALIGN (decl, TREE_TYPE (decl),
                                                    DECL_ALIGN (decl)));
#endif
        }

      if (zero_bitfield)
        mfa = initial_max_fld_align * BITS_PER_UNIT;
      else
        mfa = maximum_field_alignment;
      /* Should this be controlled by DECL_USER_ALIGN, too?  */
      if (mfa != 0)
        SET_DECL_ALIGN (decl, MIN (DECL_ALIGN (decl), mfa));
    }

  /* Evaluate nonconstant size only once, either now or as soon as safe.  */
  if (DECL_SIZE (decl) != 0 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
    DECL_SIZE (decl) = variable_size (DECL_SIZE (decl));
  if (DECL_SIZE_UNIT (decl) != 0
      && TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST)
    DECL_SIZE_UNIT (decl) = variable_size (DECL_SIZE_UNIT (decl));

  /* If requested, warn about definitions of large data objects.  */
  if ((code == PARM_DECL || (code == VAR_DECL && !DECL_NONLOCAL_FRAME (decl)))
      && !DECL_EXTERNAL (decl))
    {
      tree size = DECL_SIZE_UNIT (decl);

      if (size != 0 && TREE_CODE (size) == INTEGER_CST)
        {
          /* -Wlarger-than= argument of HOST_WIDE_INT_MAX is treated
             as if PTRDIFF_MAX had been specified, with the value
             being that on the target rather than the host.  */
          unsigned HOST_WIDE_INT max_size = warn_larger_than_size;
          if (max_size == HOST_WIDE_INT_MAX)
            max_size = tree_to_shwi (TYPE_MAX_VALUE (ptrdiff_type_node));

          if (compare_tree_int (size, max_size) > 0)
            warning (OPT_Wlarger_than_, "size of %q+D %E bytes exceeds "
                     "maximum object size %wu",
                     decl, size, max_size);
        }
    }

  /* If the RTL was already set, update its mode and mem attributes.  */
  if (rtl)
    {
      PUT_MODE (rtl, DECL_MODE (decl));
      SET_DECL_RTL (decl, 0);
      if (MEM_P (rtl))
        set_mem_attributes (rtl, decl, 1);
      SET_DECL_RTL (decl, rtl);
    }
}
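
/* One concrete consequence of the bit-field promotion above: given

     struct s { unsigned f : 8; };

   with F starting on a byte boundary, mode_for_size_tree finds an
   8-bit integer mode, so F gets that mode and DECL_BIT_FIELD is
   cleared -- it can then be accessed as an ordinary byte.  (Assumes
   a typical target with 8-bit BITS_PER_UNIT and QImode.)  */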

/* Given a VAR_DECL, PARM_DECL, RESULT_DECL, or FIELD_DECL, clears the
   results of a previous call to layout_decl and calls it again.  */

void
relayout_decl (tree decl)
{
  DECL_SIZE (decl) = DECL_SIZE_UNIT (decl) = 0;
  SET_DECL_MODE (decl, VOIDmode);
  if (!DECL_USER_ALIGN (decl))
    SET_DECL_ALIGN (decl, 0);
  if (DECL_RTL_SET_P (decl))
    SET_DECL_RTL (decl, 0);

  layout_decl (decl, 0);
}

/* Begin laying out type T, which may be a RECORD_TYPE, UNION_TYPE, or
   QUAL_UNION_TYPE.  Return a pointer to a struct record_layout_info which
   is to be passed to all other layout functions for this record.  It is the
   responsibility of the caller to call `free' for the storage returned.
   Note that garbage collection is not permitted until we finish laying
   out the record.  */

record_layout_info
start_record_layout (tree t)
{
  record_layout_info rli = XNEW (struct record_layout_info_s);

  rli->t = t;

  /* If the type has a minimum specified alignment (via an attribute
     declaration, for example) use it -- otherwise, start with a
     one-byte alignment.  */
  rli->record_align = MAX (BITS_PER_UNIT, TYPE_ALIGN (t));
  rli->unpacked_align = rli->record_align;
  rli->offset_align = MAX (rli->record_align, BIGGEST_ALIGNMENT);

#ifdef STRUCTURE_SIZE_BOUNDARY
  /* Packed structures don't need to have minimum size.  */
  if (! TYPE_PACKED (t))
    {
      unsigned tmp;

      /* #pragma pack overrides STRUCTURE_SIZE_BOUNDARY.  */
      tmp = (unsigned) STRUCTURE_SIZE_BOUNDARY;
      if (maximum_field_alignment != 0)
        tmp = MIN (tmp, maximum_field_alignment);
      rli->record_align = MAX (rli->record_align, tmp);
    }
#endif

  rli->offset = size_zero_node;
  rli->bitpos = bitsize_zero_node;
  rli->prev_field = 0;
  rli->pending_statics = 0;
  rli->packed_maybe_necessary = 0;
  rli->remaining_in_alignment = 0;

  return rli;
}
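
/* The usual driver sequence (a sketch of what layout_type does for
   records elsewhere in this file) is:

     record_layout_info rli = start_record_layout (t);
     for (tree field = TYPE_FIELDS (t); field; field = DECL_CHAIN (field))
       place_field (rli, field);
     finish_record_layout (rli, true);

   with place_field and finish_record_layout defined later on.  */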

/* Fold sizetype value X to bitsizetype, given that X represents a type
   size or offset.  */

static tree
bits_from_bytes (tree x)
{
  if (POLY_INT_CST_P (x))
    /* The runtime calculation isn't allowed to overflow sizetype;
       increasing the runtime values must always increase the size
       or offset of the object.  This means that the object imposes
       a maximum value on the runtime parameters, but we don't record
       what that is.  */
    return build_poly_int_cst
      (bitsizetype,
       poly_wide_int::from (poly_int_cst_value (x),
                            TYPE_PRECISION (bitsizetype),
                            TYPE_SIGN (TREE_TYPE (x))));
  x = fold_convert (bitsizetype, x);
  gcc_checking_assert (x);
  return x;
}
872 | |
111 | 873 /* Return the combined bit position for the byte offset OFFSET and the |
874 bit position BITPOS. | |
875 | |
876 These functions operate on byte and bit positions present in FIELD_DECLs | |
877 and assume that these expressions result in no (intermediate) overflow. | |
878 This assumption is necessary to fold the expressions as much as possible, | |
879 so as to avoid creating artificially variable-sized types in languages | |
880 supporting variable-sized types like Ada. */ | |
0 | 881 |
882 tree | |
883 bit_from_pos (tree offset, tree bitpos) | |
884 { | |
885 return size_binop (PLUS_EXPR, bitpos, | |
131 | 886 size_binop (MULT_EXPR, bits_from_bytes (offset), |
0 | 887 bitsize_unit_node)); |
888 } | |
889 | |
111 | 890 /* Return the combined truncated byte position for the byte offset OFFSET and |
891 the bit position BITPOS. */ | |
892 | |
0 | 893 tree |
894 byte_from_pos (tree offset, tree bitpos) | |
895 { | |
111 | 896 tree bytepos; |
897 if (TREE_CODE (bitpos) == MULT_EXPR | |
898 && tree_int_cst_equal (TREE_OPERAND (bitpos, 1), bitsize_unit_node)) | |
899 bytepos = TREE_OPERAND (bitpos, 0); | |
900 else | |
901 bytepos = size_binop (TRUNC_DIV_EXPR, bitpos, bitsize_unit_node); | |
902 return size_binop (PLUS_EXPR, offset, fold_convert (sizetype, bytepos)); | |
0 | 903 } |
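
As a concrete check of the two conversions above, with plain host integers standing in for the sizetype/bitsizetype trees and assuming BITS_PER_UNIT == 8:

/* bit_from_pos (offset = 2, bitpos = 5)  -> 2 * 8 + 5 = 21 bits
   byte_from_pos (offset = 2, bitpos = 5) -> 2 + 5 / 8  = byte 2
   The bit-within-byte remainder is deliberately truncated away.  */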
904 | |
111 | 905 /* Split the bit position POS into a byte offset *POFFSET and a bit |
906 position *PBITPOS with the byte offset aligned to OFF_ALIGN bits. */ | |
907 | |
0 | 908 void |
909 pos_from_bit (tree *poffset, tree *pbitpos, unsigned int off_align, | |
910 tree pos) | |
911 { | |
111 | 912 tree toff_align = bitsize_int (off_align); |
913 if (TREE_CODE (pos) == MULT_EXPR | |
914 && tree_int_cst_equal (TREE_OPERAND (pos, 1), toff_align)) | |
915 { | |
916 *poffset = size_binop (MULT_EXPR, | |
917 fold_convert (sizetype, TREE_OPERAND (pos, 0)), | |
918 size_int (off_align / BITS_PER_UNIT)); | |
919 *pbitpos = bitsize_zero_node; | |
920 } | |
921 else | |
922 { | |
923 *poffset = size_binop (MULT_EXPR, | |
924 fold_convert (sizetype, | |
925 size_binop (FLOOR_DIV_EXPR, pos, | |
926 toff_align)), | |
927 size_int (off_align / BITS_PER_UNIT)); | |
928 *pbitpos = size_binop (FLOOR_MOD_EXPR, pos, toff_align); | |
929 } | |
0 | 930 } |
931 | |
932 /* Given a pointer to bit and byte offsets and an offset alignment, | |
933 normalize the offsets so they are within the alignment. */ | |
934 | |
935 void | |
936 normalize_offset (tree *poffset, tree *pbitpos, unsigned int off_align) | |
937 { | |
938 /* If the bit position is now larger than it should be, adjust it | |
939 downwards. */ | |
940 if (compare_tree_int (*pbitpos, off_align) >= 0) | |
941 { | |
111 | 942 tree offset, bitpos; |
943 pos_from_bit (&offset, &bitpos, off_align, *pbitpos); | |
944 *poffset = size_binop (PLUS_EXPR, *poffset, offset); | |
945 *pbitpos = bitpos; | |
0 | 946 } |
947 } | |
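
A worked example of the split, assuming BITS_PER_UNIT == 8: for a bit position of 75 and off_align == 32, pos_from_bit computes

/* *poffset = (75 / 32) * (32 / 8) = 2 * 4 = 8 bytes
   *pbitpos = 75 % 32              = 11 bits
   so normalize_offset advances the byte offset by 8 and leaves the
   bit position at 11, below the offset alignment as required.  */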
948 | |
949 /* Print debugging information about the information in RLI. */ | |
950 | |
67 | 951 DEBUG_FUNCTION void |
0 | 952 debug_rli (record_layout_info rli) |
953 { | |
954 print_node_brief (stderr, "type", rli->t, 0); | |
955 print_node_brief (stderr, "\noffset", rli->offset, 0); | |
956 print_node_brief (stderr, " bitpos", rli->bitpos, 0); | |
957 | |
958 fprintf (stderr, "\naligns: rec = %u, unpack = %u, off = %u\n", | |
959 rli->record_align, rli->unpacked_align, | |
960 rli->offset_align); | |
961 | |
962 /* The ms_struct code is the only one that uses this. */ | |
963 if (targetm.ms_bitfield_layout_p (rli->t)) | |
964 fprintf (stderr, "remaining in alignment = %u\n", rli->remaining_in_alignment); | |
965 | |
966 if (rli->packed_maybe_necessary) | |
967 fprintf (stderr, "packed may be necessary\n"); | |
968 | |
111 | 969 if (!vec_safe_is_empty (rli->pending_statics)) |
0 | 970 { |
971 fprintf (stderr, "pending statics:\n"); | |
131 | 972 debug (rli->pending_statics); |
0 | 973 } |
974 } | |
975 | |
976 /* Given an RLI with a possibly-incremented BITPOS, adjust OFFSET and | |
977 BITPOS if necessary to keep BITPOS below OFFSET_ALIGN. */ | |
978 | |
979 void | |
980 normalize_rli (record_layout_info rli) | |
981 { | |
982 normalize_offset (&rli->offset, &rli->bitpos, rli->offset_align); | |
983 } | |
984 | |
985 /* Returns the size in bytes allocated so far. */ | |
986 | |
987 tree | |
988 rli_size_unit_so_far (record_layout_info rli) | |
989 { | |
990 return byte_from_pos (rli->offset, rli->bitpos); | |
991 } | |
992 | |
993 /* Returns the size in bits allocated so far. */ | |
994 | |
995 tree | |
996 rli_size_so_far (record_layout_info rli) | |
997 { | |
998 return bit_from_pos (rli->offset, rli->bitpos); | |
999 } | |
1000 | |
1001 /* FIELD is about to be added to RLI->T. The alignment (in bits) of | |
1002 the next available location within the record is given by KNOWN_ALIGN. | |
1003 Update the variable alignment fields in RLI, and return the alignment | |
1004 to give the FIELD. */ | |
1005 | |
1006 unsigned int | |
1007 update_alignment_for_field (record_layout_info rli, tree field, | |
1008 unsigned int known_align) | |
1009 { | |
1010 /* The alignment required for FIELD. */ | |
1011 unsigned int desired_align; | |
1012 /* The type of this field. */ | |
1013 tree type = TREE_TYPE (field); | |
1014 /* True if the field was explicitly aligned by the user. */ | |
1015 bool user_align; | |
1016 bool is_bitfield; | |
1017 | |
1018 /* Do not attempt to align an ERROR_MARK node. */ | |
1019 if (TREE_CODE (type) == ERROR_MARK) | |
1020 return 0; | |
1021 | |
1022 /* Lay out the field so we know what alignment it needs. */ | |
1023 layout_decl (field, known_align); | |
1024 desired_align = DECL_ALIGN (field); | |
1025 user_align = DECL_USER_ALIGN (field); | |
1026 | |
1027 is_bitfield = (type != error_mark_node | |
1028 && DECL_BIT_FIELD_TYPE (field) | |
1029 && ! integer_zerop (TYPE_SIZE (type))); | |
1030 | |
1031 /* Record must have at least as much alignment as any field. | |
1032 Otherwise, the alignment of the field within the record is | |
1033 meaningless. */ | |
1034 if (targetm.ms_bitfield_layout_p (rli->t)) | |
1035 { | |
1036 /* Here, the alignment of the underlying type of a bitfield can | |
1037 affect the alignment of a record; even a zero-sized field | |
1038 can do this. The alignment should be to the alignment of | |
1039 the type, except that for zero-size bitfields this only | |
1040 applies if there was an immediately prior, nonzero-size | |
1041 bitfield. (That's the way it is, experimentally.) */ | |
131 | 1042 if (!is_bitfield |
111 | 1043 || ((DECL_SIZE (field) == NULL_TREE |
1044 || !integer_zerop (DECL_SIZE (field))) | |
0 | 1045 ? !DECL_PACKED (field) |
1046 : (rli->prev_field | |
1047 && DECL_BIT_FIELD_TYPE (rli->prev_field) | |
1048 && ! integer_zerop (DECL_SIZE (rli->prev_field))))) | |
1049 { | |
1050 unsigned int type_align = TYPE_ALIGN (type); | |
131 | 1051 if (!is_bitfield && DECL_PACKED (field)) |
1052 type_align = desired_align; | |
1053 else | |
1054 type_align = MAX (type_align, desired_align); | |
0 | 1055 if (maximum_field_alignment != 0) |
1056 type_align = MIN (type_align, maximum_field_alignment); | |
1057 rli->record_align = MAX (rli->record_align, type_align); | |
1058 rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type)); | |
1059 } | |
1060 } | |
1061 else if (is_bitfield && PCC_BITFIELD_TYPE_MATTERS) | |
1062 { | |
1063 /* Named bit-fields cause the entire structure to have the | |
1064 alignment implied by their type. Some targets also apply the same | |
1065 rules to unnamed bitfields. */ | |
1066 if (DECL_NAME (field) != 0 | |
1067 || targetm.align_anon_bitfield ()) | |
1068 { | |
1069 unsigned int type_align = TYPE_ALIGN (type); | |
1070 | |
1071 #ifdef ADJUST_FIELD_ALIGN | |
1072 if (! TYPE_USER_ALIGN (type)) | |
111 | 1073 type_align = ADJUST_FIELD_ALIGN (field, type, type_align); |
0 | 1074 #endif |
1075 | |
1076 /* Targets might choose to handle unnamed and hence possibly | |
1077 zero-width bitfields. Those are not influenced by #pragmas | |
1078 or packed attributes. */ | |
1079 if (integer_zerop (DECL_SIZE (field))) | |
1080 { | |
1081 if (initial_max_fld_align) | |
1082 type_align = MIN (type_align, | |
1083 initial_max_fld_align * BITS_PER_UNIT); | |
1084 } | |
1085 else if (maximum_field_alignment != 0) | |
1086 type_align = MIN (type_align, maximum_field_alignment); | |
1087 else if (DECL_PACKED (field)) | |
1088 type_align = MIN (type_align, BITS_PER_UNIT); | |
1089 | |
1090 /* The alignment of the record is increased to the maximum | |
1091 of the current alignment, the alignment indicated on the | |
1092 field (i.e., the alignment specified by an __aligned__ | |
1093 attribute), and the alignment indicated by the type of | |
1094 the field. */ | |
1095 rli->record_align = MAX (rli->record_align, desired_align); | |
1096 rli->record_align = MAX (rli->record_align, type_align); | |
1097 | |
1098 if (warn_packed) | |
1099 rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type)); | |
1100 user_align |= TYPE_USER_ALIGN (type); | |
1101 } | |
1102 } | |
1103 else | |
1104 { | |
1105 rli->record_align = MAX (rli->record_align, desired_align); | |
1106 rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type)); | |
1107 } | |
1108 | |
1109 TYPE_USER_ALIGN (rli->t) |= user_align; | |
1110 | |
1111 return desired_align; | |
1112 } | |
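
For example, on a typical LP64 target (an assumption; exact numbers are target-specific) the record alignment grows as fields are placed:

/* struct s { char c; double d; };
   Placing `c' leaves rli->record_align at 8 bits; placing `d'
   raises it to TYPE_ALIGN (double) == 64, so the finished struct
   is 8-byte aligned and 16 bytes long after tail padding.  Under
   #pragma pack(4) (maximum_field_alignment == 32) the MIN above
   caps it at 4-byte alignment instead.  */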
1113 | |
111 | 1114 /* Issue a warning if the record alignment, RECORD_ALIGN, is less than |
1115 the field alignment of FIELD or FIELD isn't aligned. */ | |
1116 | |
1117 static void | |
1118 handle_warn_if_not_align (tree field, unsigned int record_align) | |
1119 { | |
1120 tree type = TREE_TYPE (field); | |
1121 | |
1122 if (type == error_mark_node) | |
1123 return; | |
1124 | |
1125 unsigned int warn_if_not_align = 0; | |
1126 | |
1127 int opt_w = 0; | |
1128 | |
1129 if (warn_if_not_aligned) | |
1130 { | |
1131 warn_if_not_align = DECL_WARN_IF_NOT_ALIGN (field); | |
1132 if (!warn_if_not_align) | |
1133 warn_if_not_align = TYPE_WARN_IF_NOT_ALIGN (type); | |
1134 if (warn_if_not_align) | |
1135 opt_w = OPT_Wif_not_aligned; | |
1136 } | |
1137 | |
1138 if (!warn_if_not_align | |
1139 && warn_packed_not_aligned | |
131 | 1140 && lookup_attribute ("aligned", TYPE_ATTRIBUTES (type))) |
111 | 1141 { |
1142 warn_if_not_align = TYPE_ALIGN (type); | |
1143 opt_w = OPT_Wpacked_not_aligned; | |
1144 } | |
1145 | |
1146 if (!warn_if_not_align) | |
1147 return; | |
1148 | |
1149 tree context = DECL_CONTEXT (field); | |
1150 | |
1151 warn_if_not_align /= BITS_PER_UNIT; | |
1152 record_align /= BITS_PER_UNIT; | |
1153 if ((record_align % warn_if_not_align) != 0) | |
1154 warning (opt_w, "alignment %u of %qT is less than %u", | |
1155 record_align, context, warn_if_not_align); | |
1156 | |
131 | 1157 tree off = byte_position (field); |
1158 if (!multiple_of_p (TREE_TYPE (off), off, size_int (warn_if_not_align))) | |
1159 { | |
1160 if (TREE_CODE (off) == INTEGER_CST) | |
1161 warning (opt_w, "%q+D offset %E in %qT isn%'t aligned to %u", | |
1162 field, off, context, warn_if_not_align); | |
1163 else | |
1164 warning (opt_w, "%q+D offset %E in %qT may not be aligned to %u", | |
1165 field, off, context, warn_if_not_align); | |
1166 } | |
111 | 1167 } |
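
A case that reaches the -Wpacked-not-aligned branch above, sketched for a typical 64-bit target:

/* struct __attribute__ ((aligned (8))) inner { long long x; };
   struct __attribute__ ((packed)) outer { char c; struct inner i; };
   `inner' carries an explicit "aligned" attribute, but inside the
   packed `outer' the member `i' lands at offset 1, which is not a
   multiple of 8, so the offset check above warns.  */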
1168 | |
0 | 1169 /* Called from place_field to handle unions. */ |
1170 | |
1171 static void | |
1172 place_union_field (record_layout_info rli, tree field) | |
1173 { | |
1174 update_alignment_for_field (rli, field, /*known_align=*/0); | |
1175 | |
1176 DECL_FIELD_OFFSET (field) = size_zero_node; | |
1177 DECL_FIELD_BIT_OFFSET (field) = bitsize_zero_node; | |
1178 SET_DECL_OFFSET_ALIGN (field, BIGGEST_ALIGNMENT); | |
111 | 1179 handle_warn_if_not_align (field, rli->record_align); |
0 | 1180 |
1181 /* If this is an ERROR_MARK return *after* having set the | |
1182 field at the start of the union. This helps when parsing | |
1183 invalid fields. */ | |
1184 if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK) | |
1185 return; | |
1186 | |
111 | 1187 if (AGGREGATE_TYPE_P (TREE_TYPE (field)) |
1188 && TYPE_TYPELESS_STORAGE (TREE_TYPE (field))) | |
1189 TYPE_TYPELESS_STORAGE (rli->t) = 1; | |
1190 | |
0 | 1191 /* We assume the union's size will be a multiple of a byte so we don't |
1192 bother with BITPOS. */ | |
1193 if (TREE_CODE (rli->t) == UNION_TYPE) | |
1194 rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field)); | |
1195 else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE) | |
67 | 1196 rli->offset = fold_build3 (COND_EXPR, sizetype, DECL_QUALIFIER (field), |
0 | 1197 DECL_SIZE_UNIT (field), rli->offset); |
1198 } | |
1199 | |
1200 /* A bitfield of SIZE with a required access alignment of ALIGN is allocated | |
1201 at BYTE_OFFSET / BIT_OFFSET. Return nonzero if the field would span more | |
1202 units of alignment than the underlying TYPE. */ | |
1203 static int | |
1204 excess_unit_span (HOST_WIDE_INT byte_offset, HOST_WIDE_INT bit_offset, | |
1205 HOST_WIDE_INT size, HOST_WIDE_INT align, tree type) | |
1206 { | |
1207 /* Note that the calculation of OFFSET might overflow; we calculate it so | |
1208 that we still get the right result as long as ALIGN is a power of two. */ | |
1209 unsigned HOST_WIDE_INT offset = byte_offset * BITS_PER_UNIT + bit_offset; | |
1210 | |
1211 offset = offset % align; | |
1212 return ((offset + size + align - 1) / align | |
111 | 1213 > tree_to_uhwi (TYPE_SIZE (type)) / align); |
0 | 1214 } |
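
A worked instance, assuming BITS_PER_UNIT == 8 and a 4-bit bitfield of type char (TYPE_SIZE == 8, alignment 8) proposed at byte 3, bit 6:

/* offset        = (3 * 8 + 6) % 8       = 6
   units spanned = (6 + 4 + 8 - 1) / 8   = 2
   units in type = 8 / 8                 = 1
   2 > 1, so excess_unit_span returns nonzero and the caller
   advances the field to the next alignment boundary.  */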
1215 | |
1216 /* RLI contains information about the layout of a RECORD_TYPE. FIELD | |
1217 is a FIELD_DECL to be added after those fields already present in | |
1218 T. (FIELD is not actually added to the TYPE_FIELDS list here; | |
1219 callers that desire that behavior must manually perform that step.) */ | |
1220 | |
1221 void | |
1222 place_field (record_layout_info rli, tree field) | |
1223 { | |
1224 /* The alignment required for FIELD. */ | |
1225 unsigned int desired_align; | |
1226 /* The alignment FIELD would have if we just dropped it into the | |
1227 record as it presently stands. */ | |
1228 unsigned int known_align; | |
1229 unsigned int actual_align; | |
1230 /* The type of this field. */ | |
1231 tree type = TREE_TYPE (field); | |
1232 | |
1233 gcc_assert (TREE_CODE (field) != ERROR_MARK); | |
1234 | |
1235 /* If FIELD is static, then treat it like a separate variable, not | |
1236 really like a structure field. If it is a FUNCTION_DECL, it's a | |
1237 method. In both cases, all we do is lay out the decl, and we do | |
1238 it *after* the record is laid out. */ | |
111 | 1239 if (VAR_P (field)) |
0 | 1240 { |
111 | 1241 vec_safe_push (rli->pending_statics, field); |
0 | 1242 return; |
1243 } | |
1244 | |
1245 /* Enumerators and enum types which are local to this class need not | |
1246 be laid out. Likewise for initialized constant fields. */ | |
1247 else if (TREE_CODE (field) != FIELD_DECL) | |
1248 return; | |
1249 | |
1250 /* Unions are laid out very differently than records, so split | |
1251 that code off to another function. */ | |
1252 else if (TREE_CODE (rli->t) != RECORD_TYPE) | |
1253 { | |
1254 place_union_field (rli, field); | |
1255 return; | |
1256 } | |
1257 | |
1258 else if (TREE_CODE (type) == ERROR_MARK) | |
1259 { | |
1260 /* Place this field at the current allocation position, so we | |
1261 maintain monotonicity. */ | |
1262 DECL_FIELD_OFFSET (field) = rli->offset; | |
1263 DECL_FIELD_BIT_OFFSET (field) = rli->bitpos; | |
1264 SET_DECL_OFFSET_ALIGN (field, rli->offset_align); | |
111 | 1265 handle_warn_if_not_align (field, rli->record_align); |
0 | 1266 return; |
1267 } | |
1268 | |
111 | 1269 if (AGGREGATE_TYPE_P (type) |
1270 && TYPE_TYPELESS_STORAGE (type)) | |
1271 TYPE_TYPELESS_STORAGE (rli->t) = 1; | |
1272 | |
0 | 1273 /* Work out the known alignment so far. Note that A & (-A) is the |
1274 value of the least-significant bit in A that is one. */ | |
1275 if (! integer_zerop (rli->bitpos)) | |
111 | 1276 known_align = least_bit_hwi (tree_to_uhwi (rli->bitpos)); |
0 | 1277 else if (integer_zerop (rli->offset)) |
1278 known_align = 0; | |
111 | 1279 else if (tree_fits_uhwi_p (rli->offset)) |
0 | 1280 known_align = (BITS_PER_UNIT |
111 | 1281 * least_bit_hwi (tree_to_uhwi (rli->offset))); |
0 | 1282 else |
1283 known_align = rli->offset_align; | |
1284 | |
1285 desired_align = update_alignment_for_field (rli, field, known_align); | |
1286 if (known_align == 0) | |
1287 known_align = MAX (BIGGEST_ALIGNMENT, rli->record_align); | |
1288 | |
1289 if (warn_packed && DECL_PACKED (field)) | |
1290 { | |
1291 if (known_align >= TYPE_ALIGN (type)) | |
1292 { | |
1293 if (TYPE_ALIGN (type) > desired_align) | |
1294 { | |
1295 if (STRICT_ALIGNMENT) | |
1296 warning (OPT_Wattributes, "packed attribute causes " | |
1297 "inefficient alignment for %q+D", field); | |
63 | 1298 /* Don't warn if DECL_PACKED was set by the type. */ |
1299 else if (!TYPE_PACKED (rli->t)) |
0 | 1300 warning (OPT_Wattributes, "packed attribute is " |
1301 "unnecessary for %q+D", field); | |
1302 } | |
1303 } | |
1304 else | |
1305 rli->packed_maybe_necessary = 1; | |
1306 } | |
1307 | |
1308 /* Does this field automatically have alignment it needs by virtue | |
111 | 1309 of the fields that precede it and the record's own alignment? */ |
131 | 1310 if (known_align < desired_align |
1311 && (! targetm.ms_bitfield_layout_p (rli->t) | |
1312 || rli->prev_field == NULL)) | |
0 | 1313 { |
1314 /* No, we need to skip space before this field. | |
1315 Bump the cumulative size to multiple of field alignment. */ | |
1316 | |
111 | 1317 if (!targetm.ms_bitfield_layout_p (rli->t) |
1318 && DECL_SOURCE_LOCATION (field) != BUILTINS_LOCATION) | |
55 | 1319 warning (OPT_Wpadded, "padding struct to align %q+D", field); |
0 | 1320 |
1321 /* If the alignment is still within offset_align, just align | |
1322 the bit position. */ | |
1323 if (desired_align < rli->offset_align) | |
1324 rli->bitpos = round_up (rli->bitpos, desired_align); | |
1325 else | |
1326 { | |
1327 /* First adjust OFFSET by the partial bits, then align. */ | |
1328 rli->offset | |
1329 = size_binop (PLUS_EXPR, rli->offset, | |
1330 fold_convert (sizetype, | |
1331 size_binop (CEIL_DIV_EXPR, rli->bitpos, | |
1332 bitsize_unit_node))); | |
1333 rli->bitpos = bitsize_zero_node; | |
1334 | |
1335 rli->offset = round_up (rli->offset, desired_align / BITS_PER_UNIT); | |
1336 } | |
1337 | |
1338 if (! TREE_CONSTANT (rli->offset)) | |
1339 rli->offset_align = desired_align; | |
1340 } | |
1341 | |
1342 /* Handle compatibility with PCC. Note that if the record has any | |
1343 variable-sized fields, we need not worry about compatibility. */ | |
1344 if (PCC_BITFIELD_TYPE_MATTERS | |
1345 && ! targetm.ms_bitfield_layout_p (rli->t) | |
1346 && TREE_CODE (field) == FIELD_DECL | |
1347 && type != error_mark_node | |
1348 && DECL_BIT_FIELD (field) | |
1349 && (! DECL_PACKED (field) | |
1350 /* Enter for these packed fields only to issue a warning. */ | |
1351 || TYPE_ALIGN (type) <= BITS_PER_UNIT) | |
1352 && maximum_field_alignment == 0 | |
1353 && ! integer_zerop (DECL_SIZE (field)) | |
111 | 1354 && tree_fits_uhwi_p (DECL_SIZE (field)) |
1355 && tree_fits_uhwi_p (rli->offset) | |
1356 && tree_fits_uhwi_p (TYPE_SIZE (type))) | |
0 | 1357 { |
1358 unsigned int type_align = TYPE_ALIGN (type); | |
1359 tree dsize = DECL_SIZE (field); | |
111 | 1360 HOST_WIDE_INT field_size = tree_to_uhwi (dsize); |
1361 HOST_WIDE_INT offset = tree_to_uhwi (rli->offset); | |
1362 HOST_WIDE_INT bit_offset = tree_to_shwi (rli->bitpos); | |
0 | 1363 |
1364 #ifdef ADJUST_FIELD_ALIGN | |
1365 if (! TYPE_USER_ALIGN (type)) | |
111 | 1366 type_align = ADJUST_FIELD_ALIGN (field, type, type_align); |
0 | 1367 #endif |
1368 | |
1369 /* A bit field may not span more units of alignment of its type | |
1370 than its type itself. Advance to next boundary if necessary. */ | |
1371 if (excess_unit_span (offset, bit_offset, field_size, type_align, type)) | |
1372 { | |
1373 if (DECL_PACKED (field)) | |
1374 { | |
1375 if (warn_packed_bitfield_compat == 1) | |
1376 inform | |
1377 (input_location, | |
67 | 1378 "offset of packed bit-field %qD has changed in GCC 4.4", |
0 | 1379 field); |
1380 } | |
1381 else | |
67 | 1382 rli->bitpos = round_up (rli->bitpos, type_align); |
0 | 1383 } |
1384 | |
1385 if (! DECL_PACKED (field)) | |
1386 TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type); | |
111 | 1387 |
1388 SET_TYPE_WARN_IF_NOT_ALIGN (rli->t, | |
1389 TYPE_WARN_IF_NOT_ALIGN (type)); | |
0 | 1390 } |
1391 | |
1392 #ifdef BITFIELD_NBYTES_LIMITED | |
1393 if (BITFIELD_NBYTES_LIMITED | |
1394 && ! targetm.ms_bitfield_layout_p (rli->t) | |
1395 && TREE_CODE (field) == FIELD_DECL | |
1396 && type != error_mark_node | |
1397 && DECL_BIT_FIELD_TYPE (field) | |
1398 && ! DECL_PACKED (field) | |
1399 && ! integer_zerop (DECL_SIZE (field)) | |
111 | 1400 && tree_fits_uhwi_p (DECL_SIZE (field)) |
1401 && tree_fits_uhwi_p (rli->offset) | |
1402 && tree_fits_uhwi_p (TYPE_SIZE (type))) | |
0 | 1403 { |
1404 unsigned int type_align = TYPE_ALIGN (type); | |
1405 tree dsize = DECL_SIZE (field); | |
111 | 1406 HOST_WIDE_INT field_size = tree_to_uhwi (dsize); |
1407 HOST_WIDE_INT offset = tree_to_uhwi (rli->offset); | |
1408 HOST_WIDE_INT bit_offset = tree_to_shwi (rli->bitpos); | |
0 | 1409 |
1410 #ifdef ADJUST_FIELD_ALIGN | |
1411 if (! TYPE_USER_ALIGN (type)) | |
111 | 1412 type_align = ADJUST_FIELD_ALIGN (field, type, type_align); |
0 | 1413 #endif |
1414 | |
1415 if (maximum_field_alignment != 0) | |
1416 type_align = MIN (type_align, maximum_field_alignment); | |
1417 /* ??? This test is opposite the test in the containing if | |
1418 statement, so this code is unreachable currently. */ | |
1419 else if (DECL_PACKED (field)) | |
1420 type_align = MIN (type_align, BITS_PER_UNIT); | |
1421 | |
1422 /* A bit field may not span the unit of alignment of its type. | |
1423 Advance to next boundary if necessary. */ | |
1424 if (excess_unit_span (offset, bit_offset, field_size, type_align, type)) | |
1425 rli->bitpos = round_up (rli->bitpos, type_align); | |
1426 | |
1427 TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type); | |
111 | 1428 SET_TYPE_WARN_IF_NOT_ALIGN (rli->t, |
1429 TYPE_WARN_IF_NOT_ALIGN (type)); | |
0 | 1430 } |
1431 #endif | |
1432 | |
1433 /* See the docs for TARGET_MS_BITFIELD_LAYOUT_P for details. | |
1434 A subtlety: | |
1435 When a bit field is inserted into a packed record, the whole | |
1436 size of the underlying type is used by one or more same-size | |
1437 adjacent bitfields. (That is, if it's long:3, 32 bits is | |
1438 used in the record, and any additional adjacent long bitfields are | |
1439 packed into the same chunk of 32 bits. However, if the size | |
1440 changes, a new field of that size is allocated.) In an unpacked | |
1441 record, this is the same as using alignment, but not equivalent | |
1442 when packing. | |
1443 | |
1444 Note: for compatibility, we use the type size, not the type alignment | |
1445 to determine alignment, since that matches the documentation. */ | |
1446 | |
1447 if (targetm.ms_bitfield_layout_p (rli->t)) | |
1448 { | |
1449 tree prev_saved = rli->prev_field; | |
1450 tree prev_type = prev_saved ? DECL_BIT_FIELD_TYPE (prev_saved) : NULL; | |
1451 | |
1452 /* This is a bitfield if it exists. */ | |
1453 if (rli->prev_field) | |
1454 { | |
131 | 1455 bool realign_p = known_align < desired_align; |
1456 | |
0 | 1457 /* If both are bitfields, nonzero, and the same size, this is |
1458 the middle of a run. Zero declared size fields are special | |
1459 and handled as "end of run". (Note: it's nonzero declared | |
1460 size, but equal type sizes!) (Since we know that both | |
1461 the current and previous fields are bitfields by the | |
1462 time we check it, DECL_SIZE must be present for both.) */ | |
1463 if (DECL_BIT_FIELD_TYPE (field) | |
1464 && !integer_zerop (DECL_SIZE (field)) | |
1465 && !integer_zerop (DECL_SIZE (rli->prev_field)) | |
111 | 1466 && tree_fits_shwi_p (DECL_SIZE (rli->prev_field)) |
1467 && tree_fits_uhwi_p (TYPE_SIZE (type)) | |
0 | 1468 && simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type))) |
1469 { | |
1470 /* We're in the middle of a run of equal type size fields; make | |
1471 sure we realign if we run out of bits. (Not decl size, | |
1472 type size!) */ | |
111 | 1473 HOST_WIDE_INT bitsize = tree_to_uhwi (DECL_SIZE (field)); |
0 | 1474 |
1475 if (rli->remaining_in_alignment < bitsize) | |
1476 { | |
111 | 1477 HOST_WIDE_INT typesize = tree_to_uhwi (TYPE_SIZE (type)); |
0 | 1478 |
1479 /* Out of bits; bump up to the next 'word'. */ | |
1480 rli->bitpos | |
1481 = size_binop (PLUS_EXPR, rli->bitpos, | |
1482 bitsize_int (rli->remaining_in_alignment)); | |
1483 rli->prev_field = field; | |
1484 if (typesize < bitsize) | |
1485 rli->remaining_in_alignment = 0; | |
1486 else | |
1487 rli->remaining_in_alignment = typesize - bitsize; | |
1488 } | |
1489 else | |
131 | 1490 { |
1491 rli->remaining_in_alignment -= bitsize; | |
1492 realign_p = false; | |
1493 } | |
0 | 1494 } |
1495 else | |
1496 { | |
1497 /* End of a run: if leaving a run of bitfields of the same type | |
1498 size, we have to "use up" the rest of the bits of the type | |
1499 size. | |
1500 | |
1501 Compute the new position as the sum of the size for the prior | |
1502 type and where we first started working on that type. | |
1503 Note: since the beginning of the field was aligned then | |
1504 of course the end will be too. No round needed. */ | |
1505 | |
1506 if (!integer_zerop (DECL_SIZE (rli->prev_field))) | |
1507 { | |
1508 rli->bitpos | |
1509 = size_binop (PLUS_EXPR, rli->bitpos, | |
1510 bitsize_int (rli->remaining_in_alignment)); | |
1511 } | |
1512 else | |
1513 /* We "use up" size zero fields; the code below should behave | |
1514 as if the prior field was not a bitfield. */ | |
1515 prev_saved = NULL; | |
1516 | |
1517 /* Cause a new bitfield to be captured, either this time (if | |
1518 currently a bitfield) or next time we see one. */ | |
111 | 1519 if (!DECL_BIT_FIELD_TYPE (field) |
0 | 1520 || integer_zerop (DECL_SIZE (field))) |
1521 rli->prev_field = NULL; | |
1522 } | |
1523 | |
131 | 1524 /* Does this field automatically have alignment it needs by virtue |
1525 of the fields that precede it and the record's own alignment? */ | |
1526 if (realign_p) | |
1527 { | |
1528 /* If the alignment is still within offset_align, just align | |
1529 the bit position. */ | |
1530 if (desired_align < rli->offset_align) | |
1531 rli->bitpos = round_up (rli->bitpos, desired_align); | |
1532 else | |
1533 { | |
1534 /* First adjust OFFSET by the partial bits, then align. */ | |
1535 tree d = size_binop (CEIL_DIV_EXPR, rli->bitpos, | |
1536 bitsize_unit_node); | |
1537 rli->offset = size_binop (PLUS_EXPR, rli->offset, | |
1538 fold_convert (sizetype, d)); | |
1539 rli->bitpos = bitsize_zero_node; | |
1540 | |
1541 rli->offset = round_up (rli->offset, | |
1542 desired_align / BITS_PER_UNIT); | |
1543 } | |
1544 | |
1545 if (! TREE_CONSTANT (rli->offset)) | |
1546 rli->offset_align = desired_align; | |
1547 } | |
1548 | |
0 | 1549 normalize_rli (rli); |
1550 } | |
1551 | |
111 | 1552 /* If we're starting a new run of same type size bitfields |
0 | 1553 (or a run of non-bitfields), set up the "first of the run" |
1554 fields. | |
1555 | |
1556 That is, if the current field is not a bitfield, or if there | |
1557 was a prior bitfield and the type sizes differ, or if there wasn't | |
1558 a prior bitfield and the size of the current field is nonzero. | |
1559 | |
1560 Note: we must be sure to test ONLY the type size if there was | |
1561 a prior bitfield and ONLY for the current field being zero if | |
1562 there wasn't. */ | |
1563 | |
1564 if (!DECL_BIT_FIELD_TYPE (field) | |
1565 || (prev_saved != NULL | |
1566 ? !simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type)) | |
131 | 1567 : !integer_zerop (DECL_SIZE (field)))) |
0 | 1568 { |
1569 /* Never smaller than a byte for compatibility. */ | |
1570 unsigned int type_align = BITS_PER_UNIT; | |
1571 | |
1572 /* (When not a bitfield), we could be seeing a flex array (with | |
1573 no DECL_SIZE). Since we won't be using remaining_in_alignment | |
1574 until we see a bitfield (and come by here again) we just skip | |
1575 calculating it. */ | |
1576 if (DECL_SIZE (field) != NULL | |
111 | 1577 && tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (field))) |
1578 && tree_fits_uhwi_p (DECL_SIZE (field))) | |
0 | 1579 { |
63 | 1580 unsigned HOST_WIDE_INT bitsize |
111 | 1581 = tree_to_uhwi (DECL_SIZE (field)); |
63 | 1582 unsigned HOST_WIDE_INT typesize |
111 | 1583 = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (field))); |
0 | 1584 |
1585 if (typesize < bitsize) | |
1586 rli->remaining_in_alignment = 0; | |
1587 else | |
1588 rli->remaining_in_alignment = typesize - bitsize; | |
1589 } | |
1590 | |
1591 /* Now align (conventionally) for the new type. */ | |
131 | 1592 if (! DECL_PACKED (field)) |
1593 type_align = TYPE_ALIGN (TREE_TYPE (field)); | |
0 | 1594 |
1595 if (maximum_field_alignment != 0) | |
1596 type_align = MIN (type_align, maximum_field_alignment); | |
1597 | |
67 | 1598 rli->bitpos = round_up (rli->bitpos, type_align); |
0 | 1599 |
1600 /* If we really aligned, don't allow subsequent bitfields | |
1601 to undo that. */ | |
1602 rli->prev_field = NULL; | |
1603 } | |
1604 } | |
1605 | |
1606 /* Offset so far becomes the position of this field after normalizing. */ | |
1607 normalize_rli (rli); | |
1608 DECL_FIELD_OFFSET (field) = rli->offset; | |
1609 DECL_FIELD_BIT_OFFSET (field) = rli->bitpos; | |
1610 SET_DECL_OFFSET_ALIGN (field, rli->offset_align); | |
111 | 1611 handle_warn_if_not_align (field, rli->record_align); |
1612 | |
1613 /* Evaluate nonconstant offsets only once, either now or as soon as safe. */ | |
1614 if (TREE_CODE (DECL_FIELD_OFFSET (field)) != INTEGER_CST) | |
1615 DECL_FIELD_OFFSET (field) = variable_size (DECL_FIELD_OFFSET (field)); | |
0 | 1616 |
1617 /* If this field ended up more aligned than we thought it would be (we | |
1618 approximate this by seeing if its position changed), lay out the field | |
1619 again; perhaps we can use an integral mode for it now. */ | |
1620 if (! integer_zerop (DECL_FIELD_BIT_OFFSET (field))) | |
111 | 1621 actual_align = least_bit_hwi (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))); |
0 | 1622 else if (integer_zerop (DECL_FIELD_OFFSET (field))) |
1623 actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align); | |
111 | 1624 else if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field))) |
0 | 1625 actual_align = (BITS_PER_UNIT |
111 | 1626 * least_bit_hwi (tree_to_uhwi (DECL_FIELD_OFFSET (field)))); |
0 | 1627 else |
1628 actual_align = DECL_OFFSET_ALIGN (field); | |
1629 /* ACTUAL_ALIGN is still the actual alignment *within the record*. | |
1630 Store / extract bit field operations will check the alignment of the | |
1631 record against the mode of bit fields. */ | |
1632 | |
1633 if (known_align != actual_align) | |
1634 layout_decl (field, actual_align); | |
1635 | |
1636 if (rli->prev_field == NULL && DECL_BIT_FIELD_TYPE (field)) | |
1637 rli->prev_field = field; | |
1638 | |
1639 /* Now add size of this field to the size of the record. If the size is | |
1640 not constant, treat the field as being a multiple of bytes and just | |
1641 adjust the offset, resetting the bit position. Otherwise, apportion the | |
1642 size amongst the bit position and offset. First handle the case of an | |
1643 unspecified size, which can happen when we have an invalid nested struct | |
1644 definition, such as struct j { struct j { int i; } }. The error message | |
1645 is printed in finish_struct. */ | |
1646 if (DECL_SIZE (field) == 0) | |
1647 /* Do nothing. */; | |
1648 else if (TREE_CODE (DECL_SIZE (field)) != INTEGER_CST | |
1649 || TREE_OVERFLOW (DECL_SIZE (field))) | |
1650 { | |
1651 rli->offset | |
1652 = size_binop (PLUS_EXPR, rli->offset, | |
1653 fold_convert (sizetype, | |
1654 size_binop (CEIL_DIV_EXPR, rli->bitpos, | |
1655 bitsize_unit_node))); | |
1656 rli->offset | |
1657 = size_binop (PLUS_EXPR, rli->offset, DECL_SIZE_UNIT (field)); | |
1658 rli->bitpos = bitsize_zero_node; | |
1659 rli->offset_align = MIN (rli->offset_align, desired_align); | |
131 | 1660 |
1661 if (!multiple_of_p (bitsizetype, DECL_SIZE (field), | |
1662 bitsize_int (rli->offset_align))) | |
1663 { | |
1664 tree type = strip_array_types (TREE_TYPE (field)); | |
1665 /* The above adjusts offset_align just based on the start of the | |
1666 field. The field might not have a size that is a multiple of | |
1667 that offset_align though. If the field is an array of fixed | |
1668 sized elements, assume there can be any multiple of those | |
1669 sizes. If it is a variable length aggregate or array of | |
1670 variable length aggregates, assume worst that the end is | |
1671 just BITS_PER_UNIT aligned. */ | |
1672 if (TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST) | |
1673 { | |
1674 if (TREE_INT_CST_LOW (TYPE_SIZE (type))) | |
1675 { | |
1676 unsigned HOST_WIDE_INT sz | |
1677 = least_bit_hwi (TREE_INT_CST_LOW (TYPE_SIZE (type))); | |
1678 rli->offset_align = MIN (rli->offset_align, sz); | |
1679 } | |
1680 } | |
1681 else | |
1682 rli->offset_align = MIN (rli->offset_align, BITS_PER_UNIT); | |
1683 } | |
0 | 1684 } |
1685 else if (targetm.ms_bitfield_layout_p (rli->t)) | |
1686 { | |
1687 rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field)); | |
1688 | |
131 | 1689 /* If FIELD is the last field and doesn't end at the full length |
1690 of the type then pad the struct out to the full length of the | |
1691 last type. */ | |
1692 if (DECL_BIT_FIELD_TYPE (field) | |
0 | 1693 && !integer_zerop (DECL_SIZE (field))) |
131 | 1694 { |
1695 /* We have to scan, because non-field DECLS are also here. */ | |
1696 tree probe = field; | |
1697 while ((probe = DECL_CHAIN (probe))) | |
1698 if (TREE_CODE (probe) == FIELD_DECL) | |
1699 break; | |
1700 if (!probe) | |
1701 rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, | |
1702 bitsize_int (rli->remaining_in_alignment)); | |
1703 } | |
0 | 1704 |
1705 normalize_rli (rli); | |
1706 } | |
1707 else | |
1708 { | |
1709 rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field)); | |
1710 normalize_rli (rli); | |
1711 } | |
1712 } | |
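
The PCC-compatibility rule in place_field is easiest to see on a target with PCC_BITFIELD_TYPE_MATTERS and 32-bit int (an illustrative assumption):

/* struct s { int a : 20; int b : 20; };
   `a' is placed at bit 0.  Putting `b' at bit 20 would make it
   span two 32-bit units of `int' (excess_unit_span above), so
   rli->bitpos is rounded up and `b' starts at bit 32; the struct
   becomes 8 bytes rather than a packed 5.  */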
1713 | |
1714 /* Assuming that all the fields have been laid out, this function uses | |
1715 RLI to compute the final TYPE_SIZE, TYPE_ALIGN, etc. for the type | |
1716 indicated by RLI. */ | |
1717 | |
1718 static void | |
1719 finalize_record_size (record_layout_info rli) | |
1720 { | |
1721 tree unpadded_size, unpadded_size_unit; | |
1722 | |
1723 /* Now we want just byte and bit offsets, so set the offset alignment | |
1724 to be a byte and then normalize. */ | |
1725 rli->offset_align = BITS_PER_UNIT; | |
1726 normalize_rli (rli); | |
1727 | |
1728 /* Determine the desired alignment. */ | |
1729 #ifdef ROUND_TYPE_ALIGN | |
111 | 1730 SET_TYPE_ALIGN (rli->t, ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), |
1731 rli->record_align)); | |
0 | 1732 #else |
111 | 1733 SET_TYPE_ALIGN (rli->t, MAX (TYPE_ALIGN (rli->t), rli->record_align)); |
0 | 1734 #endif |
1735 | |
1736 /* Compute the size so far. Be sure to allow for extra bits in the | |
1737 size in bytes. We have guaranteed above that it will be no more | |
1738 than a single byte. */ | |
1739 unpadded_size = rli_size_so_far (rli); | |
1740 unpadded_size_unit = rli_size_unit_so_far (rli); | |
1741 if (! integer_zerop (rli->bitpos)) | |
1742 unpadded_size_unit | |
1743 = size_binop (PLUS_EXPR, unpadded_size_unit, size_one_node); | |
1744 | |
1745 /* Round the size up to be a multiple of the required alignment. */ | |
67 | 1746 TYPE_SIZE (rli->t) = round_up (unpadded_size, TYPE_ALIGN (rli->t)); |
0 | 1747 TYPE_SIZE_UNIT (rli->t) |
67 | 1748 = round_up (unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t)); |
0 | 1749 |
1750 if (TREE_CONSTANT (unpadded_size) | |
55 | 1751 && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0 |
1752 && input_location != BUILTINS_LOCATION) |
0 | 1753 warning (OPT_Wpadded, "padding struct size to alignment boundary"); |
1754 | |
1755 if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE | |
1756 && TYPE_PACKED (rli->t) && ! rli->packed_maybe_necessary | |
1757 && TREE_CONSTANT (unpadded_size)) | |
1758 { | |
1759 tree unpacked_size; | |
1760 | |
1761 #ifdef ROUND_TYPE_ALIGN | |
1762 rli->unpacked_align | |
1763 = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), rli->unpacked_align); | |
1764 #else | |
1765 rli->unpacked_align = MAX (TYPE_ALIGN (rli->t), rli->unpacked_align); | |
1766 #endif | |
1767 | |
67 | 1768 unpacked_size = round_up (TYPE_SIZE (rli->t), rli->unpacked_align); |
0 | 1769 if (simple_cst_equal (unpacked_size, TYPE_SIZE (rli->t))) |
1770 { | |
1771 if (TYPE_NAME (rli->t)) | |
1772 { | |
55 | 1773 tree name; |
0 | 1774 |
1775 if (TREE_CODE (TYPE_NAME (rli->t)) == IDENTIFIER_NODE) | |
55 | 1776 name = TYPE_NAME (rli->t); |
0 | 1777 else |
55 | 1778 name = DECL_NAME (TYPE_NAME (rli->t)); |
0 | 1779 |
1780 if (STRICT_ALIGNMENT) | |
1781 warning (OPT_Wpacked, "packed attribute causes inefficient " | |
55 | 1782 "alignment for %qE", name); |
0 | 1783 else |
1784 warning (OPT_Wpacked, | |
55 | 1785 "packed attribute is unnecessary for %qE", name); |
0 | 1786 } |
1787 else | |
1788 { | |
1789 if (STRICT_ALIGNMENT) | |
1790 warning (OPT_Wpacked, | |
1791 "packed attribute causes inefficient alignment"); | |
1792 else | |
1793 warning (OPT_Wpacked, "packed attribute is unnecessary"); | |
1794 } | |
1795 } | |
1796 } | |
1797 } | |
1798 | |
1799 /* Compute the TYPE_MODE for the TYPE (which is a RECORD_TYPE). */ | |
1800 | |
1801 void | |
1802 compute_record_mode (tree type) | |
1803 { | |
1804 tree field; | |
111 | 1805 machine_mode mode = VOIDmode; |
0 | 1806 |
1807 /* Most RECORD_TYPEs have BLKmode, so we start off assuming that. | |
1808 However, if possible, we use a mode that fits in a register | |
1809 instead, in order to allow for better optimization down the | |
1810 line. */ | |
1811 SET_TYPE_MODE (type, BLKmode); | |
1812 | |
111 | 1813 if (! tree_fits_uhwi_p (TYPE_SIZE (type))) |
0 | 1814 return; |
1815 | |
1816 /* A record which has any BLKmode members must itself be | |
1817 BLKmode; it can't go in a register. Unless the member is | |
1818 BLKmode only because it isn't aligned. */ | |
67 | 1819 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field)) |
0 | 1820 { |
1821 if (TREE_CODE (field) != FIELD_DECL) | |
1822 continue; | |
1823 | |
1824 if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK | |
1825 || (TYPE_MODE (TREE_TYPE (field)) == BLKmode | |
1826 && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field)) | |
1827 && !(TYPE_SIZE (TREE_TYPE (field)) != 0 | |
1828 && integer_zerop (TYPE_SIZE (TREE_TYPE (field))))) | |
111 | 1829 || ! tree_fits_uhwi_p (bit_position (field)) |
0 | 1830 || DECL_SIZE (field) == 0 |
111 | 1831 || ! tree_fits_uhwi_p (DECL_SIZE (field))) |
0 | 1832 return; |
1833 | |
1834 /* If this field is the whole struct, remember its mode so | |
1835 that, say, we can put a double in a class into a DF | |
1836 register instead of forcing it to live in the stack. */ | |
1837 if (simple_cst_equal (TYPE_SIZE (type), DECL_SIZE (field))) | |
1838 mode = DECL_MODE (field); | |
1839 | |
111 | 1840 /* With some targets, it is sub-optimal to access an aligned |
1841 BLKmode structure as a scalar. */ | |
1842 if (targetm.member_type_forces_blk (field, mode)) | |
0 | 1843 return; |
1844 } | |
1845 | |
1846 /* If we only have one real field, use its mode if that mode's size | |
1847 matches the type's size. This only applies to RECORD_TYPE. This | |
1848 does not apply to unions. */ | |
131 | 1849 poly_uint64 type_size; |
1850 if (TREE_CODE (type) == RECORD_TYPE | |
1851 && mode != VOIDmode | |
1852 && poly_int_tree_p (TYPE_SIZE (type), &type_size) | |
1853 && known_eq (GET_MODE_BITSIZE (mode), type_size)) | |
111 | 1854 ; |
0 | 1855 else |
111 | 1856 mode = mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1).else_blk (); |
0 | 1857 |
1858 /* If structure's known alignment is less than what the scalar | |
1859 mode would need, and it matters, then stick with BLKmode. */ | |
111 | 1860 if (mode != BLKmode |
0 | 1861 && STRICT_ALIGNMENT |
1862 && ! (TYPE_ALIGN (type) >= BIGGEST_ALIGNMENT | |
111 | 1863 || TYPE_ALIGN (type) >= GET_MODE_ALIGNMENT (mode))) |
0 | 1864 { |
1865 /* If this is the only reason this type is BLKmode, then | |
1866 don't force containing types to be BLKmode. */ | |
1867 TYPE_NO_FORCE_BLK (type) = 1; | |
111 | 1868 mode = BLKmode; |
0 | 1869 } |
111 | 1870 |
1871 SET_TYPE_MODE (type, mode); | |
0 | 1872 } |
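
Typical outcomes on a 64-bit target (illustrative; the modes chosen depend on the target's MAX_FIXED_MODE_SIZE and alignment rules):

/* struct a { double d; };        -> DFmode (single field, sizes match)
   struct b { int i; float f; };  -> DImode via mode_for_size_tree
                                     (8 bytes, no single-field match)
   struct c { char s[100]; };     -> BLKmode (no integer mode fits)
   A non-BLKmode TYPE_MODE is what allows such a struct to live in
   a register.  */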
1873 | |
1874 /* Compute TYPE_SIZE and TYPE_ALIGN for TYPE, once it has been laid | |
1875 out. */ | |
1876 | |
1877 static void | |
1878 finalize_type_size (tree type) | |
1879 { | |
1880 /* Normally, use the alignment corresponding to the mode chosen. | |
1881 However, where strict alignment is not required, avoid | |
1882 over-aligning structures, since most compilers do not do this | |
1883 alignment. */ | |
111 | 1884 if (TYPE_MODE (type) != BLKmode |
1885 && TYPE_MODE (type) != VOIDmode | |
1886 && (STRICT_ALIGNMENT || !AGGREGATE_TYPE_P (type))) | |
0 | 1887 { |
1888 unsigned mode_align = GET_MODE_ALIGNMENT (TYPE_MODE (type)); | |
1889 | |
1890 /* Don't override a larger alignment requirement coming from a user | |
1891 alignment of one of the fields. */ | |
1892 if (mode_align >= TYPE_ALIGN (type)) | |
1893 { | |
111 | 1894 SET_TYPE_ALIGN (type, mode_align); |
0 | 1895 TYPE_USER_ALIGN (type) = 0; |
1896 } | |
1897 } | |
1898 | |
1899 /* Do machine-dependent extra alignment. */ | |
1900 #ifdef ROUND_TYPE_ALIGN | |
111 | 1901 SET_TYPE_ALIGN (type, |
1902 ROUND_TYPE_ALIGN (type, TYPE_ALIGN (type), BITS_PER_UNIT)); | |
0 | 1903 #endif |
1904 | |
1905 /* If we failed to find a simple way to calculate the unit size | |
1906 of the type, find it by division. */ | |
1907 if (TYPE_SIZE_UNIT (type) == 0 && TYPE_SIZE (type) != 0) | |
1908 /* TYPE_SIZE (type) is computed in bitsizetype. After the division, the | |
1909 result will fit in sizetype. We will get more efficient code using | |
1910 sizetype, so we force a conversion. */ | |
1911 TYPE_SIZE_UNIT (type) | |
1912 = fold_convert (sizetype, | |
1913 size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type), | |
1914 bitsize_unit_node)); | |
1915 | |
1916 if (TYPE_SIZE (type) != 0) | |
1917 { | |
67 | 1918 TYPE_SIZE (type) = round_up (TYPE_SIZE (type), TYPE_ALIGN (type)); |
1919 TYPE_SIZE_UNIT (type) |
1920 = round_up (TYPE_SIZE_UNIT (type), TYPE_ALIGN_UNIT (type)); |
0 | 1921 } |
1922 | |
1923 /* Evaluate nonconstant sizes only once, either now or as soon as safe. */ | |
1924 if (TYPE_SIZE (type) != 0 && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST) | |
1925 TYPE_SIZE (type) = variable_size (TYPE_SIZE (type)); | |
1926 if (TYPE_SIZE_UNIT (type) != 0 | |
1927 && TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST) | |
1928 TYPE_SIZE_UNIT (type) = variable_size (TYPE_SIZE_UNIT (type)); | |
1929 | |
131 | 1930 /* Handle empty records as per the x86-64 psABI. */ |
1931 TYPE_EMPTY_P (type) = targetm.calls.empty_record_p (type); | |
1932 | |
0 | 1933 /* Also layout any other variants of the type. */ |
1934 if (TYPE_NEXT_VARIANT (type) | |
1935 || type != TYPE_MAIN_VARIANT (type)) | |
1936 { | |
1937 tree variant; | |
1938 /* Record layout info of this variant. */ | |
1939 tree size = TYPE_SIZE (type); | |
1940 tree size_unit = TYPE_SIZE_UNIT (type); | |
1941 unsigned int align = TYPE_ALIGN (type); | |
111 | 1942 unsigned int precision = TYPE_PRECISION (type); |
0 | 1943 unsigned int user_align = TYPE_USER_ALIGN (type); |
111 | 1944 machine_mode mode = TYPE_MODE (type); |
131 | 1945 bool empty_p = TYPE_EMPTY_P (type); |
0 | 1946 |
1947 /* Copy it into all variants. */ | |
1948 for (variant = TYPE_MAIN_VARIANT (type); | |
1949 variant != 0; | |
1950 variant = TYPE_NEXT_VARIANT (variant)) | |
1951 { | |
1952 TYPE_SIZE (variant) = size; | |
1953 TYPE_SIZE_UNIT (variant) = size_unit; | |
111 | 1954 unsigned valign = align; |
1955 if (TYPE_USER_ALIGN (variant)) | |
1956 valign = MAX (valign, TYPE_ALIGN (variant)); | |
1957 else | |
1958 TYPE_USER_ALIGN (variant) = user_align; | |
1959 SET_TYPE_ALIGN (variant, valign); | |
1960 TYPE_PRECISION (variant) = precision; | |
0 | 1961 SET_TYPE_MODE (variant, mode); |
131 | 1962 TYPE_EMPTY_P (variant) = empty_p; |
0 | 1963 } |
1964 } | |
1965 } | |
1966 | |
111 | 1967 /* Return a new underlying object for a bitfield started with FIELD. */ |
1968 | |
1969 static tree | |
1970 start_bitfield_representative (tree field) | |
1971 { | |
1972 tree repr = make_node (FIELD_DECL); | |
1973 DECL_FIELD_OFFSET (repr) = DECL_FIELD_OFFSET (field); | |
1974 /* Force the representative to begin at a BITS_PER_UNIT aligned | |
1975 boundary - C++ may use tail-padding of a base object to | |
1976 continue packing bits so the bitfield region does not start | |
1977 at bit zero (see g++.dg/abi/bitfield5.C for example). | |
1978 Unallocated bits may happen for other reasons as well, | |
1979 for example Ada which allows explicit bit-granular structure layout. */ | |
1980 DECL_FIELD_BIT_OFFSET (repr) | |
1981 = size_binop (BIT_AND_EXPR, | |
1982 DECL_FIELD_BIT_OFFSET (field), | |
1983 bitsize_int (~(BITS_PER_UNIT - 1))); | |
1984 SET_DECL_OFFSET_ALIGN (repr, DECL_OFFSET_ALIGN (field)); | |
1985 DECL_SIZE (repr) = DECL_SIZE (field); | |
1986 DECL_SIZE_UNIT (repr) = DECL_SIZE_UNIT (field); | |
1987 DECL_PACKED (repr) = DECL_PACKED (field); | |
1988 DECL_CONTEXT (repr) = DECL_CONTEXT (field); | |
1989 /* There are no indirect accesses to this field. If we introduce | |
1990 some then they have to use the record alias set. This makes | |
1991 sure to properly conflict with [indirect] accesses to addressable | |
1992 fields of the bitfield group. */ | |
1993 DECL_NONADDRESSABLE_P (repr) = 1; | |
1994 return repr; | |
1995 } | |
1996 | |
1997 /* Finish up a bitfield group that was started by creating the underlying | |
1998 object REPR with the last field in the bitfield group FIELD. */ | |
1999 | |
2000 static void | |
2001 finish_bitfield_representative (tree repr, tree field) | |
2002 { | |
2003 unsigned HOST_WIDE_INT bitsize, maxbitsize; | |
2004 tree nextf, size; | |
2005 | |
2006 size = size_diffop (DECL_FIELD_OFFSET (field), | |
2007 DECL_FIELD_OFFSET (repr)); | |
2008 while (TREE_CODE (size) == COMPOUND_EXPR) | |
2009 size = TREE_OPERAND (size, 1); | |
2010 gcc_assert (tree_fits_uhwi_p (size)); | |
2011 bitsize = (tree_to_uhwi (size) * BITS_PER_UNIT | |
2012 + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field)) | |
2013 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)) | |
2014 + tree_to_uhwi (DECL_SIZE (field))); | |
2015 | |
2016 /* Round up bitsize to multiples of BITS_PER_UNIT. */ | |
2017 bitsize = (bitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1); | |
2018 | |
2019 /* Now nothing tells us how to pad out bitsize ... */ | |
2020 nextf = DECL_CHAIN (field); | |
2021 while (nextf && TREE_CODE (nextf) != FIELD_DECL) | |
2022 nextf = DECL_CHAIN (nextf); | |
2023 if (nextf) | |
2024 { | |
2025 tree maxsize; | |
2026 /* If there was an error, the field may be not laid out | |
2027 correctly. Don't bother to do anything. */ | |
2028 if (TREE_TYPE (nextf) == error_mark_node) | |
2029 return; | |
2030 maxsize = size_diffop (DECL_FIELD_OFFSET (nextf), | |
2031 DECL_FIELD_OFFSET (repr)); | |
2032 if (tree_fits_uhwi_p (maxsize)) | |
2033 { | |
2034 maxbitsize = (tree_to_uhwi (maxsize) * BITS_PER_UNIT | |
2035 + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (nextf)) | |
2036 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr))); | |
2037 /* If the group ends within a bitfield nextf does not need to be | |
2038 aligned to BITS_PER_UNIT. Thus round up. */ | |
2039 maxbitsize = (maxbitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1); | |
2040 } | |
2041 else | |
2042 maxbitsize = bitsize; | |
2043 } | |
2044 else | |
2045 { | |
2046 /* Note that if the C++ FE sets up tail-padding to be re-used it | |
2047 creates an as-base variant of the type with TYPE_SIZE adjusted | |
2048 accordingly. So it is safe to include tail-padding here. */ | |
2049 tree aggsize = lang_hooks.types.unit_size_without_reusable_padding | |
2050 (DECL_CONTEXT (field)); | |
2051 tree maxsize = size_diffop (aggsize, DECL_FIELD_OFFSET (repr)); | |
2052 /* We cannot generally rely on maxsize to fold to an integer constant, | |
2053 so use bitsize as fallback for this case. */ | |
2054 if (tree_fits_uhwi_p (maxsize)) | |
2055 maxbitsize = (tree_to_uhwi (maxsize) * BITS_PER_UNIT | |
2056 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr))); | |
2057 else | |
2058 maxbitsize = bitsize; | |
2059 } | |
2060 | |
2061 /* Only if we don't artificially break up the representative in | |
2062 the middle of a large bitfield with different possibly | |
2063 overlapping representatives. And all representatives start | |
2064 at byte offset. */ | |
2065 gcc_assert (maxbitsize % BITS_PER_UNIT == 0); | |
2066 | |
2067 /* Find the smallest nice mode to use. */ | |
2068 opt_scalar_int_mode mode_iter; | |
2069 FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT) | |
2070 if (GET_MODE_BITSIZE (mode_iter.require ()) >= bitsize) | |
2071 break; | |
2072 | |
2073 scalar_int_mode mode; | |
2074 if (!mode_iter.exists (&mode) | |
2075 || GET_MODE_BITSIZE (mode) > maxbitsize | |
2076 || GET_MODE_BITSIZE (mode) > MAX_FIXED_MODE_SIZE) | |
2077 { | |
2078 /* We really want a BLKmode representative only as a last resort, | |
2079 considering the member b in | |
2080 struct { int a : 7; int b : 17; int c; } __attribute__((packed)); | |
2081 Otherwise we simply want to split the representative up | |
2082 allowing for overlaps within the bitfield region as required for | |
2083 struct { int a : 7; int b : 7; | |
2084 int c : 10; int d; } __attribute__((packed)); | |
2085 [0, 15] HImode for a and b, [8, 23] HImode for c. */ | |
2086 DECL_SIZE (repr) = bitsize_int (bitsize); | |
2087 DECL_SIZE_UNIT (repr) = size_int (bitsize / BITS_PER_UNIT); | |
2088 SET_DECL_MODE (repr, BLKmode); | |
2089 TREE_TYPE (repr) = build_array_type_nelts (unsigned_char_type_node, | |
2090 bitsize / BITS_PER_UNIT); | |
2091 } | |
2092 else | |
2093 { | |
2094 unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (mode); | |
2095 DECL_SIZE (repr) = bitsize_int (modesize); | |
2096 DECL_SIZE_UNIT (repr) = size_int (modesize / BITS_PER_UNIT); | |
2097 SET_DECL_MODE (repr, mode); | |
2098 TREE_TYPE (repr) = lang_hooks.types.type_for_mode (mode, 1); | |
2099 } | |
2100 | |
2101 /* Remember whether the bitfield group is at the end of the | |
2102 structure or not. */ | |
2103 DECL_CHAIN (repr) = nextf; | |
2104 } | |
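
As an illustration of the mode choice, assuming 32-bit int and BITS_PER_UNIT == 8:

/* struct { int a : 7; int b : 7; int c : 10; int d; };
   a, b and c form one 24-bit group; the next field `d' starts at
   byte 4, so maxbitsize is 32 and the smallest fitting mode is
   SImode.  The representative covers bytes 0-3: a store to `b'
   may read-modify-write those 32 bits but must never touch `d'.  */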
2105 | |
2106 /* Compute and set FIELD_DECLs for the underlying objects we should | |
2107 use for bitfield access for the structure T. */ | |
2108 | |
2109 void | |
2110 finish_bitfield_layout (tree t) | |
2111 { | |
2112 tree field, prev; | |
2113 tree repr = NULL_TREE; | |
2114 | |
2115 /* Unions would be special: for the ease of type-punning optimizations | |
2116 we could use the underlying type as a hint for the representative | |
2117 if the bitfield would fit and the representative would not exceed | |
2118 the union in size. */ | |
2119 if (TREE_CODE (t) != RECORD_TYPE) | |
2120 return; | |
2121 | |
2122 for (prev = NULL_TREE, field = TYPE_FIELDS (t); | |
2123 field; field = DECL_CHAIN (field)) | |
2124 { | |
2125 if (TREE_CODE (field) != FIELD_DECL) | |
2126 continue; | |
2127 | |
2128 /* In the C++ memory model, consecutive bit fields in a structure are | |
2129 considered one memory location and updating a memory location | |
2130 may not store into adjacent memory locations. */ | |
2131 if (!repr | |
2132 && DECL_BIT_FIELD_TYPE (field)) | |
2133 { | |
2134 /* Start new representative. */ | |
2135 repr = start_bitfield_representative (field); | |
2136 } | |
2137 else if (repr | |
2138 && ! DECL_BIT_FIELD_TYPE (field)) | |
2139 { | |
2140 /* Finish off new representative. */ | |
2141 finish_bitfield_representative (repr, prev); | |
2142 repr = NULL_TREE; | |
2143 } | |
2144 else if (DECL_BIT_FIELD_TYPE (field)) | |
2145 { | |
2146 gcc_assert (repr != NULL_TREE); | |
2147 | |
2148 /* Zero-size bitfields finish off a representative and | |
2149 do not have a representative themselves. This is | |
2150 required by the C++ memory model. */ | |
2151 if (integer_zerop (DECL_SIZE (field))) | |
2152 { | |
2153 finish_bitfield_representative (repr, prev); | |
2154 repr = NULL_TREE; | |
2155 } | |
2156 | |
2157 /* We assume that either DECL_FIELD_OFFSET of the representative | |
2158 and each bitfield member is a constant or they are equal. | |
2159 This is because we need to be able to compute the bit-offset | |
2160 of each field relative to the representative in get_bit_range | |
2161 during RTL expansion. | |
2162 If these constraints are not met, simply force a new | |
2163 representative to be generated. That will at most | |
2164 generate worse code but still maintain correctness with | |
2165 respect to the C++ memory model. */ | |
2166 else if (!((tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr)) | |
2167 && tree_fits_uhwi_p (DECL_FIELD_OFFSET (field))) | |
2168 || operand_equal_p (DECL_FIELD_OFFSET (repr), | |
2169 DECL_FIELD_OFFSET (field), 0))) | |
2170 { | |
2171 finish_bitfield_representative (repr, prev); | |
2172 repr = start_bitfield_representative (field); | |
2173 } | |
2174 } | |
2175 else | |
2176 continue; | |
2177 | |
2178 if (repr) | |
2179 DECL_BIT_FIELD_REPRESENTATIVE (field) = repr; | |
2180 | |
2181 prev = field; | |
2182 } | |
2183 | |
2184 if (repr) | |
2185 finish_bitfield_representative (repr, prev); | |
2186 } | |
2187 | |
0 | 2188 /* Do all of the work required to layout the type indicated by RLI, |
2189 once the fields have been laid out. This function will call `free' | |
2190 for RLI, unless FREE_P is false. Passing a value other than false | |
2191 for FREE_P is bad practice; this option only exists to support the | |
2192 G++ 3.2 ABI. */ | |
2193 | |
2194 void | |
2195 finish_record_layout (record_layout_info rli, int free_p) | |
2196 { | |
2197 tree variant; | |
2198 | |
2199 /* Compute the final size. */ | |
2200 finalize_record_size (rli); | |
2201 | |
2202 /* Compute the TYPE_MODE for the record. */ | |
2203 compute_record_mode (rli->t); | |
2204 | |
2205 /* Perform any last tweaks to the TYPE_SIZE, etc. */ | |
2206 finalize_type_size (rli->t); | |
2207 | |
111 | 2208 /* Compute bitfield representatives. */ |
2209 finish_bitfield_layout (rli->t); | |
2210 | |
2211 /* Propagate TYPE_PACKED and TYPE_REVERSE_STORAGE_ORDER to variants. | |
2212 With C++ templates, it is too early to do this when the attribute | |
2213 is being parsed. */ | |
0 | 2214 for (variant = TYPE_NEXT_VARIANT (rli->t); variant; |
2215 variant = TYPE_NEXT_VARIANT (variant)) | |
111 | 2216 { |
2217 TYPE_PACKED (variant) = TYPE_PACKED (rli->t); | |
2218 TYPE_REVERSE_STORAGE_ORDER (variant) | |
2219 = TYPE_REVERSE_STORAGE_ORDER (rli->t); | |
2220 } | |
0 | 2221 |
2222 /* Lay out any static members. This is done now because their type | |
2223 may use the record's type. */ | |
111 | 2224 while (!vec_safe_is_empty (rli->pending_statics)) |
2225 layout_decl (rli->pending_statics->pop (), 0); | |
0 | 2226 |
2227 /* Clean up. */ | |
2228 if (free_p) | |
67 | 2229 { |
111 | 2230 vec_free (rli->pending_statics); |
67 | 2231 free (rli); |
2232 } |
0 | 2233 } |
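/* A minimal sketch of the usual calling sequence (the RECORD_TYPE
   case of layout_type below is the canonical user):

     record_layout_info rli = start_record_layout (type);
     for (tree f = TYPE_FIELDS (type); f; f = DECL_CHAIN (f))
       place_field (rli, f);
     finish_record_layout (rli, /*free_p=*/true);  */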
2234 | |
2235 | |
2236 /* Finish processing a builtin RECORD_TYPE type TYPE. Its name is |
2237 NAME; its fields are chained in reverse order on FIELDS. |
2238 | |
2239 If ALIGN_TYPE is non-null, it is given the same alignment as | |
2240 ALIGN_TYPE. */ | |
2241 | |
2242 void | |
2243 finish_builtin_struct (tree type, const char *name, tree fields, | |
2244 tree align_type) | |
2245 { | |
2246 tree tail, next; | |
2247 | |
2248 for (tail = NULL_TREE; fields; tail = fields, fields = next) | |
2249 { | |
2250 DECL_FIELD_CONTEXT (fields) = type; | |
67 | 2251 next = DECL_CHAIN (fields); |
2252 DECL_CHAIN (fields) = tail; |
0 | 2253 } |
2254 TYPE_FIELDS (type) = tail; | |
2255 | |
2256 if (align_type) | |
2257 { | |
111 | 2258 SET_TYPE_ALIGN (type, TYPE_ALIGN (align_type)); |
0 | 2259 TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (align_type); |
111 | 2260 SET_TYPE_WARN_IF_NOT_ALIGN (type, |
2261 TYPE_WARN_IF_NOT_ALIGN (align_type)); | |
0 | 2262 } |
2263 | |
2264 layout_type (type); | |
2265 #if 0 /* not yet, should get fixed properly later */ | |
2266 TYPE_NAME (type) = make_type_decl (get_identifier (name), type); | |
2267 #else | |
55 | 2268 TYPE_NAME (type) = build_decl (BUILTINS_LOCATION, |
2269 TYPE_DECL, get_identifier (name), type); |
0 | 2270 #endif |
2271 TYPE_STUB_DECL (type) = TYPE_NAME (type); | |
2272 layout_decl (TYPE_NAME (type), 0); | |
2273 } | |
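/* A hedged usage sketch; the struct and field names here are
   invented for illustration.  Note that each field is pushed onto
   the front of the chain, so FIELDS arrives in the reverse order
   the function expects:

     tree fields = NULL_TREE;
     for (int i = 0; i < 2; i++)
       {
         tree f = build_decl (BUILTINS_LOCATION, FIELD_DECL,
                              get_identifier (i ? "hi" : "lo"),
                              unsigned_type_node);
         DECL_CHAIN (f) = fields;
         fields = f;
       }
     tree rec = make_node (RECORD_TYPE);
     finish_builtin_struct (rec, "__example_pair", fields, NULL_TREE);  */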
2274 | |
2275 /* Calculate the mode, size, and alignment for TYPE. | |
2276 For an array type, calculate the element separation as well. | |
2277 Record TYPE on the chain of permanent or temporary types | |
2278 so that dbxout will find out about it. | |
2279 | |
2280 TYPE_SIZE of a type is nonzero if the type has been laid out already. | |
2281 layout_type does nothing on such a type. | |
2282 | |
2283 If the type is incomplete, its TYPE_SIZE remains zero. */ | |
2284 | |
2285 void | |
2286 layout_type (tree type) | |
2287 { | |
2288 gcc_assert (type); | |
2289 | |
2290 if (type == error_mark_node) | |
2291 return; | |
2292 | |
111 | 2293 /* We don't want finalize_type_size to copy an alignment attribute to |
2294 variants that don't have it. */ | |
2295 type = TYPE_MAIN_VARIANT (type); | |
2296 | |
0 | 2297 /* Do nothing if type has been laid out before. */ |
2298 if (TYPE_SIZE (type)) | |
2299 return; | |
2300 | |
2301 switch (TREE_CODE (type)) | |
2302 { | |
2303 case LANG_TYPE: | |
2304 /* This kind of type is the responsibility | |
2305 of the language-specific code. */ | |
2306 gcc_unreachable (); | |
2307 | |
111 | 2308 case BOOLEAN_TYPE: |
0 | 2309 case INTEGER_TYPE: |
2310 case ENUMERAL_TYPE: | |
111 | 2311 { |
2312 scalar_int_mode mode | |
2313 = smallest_int_mode_for_size (TYPE_PRECISION (type)); | |
2314 SET_TYPE_MODE (type, mode); | |
2315 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode)); | |
2316 /* Don't set TYPE_PRECISION here, as it may be set by a bitfield. */ | |
2317 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode)); | |
2318 break; | |
2319 } | |
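/* For example, assuming the usual QImode/HImode/SImode integer
   modes: a 7-bit ENUMERAL_TYPE gets QImode, so TYPE_SIZE becomes
   8 bits and TYPE_SIZE_UNIT 1 byte; a 17-bit INTEGER_TYPE gets
   SImode, 32 bits and 4 bytes.  */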
0 | 2320 |
2321 case REAL_TYPE: | |
111 | 2322 { |
2323 /* Allow the caller to choose the type mode, which is how decimal | |
2324 floats are distinguished from binary ones. */ | |
2325 if (TYPE_MODE (type) == VOIDmode) | |
2326 SET_TYPE_MODE | |
2327 (type, float_mode_for_size (TYPE_PRECISION (type)).require ()); | |
2328 scalar_float_mode mode = as_a <scalar_float_mode> (TYPE_MODE (type)); | |
2329 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode)); | |
2330 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode)); | |
2331 break; | |
2332 } | |
0 | 2333 |
2334 case FIXED_POINT_TYPE: | |
111 | 2335 { |
2336 /* TYPE_MODE (type) has been set already. */ | |
2337 scalar_mode mode = SCALAR_TYPE_MODE (type); | |
2338 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode)); | |
2339 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode)); | |
2340 break; | |
2341 } | |
0 | 2342 |
2343 case COMPLEX_TYPE: | |
2344 TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type)); | |
2345 SET_TYPE_MODE (type, | |
111 | 2346 GET_MODE_COMPLEX_MODE (TYPE_MODE (TREE_TYPE (type)))); |
2347 | |
0 | 2348 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type))); |
2349 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type))); | |
2350 break; | |
2351 | |
2352 case VECTOR_TYPE: | |
2353 { | |
131 | 2354 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (type); |
0 | 2355 tree innertype = TREE_TYPE (type); |
2356 | |
2357 /* Find an appropriate mode for the vector type. */ | |
2358 if (TYPE_MODE (type) == VOIDmode) | |
67 | 2359 SET_TYPE_MODE (type, |
111 | 2360 mode_for_vector (SCALAR_TYPE_MODE (innertype), |
2361 nunits).else_blk ()); | |
0 | 2362 |
2363 TYPE_SATURATING (type) = TYPE_SATURATING (TREE_TYPE (type)); | |
2364 TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type)); | |
111 | 2365 /* Several boolean vector elements may fit in a single unit. */ |
2366 if (VECTOR_BOOLEAN_TYPE_P (type) | |
2367 && type->type_common.mode != BLKmode) | |
2368 TYPE_SIZE_UNIT (type) | |
2369 = size_int (GET_MODE_SIZE (type->type_common.mode)); | |
2370 else | |
2371 TYPE_SIZE_UNIT (type) = int_const_binop (MULT_EXPR, | |
2372 TYPE_SIZE_UNIT (innertype), | |
2373 size_int (nunits)); | |
131 | 2374 TYPE_SIZE (type) = int_const_binop |
2375 (MULT_EXPR, | |
2376 bits_from_bytes (TYPE_SIZE_UNIT (type)), | |
2377 bitsize_int (BITS_PER_UNIT)); | |
111 | 2378 |
2379 /* For vector types, we do not default to the mode's alignment. | |
2380 Instead, query a target hook, defaulting to natural alignment. | |
2381 This prevents ABI changes depending on whether or not native | |
2382 vector modes are supported. */ | |
2383 SET_TYPE_ALIGN (type, targetm.vector_alignment (type)); | |
2384 | |
2385 /* However, if the underlying mode requires a bigger alignment than | |
2386 what the target hook provides, we cannot use the mode. For now, | |
2387 simply reject that case. */ | |
2388 gcc_assert (TYPE_ALIGN (type) | |
2389 >= GET_MODE_ALIGNMENT (TYPE_MODE (type))); | |
0 | 2390 break; |
2391 } | |
2392 | |
2393 case VOID_TYPE: | |
2394 /* This is an incomplete type and so doesn't have a size. */ | |
111 | 2395 SET_TYPE_ALIGN (type, 1); |
0 | 2396 TYPE_USER_ALIGN (type) = 0; |
2397 SET_TYPE_MODE (type, VOIDmode); | |
2398 break; | |
2399 | |
2400 case OFFSET_TYPE: | |
2401 TYPE_SIZE (type) = bitsize_int (POINTER_SIZE); | |
111 | 2402 TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE_UNITS); |
2403 /* A pointer might be MODE_PARTIAL_INT, but ptrdiff_t must be | |
2404 integral, which may be an __intN. */ | |
2405 SET_TYPE_MODE (type, int_mode_for_size (POINTER_SIZE, 0).require ()); | |
55 | 2406 TYPE_PRECISION (type) = POINTER_SIZE; |
0 | 2407 break; |
2408 | |
2409 case FUNCTION_TYPE: | |
2410 case METHOD_TYPE: | |
2411 /* It's hard to see what the mode and size of a function ought to | |
2412 be, but we do know the alignment is FUNCTION_BOUNDARY, so | |
2413 make it consistent with that. */ | |
111 | 2414 SET_TYPE_MODE (type, |
2415 int_mode_for_size (FUNCTION_BOUNDARY, 0).else_blk ()); | |
0 | 2416 TYPE_SIZE (type) = bitsize_int (FUNCTION_BOUNDARY); |
2417 TYPE_SIZE_UNIT (type) = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT); | |
2418 break; | |
2419 | |
2420 case POINTER_TYPE: | |
2421 case REFERENCE_TYPE: | |
2422 { | |
111 | 2423 scalar_int_mode mode = SCALAR_INT_TYPE_MODE (type); |
55 | 2424 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode)); |
0 | 2425 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode)); |
2426 TYPE_UNSIGNED (type) = 1; | |
111 | 2427 TYPE_PRECISION (type) = GET_MODE_PRECISION (mode); |
0 | 2428 } |
2429 break; | |
2430 | |
2431 case ARRAY_TYPE: | |
2432 { | |
2433 tree index = TYPE_DOMAIN (type); | |
2434 tree element = TREE_TYPE (type); | |
2435 | |
2436 /* We need to know both bounds in order to compute the size. */ | |
2437 if (index && TYPE_MAX_VALUE (index) && TYPE_MIN_VALUE (index) | |
2438 && TYPE_SIZE (element)) | |
2439 { | |
2440 tree ub = TYPE_MAX_VALUE (index); | |
2441 tree lb = TYPE_MIN_VALUE (index); | |
55 | 2442 tree element_size = TYPE_SIZE (element); |
0 | 2443 tree length; |
55 | 2444 |
2445 /* Make sure that an array of zero-sized element is zero-sized |
2446 regardless of its extent. */ |
2447 if (integer_zerop (element_size)) |
2448 length = size_zero_node; |
0 | 2449 |
111 | 2450 /* The computation should happen in the original signedness so |
2451 that possibly negative values are handled appropriately |
2452 when determining overflow. */ | |
55 | 2453 else |
111 | 2454 { |
2455 /* ??? When it is obvious that the range is signed | |
2456 represent it using ssizetype. */ | |
2457 if (TREE_CODE (lb) == INTEGER_CST | |
2458 && TREE_CODE (ub) == INTEGER_CST | |
2459 && TYPE_UNSIGNED (TREE_TYPE (lb)) | |
2460 && tree_int_cst_lt (ub, lb)) | |
2461 { | |
2462 lb = wide_int_to_tree (ssizetype, | |
2463 offset_int::from (wi::to_wide (lb), | |
2464 SIGNED)); | |
2465 ub = wide_int_to_tree (ssizetype, | |
2466 offset_int::from (wi::to_wide (ub), | |
2467 SIGNED)); | |
2468 } | |
2469 length | |
2470 = fold_convert (sizetype, | |
2471 size_binop (PLUS_EXPR, | |
2472 build_int_cst (TREE_TYPE (lb), 1), | |
2473 size_binop (MINUS_EXPR, ub, lb))); | |
2474 } | |
2475 | |
2476 /* ??? We have no way to distinguish a null-sized array from an | |
2477 array spanning the whole sizetype range, so we arbitrarily | |
2478 decide that [0, -1] is the only valid representation. */ | |
2479 if (integer_zerop (length) | |
2480 && TREE_OVERFLOW (length) | |
2481 && integer_zerop (lb)) | |
2482 length = size_zero_node; | |
0 | 2483 |
2484 TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size, | |
131 | 2485 bits_from_bytes (length)); |
0 | 2486 |
55 | 2487 /* If we know the size of the element, calculate the total size |
2488 directly, rather than do some division thing below. This |
2489 optimization helps Fortran assumed-size arrays (where the |
2490 size of the array is determined at runtime) substantially. */ |
2491 if (TYPE_SIZE_UNIT (element)) |
0 | 2492 TYPE_SIZE_UNIT (type) |
2493 = size_binop (MULT_EXPR, TYPE_SIZE_UNIT (element), length); | |
2494 } | |
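/* Worked example (assuming 32-bit int): for int a[10] the domain
   is [0, 9], so LENGTH is 9 - 0 + 1 = 10, TYPE_SIZE is
   32 * 10 = 320 bits and TYPE_SIZE_UNIT is 40 bytes.  */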
2495 | |
2496 /* Now round the alignment and size, | |
2497 using machine-dependent criteria if any. */ | |
2498 | |
111 | 2499 unsigned align = TYPE_ALIGN (element); |
2500 if (TYPE_USER_ALIGN (type)) | |
2501 align = MAX (align, TYPE_ALIGN (type)); | |
2502 else | |
2503 TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (element); | |
2504 if (!TYPE_WARN_IF_NOT_ALIGN (type)) | |
2505 SET_TYPE_WARN_IF_NOT_ALIGN (type, | |
2506 TYPE_WARN_IF_NOT_ALIGN (element)); | |
0 | 2507 #ifdef ROUND_TYPE_ALIGN |
111 | 2508 align = ROUND_TYPE_ALIGN (type, align, BITS_PER_UNIT); |
0 | 2509 #else |
111 | 2510 align = MAX (align, BITS_PER_UNIT); |
0 | 2511 #endif |
111 | 2512 SET_TYPE_ALIGN (type, align); |
0 | 2513 SET_TYPE_MODE (type, BLKmode); |
2514 if (TYPE_SIZE (type) != 0 | |
111 | 2515 && ! targetm.member_type_forces_blk (type, VOIDmode) |
0 | 2516 /* BLKmode elements force BLKmode aggregate; |
2517 else extract/store fields may lose. */ | |
2518 && (TYPE_MODE (TREE_TYPE (type)) != BLKmode | |
2519 || TYPE_NO_FORCE_BLK (TREE_TYPE (type)))) | |
2520 { | |
111 | 2521 SET_TYPE_MODE (type, mode_for_array (TREE_TYPE (type), |
2522 TYPE_SIZE (type))); | |
0 | 2523 if (TYPE_MODE (type) != BLKmode |
2524 && STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT | |
2525 && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type))) | |
2526 { | |
2527 TYPE_NO_FORCE_BLK (type) = 1; | |
2528 SET_TYPE_MODE (type, BLKmode); | |
2529 } | |
2530 } | |
111 | 2531 if (AGGREGATE_TYPE_P (element)) |
2532 TYPE_TYPELESS_STORAGE (type) = TYPE_TYPELESS_STORAGE (element); | |
0 | 2533 /* When the element size is constant, check that it is at least as |
2534 large as the element alignment. */ | |
2535 if (TYPE_SIZE_UNIT (element) | |
2536 && TREE_CODE (TYPE_SIZE_UNIT (element)) == INTEGER_CST | |
2537 /* If TYPE_SIZE_UNIT overflowed, then it is certainly larger than | |
2538 TYPE_ALIGN_UNIT. */ | |
2539 && !TREE_OVERFLOW (TYPE_SIZE_UNIT (element)) | |
2540 && !integer_zerop (TYPE_SIZE_UNIT (element)) | |
2541 && compare_tree_int (TYPE_SIZE_UNIT (element), | |
2542 TYPE_ALIGN_UNIT (element)) < 0) | |
2543 error ("alignment of array elements is greater than element size"); | |
2544 break; | |
2545 } | |
2546 | |
2547 case RECORD_TYPE: | |
2548 case UNION_TYPE: | |
2549 case QUAL_UNION_TYPE: | |
2550 { | |
2551 tree field; | |
2552 record_layout_info rli; | |
2553 | |
2554 /* Initialize the layout information. */ | |
2555 rli = start_record_layout (type); | |
2556 | |
2557 /* If this is a QUAL_UNION_TYPE, we want to process the fields | |
2558 in the reverse order in building the COND_EXPR that denotes | |
2559 its size. We reverse them again later. */ | |
2560 if (TREE_CODE (type) == QUAL_UNION_TYPE) | |
2561 TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type)); | |
2562 | |
2563 /* Place all the fields. */ | |
67 | 2564 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field)) |
0 | 2565 place_field (rli, field); |
2566 | |
2567 if (TREE_CODE (type) == QUAL_UNION_TYPE) | |
2568 TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type)); | |
2569 | |
2570 /* Finish laying out the record. */ | |
2571 finish_record_layout (rli, /*free_p=*/true); | |
2572 } | |
2573 break; | |
2574 | |
2575 default: | |
2576 gcc_unreachable (); | |
2577 } | |
2578 | |
2579 /* Compute the final TYPE_SIZE, TYPE_ALIGN, etc. for TYPE. For | |
2580 records and unions, finish_record_layout already called this | |
2581 function. */ | |
111 | 2582 if (!RECORD_OR_UNION_TYPE_P (type)) |
0 | 2583 finalize_type_size (type); |
2584 | |
2585 /* We should never see alias sets on incomplete aggregates, and we |
2586 should not call layout_type on already-complete aggregates. */ |
2587 if (AGGREGATE_TYPE_P (type)) | |
2588 gcc_assert (!TYPE_ALIAS_SET_KNOWN_P (type)); | |
2589 } | |
2590 | |
111 | 2591 /* Return the least alignment required for type TYPE. */ |
2592 | |
2593 unsigned int | |
2594 min_align_of_type (tree type) | |
0 | 2595 { |
111 | 2596 unsigned int align = TYPE_ALIGN (type); |
2597 if (!TYPE_USER_ALIGN (type)) | |
0 | 2598 { |
111 | 2599 align = MIN (align, BIGGEST_ALIGNMENT); |
2600 #ifdef BIGGEST_FIELD_ALIGNMENT | |
2601 align = MIN (align, BIGGEST_FIELD_ALIGNMENT); | |
2602 #endif | |
2603 unsigned int field_align = align; | |
2604 #ifdef ADJUST_FIELD_ALIGN | |
2605 field_align = ADJUST_FIELD_ALIGN (NULL_TREE, type, field_align); | |
2606 #endif | |
2607 align = MIN (align, field_align); | |
0 | 2608 } |
111 | 2609 return align / BITS_PER_UNIT; |
0 | 2610 } |
2611 | |
2612 /* Create and return a type for signed integers of PRECISION bits. */ | |
2613 | |
2614 tree | |
2615 make_signed_type (int precision) | |
2616 { | |
2617 tree type = make_node (INTEGER_TYPE); | |
2618 | |
2619 TYPE_PRECISION (type) = precision; | |
2620 | |
2621 fixup_signed_type (type); | |
2622 return type; | |
2623 } | |
2624 | |
2625 /* Create and return a type for unsigned integers of PRECISION bits. */ | |
2626 | |
2627 tree | |
2628 make_unsigned_type (int precision) | |
2629 { | |
2630 tree type = make_node (INTEGER_TYPE); | |
2631 | |
2632 TYPE_PRECISION (type) = precision; | |
2633 | |
2634 fixup_unsigned_type (type); | |
2635 return type; | |
2636 } | |
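/* E.g. make_unsigned_type (16) yields an INTEGER_TYPE whose
   TYPE_MIN_VALUE is 0 and TYPE_MAX_VALUE is 65535, laid out in
   HImode on targets with the usual 16-bit integer mode.  */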
2637 | |
2638 /* Create and return a type for fract of PRECISION bits, UNSIGNEDP, | |
2639 and SATP. */ | |
2640 | |
2641 tree | |
2642 make_fract_type (int precision, int unsignedp, int satp) | |
2643 { | |
2644 tree type = make_node (FIXED_POINT_TYPE); | |
2645 | |
2646 TYPE_PRECISION (type) = precision; | |
2647 | |
2648 if (satp) | |
2649 TYPE_SATURATING (type) = 1; | |
2650 | |
2651 /* Lay out the type: set its alignment, size, etc. */ | |
111 | 2652 TYPE_UNSIGNED (type) = unsignedp; |
2653 enum mode_class mclass = unsignedp ? MODE_UFRACT : MODE_FRACT; | |
2654 SET_TYPE_MODE (type, mode_for_size (precision, mclass, 0).require ()); | |
0 | 2655 layout_type (type); |
2656 | |
2657 return type; | |
2658 } | |
2659 | |
2660 /* Create and return a type for accum of PRECISION bits, UNSIGNEDP, | |
2661 and SATP. */ | |
2662 | |
2663 tree | |
2664 make_accum_type (int precision, int unsignedp, int satp) | |
2665 { | |
2666 tree type = make_node (FIXED_POINT_TYPE); | |
2667 | |
2668 TYPE_PRECISION (type) = precision; | |
2669 | |
2670 if (satp) | |
2671 TYPE_SATURATING (type) = 1; | |
2672 | |
2673 /* Lay out the type: set its alignment, size, etc. */ | |
111 | 2674 TYPE_UNSIGNED (type) = unsignedp; |
2675 enum mode_class mclass = unsignedp ? MODE_UACCUM : MODE_ACCUM; | |
2676 SET_TYPE_MODE (type, mode_for_size (precision, mclass, 0).require ()); | |
0 | 2677 layout_type (type); |
2678 | |
2679 return type; | |
2680 } | |
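/* E.g. make_fract_type (16, 1, 1) builds a 16-bit saturating
   unsigned fractional type (a "_Sat unsigned _Fract"):
   TYPE_SATURATING and TYPE_UNSIGNED are set and the mode becomes
   the 16-bit unsigned fractional mode, UHQmode, on targets that
   provide it.  */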
2681 | |
111 | 2682 /* Initialize sizetypes so layout_type can use them. */ |
0 | 2683 |
2684 void | |
63 | 2685 initialize_sizetypes (void) |
0 | 2686 { |
111 | 2687 int precision, bprecision; |
2688 | |
2689 /* Get sizetype's precision from the SIZETYPE target macro. */ |
2690 if (strcmp (SIZETYPE, "unsigned int") == 0) | |
2691 precision = INT_TYPE_SIZE; | |
2692 else if (strcmp (SIZETYPE, "long unsigned int") == 0) | |
2693 precision = LONG_TYPE_SIZE; | |
2694 else if (strcmp (SIZETYPE, "long long unsigned int") == 0) | |
2695 precision = LONG_LONG_TYPE_SIZE; | |
2696 else if (strcmp (SIZETYPE, "short unsigned int") == 0) | |
2697 precision = SHORT_TYPE_SIZE; | |
2698 else | |
2699 { | |
2700 int i; | |
2701 | |
2702 precision = -1; | |
2703 for (i = 0; i < NUM_INT_N_ENTS; i++) | |
2704 if (int_n_enabled_p[i]) | |
2705 { | |
2706 char name[50]; | |
2707 sprintf (name, "__int%d unsigned", int_n_data[i].bitsize); | |
2708 | |
2709 if (strcmp (name, SIZETYPE) == 0) | |
2710 { | |
2711 precision = int_n_data[i].bitsize; | |
2712 } | |
2713 } | |
2714 if (precision == -1) | |
2715 gcc_unreachable (); | |
2716 } | |
2717 | |
2718 bprecision | |
2719 = MIN (precision + LOG2_BITS_PER_UNIT + 1, MAX_FIXED_MODE_SIZE); | |
2720 bprecision = GET_MODE_PRECISION (smallest_int_mode_for_size (bprecision)); | |
2721 if (bprecision > HOST_BITS_PER_DOUBLE_INT) | |
2722 bprecision = HOST_BITS_PER_DOUBLE_INT; | |
2723 | |
2724 /* Create stubs for sizetype and bitsizetype so we can create constants. */ | |
2725 sizetype = make_node (INTEGER_TYPE); | |
2726 TYPE_NAME (sizetype) = get_identifier ("sizetype"); | |
2727 TYPE_PRECISION (sizetype) = precision; | |
2728 TYPE_UNSIGNED (sizetype) = 1; | |
2729 bitsizetype = make_node (INTEGER_TYPE); | |
2730 TYPE_NAME (bitsizetype) = get_identifier ("bitsizetype"); | |
2731 TYPE_PRECISION (bitsizetype) = bprecision; | |
2732 TYPE_UNSIGNED (bitsizetype) = 1; | |
2733 | |
2734 /* Now layout both types manually. */ | |
2735 scalar_int_mode mode = smallest_int_mode_for_size (precision); | |
2736 SET_TYPE_MODE (sizetype, mode); | |
2737 SET_TYPE_ALIGN (sizetype, GET_MODE_ALIGNMENT (TYPE_MODE (sizetype))); | |
2738 TYPE_SIZE (sizetype) = bitsize_int (precision); | |
2739 TYPE_SIZE_UNIT (sizetype) = size_int (GET_MODE_SIZE (mode)); | |
2740 set_min_and_max_values_for_integral_type (sizetype, precision, UNSIGNED); | |
2741 | |
2742 mode = smallest_int_mode_for_size (bprecision); | |
2743 SET_TYPE_MODE (bitsizetype, mode); | |
2744 SET_TYPE_ALIGN (bitsizetype, GET_MODE_ALIGNMENT (TYPE_MODE (bitsizetype))); | |
2745 TYPE_SIZE (bitsizetype) = bitsize_int (bprecision); | |
2746 TYPE_SIZE_UNIT (bitsizetype) = size_int (GET_MODE_SIZE (mode)); | |
2747 set_min_and_max_values_for_integral_type (bitsizetype, bprecision, UNSIGNED); | |
0 | 2748 |
63 | 2749 /* Create the signed variants of *sizetype. */ |
111 | 2750 ssizetype = make_signed_type (TYPE_PRECISION (sizetype)); |
2751 TYPE_NAME (ssizetype) = get_identifier ("ssizetype"); | |
2752 sbitsizetype = make_signed_type (TYPE_PRECISION (bitsizetype)); | |
2753 TYPE_NAME (sbitsizetype) = get_identifier ("sbitsizetype"); | |
0 | 2754 } |
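/* Worked example, assuming a typical LP64 target where SIZETYPE is
   "long unsigned int": PRECISION is LONG_TYPE_SIZE, i.e. 64, so
   sizetype becomes a 64-bit unsigned DImode type.  BPRECISION
   starts as MIN (64 + 3 + 1, MAX_FIXED_MODE_SIZE) = 68, which
   smallest_int_mode_for_size widens to 128-bit TImode, so
   bitsizetype ends up with 128-bit precision, matching the
   HOST_BITS_PER_DOUBLE_INT cap.  */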
2755 | |
2756 /* TYPE is an integral type, i.e., an INTEGER_TYPE, ENUMERAL_TYPE |
2757 or BOOLEAN_TYPE. Set TYPE_MIN_VALUE and TYPE_MAX_VALUE | |
2758 for TYPE, based on PRECISION and the signedness SGN. |
2759 PRECISION need not correspond to a width supported |
2760 natively by the hardware; for example, on a machine with 8-bit, | |
2761 16-bit, and 32-bit register modes, PRECISION might be 7, 23, or | |
2762 61. */ | |
2763 | |
2764 void | |
2765 set_min_and_max_values_for_integral_type (tree type, | |
2766 int precision, | |
111 | 2767 signop sgn) |
0 | 2768 { |
111 | 2769 /* For bitfields with zero width we end up creating integer types |
2770 with zero precision. Don't assign any minimum/maximum values | |
2771 to those types, they don't have any valid value. */ | |
2772 if (precision < 1) | |
2773 return; | |
2774 | |
2775 TYPE_MIN_VALUE (type) | |
2776 = wide_int_to_tree (type, wi::min_value (precision, sgn)); | |
2777 TYPE_MAX_VALUE (type) | |
2778 = wide_int_to_tree (type, wi::max_value (precision, sgn)); | |
0 | 2779 } |
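/* E.g. with PRECISION 3, SIGNED yields TYPE_MIN_VALUE -4 and
   TYPE_MAX_VALUE 3, while UNSIGNED yields 0 and 7.  */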
2780 | |
2781 /* Set the extreme values of TYPE based on its precision in bits, | |
2782 then lay it out. Used when make_signed_type won't do | |
111 | 2783 because the tree code is not INTEGER_TYPE. */ |
0 | 2784 |
2785 void | |
2786 fixup_signed_type (tree type) | |
2787 { | |
2788 int precision = TYPE_PRECISION (type); | |
2789 | |
111 | 2790 set_min_and_max_values_for_integral_type (type, precision, SIGNED); |
0 | 2791 |
2792 /* Lay out the type: set its alignment, size, etc. */ | |
2793 layout_type (type); | |
2794 } | |
2795 | |
2796 /* Set the extreme values of TYPE based on its precision in bits, | |
2797 then lay it out. This is used both in `make_unsigned_type' | |
2798 and for enumeral types. */ | |
2799 | |
2800 void | |
2801 fixup_unsigned_type (tree type) | |
2802 { | |
2803 int precision = TYPE_PRECISION (type); | |
2804 | |
2805 TYPE_UNSIGNED (type) = 1; | |
2806 | |
111 | 2807 set_min_and_max_values_for_integral_type (type, precision, UNSIGNED); |
0 | 2808 |
2809 /* Lay out the type: set its alignment, size, etc. */ | |
2810 layout_type (type); | |
2811 } | |
2812 | |
111 | 2813 /* Construct an iterator for a bitfield that spans BITSIZE bits, |
2814 starting at BITPOS. | |
2815 | |
2816 BITREGION_START is the bit position of the first bit in this | |
2817 sequence of bit fields. BITREGION_END is the last bit in this | |
2818 sequence. If these two fields are non-zero, we should restrict the | |
2819 memory access to that range. Otherwise, we are allowed to touch | |
2820 any adjacent non bit-fields. | |
2821 | |
2822 ALIGN is the alignment of the underlying object in bits. | |
2823 VOLATILEP says whether the bitfield is volatile. */ | |
2824 | |
2825 bit_field_mode_iterator | |
2826 ::bit_field_mode_iterator (HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos, | |
131 | 2827 poly_int64 bitregion_start, |
2828 poly_int64 bitregion_end, | |
111 | 2829 unsigned int align, bool volatilep) |
2830 : m_mode (NARROWEST_INT_MODE), m_bitsize (bitsize), | |
2831 m_bitpos (bitpos), m_bitregion_start (bitregion_start), | |
2832 m_bitregion_end (bitregion_end), m_align (align), | |
2833 m_volatilep (volatilep), m_count (0) | |
2834 { | |
131 | 2835 if (known_eq (m_bitregion_end, 0)) |
111 | 2836 { |
2837 /* We can assume that any aligned chunk of ALIGN bits that overlaps | |
2838 the bitfield is mapped and won't trap, provided that ALIGN isn't | |
2839 too large. The cap is the biggest required alignment for data, | |
2840 or at least the word size. And force one such chunk at least. */ | |
2841 unsigned HOST_WIDE_INT units | |
2842 = MIN (align, MAX (BIGGEST_ALIGNMENT, BITS_PER_WORD)); | |
2843 if (bitsize <= 0) | |
2844 bitsize = 1; | |
131 | 2845 HOST_WIDE_INT end = bitpos + bitsize + units - 1; |
2846 m_bitregion_end = end - end % units - 1; | |
111 | 2847 } |
2848 } | |
2849 | |
2850 /* Calls to this function return successively larger modes that can be used | |
2851 to represent the bitfield. Return true if another bitfield mode is | |
2852 available, storing it in *OUT_MODE if so. */ | |
2853 | |
2854 bool | |
2855 bit_field_mode_iterator::next_mode (scalar_int_mode *out_mode) | |
2856 { | |
2857 scalar_int_mode mode; | |
2858 for (; m_mode.exists (&mode); m_mode = GET_MODE_WIDER_MODE (mode)) | |
2859 { | |
2860 unsigned int unit = GET_MODE_BITSIZE (mode); | |
2861 | |
2862 /* Skip modes that don't have full precision. */ | |
2863 if (unit != GET_MODE_PRECISION (mode)) | |
2864 continue; | |
2865 | |
2866 /* Stop if the mode is too wide to handle efficiently. */ | |
2867 if (unit > MAX_FIXED_MODE_SIZE) | |
2868 break; | |
2869 | |
2870 /* Don't deliver more than one multiword mode; the smallest one | |
2871 should be used. */ | |
2872 if (m_count > 0 && unit > BITS_PER_WORD) | |
2873 break; | |
2874 | |
2875 /* Skip modes that are too small. */ | |
2876 unsigned HOST_WIDE_INT substart = (unsigned HOST_WIDE_INT) m_bitpos % unit; | |
2877 unsigned HOST_WIDE_INT subend = substart + m_bitsize; | |
2878 if (subend > unit) | |
2879 continue; | |
2880 | |
2881 /* Stop if the mode goes outside the bitregion. */ | |
2882 HOST_WIDE_INT start = m_bitpos - substart; | |
131 | 2883 if (maybe_ne (m_bitregion_start, 0) |
2884 && maybe_lt (start, m_bitregion_start)) | |
111 | 2885 break; |
2886 HOST_WIDE_INT end = start + unit; | |
131 | 2887 if (maybe_gt (end, m_bitregion_end + 1)) |
111 | 2888 break; |
2889 | |
2890 /* Stop if the mode requires too much alignment. */ | |
2891 if (GET_MODE_ALIGNMENT (mode) > m_align | |
2892 && targetm.slow_unaligned_access (mode, m_align)) | |
2893 break; | |
2894 | |
2895 *out_mode = mode; | |
2896 m_mode = GET_MODE_WIDER_MODE (mode); | |
2897 m_count++; | |
2898 return true; | |
2899 } | |
2900 return false; | |
2901 } | |
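/* Worked example, assuming QImode through DImode exist and the
   alignment checks pass: for a 3-bit field at bit position 10 the
   iterator yields QImode first (bits 8-15 cover bits 10-12), then
   HImode, SImode and DImode.  A 3-bit field at position 6 straddles
   a byte boundary, so QImode is skipped and HImode comes first.  */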
2902 | |
2903 /* Return true if smaller modes are generally preferred for this kind | |
2904 of bitfield. */ | |
2905 | |
2906 bool | |
2907 bit_field_mode_iterator::prefer_smaller_modes () | |
2908 { | |
2909 return (m_volatilep | |
2910 ? targetm.narrow_volatile_bitfield () | |
2911 : !SLOW_BYTE_ACCESS); | |
2912 } | |
2913 | |
0 | 2914 /* Find the best machine mode to use when referencing a bit field of length |
2915 BITSIZE bits starting at BITPOS. | |
2916 | |
111 | 2917 BITREGION_START is the bit position of the first bit in this |
2918 sequence of bit fields. BITREGION_END is the last bit in this | |
2919 sequence. If these two fields are non-zero, we should restrict the | |
2920 memory access to that range. Otherwise, we are allowed to touch | |
2921 any adjacent non bit-fields. | |
2922 | |
2923 The chosen mode must have no more than LARGEST_MODE_BITSIZE bits. | |
2924 INT_MAX is a suitable value for LARGEST_MODE_BITSIZE if the caller | |
2925 doesn't want to apply a specific limit. | |
2926 | |
2927 If no mode meets all these conditions, we return VOIDmode. | |
2928 | |
0 | 2929 The underlying object is known to be aligned to a boundary of ALIGN bits. |
2930 | |
2931 If VOLATILEP is false and SLOW_BYTE_ACCESS is false, we return the | |
2932 smallest mode meeting these conditions. | |
2933 | |
2934 If VOLATILEP is false and SLOW_BYTE_ACCESS is true, we return the | |
2935 largest mode (but a mode no wider than UNITS_PER_WORD) that meets | |
2936 all the conditions. | |
2937 | |
2938 If VOLATILEP is true the narrow_volatile_bitfields target hook is used to | |
2939 decide which of the above modes should be used. */ | |
2940 | |
111 | 2941 bool |
2942 get_best_mode (int bitsize, int bitpos, | |
131 | 2943 poly_uint64 bitregion_start, poly_uint64 bitregion_end, |
111 | 2944 unsigned int align, |
2945 unsigned HOST_WIDE_INT largest_mode_bitsize, bool volatilep, | |
2946 scalar_int_mode *best_mode) | |
0 | 2947 { |
111 | 2948 bit_field_mode_iterator iter (bitsize, bitpos, bitregion_start, |
2949 bitregion_end, align, volatilep); | |
2950 scalar_int_mode mode; | |
2951 bool found = false; | |
2952 while (iter.next_mode (&mode) | |
2953 /* ??? For historical reasons, reject modes that would normally | |
2954 receive greater alignment, even if unaligned accesses are | |
2955 acceptable. This has both advantages and disadvantages. | |
2956 Removing this check means that something like: | |
2957 | |
2958 struct s { unsigned int x; unsigned int y; }; | |
2959 int f (struct s *s) { return s->x == 0 && s->y == 0; } | |
2960 | |
2961 can be implemented using a single load and compare on | |
2962 64-bit machines that have no alignment restrictions. | |
2963 For example, on powerpc64-linux-gnu, we would generate: | |
2964 | |
2965 ld 3,0(3) | |
2966 cntlzd 3,3 | |
2967 srdi 3,3,6 | |
2968 blr | |
2969 | |
2970 rather than: | |
2971 | |
2972 lwz 9,0(3) | |
2973 cmpwi 7,9,0 | |
2974 bne 7,.L3 | |
2975 lwz 3,4(3) | |
2976 cntlzw 3,3 | |
2977 srwi 3,3,5 | |
2978 extsw 3,3 | |
2979 blr | |
2980 .p2align 4,,15 | |
2981 .L3: | |
2982 li 3,0 | |
2983 blr | |
2984 | |
2985 However, accessing more than one field can make life harder | |
2986 for the gimple optimizers. For example, gcc.dg/vect/bb-slp-5.c | |
2987 has a series of unsigned short copies followed by a series of | |
2988 unsigned short comparisons. With this check, both the copies | |
2989 and comparisons remain 16-bit accesses and FRE is able | |
2990 to eliminate the latter. Without the check, the comparisons | |
2991 can be done using 2 64-bit operations, which FRE isn't able | |
2992 to handle in the same way. | |
2993 | |
2994 Either way, it would probably be worth disabling this check | |
2995 during expand. One particular example where removing the | |
2996 check would help is the get_best_mode call in store_bit_field. | |
2997 If we are given a memory bitregion of 128 bits that is aligned | |
2998 to a 64-bit boundary, and the bitfield we want to modify is | |
2999 in the second half of the bitregion, this check causes | |
3000 store_bitfield to turn the memory into a 64-bit reference | |
3001 to the _first_ half of the region. We later use | |
3002 adjust_bitfield_address to get a reference to the correct half, | |
3003 but doing so looks to adjust_bitfield_address as though we are | |
3004 moving past the end of the original object, so it drops the | |
3005 associated MEM_EXPR and MEM_OFFSET. Removing the check | |
3006 causes store_bit_field to keep a 128-bit memory reference, | |
3007 so that the final bitfield reference still has a MEM_EXPR | |
3008 and MEM_OFFSET. */ | |
3009 && GET_MODE_ALIGNMENT (mode) <= align | |
3010 && GET_MODE_BITSIZE (mode) <= largest_mode_bitsize) | |
0 | 3011 { |
111 | 3012 *best_mode = mode; |
3013 found = true; | |
3014 if (iter.prefer_smaller_modes ()) | |
0 | 3015 break; |
3016 } | |
3017 | |
111 | 3018 return found; |
0 | 3019 } |
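/* Worked example, assuming naturally aligned modes: with BITSIZE 3,
   BITPOS 10, an empty bitregion, ALIGN 32 and VOLATILEP false, the
   iteration starts at QImode.  If !SLOW_BYTE_ACCESS, the first match
   wins and *BEST_MODE is QImode; if SLOW_BYTE_ACCESS, the loop keeps
   going and settles on SImode, the widest mode that still satisfies
   the region and alignment checks (given a LARGEST_MODE_BITSIZE of
   at least 32).  */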
3020 | |
3021 /* Gets minimal and maximal values for MODE (signed or unsigned depending on | |
3022 SIGN). The returned constants are made to be usable in TARGET_MODE. */ | |
3023 | |
3024 void | |
111 | 3025 get_mode_bounds (scalar_int_mode mode, int sign, |
3026 scalar_int_mode target_mode, | |
0 | 3027 rtx *mmin, rtx *mmax) |
3028 { | |
111 | 3029 unsigned size = GET_MODE_PRECISION (mode); |
0 | 3030 unsigned HOST_WIDE_INT min_val, max_val; |
3031 | |
3032 gcc_assert (size <= HOST_BITS_PER_WIDE_INT); | |
3033 | |
111 | 3034 /* Special case BImode, which has values 0 and STORE_FLAG_VALUE. */ |
3035 if (mode == BImode) | |
0 | 3036 { |
111 | 3037 if (STORE_FLAG_VALUE < 0) |
3038 { | |
3039 min_val = STORE_FLAG_VALUE; | |
3040 max_val = 0; | |
3041 } | |
3042 else | |
3043 { | |
3044 min_val = 0; | |
3045 max_val = STORE_FLAG_VALUE; | |
3046 } | |
3047 } | |
3048 else if (sign) | |
3049 { | |
3050 min_val = -(HOST_WIDE_INT_1U << (size - 1)); | |
3051 max_val = (HOST_WIDE_INT_1U << (size - 1)) - 1; | |
0 | 3052 } |
3053 else | |
3054 { | |
3055 min_val = 0; | |
111 | 3056 max_val = (HOST_WIDE_INT_1U << (size - 1) << 1) - 1; |
0 | 3057 } |
3058 | |
3059 *mmin = gen_int_mode (min_val, target_mode); | |
3060 *mmax = gen_int_mode (max_val, target_mode); | |
3061 } | |
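/* E.g. get_mode_bounds (QImode, 1, SImode, &mmin, &mmax) stores the
   SImode constants -128 and 127; with SIGN 0 it stores 0 and 255.  */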
3062 | |
3063 #include "gt-stor-layout.h" |