Mercurial > hg > CbC > CbC_gcc
annotate gcc/stor-layout.c @ 123:ab229f40eab2
fix inline_call

author    mir3636
date      Fri, 30 Mar 2018 22:58:55 +0900
parents   04ced10e8804
children  84e7813d76e9
/* C-compiler utilities for types and variables storage layout
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "function.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "tm_p.h"
#include "stringpool.h"
#include "regs.h"
#include "emit-rtl.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "varasm.h"
#include "print-tree.h"
#include "langhooks.h"
#include "tree-inline.h"
#include "dumpfile.h"
#include "gimplify.h"
#include "debug.h"

/* Data type for the expressions representing sizes of data types.
   It is the first integer type laid out.  */
tree sizetype_tab[(int) stk_type_kind_last];

/* If nonzero, this is an upper limit on alignment of structure fields.
   The value is measured in bits.  */
unsigned int maximum_field_alignment = TARGET_DEFAULT_PACK_STRUCT * BITS_PER_UNIT;

static tree self_referential_size (tree);
static void finalize_record_size (record_layout_info);
static void finalize_type_size (tree);
static void place_union_field (record_layout_info, tree);
static int excess_unit_span (HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT,
                             HOST_WIDE_INT, tree);
extern void debug_rli (record_layout_info);

/* Given a size SIZE that may not be a constant, return a SAVE_EXPR
   to serve as the actual size-expression for a type or decl.  */

tree
variable_size (tree size)
{
  /* Obviously.  */
  if (TREE_CONSTANT (size))
    return size;

  /* If the size is self-referential, we can't make a SAVE_EXPR (see
     save_expr for the rationale).  But we can do something else.  */
  if (CONTAINS_PLACEHOLDER_P (size))
    return self_referential_size (size);

  /* If we are in the global binding level, we can't make a SAVE_EXPR
     since it may end up being shared across functions, so it is up
     to the front-end to deal with this case.  */
  if (lang_hooks.decls.global_bindings_p ())
    return size;

  return save_expr (size);
}
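
/* Hypothetical usage sketch (added for illustration; not part of the
   original file, and 'example_vla_size_in_bits' is an invented name).
   For a C99 VLA such as 'int a[n]', the front-end builds a non-constant
   size expression; variable_size wraps it in a SAVE_EXPR so that it is
   evaluated once and then reused.  */

static tree
example_vla_size_in_bits (tree n_decl)
{
  /* n * TYPE_SIZE (int) is non-constant and not self-referential, so
     variable_size returns save_expr (...) inside a function scope.  */
  tree nelts = fold_convert (bitsizetype, n_decl);
  return variable_size (size_binop (MULT_EXPR, nelts,
                                    TYPE_SIZE (integer_type_node)));
}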

/* An array of functions used for self-referential size computation.  */
static GTY(()) vec<tree, va_gc> *size_functions;

/* Return true if T is a self-referential component reference.  */

static bool
self_referential_component_ref_p (tree t)
{
  if (TREE_CODE (t) != COMPONENT_REF)
    return false;

  while (REFERENCE_CLASS_P (t))
    t = TREE_OPERAND (t, 0);

  return (TREE_CODE (t) == PLACEHOLDER_EXPR);
}
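
/* Illustrative note (added): such references arise in the size expressions
   of variable-sized records, e.g. an array component whose bound is another
   field of the enclosing record.  The loop above strips the chain of
   reference nodes (COMPONENT_REF, ARRAY_REF, ...) down to the base object,
   which is a PLACEHOLDER_EXPR exactly when the reference is
   self-referential.  */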

/* Similar to copy_tree_r but do not copy component references involving
   PLACEHOLDER_EXPRs.  These nodes are spotted in find_placeholder_in_expr
   and substituted in substitute_in_expr.  */

static tree
copy_self_referential_tree_r (tree *tp, int *walk_subtrees, void *data)
{
  enum tree_code code = TREE_CODE (*tp);

  /* Stop at types, decls, constants like copy_tree_r.  */
  if (TREE_CODE_CLASS (code) == tcc_type
      || TREE_CODE_CLASS (code) == tcc_declaration
      || TREE_CODE_CLASS (code) == tcc_constant)
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* This is the pattern built in ada/make_aligning_type.  */
  else if (code == ADDR_EXPR
           && TREE_CODE (TREE_OPERAND (*tp, 0)) == PLACEHOLDER_EXPR)
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* Default case: the component reference.  */
  else if (self_referential_component_ref_p (*tp))
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* We're not supposed to have them in self-referential size trees
     because we wouldn't properly control when they are evaluated.
     However, not creating superfluous SAVE_EXPRs requires accurate
     tracking of readonly-ness all the way down to here, which we
     cannot always guarantee in practice.  So punt in this case.  */
  else if (code == SAVE_EXPR)
    return error_mark_node;

  else if (code == STATEMENT_LIST)
    gcc_unreachable ();

  return copy_tree_r (tp, walk_subtrees, data);
}

/* Given a SIZE expression that is self-referential, return an equivalent
   expression to serve as the actual size expression for a type.  */

static tree
self_referential_size (tree size)
{
  static unsigned HOST_WIDE_INT fnno = 0;
  vec<tree> self_refs = vNULL;
  tree param_type_list = NULL, param_decl_list = NULL;
  tree t, ref, return_type, fntype, fnname, fndecl;
  unsigned int i;
  char buf[128];
  vec<tree, va_gc> *args = NULL;

  /* Do not factor out simple operations.  */
  t = skip_simple_constant_arithmetic (size);
  if (TREE_CODE (t) == CALL_EXPR || self_referential_component_ref_p (t))
    return size;

  /* Collect the list of self-references in the expression.  */
  find_placeholder_in_expr (size, &self_refs);
  gcc_assert (self_refs.length () > 0);

  /* Obtain a private copy of the expression.  */
  t = size;
  if (walk_tree (&t, copy_self_referential_tree_r, NULL, NULL) != NULL_TREE)
    return size;
  size = t;

  /* Build the parameter and argument lists in parallel; also
     substitute the former for the latter in the expression.  */
  vec_alloc (args, self_refs.length ());
  FOR_EACH_VEC_ELT (self_refs, i, ref)
    {
      tree subst, param_name, param_type, param_decl;

      if (DECL_P (ref))
        {
          /* We shouldn't have true variables here.  */
          gcc_assert (TREE_READONLY (ref));
          subst = ref;
        }
      /* This is the pattern built in ada/make_aligning_type.  */
      else if (TREE_CODE (ref) == ADDR_EXPR)
        subst = ref;
      /* Default case: the component reference.  */
      else
        subst = TREE_OPERAND (ref, 1);

      sprintf (buf, "p%d", i);
      param_name = get_identifier (buf);
      param_type = TREE_TYPE (ref);
      param_decl
        = build_decl (input_location, PARM_DECL, param_name, param_type);
      DECL_ARG_TYPE (param_decl) = param_type;
      DECL_ARTIFICIAL (param_decl) = 1;
      TREE_READONLY (param_decl) = 1;

      size = substitute_in_expr (size, subst, param_decl);

      param_type_list = tree_cons (NULL_TREE, param_type, param_type_list);
      param_decl_list = chainon (param_decl, param_decl_list);
      args->quick_push (ref);
    }

  self_refs.release ();

  /* Append 'void' to indicate that the number of parameters is fixed.  */
  param_type_list = tree_cons (NULL_TREE, void_type_node, param_type_list);

  /* The 3 lists have been created in reverse order.  */
  param_type_list = nreverse (param_type_list);
  param_decl_list = nreverse (param_decl_list);

  /* Build the function type.  */
  return_type = TREE_TYPE (size);
  fntype = build_function_type (return_type, param_type_list);

  /* Build the function declaration.  */
  sprintf (buf, "SZ" HOST_WIDE_INT_PRINT_UNSIGNED, fnno++);
  fnname = get_file_function_name (buf);
  fndecl = build_decl (input_location, FUNCTION_DECL, fnname, fntype);
  for (t = param_decl_list; t; t = DECL_CHAIN (t))
    DECL_CONTEXT (t) = fndecl;
  DECL_ARGUMENTS (fndecl) = param_decl_list;
  DECL_RESULT (fndecl)
    = build_decl (input_location, RESULT_DECL, 0, return_type);
  DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;

  /* The function has been created by the compiler and we don't
     want to emit debug info for it.  */
  DECL_ARTIFICIAL (fndecl) = 1;
  DECL_IGNORED_P (fndecl) = 1;

  /* It is supposed to be "const" and never throw.  */
  TREE_READONLY (fndecl) = 1;
  TREE_NOTHROW (fndecl) = 1;

  /* We want it to be inlined when this is deemed profitable, as
     well as discarded if every call has been integrated.  */
  DECL_DECLARED_INLINE_P (fndecl) = 1;

  /* It is made up of a unique return statement.  */
  DECL_INITIAL (fndecl) = make_node (BLOCK);
  BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;
  t = build2 (MODIFY_EXPR, return_type, DECL_RESULT (fndecl), size);
  DECL_SAVED_TREE (fndecl) = build1 (RETURN_EXPR, void_type_node, t);
  TREE_STATIC (fndecl) = 1;

  /* Put it onto the list of size functions.  */
  vec_safe_push (size_functions, fndecl);

  /* Replace the original expression with a call to the size function.  */
  return build_call_expr_loc_vec (UNKNOWN_LOCATION, fndecl, args);
}
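
/* Illustrative note (added): the net effect is to replace a self-referential
   size expression with a call to a compiler-generated size function.  For a
   record whose size depends on its own field 'len', the expression becomes
   roughly 'SZ0 (<placeholder>.len)', where SZ0 is a const, nothrow,
   inlinable function computing the size from its parameter p0; the
   placeholder in the argument is substituted later at each point of use.  */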

/* Take, queue and compile all the size functions.  It is essential that
   the size functions be gimplified at the very end of the compilation
   in order to guarantee transparent handling of self-referential sizes.
   Otherwise the GENERIC inliner would not be able to inline them back
   at each of their call sites, thus creating artificial non-constant
   size expressions which would trigger nasty problems later on.  */

void
finalize_size_functions (void)
{
  unsigned int i;
  tree fndecl;

  for (i = 0; size_functions && size_functions->iterate (i, &fndecl); i++)
    {
      allocate_struct_function (fndecl, false);
      set_cfun (NULL);
      dump_function (TDI_original, fndecl);

      /* As these functions are used to describe the layout of variable-length
         structures, debug info generation needs their implementation.  */
      debug_hooks->size_function (fndecl);
      gimplify_function_tree (fndecl);
      cgraph_node::finalize_function (fndecl, false);
    }

  vec_free (size_functions);
}

/* Return a machine mode of class MCLASS with SIZE bits of precision,
   if one exists.  The mode may have padding bits as well as the SIZE
   value bits.  If LIMIT is nonzero, disregard modes wider than
   MAX_FIXED_MODE_SIZE.  */

opt_machine_mode
mode_for_size (unsigned int size, enum mode_class mclass, int limit)
{
  machine_mode mode;
  int i;

  if (limit && size > MAX_FIXED_MODE_SIZE)
    return opt_machine_mode ();

  /* Get the first mode which has this size, in the specified class.  */
  FOR_EACH_MODE_IN_CLASS (mode, mclass)
    if (GET_MODE_PRECISION (mode) == size)
      return mode;

  if (mclass == MODE_INT || mclass == MODE_PARTIAL_INT)
    for (i = 0; i < NUM_INT_N_ENTS; i ++)
      if (int_n_data[i].bitsize == size
          && int_n_enabled_p[i])
        return int_n_data[i].m;

  return opt_machine_mode ();
}
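
/* Hypothetical usage sketch (added; 'example_int_mode_for_bits' is an
   invented name).  Request an integer mode of exactly BITS bits, honoring
   MAX_FIXED_MODE_SIZE, and fall back to BLKmode when none exists.  */

static machine_mode
example_int_mode_for_bits (unsigned int bits)
{
  machine_mode mode;
  if (mode_for_size (bits, MODE_INT, /*limit=*/1).exists (&mode))
    return mode;  /* e.g. SImode for bits == 32 on most targets.  */
  return BLKmode;
}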

/* Similar, except passed a tree node.  */

opt_machine_mode
mode_for_size_tree (const_tree size, enum mode_class mclass, int limit)
{
  unsigned HOST_WIDE_INT uhwi;
  unsigned int ui;

  if (!tree_fits_uhwi_p (size))
    return opt_machine_mode ();
  uhwi = tree_to_uhwi (size);
  ui = uhwi;
  if (uhwi != ui)
    return opt_machine_mode ();
  return mode_for_size (ui, mclass, limit);
}

/* Return the narrowest mode of class MCLASS that contains at least
   SIZE bits.  Abort if no such mode exists.  */

machine_mode
smallest_mode_for_size (unsigned int size, enum mode_class mclass)
{
  machine_mode mode = VOIDmode;
  int i;

  /* Get the first mode which has at least this size, in the
     specified class.  */
  FOR_EACH_MODE_IN_CLASS (mode, mclass)
    if (GET_MODE_PRECISION (mode) >= size)
      break;

  if (mclass == MODE_INT || mclass == MODE_PARTIAL_INT)
    for (i = 0; i < NUM_INT_N_ENTS; i ++)
      if (int_n_data[i].bitsize >= size
          && int_n_data[i].bitsize < GET_MODE_PRECISION (mode)
          && int_n_enabled_p[i])
        mode = int_n_data[i].m;

  if (mode == VOIDmode)
    gcc_unreachable ();

  return mode;
}
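
/* Illustrative note (added): on a typical target with 8-bit QImode,
   16-bit HImode and 32-bit SImode, smallest_mode_for_size (17, MODE_INT)
   returns SImode, the narrowest integer mode providing at least 17 bits.  */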

/* Return an integer mode of exactly the same size as MODE, if one exists.  */

opt_scalar_int_mode
int_mode_for_mode (machine_mode mode)
{
  switch (GET_MODE_CLASS (mode))
    {
    case MODE_INT:
    case MODE_PARTIAL_INT:
      return as_a <scalar_int_mode> (mode);

    case MODE_COMPLEX_INT:
    case MODE_COMPLEX_FLOAT:
    case MODE_FLOAT:
    case MODE_DECIMAL_FLOAT:
    case MODE_VECTOR_INT:
    case MODE_VECTOR_FLOAT:
    case MODE_FRACT:
    case MODE_ACCUM:
    case MODE_UFRACT:
    case MODE_UACCUM:
    case MODE_VECTOR_FRACT:
    case MODE_VECTOR_ACCUM:
    case MODE_VECTOR_UFRACT:
    case MODE_VECTOR_UACCUM:
    case MODE_POINTER_BOUNDS:
      return int_mode_for_size (GET_MODE_BITSIZE (mode), 0);

    case MODE_RANDOM:
      if (mode == BLKmode)
        return opt_scalar_int_mode ();

      /* fall through */

    case MODE_CC:
    default:
      gcc_unreachable ();
    }
}
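
/* Hypothetical usage sketch (added; 'example_float_bits_mode' is an
   invented name).  Pick an integer mode suitable for a bitwise copy of a
   single-precision float; on typical targets this yields SImode.  */

static machine_mode
example_float_bits_mode (void)
{
  scalar_int_mode imode;
  if (int_mode_for_mode (SFmode).exists (&imode))
    return imode;
  return BLKmode;  /* No same-sized integer mode available.  */
}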

/* Find a mode that can be used for efficient bitwise operations on MODE,
   if one exists.  */

opt_machine_mode
bitwise_mode_for_mode (machine_mode mode)
{
  /* Quick exit if we already have a suitable mode.  */
  unsigned int bitsize = GET_MODE_BITSIZE (mode);
  scalar_int_mode int_mode;
  if (is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_BITSIZE (int_mode) <= MAX_FIXED_MODE_SIZE)
    return int_mode;

  /* Reuse the sanity checks from int_mode_for_mode.  */
  gcc_checking_assert ((int_mode_for_mode (mode), true));

  /* Try to replace complex modes with complex modes.  In general we
     expect both components to be processed independently, so we only
     care whether there is a register for the inner mode.  */
  if (COMPLEX_MODE_P (mode))
    {
      machine_mode trial = mode;
      if ((GET_MODE_CLASS (trial) == MODE_COMPLEX_INT
           || mode_for_size (bitsize, MODE_COMPLEX_INT, false).exists (&trial))
          && have_regs_of_mode[GET_MODE_INNER (trial)])
        return trial;
    }

  /* Try to replace vector modes with vector modes.  Also try using vector
     modes if an integer mode would be too big.  */
  if (VECTOR_MODE_P (mode) || bitsize > MAX_FIXED_MODE_SIZE)
    {
      machine_mode trial = mode;
      if ((GET_MODE_CLASS (trial) == MODE_VECTOR_INT
           || mode_for_size (bitsize, MODE_VECTOR_INT, 0).exists (&trial))
          && have_regs_of_mode[trial]
          && targetm.vector_mode_supported_p (trial))
        return trial;
    }

  /* Otherwise fall back on integers while honoring MAX_FIXED_MODE_SIZE.  */
  return mode_for_size (bitsize, MODE_INT, true);
}

/* Find a type that can be used for efficient bitwise operations on MODE.
   Return null if no such mode exists.  */

tree
bitwise_type_for_mode (machine_mode mode)
{
  if (!bitwise_mode_for_mode (mode).exists (&mode))
    return NULL_TREE;

  unsigned int inner_size = GET_MODE_UNIT_BITSIZE (mode);
  tree inner_type = build_nonstandard_integer_type (inner_size, true);

  if (VECTOR_MODE_P (mode))
    return build_vector_type_for_mode (inner_type, mode);

  if (COMPLEX_MODE_P (mode))
    return build_complex_type (inner_type);

  gcc_checking_assert (GET_MODE_INNER (mode) == mode);
  return inner_type;
}
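
/* Illustrative note (added): for a 128-bit float vector mode such as
   V4SFmode, bitwise_mode_for_mode looks for a same-sized integer vector
   mode supported by the target, and bitwise_type_for_mode then builds the
   matching vector type with unsigned integer elements, on which mask and
   shift operations are cheap.  */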

/* Find a mode that is suitable for representing a vector with NUNITS
   elements of mode INNERMODE, if one exists.  The returned mode can be
   either an integer mode or a vector mode.  */

opt_machine_mode
mode_for_vector (scalar_mode innermode, unsigned nunits)
{
  machine_mode mode;

  /* First, look for a supported vector type.  */
  if (SCALAR_FLOAT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_FLOAT;
  else if (SCALAR_FRACT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_FRACT;
  else if (SCALAR_UFRACT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_UFRACT;
  else if (SCALAR_ACCUM_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_ACCUM;
  else if (SCALAR_UACCUM_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_UACCUM;
  else
    mode = MIN_MODE_VECTOR_INT;

  /* Do not check vector_mode_supported_p here.  We'll do that
     later in vector_type_mode.  */
  FOR_EACH_MODE_FROM (mode, mode)
    if (GET_MODE_NUNITS (mode) == nunits
        && GET_MODE_INNER (mode) == innermode)
      return mode;

  /* For integers, try mapping it to a same-sized scalar mode.  */
  if (GET_MODE_CLASS (innermode) == MODE_INT)
    {
      unsigned int nbits = nunits * GET_MODE_BITSIZE (innermode);
      if (int_mode_for_size (nbits, 0).exists (&mode)
          && have_regs_of_mode[mode])
        return mode;
    }

  return opt_machine_mode ();
}
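
/* Hypothetical usage sketch (added; 'example_v4si_mode' is an invented
   name).  Ask for a vector of four 32-bit integers: targets with 128-bit
   vector registers typically yield V4SImode; otherwise the caller may
   receive a same-sized scalar integer mode, or nothing at all.  */

static machine_mode
example_v4si_mode (void)
{
  machine_mode vmode;
  if (mode_for_vector (SImode, 4).exists (&vmode))
    return vmode;
  return BLKmode;  /* Caller must fall back to memory.  */
}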

/* Return the mode for a vector that has NUNITS integer elements of
   INT_BITS bits each, if such a mode exists.  The mode can be either
   an integer mode or a vector mode.  */

opt_machine_mode
mode_for_int_vector (unsigned int int_bits, unsigned int nunits)
{
  scalar_int_mode int_mode;
  machine_mode vec_mode;
  if (int_mode_for_size (int_bits, 0).exists (&int_mode)
      && mode_for_vector (int_mode, nunits).exists (&vec_mode))
    return vec_mode;
  return opt_machine_mode ();
}

/* Return the alignment of MODE.  This will be bounded by 1 and
   BIGGEST_ALIGNMENT.  */

unsigned int
get_mode_alignment (machine_mode mode)
{
  return MIN (BIGGEST_ALIGNMENT, MAX (1, mode_base_align[mode]*BITS_PER_UNIT));
}

/* Return the natural mode of an array, given that it is SIZE bytes in
   total and has elements of type ELEM_TYPE.  */

static machine_mode
mode_for_array (tree elem_type, tree size)
{
  tree elem_size;
  unsigned HOST_WIDE_INT int_size, int_elem_size;
  bool limit_p;

  /* One-element arrays get the component type's mode.  */
  elem_size = TYPE_SIZE (elem_type);
  if (simple_cst_equal (size, elem_size))
    return TYPE_MODE (elem_type);

  limit_p = true;
  if (tree_fits_uhwi_p (size) && tree_fits_uhwi_p (elem_size))
    {
      int_size = tree_to_uhwi (size);
      int_elem_size = tree_to_uhwi (elem_size);
      if (int_elem_size > 0
          && int_size % int_elem_size == 0
          && targetm.array_mode_supported_p (TYPE_MODE (elem_type),
                                             int_size / int_elem_size))
        limit_p = false;
    }
  return mode_for_size_tree (size, MODE_INT, limit_p).else_blk ();
}
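
/* Illustrative note (added): for 'char a[4]' the total size is 32 bits and
   differs from the element size, so mode_for_array falls through to
   mode_for_size_tree and usually returns SImode; a one-element array simply
   gets its element type's mode.  */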

/* Subroutine of layout_decl: Force alignment required for the data type.
   But if the decl itself wants greater alignment, don't override that.  */

static inline void
do_type_align (tree type, tree decl)
{
  if (TYPE_ALIGN (type) > DECL_ALIGN (decl))
    {
      SET_DECL_ALIGN (decl, TYPE_ALIGN (type));
      if (TREE_CODE (decl) == FIELD_DECL)
        DECL_USER_ALIGN (decl) = TYPE_USER_ALIGN (type);
    }
  if (TYPE_WARN_IF_NOT_ALIGN (type) > DECL_WARN_IF_NOT_ALIGN (decl))
    SET_DECL_WARN_IF_NOT_ALIGN (decl, TYPE_WARN_IF_NOT_ALIGN (type));
}

/* Set the size, mode and alignment of a ..._DECL node.
   TYPE_DECL does need this for C++.
   Note that LABEL_DECL and CONST_DECL nodes do not need this,
   and FUNCTION_DECL nodes have them set up in a special (and simple) way.
   Don't call layout_decl for them.

   KNOWN_ALIGN is the amount of alignment we can assume this
   decl has with no special effort.  It is relevant only for FIELD_DECLs
   and depends on the previous fields.
   All that matters about KNOWN_ALIGN is which powers of 2 divide it.
   If KNOWN_ALIGN is 0, it means, "as much alignment as you like":
   the record will be aligned to suit.  */

void
layout_decl (tree decl, unsigned int known_align)
{
  tree type = TREE_TYPE (decl);
  enum tree_code code = TREE_CODE (decl);
  rtx rtl = NULL_RTX;
  location_t loc = DECL_SOURCE_LOCATION (decl);

  if (code == CONST_DECL)
    return;

  gcc_assert (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL
              || code == TYPE_DECL || code == FIELD_DECL);

  rtl = DECL_RTL_IF_SET (decl);

  if (type == error_mark_node)
    type = void_type_node;

  /* Usually the size and mode come from the data type without change,
     however, the front-end may set the explicit width of the field, so its
     size may not be the same as the size of its type.  This happens with
     bitfields, of course (an `int' bitfield may be only 2 bits, say), but it
     also happens with other fields.  For example, the C++ front-end creates
     zero-sized fields corresponding to empty base classes, and depends on
     layout_type setting DECL_FIELD_BITPOS correctly for the field.  Set the
     size in bytes from the size in bits.  If we have already set the mode,
     don't set it again since we can be called twice for FIELD_DECLs.  */

  DECL_UNSIGNED (decl) = TYPE_UNSIGNED (type);
  if (DECL_MODE (decl) == VOIDmode)
    SET_DECL_MODE (decl, TYPE_MODE (type));

  if (DECL_SIZE (decl) == 0)
    {
      DECL_SIZE (decl) = TYPE_SIZE (type);
      DECL_SIZE_UNIT (decl) = TYPE_SIZE_UNIT (type);
    }
  else if (DECL_SIZE_UNIT (decl) == 0)
    DECL_SIZE_UNIT (decl)
      = fold_convert_loc (loc, sizetype,
                          size_binop_loc (loc, CEIL_DIV_EXPR, DECL_SIZE (decl),
                                          bitsize_unit_node));

  if (code != FIELD_DECL)
    /* For non-fields, update the alignment from the type.  */
    do_type_align (type, decl);
  else
    /* For fields, it's a bit more complicated...  */
    {
      bool old_user_align = DECL_USER_ALIGN (decl);
      bool zero_bitfield = false;
      bool packed_p = DECL_PACKED (decl);
      unsigned int mfa;

      if (DECL_BIT_FIELD (decl))
        {
          DECL_BIT_FIELD_TYPE (decl) = type;

          /* A zero-length bit-field affects the alignment of the next
             field.  In essence such bit-fields are not influenced by
             any packing due to #pragma pack or attribute packed.  */
          if (integer_zerop (DECL_SIZE (decl))
              && ! targetm.ms_bitfield_layout_p (DECL_FIELD_CONTEXT (decl)))
            {
              zero_bitfield = true;
              packed_p = false;
              if (PCC_BITFIELD_TYPE_MATTERS)
                do_type_align (type, decl);
              else
                {
#ifdef EMPTY_FIELD_BOUNDARY
                  if (EMPTY_FIELD_BOUNDARY > DECL_ALIGN (decl))
                    {
                      SET_DECL_ALIGN (decl, EMPTY_FIELD_BOUNDARY);
                      DECL_USER_ALIGN (decl) = 0;
                    }
#endif
                }
            }

          /* See if we can use an ordinary integer mode for a bit-field.
             Conditions are: a fixed size that is correct for another mode,
             occupying a complete byte or bytes on proper boundary.  */
          if (TYPE_SIZE (type) != 0
              && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
              && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT)
            {
              machine_mode xmode;
              if (mode_for_size_tree (DECL_SIZE (decl),
                                      MODE_INT, 1).exists (&xmode))
                {
                  unsigned int xalign = GET_MODE_ALIGNMENT (xmode);
                  if (!(xalign > BITS_PER_UNIT && DECL_PACKED (decl))
                      && (known_align == 0 || known_align >= xalign))
                    {
                      SET_DECL_ALIGN (decl, MAX (xalign, DECL_ALIGN (decl)));
                      SET_DECL_MODE (decl, xmode);
                      DECL_BIT_FIELD (decl) = 0;
                    }
                }
            }

          /* Turn off DECL_BIT_FIELD if we won't need it set.  */
          if (TYPE_MODE (type) == BLKmode && DECL_MODE (decl) == BLKmode
              && known_align >= TYPE_ALIGN (type)
              && DECL_ALIGN (decl) >= TYPE_ALIGN (type))
            DECL_BIT_FIELD (decl) = 0;
        }
      else if (packed_p && DECL_USER_ALIGN (decl))
        /* Don't touch DECL_ALIGN.  For other packed fields, go ahead and
           round up; we'll reduce it again below.  We want packing to
           supersede USER_ALIGN inherited from the type, but defer to
           alignment explicitly specified on the field decl.  */;
      else
        do_type_align (type, decl);

      /* If the field is packed and not explicitly aligned, give it the
         minimum alignment.  Note that do_type_align may set
         DECL_USER_ALIGN, so we need to check old_user_align instead.  */
      if (packed_p
          && !old_user_align)
        SET_DECL_ALIGN (decl, MIN (DECL_ALIGN (decl), BITS_PER_UNIT));

      if (! packed_p && ! DECL_USER_ALIGN (decl))
        {
          /* Some targets (i.e. i386, VMS) limit struct field alignment
             to a lower boundary than alignment of variables unless
             it was overridden by attribute aligned.  */
#ifdef BIGGEST_FIELD_ALIGNMENT
          SET_DECL_ALIGN (decl, MIN (DECL_ALIGN (decl),
                                     (unsigned) BIGGEST_FIELD_ALIGNMENT));
#endif
#ifdef ADJUST_FIELD_ALIGN
          SET_DECL_ALIGN (decl, ADJUST_FIELD_ALIGN (decl, TREE_TYPE (decl),
                                                    DECL_ALIGN (decl)));
#endif
        }

      if (zero_bitfield)
        mfa = initial_max_fld_align * BITS_PER_UNIT;
      else
        mfa = maximum_field_alignment;
      /* Should this be controlled by DECL_USER_ALIGN, too?  */
      if (mfa != 0)
        SET_DECL_ALIGN (decl, MIN (DECL_ALIGN (decl), mfa));
    }

  /* Evaluate nonconstant size only once, either now or as soon as safe.  */
  if (DECL_SIZE (decl) != 0 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
    DECL_SIZE (decl) = variable_size (DECL_SIZE (decl));
  if (DECL_SIZE_UNIT (decl) != 0
      && TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST)
    DECL_SIZE_UNIT (decl) = variable_size (DECL_SIZE_UNIT (decl));

  /* If requested, warn about definitions of large data objects.  */
  if (warn_larger_than
      && (code == VAR_DECL || code == PARM_DECL)
      && ! DECL_EXTERNAL (decl))
    {
      tree size = DECL_SIZE_UNIT (decl);

      if (size != 0 && TREE_CODE (size) == INTEGER_CST
          && compare_tree_int (size, larger_than_size) > 0)
        {
          int size_as_int = TREE_INT_CST_LOW (size);

          if (compare_tree_int (size, size_as_int) == 0)
            warning (OPT_Wlarger_than_, "size of %q+D is %d bytes", decl, size_as_int);
          else
            warning (OPT_Wlarger_than_, "size of %q+D is larger than %wd bytes",
                     decl, larger_than_size);
        }
    }

  /* If the RTL was already set, update its mode and mem attributes.  */
  if (rtl)
    {
      PUT_MODE (rtl, DECL_MODE (decl));
      SET_DECL_RTL (decl, 0);
      if (MEM_P (rtl))
        set_mem_attributes (rtl, decl, 1);
      SET_DECL_RTL (decl, rtl);
    }
}
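
/* Hypothetical usage sketch (added; 'example_make_laid_out_var' is an
   invented name).  A front-end typically builds a decl and calls
   layout_decl with known_align == 0 so that size, mode and alignment all
   come from the type.  */

static tree
example_make_laid_out_var (tree type, const char *name)
{
  tree decl = build_decl (input_location, VAR_DECL,
                          get_identifier (name), type);
  layout_decl (decl, 0);  /* Fills DECL_SIZE, DECL_MODE, DECL_ALIGN.  */
  return decl;
}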

/* Given a VAR_DECL, PARM_DECL, RESULT_DECL, or FIELD_DECL, clears the
   results of a previous call to layout_decl and calls it again.  */

void
relayout_decl (tree decl)
{
  DECL_SIZE (decl) = DECL_SIZE_UNIT (decl) = 0;
  SET_DECL_MODE (decl, VOIDmode);
  if (!DECL_USER_ALIGN (decl))
    SET_DECL_ALIGN (decl, 0);
  if (DECL_RTL_SET_P (decl))
    SET_DECL_RTL (decl, 0);

  layout_decl (decl, 0);
}

/* Begin laying out type T, which may be a RECORD_TYPE, UNION_TYPE, or
   QUAL_UNION_TYPE.  Return a pointer to a struct record_layout_info which
   is to be passed to all other layout functions for this record.  It is the
   responsibility of the caller to call `free' for the storage returned.
   Note that garbage collection is not permitted until we finish laying
   out the record.  */

record_layout_info
start_record_layout (tree t)
{
  record_layout_info rli = XNEW (struct record_layout_info_s);

  rli->t = t;

  /* If the type has a minimum specified alignment (via an attribute
     declaration, for example) use it -- otherwise, start with a
     one-byte alignment.  */
  rli->record_align = MAX (BITS_PER_UNIT, TYPE_ALIGN (t));
  rli->unpacked_align = rli->record_align;
  rli->offset_align = MAX (rli->record_align, BIGGEST_ALIGNMENT);

#ifdef STRUCTURE_SIZE_BOUNDARY
  /* Packed structures don't need to have minimum size.  */
  if (! TYPE_PACKED (t))
    {
      unsigned tmp;

      /* #pragma pack overrides STRUCTURE_SIZE_BOUNDARY.  */
      tmp = (unsigned) STRUCTURE_SIZE_BOUNDARY;
      if (maximum_field_alignment != 0)
        tmp = MIN (tmp, maximum_field_alignment);
      rli->record_align = MAX (rli->record_align, tmp);
    }
#endif

  rli->offset = size_zero_node;
  rli->bitpos = bitsize_zero_node;
  rli->prev_field = 0;
  rli->pending_statics = 0;
  rli->packed_maybe_necessary = 0;
  rli->remaining_in_alignment = 0;

  return rli;
}
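
/* Hypothetical usage sketch (added; 'example_layout_record' is an invented
   name).  The canonical driver protocol begun by start_record_layout:
   place each field in turn, then finalize.  place_field and
   finish_record_layout are the other halves of the protocol, declared in
   stor-layout.h.  */

static void
example_layout_record (tree rec)
{
  record_layout_info rli = start_record_layout (rec);
  for (tree field = TYPE_FIELDS (rec); field; field = DECL_CHAIN (field))
    place_field (rli, field);
  finish_record_layout (rli, /*free_p=*/1);  /* Also frees RLI.  */
}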

/* Return the combined bit position for the byte offset OFFSET and the
   bit position BITPOS.

   These functions operate on byte and bit positions present in FIELD_DECLs
   and assume that these expressions result in no (intermediate) overflow.
   This assumption is necessary to fold the expressions as much as possible,
   so as to avoid creating artificially variable-sized types in languages
   supporting variable-sized types like Ada.  */

tree
bit_from_pos (tree offset, tree bitpos)
{
  return size_binop (PLUS_EXPR, bitpos,
                     size_binop (MULT_EXPR,
                                 fold_convert (bitsizetype, offset),
                                 bitsize_unit_node));
}
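
/* Worked example (added): with BITS_PER_UNIT == 8, a byte offset of 3
   combined with a bit position of 5 gives bit_from_pos (with size_int
   arguments 3 and 5) == 3 * 8 + 5 == 29 bits; byte_from_pos below
   truncates the other way, 29 / 8 == 3 bytes.  */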

/* Return the combined truncated byte position for the byte offset OFFSET and
   the bit position BITPOS.  */

tree
byte_from_pos (tree offset, tree bitpos)
{
  tree bytepos;
  if (TREE_CODE (bitpos) == MULT_EXPR
      && tree_int_cst_equal (TREE_OPERAND (bitpos, 1), bitsize_unit_node))
    bytepos = TREE_OPERAND (bitpos, 0);
  else
    bytepos = size_binop (TRUNC_DIV_EXPR, bitpos, bitsize_unit_node);
  return size_binop (PLUS_EXPR, offset, fold_convert (sizetype, bytepos));
}
876 | |
111 | 877 /* Split the bit position POS into a byte offset *POFFSET and a bit |
878 position *PBITPOS with the byte offset aligned to OFF_ALIGN bits. */ | |
879 | |
0 | 880 void |
881 pos_from_bit (tree *poffset, tree *pbitpos, unsigned int off_align, | |
882 tree pos) | |
883 { | |
111 | 884 tree toff_align = bitsize_int (off_align); |
885 if (TREE_CODE (pos) == MULT_EXPR | |
886 && tree_int_cst_equal (TREE_OPERAND (pos, 1), toff_align)) | |
887 { | |
888 *poffset = size_binop (MULT_EXPR, | |
889 fold_convert (sizetype, TREE_OPERAND (pos, 0)), | |
890 size_int (off_align / BITS_PER_UNIT)); | |
891 *pbitpos = bitsize_zero_node; | |
892 } | |
893 else | |
894 { | |
895 *poffset = size_binop (MULT_EXPR, | |
896 fold_convert (sizetype, | |
897 size_binop (FLOOR_DIV_EXPR, pos, | |
898 toff_align)), | |
899 size_int (off_align / BITS_PER_UNIT)); | |
900 *pbitpos = size_binop (FLOOR_MOD_EXPR, pos, toff_align); | |
901 } | |
0 | 902 } |
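/* A plain-integer model of the split, assuming BITS_PER_UNIT == 8:
   with OFF_ALIGN == 32, bit position 70 becomes byte offset
   (70 / 32) * 4 == 8 and residual bit position 70 % 32 == 6.  */
#if 0
static void
pos_from_bit_model (unsigned long *poffset, unsigned long *pbitpos,
                    unsigned int off_align, unsigned long pos)
{
  *poffset = (pos / off_align) * (off_align / 8);
  *pbitpos = pos % off_align;
}
#endif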
903 | |
904 /* Given a pointer to bit and byte offsets and an offset alignment, | |
905 normalize the offsets so they are within the alignment. */ | |
906 | |
907 void | |
908 normalize_offset (tree *poffset, tree *pbitpos, unsigned int off_align) | |
909 { | |
910 /* If the bit position is now larger than it should be, adjust it | |
911 downwards. */ | |
912 if (compare_tree_int (*pbitpos, off_align) >= 0) | |
913 { | |
111 | 914 tree offset, bitpos; |
915 pos_from_bit (&offset, &bitpos, off_align, *pbitpos); | |
916 *poffset = size_binop (PLUS_EXPR, *poffset, offset); | |
917 *pbitpos = bitpos; | |
0 | 918 } |
919 } | |
920 | |
921 /* Print debugging information about the information in RLI. */ | |
922 | |
67 | 923 DEBUG_FUNCTION void |
0 | 924 debug_rli (record_layout_info rli) |
925 { | |
926 print_node_brief (stderr, "type", rli->t, 0); | |
927 print_node_brief (stderr, "\noffset", rli->offset, 0); | |
928 print_node_brief (stderr, " bitpos", rli->bitpos, 0); | |
929 | |
930 fprintf (stderr, "\naligns: rec = %u, unpack = %u, off = %u\n", | |
931 rli->record_align, rli->unpacked_align, | |
932 rli->offset_align); | |
933 | |
934 /* The ms_struct code is the only code that uses this. */ |
935 if (targetm.ms_bitfield_layout_p (rli->t)) | |
936 fprintf (stderr, "remaining in alignment = %u\n", rli->remaining_in_alignment); | |
937 | |
938 if (rli->packed_maybe_necessary) | |
939 fprintf (stderr, "packed may be necessary\n"); | |
940 | |
111 | 941 if (!vec_safe_is_empty (rli->pending_statics)) |
0 | 942 { |
943 fprintf (stderr, "pending statics:\n"); | |
67 | 944 debug_vec_tree (rli->pending_statics); |
0 | 945 } |
946 } | |
947 | |
948 /* Given an RLI with a possibly-incremented BITPOS, adjust OFFSET and | |
949 BITPOS if necessary to keep BITPOS below OFFSET_ALIGN. */ | |
950 | |
951 void | |
952 normalize_rli (record_layout_info rli) | |
953 { | |
954 normalize_offset (&rli->offset, &rli->bitpos, rli->offset_align); | |
955 } | |
956 | |
957 /* Returns the size in bytes allocated so far. */ | |
958 | |
959 tree | |
960 rli_size_unit_so_far (record_layout_info rli) | |
961 { | |
962 return byte_from_pos (rli->offset, rli->bitpos); | |
963 } | |
964 | |
965 /* Returns the size in bits allocated so far. */ | |
966 | |
967 tree | |
968 rli_size_so_far (record_layout_info rli) | |
969 { | |
970 return bit_from_pos (rli->offset, rli->bitpos); | |
971 } | |
972 | |
973 /* FIELD is about to be added to RLI->T. The alignment (in bits) of | |
974 the next available location within the record is given by KNOWN_ALIGN. | |
975 Update the variable alignment fields in RLI, and return the alignment | |
976 to give the FIELD. */ | |
977 | |
978 unsigned int | |
979 update_alignment_for_field (record_layout_info rli, tree field, | |
980 unsigned int known_align) | |
981 { | |
982 /* The alignment required for FIELD. */ | |
983 unsigned int desired_align; | |
984 /* The type of this field. */ | |
985 tree type = TREE_TYPE (field); | |
986 /* True if the field was explicitly aligned by the user. */ | |
987 bool user_align; | |
988 bool is_bitfield; | |
989 | |
990 /* Do not attempt to align an ERROR_MARK node. */ |
991 if (TREE_CODE (type) == ERROR_MARK) | |
992 return 0; | |
993 | |
994 /* Lay out the field so we know what alignment it needs. */ | |
995 layout_decl (field, known_align); | |
996 desired_align = DECL_ALIGN (field); | |
997 user_align = DECL_USER_ALIGN (field); | |
998 | |
999 is_bitfield = (type != error_mark_node | |
1000 && DECL_BIT_FIELD_TYPE (field) | |
1001 && ! integer_zerop (TYPE_SIZE (type))); | |
1002 | |
1003 /* Record must have at least as much alignment as any field. | |
1004 Otherwise, the alignment of the field within the record is | |
1005 meaningless. */ | |
1006 if (targetm.ms_bitfield_layout_p (rli->t)) | |
1007 { | |
1008 /* Here, the alignment of the underlying type of a bitfield can | |
1009 affect the alignment of a record; even a zero-sized field | |
1010 can do this. The alignment should be to the alignment of | |
1011 the type, except that for zero-size bitfields this only | |
1012 applies if there was an immediately prior, nonzero-size | |
1013 bitfield. (That's the way it is, experimentally.) */ | |
1014 if ((!is_bitfield && !DECL_PACKED (field)) | |
111 | 1015 || ((DECL_SIZE (field) == NULL_TREE |
1016 || !integer_zerop (DECL_SIZE (field))) | |
0 | 1017 ? !DECL_PACKED (field) |
1018 : (rli->prev_field | |
1019 && DECL_BIT_FIELD_TYPE (rli->prev_field) | |
1020 && ! integer_zerop (DECL_SIZE (rli->prev_field))))) | |
1021 { | |
1022 unsigned int type_align = TYPE_ALIGN (type); | |
1023 type_align = MAX (type_align, desired_align); | |
1024 if (maximum_field_alignment != 0) | |
1025 type_align = MIN (type_align, maximum_field_alignment); | |
1026 rli->record_align = MAX (rli->record_align, type_align); | |
1027 rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type)); | |
1028 } | |
1029 } | |
1030 else if (is_bitfield && PCC_BITFIELD_TYPE_MATTERS) | |
1031 { | |
1032 /* Named bit-fields cause the entire structure to have the | |
1033 alignment implied by their type. Some targets also apply the same | |
1034 rules to unnamed bitfields. */ | |
1035 if (DECL_NAME (field) != 0 | |
1036 || targetm.align_anon_bitfield ()) | |
1037 { | |
1038 unsigned int type_align = TYPE_ALIGN (type); | |
1039 | |
1040 #ifdef ADJUST_FIELD_ALIGN | |
1041 if (! TYPE_USER_ALIGN (type)) | |
111 | 1042 type_align = ADJUST_FIELD_ALIGN (field, type, type_align); |
0 | 1043 #endif |
1044 | |
1045 /* Targets might choose to handle unnamed and hence possibly |
1046 zero-width bitfields. Those are not influenced by #pragmas |
1047 or packed attributes. */ | |
1048 if (integer_zerop (DECL_SIZE (field))) | |
1049 { | |
1050 if (initial_max_fld_align) | |
1051 type_align = MIN (type_align, | |
1052 initial_max_fld_align * BITS_PER_UNIT); | |
1053 } | |
1054 else if (maximum_field_alignment != 0) | |
1055 type_align = MIN (type_align, maximum_field_alignment); | |
1056 else if (DECL_PACKED (field)) | |
1057 type_align = MIN (type_align, BITS_PER_UNIT); | |
1058 | |
1059 /* The alignment of the record is increased to the maximum | |
1060 of the current alignment, the alignment indicated on the | |
1061 field (i.e., the alignment specified by an __aligned__ | |
1062 attribute), and the alignment indicated by the type of | |
1063 the field. */ | |
1064 rli->record_align = MAX (rli->record_align, desired_align); | |
1065 rli->record_align = MAX (rli->record_align, type_align); | |
1066 | |
1067 if (warn_packed) | |
1068 rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type)); | |
1069 user_align |= TYPE_USER_ALIGN (type); | |
1070 } | |
1071 } | |
1072 else | |
1073 { | |
1074 rli->record_align = MAX (rli->record_align, desired_align); | |
1075 rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type)); | |
1076 } | |
1077 | |
1078 TYPE_USER_ALIGN (rli->t) |= user_align; | |
1079 | |
1080 return desired_align; | |
1081 } | |
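/* A concrete illustration of the PCC_BITFIELD_TYPE_MATTERS branch
   above, assuming a typical target where int is 4-byte aligned (not a
   guarantee for every port): the named bit-field alone raises the
   record alignment to that of its declared type.  */
#if 0
struct s { char c; int bf : 3; };   /* _Alignof (struct s) == _Alignof (int).  */
#endif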
1082 | |
111 | 1083 /* Issue a warning if the record alignment, RECORD_ALIGN, is less than |
1084 the field alignment of FIELD, or if FIELD isn't aligned. */ |
1085 | |
1086 static void | |
1087 handle_warn_if_not_align (tree field, unsigned int record_align) | |
1088 { | |
1089 tree type = TREE_TYPE (field); | |
1090 | |
1091 if (type == error_mark_node) | |
1092 return; | |
1093 | |
1094 unsigned int warn_if_not_align = 0; | |
1095 | |
1096 int opt_w = 0; | |
1097 | |
1098 if (warn_if_not_aligned) | |
1099 { | |
1100 warn_if_not_align = DECL_WARN_IF_NOT_ALIGN (field); | |
1101 if (!warn_if_not_align) | |
1102 warn_if_not_align = TYPE_WARN_IF_NOT_ALIGN (type); | |
1103 if (warn_if_not_align) | |
1104 opt_w = OPT_Wif_not_aligned; | |
1105 } | |
1106 | |
1107 if (!warn_if_not_align | |
1108 && warn_packed_not_aligned | |
1109 && TYPE_USER_ALIGN (type)) | |
1110 { | |
1111 warn_if_not_align = TYPE_ALIGN (type); | |
1112 opt_w = OPT_Wpacked_not_aligned; | |
1113 } | |
1114 | |
1115 if (!warn_if_not_align) | |
1116 return; | |
1117 | |
1118 tree context = DECL_CONTEXT (field); | |
1119 | |
1120 warn_if_not_align /= BITS_PER_UNIT; | |
1121 record_align /= BITS_PER_UNIT; | |
1122 if ((record_align % warn_if_not_align) != 0) | |
1123 warning (opt_w, "alignment %u of %qT is less than %u", | |
1124 record_align, context, warn_if_not_align); | |
1125 | |
1126 unsigned HOST_WIDE_INT off | |
1127 = (tree_to_uhwi (DECL_FIELD_OFFSET (field)) | |
1128 + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field)) / BITS_PER_UNIT); | |
1129 if ((off % warn_if_not_align) != 0) | |
1130 warning (opt_w, "%q+D offset %wu in %qT isn't aligned to %u", | |
1131 field, off, context, warn_if_not_align); | |
1132 } | |
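/* A sketch of input that can reach the OPT_Wpacked_not_aligned path
   above on a typical LP64 target (hypothetical type names): the
   8-byte-aligned member lands at offset 4 of the packed record, so
   both modulo checks fail.  */
#if 0
struct u64a { unsigned long long x; } __attribute__ ((aligned (8)));
struct outer { int i; struct u64a a; } __attribute__ ((packed));
#endif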
1133 | |
0 | 1134 /* Called from place_field to handle unions. */ |
1135 | |
1136 static void | |
1137 place_union_field (record_layout_info rli, tree field) | |
1138 { | |
1139 update_alignment_for_field (rli, field, /*known_align=*/0); | |
1140 | |
1141 DECL_FIELD_OFFSET (field) = size_zero_node; | |
1142 DECL_FIELD_BIT_OFFSET (field) = bitsize_zero_node; | |
1143 SET_DECL_OFFSET_ALIGN (field, BIGGEST_ALIGNMENT); | |
111 | 1144 handle_warn_if_not_align (field, rli->record_align); |
0 | 1145 |
1146 /* If this is an ERROR_MARK return *after* having set the | |
1147 field at the start of the union. This helps when parsing | |
1148 invalid fields. */ | |
1149 if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK) | |
1150 return; | |
1151 | |
111 | 1152 if (AGGREGATE_TYPE_P (TREE_TYPE (field)) |
1153 && TYPE_TYPELESS_STORAGE (TREE_TYPE (field))) | |
1154 TYPE_TYPELESS_STORAGE (rli->t) = 1; | |
1155 | |
0 | 1156 /* We assume the union's size will be a multiple of a byte so we don't |
1157 bother with BITPOS. */ | |
1158 if (TREE_CODE (rli->t) == UNION_TYPE) | |
1159 rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field)); | |
1160 else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE) | |
67 | 1161 rli->offset = fold_build3 (COND_EXPR, sizetype, DECL_QUALIFIER (field), |
0 | 1162 DECL_SIZE_UNIT (field), rli->offset); |
1163 } | |
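/* The MAX_EXPR above just records "a union is as large as its largest
   member": for the union below the running offset becomes 5, which
   finalize_record_size later pads up to 8, assuming a typical target
   with 4-byte int alignment.  */
#if 0
union u { char c[5]; int i; };   /* sizeof (union u) == 8, typically.  */
#endif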
1164 | |
1165 /* A bitfield of SIZE with a required access alignment of ALIGN is allocated | |
1166 at BYTE_OFFSET / BIT_OFFSET. Return nonzero if the field would span more | |
1167 units of alignment than the underlying TYPE. */ | |
1168 static int | |
1169 excess_unit_span (HOST_WIDE_INT byte_offset, HOST_WIDE_INT bit_offset, | |
1170 HOST_WIDE_INT size, HOST_WIDE_INT align, tree type) | |
1171 { | |
1172 /* Note that the calculation of OFFSET might overflow; we calculate it so | |
1173 that we still get the right result as long as ALIGN is a power of two. */ | |
1174 unsigned HOST_WIDE_INT offset = byte_offset * BITS_PER_UNIT + bit_offset; | |
1175 | |
1176 offset = offset % align; | |
1177 return ((offset + size + align - 1) / align | |
111 | 1178 > tree_to_uhwi (TYPE_SIZE (type)) / align); |
0 | 1179 } |
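/* A standalone model of the test above, assuming BITS_PER_UNIT == 8:
   a 30-bit field placed at bit 8 with ALIGN and the type size both 32
   needs (8 + 30 + 31) / 32 == 2 alignment units while its type spans
   only one, so the caller must advance to the next boundary.  */
#if 0
static int
excess_unit_span_model (unsigned long byte_offset, unsigned long bit_offset,
                        unsigned long size, unsigned long align,
                        unsigned long type_size)
{
  unsigned long offset = (byte_offset * 8 + bit_offset) % align;
  return (offset + size + align - 1) / align > type_size / align;
}
#endif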
1180 | |
1181 /* RLI contains information about the layout of a RECORD_TYPE. FIELD | |
1182 is a FIELD_DECL to be added after those fields already present in | |
1183 T. (FIELD is not actually added to the TYPE_FIELDS list here; | |
1184 callers that desire that behavior must manually perform that step.) */ | |
1185 | |
1186 void | |
1187 place_field (record_layout_info rli, tree field) | |
1188 { | |
1189 /* The alignment required for FIELD. */ | |
1190 unsigned int desired_align; | |
1191 /* The alignment FIELD would have if we just dropped it into the | |
1192 record as it presently stands. */ | |
1193 unsigned int known_align; | |
1194 unsigned int actual_align; | |
1195 /* The type of this field. */ | |
1196 tree type = TREE_TYPE (field); | |
1197 | |
1198 gcc_assert (TREE_CODE (field) != ERROR_MARK); | |
1199 | |
1200 /* If FIELD is static, then treat it like a separate variable, not | |
1201 really like a structure field. If it is a FUNCTION_DECL, it's a | |
1202 method. In both cases, all we do is lay out the decl, and we do | |
1203 it *after* the record is laid out. */ | |
111 | 1204 if (VAR_P (field)) |
0 | 1205 { |
111 | 1206 vec_safe_push (rli->pending_statics, field); |
0 | 1207 return; |
1208 } | |
1209 | |
1210 /* Enumerators and enum types which are local to this class need not | |
1211 be laid out. Likewise for initialized constant fields. */ | |
1212 else if (TREE_CODE (field) != FIELD_DECL) | |
1213 return; | |
1214 | |
1215 /* Unions are laid out very differently than records, so split | |
1216 that code off to another function. */ | |
1217 else if (TREE_CODE (rli->t) != RECORD_TYPE) | |
1218 { | |
1219 place_union_field (rli, field); | |
1220 return; | |
1221 } | |
1222 | |
1223 else if (TREE_CODE (type) == ERROR_MARK) | |
1224 { | |
1225 /* Place this field at the current allocation position, so we | |
1226 maintain monotonicity. */ | |
1227 DECL_FIELD_OFFSET (field) = rli->offset; | |
1228 DECL_FIELD_BIT_OFFSET (field) = rli->bitpos; | |
1229 SET_DECL_OFFSET_ALIGN (field, rli->offset_align); | |
111 | 1230 handle_warn_if_not_align (field, rli->record_align); |
0 | 1231 return; |
1232 } | |
1233 | |
111 | 1234 if (AGGREGATE_TYPE_P (type) |
1235 && TYPE_TYPELESS_STORAGE (type)) | |
1236 TYPE_TYPELESS_STORAGE (rli->t) = 1; | |
1237 | |
0 | 1238 /* Work out the known alignment so far. Note that A & (-A) is the |
1239 value of the least-significant bit in A that is one. */ | |
1240 if (! integer_zerop (rli->bitpos)) | |
111 | 1241 known_align = least_bit_hwi (tree_to_uhwi (rli->bitpos)); |
0 | 1242 else if (integer_zerop (rli->offset)) |
1243 known_align = 0; | |
111 | 1244 else if (tree_fits_uhwi_p (rli->offset)) |
0 | 1245 known_align = (BITS_PER_UNIT |
111 | 1246 * least_bit_hwi (tree_to_uhwi (rli->offset))); |
0 | 1247 else |
1248 known_align = rli->offset_align; | |
1249 | |
1250 desired_align = update_alignment_for_field (rli, field, known_align); | |
1251 if (known_align == 0) | |
1252 known_align = MAX (BIGGEST_ALIGNMENT, rli->record_align); | |
1253 | |
1254 if (warn_packed && DECL_PACKED (field)) | |
1255 { | |
1256 if (known_align >= TYPE_ALIGN (type)) | |
1257 { | |
1258 if (TYPE_ALIGN (type) > desired_align) | |
1259 { | |
1260 if (STRICT_ALIGNMENT) | |
1261 warning (OPT_Wattributes, "packed attribute causes " | |
1262 "inefficient alignment for %q+D", field); | |
63 | 1263 /* Don't warn if DECL_PACKED was set by the type. */ |
1264 else if (!TYPE_PACKED (rli->t)) |
0 | 1265 warning (OPT_Wattributes, "packed attribute is " |
1266 "unnecessary for %q+D", field); | |
1267 } | |
1268 } | |
1269 else | |
1270 rli->packed_maybe_necessary = 1; | |
1271 } | |
1272 | |
1273 /* Does this field automatically have the alignment it needs by virtue |
111 | 1274 of the fields that precede it and the record's own alignment? */ |
1275 if (known_align < desired_align) | |
0 | 1276 { |
1277 /* No, we need to skip space before this field. | |
1278 Bump the cumulative size to multiple of field alignment. */ | |
1279 | |
111 | 1280 if (!targetm.ms_bitfield_layout_p (rli->t) |
1281 && DECL_SOURCE_LOCATION (field) != BUILTINS_LOCATION) | |
55 | 1282 warning (OPT_Wpadded, "padding struct to align %q+D", field); |
0 | 1283 |
1284 /* If the alignment is still within offset_align, just align | |
1285 the bit position. */ | |
1286 if (desired_align < rli->offset_align) | |
1287 rli->bitpos = round_up (rli->bitpos, desired_align); | |
1288 else | |
1289 { | |
1290 /* First adjust OFFSET by the partial bits, then align. */ | |
1291 rli->offset | |
1292 = size_binop (PLUS_EXPR, rli->offset, | |
1293 fold_convert (sizetype, | |
1294 size_binop (CEIL_DIV_EXPR, rli->bitpos, | |
1295 bitsize_unit_node))); | |
1296 rli->bitpos = bitsize_zero_node; | |
1297 | |
1298 rli->offset = round_up (rli->offset, desired_align / BITS_PER_UNIT); | |
1299 } | |
1300 | |
1301 if (! TREE_CONSTANT (rli->offset)) | |
1302 rli->offset_align = desired_align; | |
111 | 1303 if (targetm.ms_bitfield_layout_p (rli->t)) |
1304 rli->prev_field = NULL; | |
0 | 1305 } |
1306 | |
1307 /* Handle compatibility with PCC. Note that if the record has any | |
1308 variable-sized fields, we need not worry about compatibility. */ | |
1309 if (PCC_BITFIELD_TYPE_MATTERS | |
1310 && ! targetm.ms_bitfield_layout_p (rli->t) | |
1311 && TREE_CODE (field) == FIELD_DECL | |
1312 && type != error_mark_node | |
1313 && DECL_BIT_FIELD (field) | |
1314 && (! DECL_PACKED (field) | |
1315 /* Enter for these packed fields only to issue a warning. */ | |
1316 || TYPE_ALIGN (type) <= BITS_PER_UNIT) | |
1317 && maximum_field_alignment == 0 | |
1318 && ! integer_zerop (DECL_SIZE (field)) | |
111 | 1319 && tree_fits_uhwi_p (DECL_SIZE (field)) |
1320 && tree_fits_uhwi_p (rli->offset) | |
1321 && tree_fits_uhwi_p (TYPE_SIZE (type))) | |
0 | 1322 { |
1323 unsigned int type_align = TYPE_ALIGN (type); | |
1324 tree dsize = DECL_SIZE (field); | |
111 | 1325 HOST_WIDE_INT field_size = tree_to_uhwi (dsize); |
1326 HOST_WIDE_INT offset = tree_to_uhwi (rli->offset); | |
1327 HOST_WIDE_INT bit_offset = tree_to_shwi (rli->bitpos); | |
0 | 1328 |
1329 #ifdef ADJUST_FIELD_ALIGN | |
1330 if (! TYPE_USER_ALIGN (type)) | |
111 | 1331 type_align = ADJUST_FIELD_ALIGN (field, type, type_align); |
0 | 1332 #endif |
1333 | |
1334 /* A bit field may not span more units of alignment of its type | |
1335 than its type itself. Advance to next boundary if necessary. */ | |
1336 if (excess_unit_span (offset, bit_offset, field_size, type_align, type)) | |
1337 { | |
1338 if (DECL_PACKED (field)) | |
1339 { | |
1340 if (warn_packed_bitfield_compat == 1) | |
1341 inform | |
1342 (input_location, | |
67 | 1343 "offset of packed bit-field %qD has changed in GCC 4.4", |
0 | 1344 field); |
1345 } | |
1346 else | |
67 | 1347 rli->bitpos = round_up (rli->bitpos, type_align); |
0 | 1348 } |
1349 | |
1350 if (! DECL_PACKED (field)) | |
1351 TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type); | |
111 | 1352 |
1353 SET_TYPE_WARN_IF_NOT_ALIGN (rli->t, | |
1354 TYPE_WARN_IF_NOT_ALIGN (type)); | |
0 | 1355 } |
1356 | |
1357 #ifdef BITFIELD_NBYTES_LIMITED | |
1358 if (BITFIELD_NBYTES_LIMITED | |
1359 && ! targetm.ms_bitfield_layout_p (rli->t) | |
1360 && TREE_CODE (field) == FIELD_DECL | |
1361 && type != error_mark_node | |
1362 && DECL_BIT_FIELD_TYPE (field) | |
1363 && ! DECL_PACKED (field) | |
1364 && ! integer_zerop (DECL_SIZE (field)) | |
111 | 1365 && tree_fits_uhwi_p (DECL_SIZE (field)) |
1366 && tree_fits_uhwi_p (rli->offset) | |
1367 && tree_fits_uhwi_p (TYPE_SIZE (type))) | |
0 | 1368 { |
1369 unsigned int type_align = TYPE_ALIGN (type); | |
1370 tree dsize = DECL_SIZE (field); | |
111 | 1371 HOST_WIDE_INT field_size = tree_to_uhwi (dsize); |
1372 HOST_WIDE_INT offset = tree_to_uhwi (rli->offset); | |
1373 HOST_WIDE_INT bit_offset = tree_to_shwi (rli->bitpos); | |
0 | 1374 |
1375 #ifdef ADJUST_FIELD_ALIGN | |
1376 if (! TYPE_USER_ALIGN (type)) | |
111 | 1377 type_align = ADJUST_FIELD_ALIGN (field, type, type_align); |
0 | 1378 #endif |
1379 | |
1380 if (maximum_field_alignment != 0) | |
1381 type_align = MIN (type_align, maximum_field_alignment); | |
1382 /* ??? This test is opposite the test in the containing if | |
1383 statement, so this code is unreachable currently. */ | |
1384 else if (DECL_PACKED (field)) | |
1385 type_align = MIN (type_align, BITS_PER_UNIT); | |
1386 | |
1387 /* A bit field may not span the unit of alignment of its type. | |
1388 Advance to next boundary if necessary. */ | |
1389 if (excess_unit_span (offset, bit_offset, field_size, type_align, type)) | |
1390 rli->bitpos = round_up (rli->bitpos, type_align); | |
1391 | |
1392 TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type); | |
111 | 1393 SET_TYPE_WARN_IF_NOT_ALIGN (rli->t, |
1394 TYPE_WARN_IF_NOT_ALIGN (type)); | |
0 | 1395 } |
1396 #endif | |
1397 | |
1398 /* See the docs for TARGET_MS_BITFIELD_LAYOUT_P for details. | |
1399 A subtlety: | |
1400 When a bit field is inserted into a packed record, the whole | |
1401 size of the underlying type is used by one or more same-size | |
1402 adjacent bitfields. (That is, if it's long:3, 32 bits is |
1403 used in the record, and any additional adjacent long bitfields are | |
1404 packed into the same chunk of 32 bits. However, if the size | |
1405 changes, a new field of that size is allocated.) In an unpacked | |
1406 record, this is the same as using alignment, but not equivalent | |
1407 when packing. | |
1408 | |
1409 Note: for compatibility, we use the type size, not the type alignment, |
1410 to determine alignment, since that matches the documentation. */ |
1411 | |
1412 if (targetm.ms_bitfield_layout_p (rli->t)) | |
1413 { | |
1414 tree prev_saved = rli->prev_field; | |
1415 tree prev_type = prev_saved ? DECL_BIT_FIELD_TYPE (prev_saved) : NULL; | |
1416 | |
1417 /* The previous field, if it exists, is a bitfield. */ |
1418 if (rli->prev_field) | |
1419 { | |
1420 /* If both are bitfields, nonzero, and the same size, this is | |
1421 the middle of a run. Zero declared size fields are special | |
1422 and handled as "end of run". (Note: it's nonzero declared | |
1423 size, but equal type sizes!) (Since we know that both | |
1424 the current and previous fields are bitfields by the | |
1425 time we check it, DECL_SIZE must be present for both.) */ | |
1426 if (DECL_BIT_FIELD_TYPE (field) | |
1427 && !integer_zerop (DECL_SIZE (field)) | |
1428 && !integer_zerop (DECL_SIZE (rli->prev_field)) | |
111 | 1429 && tree_fits_shwi_p (DECL_SIZE (rli->prev_field)) |
1430 && tree_fits_uhwi_p (TYPE_SIZE (type)) | |
0 | 1431 && simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type))) |
1432 { | |
1433 /* We're in the middle of a run of equal type size fields; make | |
1434 sure we realign if we run out of bits. (Not decl size, | |
1435 type size!) */ | |
111 | 1436 HOST_WIDE_INT bitsize = tree_to_uhwi (DECL_SIZE (field)); |
0 | 1437 |
1438 if (rli->remaining_in_alignment < bitsize) | |
1439 { | |
111 | 1440 HOST_WIDE_INT typesize = tree_to_uhwi (TYPE_SIZE (type)); |
0 | 1441 |
1442 /* out of bits; bump up to next 'word'. */ | |
1443 rli->bitpos | |
1444 = size_binop (PLUS_EXPR, rli->bitpos, | |
1445 bitsize_int (rli->remaining_in_alignment)); | |
1446 rli->prev_field = field; | |
1447 if (typesize < bitsize) | |
1448 rli->remaining_in_alignment = 0; | |
1449 else | |
1450 rli->remaining_in_alignment = typesize - bitsize; | |
1451 } | |
1452 else | |
1453 rli->remaining_in_alignment -= bitsize; | |
1454 } | |
1455 else | |
1456 { | |
1457 /* End of a run: if leaving a run of bitfields of the same type | |
1458 size, we have to "use up" the rest of the bits of the type | |
1459 size. | |
1460 | |
1461 Compute the new position as the sum of the size for the prior | |
1462 type and where we first started working on that type. | |
1463 Note: since the beginning of the field was aligned then | |
1464 of course the end will be too. No round needed. */ | |
1465 | |
1466 if (!integer_zerop (DECL_SIZE (rli->prev_field))) | |
1467 { | |
1468 rli->bitpos | |
1469 = size_binop (PLUS_EXPR, rli->bitpos, | |
1470 bitsize_int (rli->remaining_in_alignment)); | |
1471 } | |
1472 else | |
1473 /* We "use up" size zero fields; the code below should behave | |
1474 as if the prior field was not a bitfield. */ | |
1475 prev_saved = NULL; | |
1476 | |
1477 /* Cause a new bitfield to be captured, either this time (if | |
1478 currently a bitfield) or next time we see one. */ | |
111 | 1479 if (!DECL_BIT_FIELD_TYPE (field) |
0 | 1480 || integer_zerop (DECL_SIZE (field))) |
1481 rli->prev_field = NULL; | |
1482 } | |
1483 | |
1484 normalize_rli (rli); | |
1485 } | |
1486 | |
111 | 1487 /* If we're starting a new run of same type size bitfields |
0 | 1488 (or a run of non-bitfields), set up the "first of the run" |
1489 fields. | |
1490 | |
1491 That is, if the current field is not a bitfield, or if there | |
1492 was a prior bitfield and the type sizes differ, or if there wasn't |
1493 a prior bitfield and the size of the current field is nonzero. |
1494 | |
1495 Note: we must be sure to test ONLY the type size if there was | |
1496 a prior bitfield and ONLY for the current field being zero if | |
1497 there wasn't. */ | |
1498 | |
1499 if (!DECL_BIT_FIELD_TYPE (field) | |
1500 || (prev_saved != NULL | |
1501 ? !simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type)) | |
1502 : !integer_zerop (DECL_SIZE (field)) )) | |
1503 { | |
1504 /* Never smaller than a byte for compatibility. */ | |
1505 unsigned int type_align = BITS_PER_UNIT; | |
1506 | |
1507 /* (When not a bitfield), we could be seeing a flex array (with | |
1508 no DECL_SIZE). Since we won't be using remaining_in_alignment | |
1509 until we see a bitfield (and come by here again) we just skip | |
1510 calculating it. */ | |
1511 if (DECL_SIZE (field) != NULL | |
111 | 1512 && tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (field))) |
1513 && tree_fits_uhwi_p (DECL_SIZE (field))) | |
0 | 1514 { |
63 | 1515 unsigned HOST_WIDE_INT bitsize |
111 | 1516 = tree_to_uhwi (DECL_SIZE (field)); |
63 | 1517 unsigned HOST_WIDE_INT typesize |
111 | 1518 = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (field))); |
0 | 1519 |
1520 if (typesize < bitsize) | |
1521 rli->remaining_in_alignment = 0; | |
1522 else | |
1523 rli->remaining_in_alignment = typesize - bitsize; | |
1524 } | |
1525 | |
1526 /* Now align (conventionally) for the new type. */ | |
1527 type_align = TYPE_ALIGN (TREE_TYPE (field)); | |
1528 | |
1529 if (maximum_field_alignment != 0) | |
1530 type_align = MIN (type_align, maximum_field_alignment); | |
1531 | |
67 | 1532 rli->bitpos = round_up (rli->bitpos, type_align); |
0 | 1533 |
1534 /* If we really aligned, don't allow subsequent bitfields | |
1535 to undo that. */ | |
1536 rli->prev_field = NULL; | |
1537 } | |
1538 } | |
1539 | |
1540 /* Offset so far becomes the position of this field after normalizing. */ | |
1541 normalize_rli (rli); | |
1542 DECL_FIELD_OFFSET (field) = rli->offset; | |
1543 DECL_FIELD_BIT_OFFSET (field) = rli->bitpos; | |
1544 SET_DECL_OFFSET_ALIGN (field, rli->offset_align); | |
111 | 1545 handle_warn_if_not_align (field, rli->record_align); |
1546 | |
1547 /* Evaluate nonconstant offsets only once, either now or as soon as safe. */ | |
1548 if (TREE_CODE (DECL_FIELD_OFFSET (field)) != INTEGER_CST) | |
1549 DECL_FIELD_OFFSET (field) = variable_size (DECL_FIELD_OFFSET (field)); | |
0 | 1550 |
1551 /* If this field ended up more aligned than we thought it would be (we | |
1552 approximate this by seeing if its position changed), lay out the field | |
1553 again; perhaps we can use an integral mode for it now. */ | |
1554 if (! integer_zerop (DECL_FIELD_BIT_OFFSET (field))) | |
111 | 1555 actual_align = least_bit_hwi (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))); |
0 | 1556 else if (integer_zerop (DECL_FIELD_OFFSET (field))) |
1557 actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align); | |
111 | 1558 else if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field))) |
0 | 1559 actual_align = (BITS_PER_UNIT |
111 | 1560 * least_bit_hwi (tree_to_uhwi (DECL_FIELD_OFFSET (field)))); |
0 | 1561 else |
1562 actual_align = DECL_OFFSET_ALIGN (field); | |
1563 /* ACTUAL_ALIGN is still the actual alignment *within the record*. |
1564 Store/extract bit-field operations will check the alignment of the |
1565 record against the mode of bit fields. */ | |
1566 | |
1567 if (known_align != actual_align) | |
1568 layout_decl (field, actual_align); | |
1569 | |
1570 if (rli->prev_field == NULL && DECL_BIT_FIELD_TYPE (field)) | |
1571 rli->prev_field = field; | |
1572 | |
1573 /* Now add size of this field to the size of the record. If the size is | |
1574 not constant, treat the field as being a multiple of bytes and just | |
1575 adjust the offset, resetting the bit position. Otherwise, apportion the | |
1576 size amongst the bit position and offset. First handle the case of an | |
1577 unspecified size, which can happen when we have an invalid nested struct | |
1578 definition, such as struct j { struct j { int i; } }. The error message | |
1579 is printed in finish_struct. */ | |
1580 if (DECL_SIZE (field) == 0) | |
1581 /* Do nothing. */; | |
1582 else if (TREE_CODE (DECL_SIZE (field)) != INTEGER_CST | |
1583 || TREE_OVERFLOW (DECL_SIZE (field))) | |
1584 { | |
1585 rli->offset | |
1586 = size_binop (PLUS_EXPR, rli->offset, | |
1587 fold_convert (sizetype, | |
1588 size_binop (CEIL_DIV_EXPR, rli->bitpos, | |
1589 bitsize_unit_node))); | |
1590 rli->offset | |
1591 = size_binop (PLUS_EXPR, rli->offset, DECL_SIZE_UNIT (field)); | |
1592 rli->bitpos = bitsize_zero_node; | |
1593 rli->offset_align = MIN (rli->offset_align, desired_align); | |
1594 } | |
1595 else if (targetm.ms_bitfield_layout_p (rli->t)) | |
1596 { | |
1597 rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field)); | |
1598 | |
1599 /* If we ended a bitfield before the full length of the type then | |
1600 pad the struct out to the full length of the last type. */ | |
67 | 1601 if ((DECL_CHAIN (field) == NULL |
1602 || TREE_CODE (DECL_CHAIN (field)) != FIELD_DECL) |
0 | 1603 && DECL_BIT_FIELD_TYPE (field) |
1604 && !integer_zerop (DECL_SIZE (field))) | |
1605 rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, | |
1606 bitsize_int (rli->remaining_in_alignment)); | |
1607 | |
1608 normalize_rli (rli); | |
1609 } | |
1610 else | |
1611 { | |
1612 rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field)); | |
1613 normalize_rli (rli); | |
1614 } | |
1615 } | |
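/* An illustration of the ms_struct run logic handled above, assuming a
   target with 32-bit int and 16-bit short: under the MS layout, a and
   b share one 32-bit unit because their type sizes match, while c
   starts a fresh 16-bit unit because the type size changed, typically
   making the record larger than GCC's default layout would.  */
#if 0
struct m { int a : 3; int b : 3; short c : 3; };
#endif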
1616 | |
1617 /* Assuming that all the fields have been laid out, this function uses | |
1618 RLI to compute the final TYPE_SIZE, TYPE_ALIGN, etc. for the type | |
1619 indicated by RLI. */ | |
1620 | |
1621 static void | |
1622 finalize_record_size (record_layout_info rli) | |
1623 { | |
1624 tree unpadded_size, unpadded_size_unit; | |
1625 | |
1626 /* Now we want just byte and bit offsets, so set the offset alignment | |
1627 to be a byte and then normalize. */ | |
1628 rli->offset_align = BITS_PER_UNIT; | |
1629 normalize_rli (rli); | |
1630 | |
1631 /* Determine the desired alignment. */ | |
1632 #ifdef ROUND_TYPE_ALIGN | |
111 | 1633 SET_TYPE_ALIGN (rli->t, ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), |
1634 rli->record_align)); | |
0 | 1635 #else |
111 | 1636 SET_TYPE_ALIGN (rli->t, MAX (TYPE_ALIGN (rli->t), rli->record_align)); |
0 | 1637 #endif |
1638 | |
1639 /* Compute the size so far. Be sure to allow for extra bits in the | |
1640 size in bytes. We have guaranteed above that it will be no more | |
1641 than a single byte. */ | |
1642 unpadded_size = rli_size_so_far (rli); | |
1643 unpadded_size_unit = rli_size_unit_so_far (rli); | |
1644 if (! integer_zerop (rli->bitpos)) | |
1645 unpadded_size_unit | |
1646 = size_binop (PLUS_EXPR, unpadded_size_unit, size_one_node); | |
1647 | |
1648 /* Round the size up to be a multiple of the required alignment. */ | |
67 | 1649 TYPE_SIZE (rli->t) = round_up (unpadded_size, TYPE_ALIGN (rli->t)); |
0 | 1650 TYPE_SIZE_UNIT (rli->t) |
67 | 1651 = round_up (unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t)); |
0 | 1652 |
1653 if (TREE_CONSTANT (unpadded_size) | |
55 | 1654 && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0 |
1655 && input_location != BUILTINS_LOCATION) |
0 | 1656 warning (OPT_Wpadded, "padding struct size to alignment boundary"); |
1657 | |
1658 if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE | |
1659 && TYPE_PACKED (rli->t) && ! rli->packed_maybe_necessary | |
1660 && TREE_CONSTANT (unpadded_size)) | |
1661 { | |
1662 tree unpacked_size; | |
1663 | |
1664 #ifdef ROUND_TYPE_ALIGN | |
1665 rli->unpacked_align | |
1666 = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), rli->unpacked_align); | |
1667 #else | |
1668 rli->unpacked_align = MAX (TYPE_ALIGN (rli->t), rli->unpacked_align); | |
1669 #endif | |
1670 | |
67 | 1671 unpacked_size = round_up (TYPE_SIZE (rli->t), rli->unpacked_align); |
0 | 1672 if (simple_cst_equal (unpacked_size, TYPE_SIZE (rli->t))) |
1673 { | |
1674 if (TYPE_NAME (rli->t)) | |
1675 { | |
55 | 1676 tree name; |
0 | 1677 |
1678 if (TREE_CODE (TYPE_NAME (rli->t)) == IDENTIFIER_NODE) | |
55 | 1679 name = TYPE_NAME (rli->t); |
0 | 1680 else |
55 | 1681 name = DECL_NAME (TYPE_NAME (rli->t)); |
0 | 1682 |
1683 if (STRICT_ALIGNMENT) | |
1684 warning (OPT_Wpacked, "packed attribute causes inefficient " | |
55 | 1685 "alignment for %qE", name); |
0 | 1686 else |
1687 warning (OPT_Wpacked, | |
55 | 1688 "packed attribute is unnecessary for %qE", name); |
0 | 1689 } |
1690 else | |
1691 { | |
1692 if (STRICT_ALIGNMENT) | |
1693 warning (OPT_Wpacked, | |
1694 "packed attribute causes inefficient alignment"); | |
1695 else | |
1696 warning (OPT_Wpacked, "packed attribute is unnecessary"); | |
1697 } | |
1698 } | |
1699 } | |
1700 } | |
1701 | |
1702 /* Compute the TYPE_MODE for the TYPE (which is a RECORD_TYPE). */ | |
1703 | |
1704 void | |
1705 compute_record_mode (tree type) | |
1706 { | |
1707 tree field; | |
111 | 1708 machine_mode mode = VOIDmode; |
0 | 1709 |
1710 /* Most RECORD_TYPEs have BLKmode, so we start off assuming that. | |
1711 However, if possible, we use a mode that fits in a register | |
1712 instead, in order to allow for better optimization down the | |
1713 line. */ | |
1714 SET_TYPE_MODE (type, BLKmode); | |
1715 | |
111 | 1716 if (! tree_fits_uhwi_p (TYPE_SIZE (type))) |
0 | 1717 return; |
1718 | |
1719 /* A record which has any BLKmode members must itself be | |
1720 BLKmode; it can't go in a register. Unless the member is | |
1721 BLKmode only because it isn't aligned. */ | |
67 | 1722 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field)) |
0 | 1723 { |
1724 if (TREE_CODE (field) != FIELD_DECL) | |
1725 continue; | |
1726 | |
1727 if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK | |
1728 || (TYPE_MODE (TREE_TYPE (field)) == BLKmode | |
1729 && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field)) | |
1730 && !(TYPE_SIZE (TREE_TYPE (field)) != 0 | |
1731 && integer_zerop (TYPE_SIZE (TREE_TYPE (field))))) | |
111 | 1732 || ! tree_fits_uhwi_p (bit_position (field)) |
0 | 1733 || DECL_SIZE (field) == 0 |
111 | 1734 || ! tree_fits_uhwi_p (DECL_SIZE (field))) |
0 | 1735 return; |
1736 | |
1737 /* If this field is the whole struct, remember its mode so | |
1738 that, say, we can put a double in a class into a DF | |
1739 register instead of forcing it to live in the stack. */ | |
1740 if (simple_cst_equal (TYPE_SIZE (type), DECL_SIZE (field))) | |
1741 mode = DECL_MODE (field); | |
1742 | |
111 | 1743 /* With some targets, it is sub-optimal to access an aligned |
1744 BLKmode structure as a scalar. */ | |
1745 if (targetm.member_type_forces_blk (field, mode)) | |
0 | 1746 return; |
1747 } | |
1748 | |
1749 /* If we only have one real field; use its mode if that mode's size | |
1750 matches the type's size. This only applies to RECORD_TYPE. This | |
1751 does not apply to unions. */ | |
1752 if (TREE_CODE (type) == RECORD_TYPE && mode != VOIDmode | |
111 | 1753 && tree_fits_uhwi_p (TYPE_SIZE (type)) |
1754 && GET_MODE_BITSIZE (mode) == tree_to_uhwi (TYPE_SIZE (type))) | |
1755 ; | |
0 | 1756 else |
111 | 1757 mode = mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1).else_blk (); |
0 | 1758 |
1759 /* If structure's known alignment is less than what the scalar | |
1760 mode would need, and it matters, then stick with BLKmode. */ | |
111 | 1761 if (mode != BLKmode |
0 | 1762 && STRICT_ALIGNMENT |
1763 && ! (TYPE_ALIGN (type) >= BIGGEST_ALIGNMENT | |
111 | 1764 || TYPE_ALIGN (type) >= GET_MODE_ALIGNMENT (mode))) |
0 | 1765 { |
1766 /* If this is the only reason this type is BLKmode, then | |
1767 don't force containing types to be BLKmode. */ | |
1768 TYPE_NO_FORCE_BLK (type) = 1; | |
111 | 1769 mode = BLKmode; |
0 | 1770 } |
111 | 1771 |
1772 SET_TYPE_MODE (type, mode); | |
0 | 1773 } |
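/* An example of the mode search above: a record whose single field
   exactly fills it inherits that field's mode, so on a typical target
   the wrapper below gets DFmode and can live in a floating-point
   register instead of being forced onto the stack as BLKmode.  */
#if 0
struct wrap { double d; };   /* TYPE_MODE (struct wrap) == DFmode.  */
#endif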
1774 | |
1775 /* Compute TYPE_SIZE and TYPE_ALIGN for TYPE, once it has been laid | |
1776 out. */ | |
1777 | |
1778 static void | |
1779 finalize_type_size (tree type) | |
1780 { | |
1781 /* Normally, use the alignment corresponding to the mode chosen. | |
1782 However, where strict alignment is not required, avoid | |
1783 over-aligning structures, since most compilers do not do this | |
1784 alignment. */ | |
111 | 1785 if (TYPE_MODE (type) != BLKmode |
1786 && TYPE_MODE (type) != VOIDmode | |
1787 && (STRICT_ALIGNMENT || !AGGREGATE_TYPE_P (type))) | |
0 | 1788 { |
1789 unsigned mode_align = GET_MODE_ALIGNMENT (TYPE_MODE (type)); | |
1790 | |
1791 /* Don't override a larger alignment requirement coming from a user | |
1792 alignment of one of the fields. */ | |
1793 if (mode_align >= TYPE_ALIGN (type)) | |
1794 { | |
111 | 1795 SET_TYPE_ALIGN (type, mode_align); |
0 | 1796 TYPE_USER_ALIGN (type) = 0; |
1797 } | |
1798 } | |
1799 | |
1800 /* Do machine-dependent extra alignment. */ | |
1801 #ifdef ROUND_TYPE_ALIGN | |
111 | 1802 SET_TYPE_ALIGN (type, |
1803 ROUND_TYPE_ALIGN (type, TYPE_ALIGN (type), BITS_PER_UNIT)); | |
0 | 1804 #endif |
1805 | |
1806 /* If we failed to find a simple way to calculate the unit size | |
1807 of the type, find it by division. */ | |
1808 if (TYPE_SIZE_UNIT (type) == 0 && TYPE_SIZE (type) != 0) | |
1809 /* TYPE_SIZE (type) is computed in bitsizetype. After the division, the | |
1810 result will fit in sizetype. We will get more efficient code using | |
1811 sizetype, so we force a conversion. */ | |
1812 TYPE_SIZE_UNIT (type) | |
1813 = fold_convert (sizetype, | |
1814 size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type), | |
1815 bitsize_unit_node)); | |
1816 | |
1817 if (TYPE_SIZE (type) != 0) | |
1818 { | |
67 | 1819 TYPE_SIZE (type) = round_up (TYPE_SIZE (type), TYPE_ALIGN (type)); |
1820 TYPE_SIZE_UNIT (type) |
1821 = round_up (TYPE_SIZE_UNIT (type), TYPE_ALIGN_UNIT (type)); |
0 | 1822 } |
1823 | |
1824 /* Evaluate nonconstant sizes only once, either now or as soon as safe. */ | |
1825 if (TYPE_SIZE (type) != 0 && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST) | |
1826 TYPE_SIZE (type) = variable_size (TYPE_SIZE (type)); | |
1827 if (TYPE_SIZE_UNIT (type) != 0 | |
1828 && TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST) | |
1829 TYPE_SIZE_UNIT (type) = variable_size (TYPE_SIZE_UNIT (type)); | |
1830 | |
1831 /* Also layout any other variants of the type. */ | |
1832 if (TYPE_NEXT_VARIANT (type) | |
1833 || type != TYPE_MAIN_VARIANT (type)) | |
1834 { | |
1835 tree variant; | |
1836 /* Record layout info of this variant. */ | |
1837 tree size = TYPE_SIZE (type); | |
1838 tree size_unit = TYPE_SIZE_UNIT (type); | |
1839 unsigned int align = TYPE_ALIGN (type); | |
111 | 1840 unsigned int precision = TYPE_PRECISION (type); |
0 | 1841 unsigned int user_align = TYPE_USER_ALIGN (type); |
111 | 1842 machine_mode mode = TYPE_MODE (type); |
0 | 1843 |
1844 /* Copy it into all variants. */ | |
1845 for (variant = TYPE_MAIN_VARIANT (type); | |
1846 variant != 0; | |
1847 variant = TYPE_NEXT_VARIANT (variant)) | |
1848 { | |
1849 TYPE_SIZE (variant) = size; | |
1850 TYPE_SIZE_UNIT (variant) = size_unit; | |
111 | 1851 unsigned valign = align; |
1852 if (TYPE_USER_ALIGN (variant)) | |
1853 valign = MAX (valign, TYPE_ALIGN (variant)); | |
1854 else | |
1855 TYPE_USER_ALIGN (variant) = user_align; | |
1856 SET_TYPE_ALIGN (variant, valign); | |
1857 TYPE_PRECISION (variant) = precision; | |
0 | 1858 SET_TYPE_MODE (variant, mode); |
1859 } | |
1860 } | |
1861 } | |
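/* The final round_up of TYPE_SIZE above is where tail padding appears:
   assuming 4-byte int alignment, the 5 bytes of data below are padded
   so that sizeof yields 8.  */
#if 0
struct padded { int i; char c; };   /* TYPE_SIZE_UNIT: 5 rounded up to 8.  */
#endif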
1862 | |
111 | 1863 /* Return a new underlying object for a bitfield started with FIELD. */ |
1864 | |
1865 static tree | |
1866 start_bitfield_representative (tree field) | |
1867 { | |
1868 tree repr = make_node (FIELD_DECL); | |
1869 DECL_FIELD_OFFSET (repr) = DECL_FIELD_OFFSET (field); | |
1870 /* Force the representative to begin at a BITS_PER_UNIT aligned | |
1871 boundary - C++ may use tail-padding of a base object to | |
1872 continue packing bits so the bitfield region does not start | |
1873 at bit zero (see g++.dg/abi/bitfield5.C for example). | |
1874 Unallocated bits may happen for other reasons as well, | |
1875 for example Ada which allows explicit bit-granular structure layout. */ | |
1876 DECL_FIELD_BIT_OFFSET (repr) | |
1877 = size_binop (BIT_AND_EXPR, | |
1878 DECL_FIELD_BIT_OFFSET (field), | |
1879 bitsize_int (~(BITS_PER_UNIT - 1))); | |
1880 SET_DECL_OFFSET_ALIGN (repr, DECL_OFFSET_ALIGN (field)); | |
1881 DECL_SIZE (repr) = DECL_SIZE (field); | |
1882 DECL_SIZE_UNIT (repr) = DECL_SIZE_UNIT (field); | |
1883 DECL_PACKED (repr) = DECL_PACKED (field); | |
1884 DECL_CONTEXT (repr) = DECL_CONTEXT (field); | |
1885 /* There are no indirect accesses to this field. If we introduce | |
1886 some then they have to use the record alias set. This makes | |
1887 sure to properly conflict with [indirect] accesses to addressable | |
1888 fields of the bitfield group. */ | |
1889 DECL_NONADDRESSABLE_P (repr) = 1; | |
1890 return repr; | |
1891 } | |
1892 | |
1893 /* Finish up a bitfield group that was started by creating the underlying | |
1894 object REPR with the last field in the bitfield group FIELD. */ | |
1895 | |
1896 static void | |
1897 finish_bitfield_representative (tree repr, tree field) | |
1898 { | |
1899 unsigned HOST_WIDE_INT bitsize, maxbitsize; | |
1900 tree nextf, size; | |
1901 | |
1902 size = size_diffop (DECL_FIELD_OFFSET (field), | |
1903 DECL_FIELD_OFFSET (repr)); | |
1904 while (TREE_CODE (size) == COMPOUND_EXPR) | |
1905 size = TREE_OPERAND (size, 1); | |
1906 gcc_assert (tree_fits_uhwi_p (size)); | |
1907 bitsize = (tree_to_uhwi (size) * BITS_PER_UNIT | |
1908 + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field)) | |
1909 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)) | |
1910 + tree_to_uhwi (DECL_SIZE (field))); | |
1911 | |
1912 /* Round up bitsize to multiples of BITS_PER_UNIT. */ | |
1913 bitsize = (bitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1); | |
1914 | |
1915 /* Now nothing tells us how to pad out bitsize ... */ | |
1916 nextf = DECL_CHAIN (field); | |
1917 while (nextf && TREE_CODE (nextf) != FIELD_DECL) | |
1918 nextf = DECL_CHAIN (nextf); | |
1919 if (nextf) | |
1920 { | |
1921 tree maxsize; | |
1922 /* If there was an error, the field may be not laid out | |
1923 correctly. Don't bother to do anything. */ | |
1924 if (TREE_TYPE (nextf) == error_mark_node) | |
1925 return; | |
1926 maxsize = size_diffop (DECL_FIELD_OFFSET (nextf), | |
1927 DECL_FIELD_OFFSET (repr)); | |
1928 if (tree_fits_uhwi_p (maxsize)) | |
1929 { | |
1930 maxbitsize = (tree_to_uhwi (maxsize) * BITS_PER_UNIT | |
1931 + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (nextf)) | |
1932 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr))); | |
1933 /* If the group ends within a bitfield, nextf does not need to be |
1934 aligned to BITS_PER_UNIT. Thus round up. */ | |
1935 maxbitsize = (maxbitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1); | |
1936 } | |
1937 else | |
1938 maxbitsize = bitsize; | |
1939 } | |
1940 else | |
1941 { | |
1942 /* Note that if the C++ FE sets up tail-padding to be re-used it | |
1943 creates an as-base variant of the type with TYPE_SIZE adjusted |
1944 accordingly. So it is safe to include tail-padding here. */ | |
1945 tree aggsize = lang_hooks.types.unit_size_without_reusable_padding | |
1946 (DECL_CONTEXT (field)); | |
1947 tree maxsize = size_diffop (aggsize, DECL_FIELD_OFFSET (repr)); | |
1948 /* We cannot generally rely on maxsize to fold to an integer constant, | |
1949 so use bitsize as fallback for this case. */ | |
1950 if (tree_fits_uhwi_p (maxsize)) | |
1951 maxbitsize = (tree_to_uhwi (maxsize) * BITS_PER_UNIT | |
1952 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr))); | |
1953 else | |
1954 maxbitsize = bitsize; | |
1955 } | |
1956 | |
1957 /* Only if we don't artificially break up the representative in | |
1958 the middle of a large bitfield with different possibly | |
1959 overlapping representatives. And all representatives start | |
1960 at byte offset. */ | |
1961 gcc_assert (maxbitsize % BITS_PER_UNIT == 0); | |
1962 | |
1963 /* Find the smallest nice mode to use. */ | |
1964 opt_scalar_int_mode mode_iter; | |
1965 FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT) | |
1966 if (GET_MODE_BITSIZE (mode_iter.require ()) >= bitsize) | |
1967 break; | |
1968 | |
1969 scalar_int_mode mode; | |
1970 if (!mode_iter.exists (&mode) | |
1971 || GET_MODE_BITSIZE (mode) > maxbitsize | |
1972 || GET_MODE_BITSIZE (mode) > MAX_FIXED_MODE_SIZE) | |
1973 { | |
1974 /* We really want a BLKmode representative only as a last resort, | |
1975 considering the member b in | |
1976 struct { int a : 7; int b : 17; int c; } __attribute__((packed)); | |
1977 Otherwise we simply want to split the representative up | |
1978 allowing for overlaps within the bitfield region as required for | |
1979 struct { int a : 7; int b : 7; | |
1980 int c : 10; int d; } __attribute__((packed)); | |
1981 [0, 15] HImode for a and b, [8, 23] HImode for c. */ | |
1982 DECL_SIZE (repr) = bitsize_int (bitsize); | |
1983 DECL_SIZE_UNIT (repr) = size_int (bitsize / BITS_PER_UNIT); | |
1984 SET_DECL_MODE (repr, BLKmode); | |
1985 TREE_TYPE (repr) = build_array_type_nelts (unsigned_char_type_node, | |
1986 bitsize / BITS_PER_UNIT); | |
1987 } | |
1988 else | |
1989 { | |
1990 unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (mode); | |
1991 DECL_SIZE (repr) = bitsize_int (modesize); | |
1992 DECL_SIZE_UNIT (repr) = size_int (modesize / BITS_PER_UNIT); | |
1993 SET_DECL_MODE (repr, mode); | |
1994 TREE_TYPE (repr) = lang_hooks.types.type_for_mode (mode, 1); | |
1995 } | |
1996 | |
1997 /* Remember whether the bitfield group is at the end of the | |
1998 structure or not. */ | |
1999 DECL_CHAIN (repr) = nextf; | |
2000 } | |
2001 | |
2002 /* Compute and set FIELD_DECLs for the underlying objects we should | |
2003 use for bitfield access for the structure T. */ | |
2004 | |
2005 void | |
2006 finish_bitfield_layout (tree t) | |
2007 { | |
2008 tree field, prev; | |
2009 tree repr = NULL_TREE; | |
2010 | |
2011 /* Unions would be special: for the ease of type-punning optimizations |
2012 we could use the underlying type as a hint for the representative |
2013 if the bitfield would fit and the representative would not exceed | |
2014 the union in size. */ | |
2015 if (TREE_CODE (t) != RECORD_TYPE) | |
2016 return; | |
2017 | |
2018 for (prev = NULL_TREE, field = TYPE_FIELDS (t); | |
2019 field; field = DECL_CHAIN (field)) | |
2020 { | |
2021 if (TREE_CODE (field) != FIELD_DECL) | |
2022 continue; | |
2023 | |
2024 /* In the C++ memory model, consecutive bit fields in a structure are | |
2025 considered one memory location and updating a memory location | |
2026 may not store into adjacent memory locations. */ | |
2027 if (!repr | |
2028 && DECL_BIT_FIELD_TYPE (field)) | |
2029 { | |
2030 /* Start new representative. */ | |
2031 repr = start_bitfield_representative (field); | |
2032 } | |
2033 else if (repr | |
2034 && ! DECL_BIT_FIELD_TYPE (field)) | |
2035 { | |
2036 /* Finish off new representative. */ | |
2037 finish_bitfield_representative (repr, prev); | |
2038 repr = NULL_TREE; | |
2039 } | |
2040 else if (DECL_BIT_FIELD_TYPE (field)) | |
2041 { | |
2042 gcc_assert (repr != NULL_TREE); | |
2043 | |
2044 /* Zero-size bitfields finish off a representative and | |
2045 do not have a representative themselves. This is | |
2046 required by the C++ memory model. */ | |
2047 if (integer_zerop (DECL_SIZE (field))) | |
2048 { | |
2049 finish_bitfield_representative (repr, prev); | |
2050 repr = NULL_TREE; | |
2051 } | |
2052 | |
2053 /* We assume that either DECL_FIELD_OFFSET of the representative | |
2054 and each bitfield member is a constant or they are equal. | |
2055 This is because we need to be able to compute the bit-offset | |
2056 of each field relative to the representative in get_bit_range | |
2057 during RTL expansion. | |
2058 If these constraints are not met, simply force a new | |
2059 representative to be generated. That will at most | |
2060 generate worse code but still maintain correctness with | |
2061 respect to the C++ memory model. */ | |
2062 else if (!((tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr)) | |
2063 && tree_fits_uhwi_p (DECL_FIELD_OFFSET (field))) | |
2064 || operand_equal_p (DECL_FIELD_OFFSET (repr), | |
2065 DECL_FIELD_OFFSET (field), 0))) | |
2066 { | |
2067 finish_bitfield_representative (repr, prev); | |
2068 repr = start_bitfield_representative (field); | |
2069 } | |
2070 } | |
2071 else | |
2072 continue; | |
2073 | |
2074 if (repr) | |
2075 DECL_BIT_FIELD_REPRESENTATIVE (field) = repr; | |
2076 | |
2077 prev = field; | |
2078 } | |
2079 | |
2080 if (repr) | |
2081 finish_bitfield_representative (repr, prev); | |
2082 } | |
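/* A sketch of the grouping performed above: a and b share one
   representative, the zero-width bit-field closes that group, and c
   gets a representative of its own, so stores to c need not touch the
   bytes holding a and b, as the C++ memory model requires.  */
#if 0
struct g { int a : 3; int b : 5; int : 0; int c : 4; };
#endif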
2083 | |
0 | 2084 /* Do all of the work required to layout the type indicated by RLI, |
2085 once the fields have been laid out. This function will call `free' | |
2086 for RLI, unless FREE_P is false. Passing a value other than false | |
2087 for FREE_P is bad practice; this option only exists to support the | |
2088 G++ 3.2 ABI. */ | |
2089 | |
2090 void | |
2091 finish_record_layout (record_layout_info rli, int free_p) | |
2092 { | |
2093 tree variant; | |
2094 | |
2095 /* Compute the final size. */ | |
2096 finalize_record_size (rli); | |
2097 | |
2098 /* Compute the TYPE_MODE for the record. */ | |
2099 compute_record_mode (rli->t); | |
2100 | |
2101 /* Perform any last tweaks to the TYPE_SIZE, etc. */ | |
2102 finalize_type_size (rli->t); | |
2103 | |
111 | 2104 /* Compute bitfield representatives. */ |
2105 finish_bitfield_layout (rli->t); | |
2106 | |
2107 /* Propagate TYPE_PACKED and TYPE_REVERSE_STORAGE_ORDER to variants. | |
2108 With C++ templates, it is too early to do this when the attribute | |
2109 is being parsed. */ | |
0 | 2110 for (variant = TYPE_NEXT_VARIANT (rli->t); variant; |
2111 variant = TYPE_NEXT_VARIANT (variant)) | |
111 | 2112 { |
2113 TYPE_PACKED (variant) = TYPE_PACKED (rli->t); | |
2114 TYPE_REVERSE_STORAGE_ORDER (variant) | |
2115 = TYPE_REVERSE_STORAGE_ORDER (rli->t); | |
2116 } | |
0 | 2117 |
2118 /* Lay out any static members. This is done now because their type | |
2119 may use the record's type. */ | |
111 | 2120 while (!vec_safe_is_empty (rli->pending_statics)) |
2121 layout_decl (rli->pending_statics->pop (), 0); | |
0 | 2122 |
2123 /* Clean up. */ | |
2124 if (free_p) | |
67 | 2125 { |
111 | 2126 vec_free (rli->pending_statics); |
67 | 2127 free (rli); |
2128 } |
0 | 2129 } |
2130 | |
2131 | |
0 | 2132 /* Finish processing a builtin RECORD_TYPE type TYPE. Its name is |
2133 NAME, its fields are chained in reverse on FIELDS. | |
2134 | |
2135 If ALIGN_TYPE is non-null, it is given the same alignment as | |
2136 ALIGN_TYPE. */ | |
2137 | |
2138 void | |
2139 finish_builtin_struct (tree type, const char *name, tree fields, | |
2140 tree align_type) | |
2141 { | |
2142 tree tail, next; | |
2143 | |
2144 for (tail = NULL_TREE; fields; tail = fields, fields = next) | |
2145 { | |
2146 DECL_FIELD_CONTEXT (fields) = type; | |
67 | 2147 next = DECL_CHAIN (fields); |
2148 DECL_CHAIN (fields) = tail; |
0 | 2149 } |
2150 TYPE_FIELDS (type) = tail; | |
2151 | |
2152 if (align_type) | |
2153 { | |
111 | 2154 SET_TYPE_ALIGN (type, TYPE_ALIGN (align_type)); |
0 | 2155 TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (align_type); |
111 | 2156 SET_TYPE_WARN_IF_NOT_ALIGN (type, |
2157 TYPE_WARN_IF_NOT_ALIGN (align_type)); | |
0 | 2158 } |
2159 | |
2160 layout_type (type); | |
2161 #if 0 /* not yet, should get fixed properly later */ | |
2162 TYPE_NAME (type) = make_type_decl (get_identifier (name), type); | |
2163 #else | |
55 | 2164 TYPE_NAME (type) = build_decl (BUILTINS_LOCATION, |
2165 TYPE_DECL, get_identifier (name), type); |
0 | 2166 #endif |
2167 TYPE_STUB_DECL (type) = TYPE_NAME (type); | |
2168 layout_decl (TYPE_NAME (type), 0); | |
2169 } | |
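/* A minimal sketch of a typical call, with hypothetical field names:
   the FIELD_DECLs are chained last-first because the loop above
   re-reverses them onto TYPE_FIELDS.  */
#if 0
tree t = make_node (RECORD_TYPE);
tree hi = build_decl (BUILTINS_LOCATION, FIELD_DECL,
                      get_identifier ("hi"), integer_type_node);
tree lo = build_decl (BUILTINS_LOCATION, FIELD_DECL,
                      get_identifier ("lo"), integer_type_node);
DECL_CHAIN (hi) = lo;   /* Reversed: "hi" is declared after "lo".  */
finish_builtin_struct (t, "__layout_pair", hi, NULL_TREE);
#endif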
2170 | |
2171 /* Calculate the mode, size, and alignment for TYPE. | |
2172 For an array type, calculate the element separation as well. | |
2173 Record TYPE on the chain of permanent or temporary types | |
2174 so that dbxout will find out about it. | |
2175 | |
2176 TYPE_SIZE of a type is nonzero if the type has been laid out already. | |
2177 layout_type does nothing on such a type. | |
2178 | |
2179 If the type is incomplete, its TYPE_SIZE remains zero. */ | |
2180 | |
2181 void | |
2182 layout_type (tree type) | |
2183 { | |
2184 gcc_assert (type); | |
2185 | |
2186 if (type == error_mark_node) | |
2187 return; | |
2188 | |
111 | 2189 /* We don't want finalize_type_size to copy an alignment attribute to |
2190 variants that don't have it. */ | |
2191 type = TYPE_MAIN_VARIANT (type); | |
2192 | |
0 | 2193 /* Do nothing if type has been laid out before. */ |
2194 if (TYPE_SIZE (type)) | |
2195 return; | |
2196 | |
2197 switch (TREE_CODE (type)) | |
2198 { | |
2199 case LANG_TYPE: | |
2200 /* This kind of type is the responsibility | |
2201 of the language-specific code. */ | |
2202 gcc_unreachable (); | |
2203 | |
111 | 2204 case BOOLEAN_TYPE: |
0 | 2205 case INTEGER_TYPE: |
2206 case ENUMERAL_TYPE: | |
111 | 2207 { |
2208 scalar_int_mode mode | |
2209 = smallest_int_mode_for_size (TYPE_PRECISION (type)); | |
2210 SET_TYPE_MODE (type, mode); | |
2211 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode)); | |
2212 /* Don't set TYPE_PRECISION here, as it may be set by a bitfield. */ | |
2213 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode)); | |
2214 break; | |
2215 } | |
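/* Illustrative note, not in the original source: a 7-bit INTEGER_TYPE
   gets the smallest covering mode, QImode, so TYPE_SIZE becomes 8 and
   TYPE_SIZE_UNIT becomes 1 while TYPE_PRECISION stays 7.  */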
0 | 2216 |
2217 case REAL_TYPE: | |
111 | 2218 { |
2219 /* Allow the caller to choose the type mode, which is how decimal | |
2220 floats are distinguished from binary ones. */ | |
2221 if (TYPE_MODE (type) == VOIDmode) | |
2222 SET_TYPE_MODE | |
2223 (type, float_mode_for_size (TYPE_PRECISION (type)).require ()); | |
2224 scalar_float_mode mode = as_a <scalar_float_mode> (TYPE_MODE (type)); | |
2225 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode)); | |
2226 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode)); | |
2227 break; | |
2228 } | |
0 | 2229 |
2230 case FIXED_POINT_TYPE: | |
111 | 2231 { |
2232 /* TYPE_MODE (type) has been set already. */ | |
2233 scalar_mode mode = SCALAR_TYPE_MODE (type); | |
2234 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode)); | |
2235 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode)); | |
2236 break; | |
2237 } | |
0 | 2238 |
2239 case COMPLEX_TYPE: | |
2240 TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type)); | |
2241 SET_TYPE_MODE (type, | |
111 | 2242 GET_MODE_COMPLEX_MODE (TYPE_MODE (TREE_TYPE (type)))); |
2243 | |
0 | 2244 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type))); |
2245 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type))); | |
2246 break; | |
2247 | |
2248 case VECTOR_TYPE: | |
2249 { | |
2250 int nunits = TYPE_VECTOR_SUBPARTS (type); | |
2251 tree innertype = TREE_TYPE (type); | |
2252 | |
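/* The number of vector subparts must be a power of two.  */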
2253 gcc_assert (!(nunits & (nunits - 1))); | |
2254 | |
2255 /* Find an appropriate mode for the vector type. */ | |
2256 if (TYPE_MODE (type) == VOIDmode) | |
67 | 2257 SET_TYPE_MODE (type,
111 | 2258 mode_for_vector (SCALAR_TYPE_MODE (innertype), |
2259 nunits).else_blk ()); | |
0 | 2260 |
2261 TYPE_SATURATING (type) = TYPE_SATURATING (TREE_TYPE (type)); | |
2262 TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type)); | |
111 | 2263 /* Several boolean vector elements may fit in a single unit. */ |
2264 if (VECTOR_BOOLEAN_TYPE_P (type) | |
2265 && type->type_common.mode != BLKmode) | |
2266 TYPE_SIZE_UNIT (type) | |
2267 = size_int (GET_MODE_SIZE (type->type_common.mode)); | |
2268 else | |
2269 TYPE_SIZE_UNIT (type) = int_const_binop (MULT_EXPR, | |
2270 TYPE_SIZE_UNIT (innertype), | |
2271 size_int (nunits)); | |
2272 TYPE_SIZE (type) = int_const_binop (MULT_EXPR, | |
2273 TYPE_SIZE (innertype), | |
2274 bitsize_int (nunits)); | |
2275 | |
2276 /* For vector types, we do not default to the mode's alignment. | |
2277 Instead, query a target hook, defaulting to natural alignment. | |
2278 This prevents ABI changes depending on whether or not native | |
2279 vector modes are supported. */ | |
2280 SET_TYPE_ALIGN (type, targetm.vector_alignment (type)); | |
2281 | |
2282 /* However, if the underlying mode requires a bigger alignment than | |
2283 what the target hook provides, we cannot use the mode. For now, | |
2284 simply reject that case. */ | |
2285 gcc_assert (TYPE_ALIGN (type) | |
2286 >= GET_MODE_ALIGNMENT (TYPE_MODE (type))); | |
0 | 2287 break; |
2288 } | |
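/* Illustrative note, not in the original source: a vector of four
   32-bit ints gets TYPE_SIZE 4 * 32 == 128 bits; if the target
   supports V4SImode that mode is used, otherwise the type falls back
   to BLKmode, and the alignment comes from targetm.vector_alignment
   either way.  */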
2289 | |
2290 case VOID_TYPE: | |
2291 /* This is an incomplete type and so doesn't have a size. */ | |
111 | 2292 SET_TYPE_ALIGN (type, 1); |
0 | 2293 TYPE_USER_ALIGN (type) = 0; |
2294 SET_TYPE_MODE (type, VOIDmode); | |
2295 break; | |
2296 | |
111 | 2297 case POINTER_BOUNDS_TYPE: |
2298 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type))); | |
2299 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type))); | |
2300 break; | |
2301 | |
0 | 2302 case OFFSET_TYPE: |
2303 TYPE_SIZE (type) = bitsize_int (POINTER_SIZE); | |
111 | 2304 TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE_UNITS); |
2305 /* A pointer might be MODE_PARTIAL_INT, but ptrdiff_t must be | |
2306 integral, which may be an __intN. */ | |
2307 SET_TYPE_MODE (type, int_mode_for_size (POINTER_SIZE, 0).require ()); | |
55 | 2308 TYPE_PRECISION (type) = POINTER_SIZE;
0 | 2309 break; |
2310 | |
2311 case FUNCTION_TYPE: | |
2312 case METHOD_TYPE: | |
2313 /* It's hard to see what the mode and size of a function ought to | |
2314 be, but we do know the alignment is FUNCTION_BOUNDARY, so | |
2315 make it consistent with that. */ | |
111 | 2316 SET_TYPE_MODE (type, |
2317 int_mode_for_size (FUNCTION_BOUNDARY, 0).else_blk ()); | |
0 | 2318 TYPE_SIZE (type) = bitsize_int (FUNCTION_BOUNDARY); |
2319 TYPE_SIZE_UNIT (type) = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT); | |
2320 break; | |
2321 | |
2322 case POINTER_TYPE: | |
2323 case REFERENCE_TYPE: | |
2324 { | |
111 | 2325 scalar_int_mode mode = SCALAR_INT_TYPE_MODE (type); |
55 | 2326 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
0 | 2327 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode)); |
2328 TYPE_UNSIGNED (type) = 1; | |
111 | 2329 TYPE_PRECISION (type) = GET_MODE_PRECISION (mode); |
0 | 2330 } |
2331 break; | |
2332 | |
2333 case ARRAY_TYPE: | |
2334 { | |
2335 tree index = TYPE_DOMAIN (type); | |
2336 tree element = TREE_TYPE (type); | |
2337 | |
2338 /* We need to know both bounds in order to compute the size. */ | |
2339 if (index && TYPE_MAX_VALUE (index) && TYPE_MIN_VALUE (index) | |
2340 && TYPE_SIZE (element)) | |
2341 { | |
2342 tree ub = TYPE_MAX_VALUE (index); | |
2343 tree lb = TYPE_MIN_VALUE (index); | |
55 | 2344 tree element_size = TYPE_SIZE (element);
0 | 2345 tree length; |
55 | 2346
2347 /* Make sure that an array of zero-sized element is zero-sized
2348 regardless of its extent. */
2349 if (integer_zerop (element_size))
2350 length = size_zero_node;
0 | 2351 |
111 | 2352 /* The computation should happen in the original signedness so |
2353 that (possible) negative values are handled appropriately | |
2354 when determining overflow. */ | |
55 | 2355 else
111 | 2356 { |
2357 /* ??? When it is obvious that the range is signed | |
2358 represent it using ssizetype. */ | |
2359 if (TREE_CODE (lb) == INTEGER_CST | |
2360 && TREE_CODE (ub) == INTEGER_CST | |
2361 && TYPE_UNSIGNED (TREE_TYPE (lb)) | |
2362 && tree_int_cst_lt (ub, lb)) | |
2363 { | |
2364 lb = wide_int_to_tree (ssizetype, | |
2365 offset_int::from (wi::to_wide (lb), | |
2366 SIGNED)); | |
2367 ub = wide_int_to_tree (ssizetype, | |
2368 offset_int::from (wi::to_wide (ub), | |
2369 SIGNED)); | |
2370 } | |
2371 length | |
2372 = fold_convert (sizetype, | |
2373 size_binop (PLUS_EXPR, | |
2374 build_int_cst (TREE_TYPE (lb), 1), | |
2375 size_binop (MINUS_EXPR, ub, lb))); | |
2376 } | |
2377 | |
2378 /* ??? We have no way to distinguish a null-sized array from an | |
2379 array spanning the whole sizetype range, so we arbitrarily | |
2380 decide that [0, -1] is the only valid representation. */ | |
2381 if (integer_zerop (length) | |
2382 && TREE_OVERFLOW (length) | |
2383 && integer_zerop (lb)) | |
2384 length = size_zero_node; | |
0 | 2385 |
2386 TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size, | |
2387 fold_convert (bitsizetype, | |
2388 length)); | |
2389 | |
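/* Illustrative note, not in the original source: for int a[10] the
   domain is [0, 9], so length == 9 - 0 + 1 == 10 and TYPE_SIZE is
   10 * TYPE_SIZE (int), i.e. 320 bits with 32-bit int.  */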
55 | 2390 /* If we know the size of the element, calculate the total size
2391 directly, rather than do some division thing below. This
2392 optimization helps Fortran assumed-size arrays (where the
2393 size of the array is determined at runtime) substantially. */
2394 if (TYPE_SIZE_UNIT (element))
0 | 2395 TYPE_SIZE_UNIT (type) |
2396 = size_binop (MULT_EXPR, TYPE_SIZE_UNIT (element), length); | |
2397 } | |
2398 | |
2399 /* Now round the alignment and size, | |
2400 using machine-dependent criteria if any. */ | |
2401 | |
111 | 2402 unsigned align = TYPE_ALIGN (element); |
2403 if (TYPE_USER_ALIGN (type)) | |
2404 align = MAX (align, TYPE_ALIGN (type)); | |
2405 else | |
2406 TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (element); | |
2407 if (!TYPE_WARN_IF_NOT_ALIGN (type)) | |
2408 SET_TYPE_WARN_IF_NOT_ALIGN (type, | |
2409 TYPE_WARN_IF_NOT_ALIGN (element)); | |
0 | 2410 #ifdef ROUND_TYPE_ALIGN |
111 | 2411 align = ROUND_TYPE_ALIGN (type, align, BITS_PER_UNIT); |
0 | 2412 #else |
111 | 2413 align = MAX (align, BITS_PER_UNIT); |
0 | 2414 #endif |
111 | 2415 SET_TYPE_ALIGN (type, align); |
0 | 2416 SET_TYPE_MODE (type, BLKmode); |
2417 if (TYPE_SIZE (type) != 0 | |
111 | 2418 && ! targetm.member_type_forces_blk (type, VOIDmode) |
0 | 2419 /* BLKmode elements force BLKmode aggregate; |
2420 else extract/store fields may lose. */ | |
2421 && (TYPE_MODE (TREE_TYPE (type)) != BLKmode | |
2422 || TYPE_NO_FORCE_BLK (TREE_TYPE (type)))) | |
2423 { | |
111 | 2424 SET_TYPE_MODE (type, mode_for_array (TREE_TYPE (type), |
2425 TYPE_SIZE (type))); | |
0 | 2426 if (TYPE_MODE (type) != BLKmode |
2427 && STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT | |
2428 && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type))) | |
2429 { | |
2430 TYPE_NO_FORCE_BLK (type) = 1; | |
2431 SET_TYPE_MODE (type, BLKmode); | |
2432 } | |
2433 } | |
111 | 2434 if (AGGREGATE_TYPE_P (element)) |
2435 TYPE_TYPELESS_STORAGE (type) = TYPE_TYPELESS_STORAGE (element); | |
0 | 2436 /* When the element size is constant, check that it is at least as |
2437 large as the element alignment. */ | |
2438 if (TYPE_SIZE_UNIT (element) | |
2439 && TREE_CODE (TYPE_SIZE_UNIT (element)) == INTEGER_CST | |
2440 /* If TYPE_SIZE_UNIT overflowed, then it is certainly larger than | |
2441 TYPE_ALIGN_UNIT. */ | |
2442 && !TREE_OVERFLOW (TYPE_SIZE_UNIT (element)) | |
2443 && !integer_zerop (TYPE_SIZE_UNIT (element)) | |
2444 && compare_tree_int (TYPE_SIZE_UNIT (element), | |
2445 TYPE_ALIGN_UNIT (element)) < 0) | |
2446 error ("alignment of array elements is greater than element size"); | |
2447 break; | |
2448 } | |
2449 | |
2450 case RECORD_TYPE: | |
2451 case UNION_TYPE: | |
2452 case QUAL_UNION_TYPE: | |
2453 { | |
2454 tree field; | |
2455 record_layout_info rli; | |
2456 | |
2457 /* Initialize the layout information. */ | |
2458 rli = start_record_layout (type); | |
2459 | |
2460 /* If this is a QUAL_UNION_TYPE, we want to process the fields | |
2461 in the reverse order in building the COND_EXPR that denotes | |
2462 its size. We reverse them again later. */ | |
2463 if (TREE_CODE (type) == QUAL_UNION_TYPE) | |
2464 TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type)); | |
2465 | |
2466 /* Place all the fields. */ | |
67 | 2467 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
0 | 2468 place_field (rli, field); |
2469 | |
2470 if (TREE_CODE (type) == QUAL_UNION_TYPE) | |
2471 TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type)); | |
2472 | |
2473 /* Finish laying out the record. */ | |
2474 finish_record_layout (rli, /*free_p=*/true); | |
2475 } | |
2476 break; | |
2477 | |
2478 default: | |
2479 gcc_unreachable (); | |
2480 } | |
2481 | |
2482 /* Compute the final TYPE_SIZE, TYPE_ALIGN, etc. for TYPE. For | |
2483 records and unions, finish_record_layout already called this | |
2484 function. */ | |
111 | 2485 if (!RECORD_OR_UNION_TYPE_P (type)) |
0 | 2486 finalize_type_size (type); |
2487 | |
2488 /* We should never see alias sets on incomplete aggregates. And we |
2489 should not call layout_type on aggregates that are already complete. */ |
2490 if (AGGREGATE_TYPE_P (type)) | |
2491 gcc_assert (!TYPE_ALIAS_SET_KNOWN_P (type)); | |
2492 } | |
2493 | |
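/* Illustrative sketch, not part of the original source: laying out a
   ten-element int array through the public tree constructors, which
   invoke layout_type internally.  */

static void
example_layout_array (void)
{
  tree domain = build_index_type (size_int (9));  /* [0 .. 9] */
  tree arr = build_array_type (integer_type_node, domain);

  /* TYPE_SIZE (arr) is 10 * TYPE_SIZE (int) in bits and
     TYPE_SIZE_UNIT (arr) is 10 * sizeof (int) in bytes.  */
  gcc_assert (compare_tree_int (TYPE_SIZE_UNIT (arr),
                                10 * int_size_in_bytes (integer_type_node))
              == 0);
}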
111 | 2494 /* Return the least alignment, in bytes, required for type TYPE. */
2495 | |
2496 unsigned int | |
2497 min_align_of_type (tree type) | |
0 | 2498 { |
111 | 2499 unsigned int align = TYPE_ALIGN (type); |
2500 if (!TYPE_USER_ALIGN (type)) | |
0 | 2501 { |
111 | 2502 align = MIN (align, BIGGEST_ALIGNMENT); |
2503 #ifdef BIGGEST_FIELD_ALIGNMENT | |
2504 align = MIN (align, BIGGEST_FIELD_ALIGNMENT); | |
2505 #endif | |
2506 unsigned int field_align = align; | |
2507 #ifdef ADJUST_FIELD_ALIGN | |
2508 field_align = ADJUST_FIELD_ALIGN (NULL_TREE, type, field_align); | |
2509 #endif | |
2510 align = MIN (align, field_align); | |
0 | 2511 } |
111 | 2512 return align / BITS_PER_UNIT; |
0 | 2513 } |
2514 | |
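/* Illustrative note, not in the original source and target-dependent:
   on 32-bit x86, TYPE_ALIGN (double_type_node) is 64 bits but
   ADJUST_FIELD_ALIGN caps the field alignment at 32, so this
   hypothetical call returns 4 rather than 8.  */

static unsigned int
example_min_align_of_double (void)
{
  return min_align_of_type (double_type_node);
}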
2515 /* Create and return a type for signed integers of PRECISION bits. */ | |
2516 | |
2517 tree | |
2518 make_signed_type (int precision) | |
2519 { | |
2520 tree type = make_node (INTEGER_TYPE); | |
2521 | |
2522 TYPE_PRECISION (type) = precision; | |
2523 | |
2524 fixup_signed_type (type); | |
2525 return type; | |
2526 } | |
2527 | |
2528 /* Create and return a type for unsigned integers of PRECISION bits. */ | |
2529 | |
2530 tree | |
2531 make_unsigned_type (int precision) | |
2532 { | |
2533 tree type = make_node (INTEGER_TYPE); | |
2534 | |
2535 TYPE_PRECISION (type) = precision; | |
2536 | |
2537 fixup_unsigned_type (type); | |
2538 return type; | |
2539 } | |
2540 | |
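/* Illustrative sketch, not part of the original source: a 24-bit
   unsigned type.  fixup_unsigned_type gives it the range
   [0, 0xffffff]; layout_type then picks the smallest fitting mode,
   normally SImode, so TYPE_SIZE is 32 while TYPE_PRECISION stays
   24.  */

static tree
example_make_u24 (void)
{
  return make_unsigned_type (24);
}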
2541 /* Create and return a type for fract of PRECISION bits, UNSIGNEDP, | |
2542 and SATP. */ | |
2543 | |
2544 tree | |
2545 make_fract_type (int precision, int unsignedp, int satp) | |
2546 { | |
2547 tree type = make_node (FIXED_POINT_TYPE); | |
2548 | |
2549 TYPE_PRECISION (type) = precision; | |
2550 | |
2551 if (satp) | |
2552 TYPE_SATURATING (type) = 1; | |
2553 | |
2554 /* Lay out the type: set its alignment, size, etc. */ | |
111 | 2555 TYPE_UNSIGNED (type) = unsignedp; |
2556 enum mode_class mclass = unsignedp ? MODE_UFRACT : MODE_FRACT; | |
2557 SET_TYPE_MODE (type, mode_for_size (precision, mclass, 0).require ()); | |
0 | 2558 layout_type (type); |
2559 | |
2560 return type; | |
2561 } | |
2562 | |
2563 /* Create and return a type for accum of PRECISION bits, UNSIGNEDP, | |
2564 and SATP. */ | |
2565 | |
2566 tree | |
2567 make_accum_type (int precision, int unsignedp, int satp) | |
2568 { | |
2569 tree type = make_node (FIXED_POINT_TYPE); | |
2570 | |
2571 TYPE_PRECISION (type) = precision; | |
2572 | |
2573 if (satp) | |
2574 TYPE_SATURATING (type) = 1; | |
2575 | |
2576 /* Lay out the type: set its alignment, size, etc. */ | |
111 | 2577 TYPE_UNSIGNED (type) = unsignedp; |
2578 enum mode_class mclass = unsignedp ? MODE_UACCUM : MODE_ACCUM; | |
2579 SET_TYPE_MODE (type, mode_for_size (precision, mclass, 0).require ()); | |
0 | 2580 layout_type (type); |
2581 | |
2582 return type; | |
2583 } | |
2584 | |
111 | 2585 /* Initialize sizetypes so layout_type can use them. */ |
0 | 2586 |
2587 void | |
63 | 2588 initialize_sizetypes (void)
0 | 2589 { |
111 | 2590 int precision, bprecision; |
2591 | |
2592 /* Get sizetypes precision from the SIZE_TYPE target macro. */ | |
2593 if (strcmp (SIZETYPE, "unsigned int") == 0) | |
2594 precision = INT_TYPE_SIZE; | |
2595 else if (strcmp (SIZETYPE, "long unsigned int") == 0) | |
2596 precision = LONG_TYPE_SIZE; | |
2597 else if (strcmp (SIZETYPE, "long long unsigned int") == 0) | |
2598 precision = LONG_LONG_TYPE_SIZE; | |
2599 else if (strcmp (SIZETYPE, "short unsigned int") == 0) | |
2600 precision = SHORT_TYPE_SIZE; | |
2601 else | |
2602 { | |
2603 int i; | |
2604 | |
2605 precision = -1; | |
2606 for (i = 0; i < NUM_INT_N_ENTS; i++) | |
2607 if (int_n_enabled_p[i]) | |
2608 { | |
2609 char name[50]; | |
2610 sprintf (name, "__int%d unsigned", int_n_data[i].bitsize); | |
2611 | |
2612 if (strcmp (name, SIZETYPE) == 0) | |
2613 { | |
2614 precision = int_n_data[i].bitsize; | |
2615 } | |
2616 } | |
2617 if (precision == -1) | |
2618 gcc_unreachable (); | |
2619 } | |
2620 | |
2621 bprecision | |
2622 = MIN (precision + LOG2_BITS_PER_UNIT + 1, MAX_FIXED_MODE_SIZE); | |
2623 bprecision = GET_MODE_PRECISION (smallest_int_mode_for_size (bprecision)); | |
2624 if (bprecision > HOST_BITS_PER_DOUBLE_INT) | |
2625 bprecision = HOST_BITS_PER_DOUBLE_INT; | |
2626 | |
2627 /* Create stubs for sizetype and bitsizetype so we can create constants. */ | |
2628 sizetype = make_node (INTEGER_TYPE); | |
2629 TYPE_NAME (sizetype) = get_identifier ("sizetype"); | |
2630 TYPE_PRECISION (sizetype) = precision; | |
2631 TYPE_UNSIGNED (sizetype) = 1; | |
2632 bitsizetype = make_node (INTEGER_TYPE); | |
2633 TYPE_NAME (bitsizetype) = get_identifier ("bitsizetype"); | |
2634 TYPE_PRECISION (bitsizetype) = bprecision; | |
2635 TYPE_UNSIGNED (bitsizetype) = 1; | |
2636 | |
2637 /* Now layout both types manually. */ | |
2638 scalar_int_mode mode = smallest_int_mode_for_size (precision); | |
2639 SET_TYPE_MODE (sizetype, mode); | |
2640 SET_TYPE_ALIGN (sizetype, GET_MODE_ALIGNMENT (TYPE_MODE (sizetype))); | |
2641 TYPE_SIZE (sizetype) = bitsize_int (precision); | |
2642 TYPE_SIZE_UNIT (sizetype) = size_int (GET_MODE_SIZE (mode)); | |
2643 set_min_and_max_values_for_integral_type (sizetype, precision, UNSIGNED); | |
2644 | |
2645 mode = smallest_int_mode_for_size (bprecision); | |
2646 SET_TYPE_MODE (bitsizetype, mode); | |
2647 SET_TYPE_ALIGN (bitsizetype, GET_MODE_ALIGNMENT (TYPE_MODE (bitsizetype))); | |
2648 TYPE_SIZE (bitsizetype) = bitsize_int (bprecision); | |
2649 TYPE_SIZE_UNIT (bitsizetype) = size_int (GET_MODE_SIZE (mode)); | |
2650 set_min_and_max_values_for_integral_type (bitsizetype, bprecision, UNSIGNED); | |
0 | 2651 |
63 | 2652 /* Create the signed variants of *sizetype. */
111 | 2653 ssizetype = make_signed_type (TYPE_PRECISION (sizetype)); |
2654 TYPE_NAME (ssizetype) = get_identifier ("ssizetype"); | |
2655 sbitsizetype = make_signed_type (TYPE_PRECISION (bitsizetype)); | |
2656 TYPE_NAME (sbitsizetype) = get_identifier ("sbitsizetype"); | |
0 | 2657 } |
2658 | |
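/* Illustrative note, not in the original source: on an LP64 target
   where SIZETYPE is "long unsigned int", precision is 64, so
   bprecision starts as MIN (64 + LOG2_BITS_PER_UNIT + 1, ...) == 68
   and is then widened to the precision of the smallest covering
   integer mode, typically 128, capped at HOST_BITS_PER_DOUBLE_INT.  */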
2659 /* TYPE is an integral type, i.e., an INTEGER_TYPE, ENUMERAL_TYPE |
2660 or BOOLEAN_TYPE. Set TYPE_MIN_VALUE and TYPE_MAX_VALUE |
2661 for TYPE, based on PRECISION and the signedness SGN. |
2662 PRECISION need not correspond to a width supported |
2663 natively by the hardware; for example, on a machine with 8-bit, | |
2664 16-bit, and 32-bit register modes, PRECISION might be 7, 23, or | |
2665 61. */ | |
2666 | |
2667 void | |
2668 set_min_and_max_values_for_integral_type (tree type, | |
2669 int precision, | |
111 | 2670 signop sgn) |
0 | 2671 { |
111 | 2672 /* For bitfields with zero width we end up creating integer types |
2673 with zero precision. Don't assign any minimum/maximum values | |
2674 to those types; they don't have any valid value. */
2675 if (precision < 1) | |
2676 return; | |
2677 | |
2678 TYPE_MIN_VALUE (type) | |
2679 = wide_int_to_tree (type, wi::min_value (precision, sgn)); | |
2680 TYPE_MAX_VALUE (type) | |
2681 = wide_int_to_tree (type, wi::max_value (precision, sgn)); | |
0 | 2682 } |
2683 | |
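/* Illustrative sketch, not part of the original source: a 7-bit
   signed type receives the range [-64, 63]; with UNSIGNED it would
   receive [0, 127].  */

static tree
example_seven_bit_type (void)
{
  tree t = make_node (INTEGER_TYPE);
  TYPE_PRECISION (t) = 7;
  set_min_and_max_values_for_integral_type (t, 7, SIGNED);
  return t;
}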
2684 /* Set the extreme values of TYPE based on its precision in bits, | |
2685 then lay it out. Used when make_signed_type won't do | |
111 | 2686 because the tree code is not INTEGER_TYPE. */ |
0 | 2687 |
2688 void | |
2689 fixup_signed_type (tree type) | |
2690 { | |
2691 int precision = TYPE_PRECISION (type); | |
2692 | |
111 | 2693 set_min_and_max_values_for_integral_type (type, precision, SIGNED); |
0 | 2694 |
2695 /* Lay out the type: set its alignment, size, etc. */ | |
2696 layout_type (type); | |
2697 } | |
2698 | |
2699 /* Set the extreme values of TYPE based on its precision in bits, | |
2700 then lay it out. This is used both in `make_unsigned_type' | |
2701 and for enumeral types. */ | |
2702 | |
2703 void | |
2704 fixup_unsigned_type (tree type) | |
2705 { | |
2706 int precision = TYPE_PRECISION (type); | |
2707 | |
2708 TYPE_UNSIGNED (type) = 1; | |
2709 | |
111 | 2710 set_min_and_max_values_for_integral_type (type, precision, UNSIGNED); |
0 | 2711 |
2712 /* Lay out the type: set its alignment, size, etc. */ | |
2713 layout_type (type); | |
2714 } | |
2715 | |
111 | 2716 /* Construct an iterator for a bitfield that spans BITSIZE bits, |
2717 starting at BITPOS. | |
2718 | |
2719 BITREGION_START is the bit position of the first bit in this | |
2720 sequence of bit fields. BITREGION_END is the last bit in this | |
2721 sequence. If these two fields are non-zero, we should restrict the | |
2722 memory access to that range. Otherwise, we are allowed to touch | |
2723 any adjacent non bit-fields. | |
2724 | |
2725 ALIGN is the alignment of the underlying object in bits. | |
2726 VOLATILEP says whether the bitfield is volatile. */ | |
2727 | |
2728 bit_field_mode_iterator | |
2729 ::bit_field_mode_iterator (HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos, | |
2730 HOST_WIDE_INT bitregion_start, | |
2731 HOST_WIDE_INT bitregion_end, | |
2732 unsigned int align, bool volatilep) | |
2733 : m_mode (NARROWEST_INT_MODE), m_bitsize (bitsize), | |
2734 m_bitpos (bitpos), m_bitregion_start (bitregion_start), | |
2735 m_bitregion_end (bitregion_end), m_align (align), | |
2736 m_volatilep (volatilep), m_count (0) | |
2737 { | |
2738 if (!m_bitregion_end) | |
2739 { | |
2740 /* We can assume that any aligned chunk of ALIGN bits that overlaps | |
2741 the bitfield is mapped and won't trap, provided that ALIGN isn't | |
2742 too large. The cap is the biggest required alignment for data, | |
2743 or at least the word size. And force one such chunk at least. */ | |
2744 unsigned HOST_WIDE_INT units | |
2745 = MIN (align, MAX (BIGGEST_ALIGNMENT, BITS_PER_WORD)); | |
2746 if (bitsize <= 0) | |
2747 bitsize = 1; | |
2748 m_bitregion_end = bitpos + bitsize + units - 1; | |
2749 m_bitregion_end -= m_bitregion_end % units + 1; | |
2750 } | |
2751 } | |
2752 | |
2753 /* Calls to this function return successively larger modes that can be used | |
2754 to represent the bitfield. Return true if another bitfield mode is | |
2755 available, storing it in *OUT_MODE if so. */ | |
2756 | |
2757 bool | |
2758 bit_field_mode_iterator::next_mode (scalar_int_mode *out_mode) | |
2759 { | |
2760 scalar_int_mode mode; | |
2761 for (; m_mode.exists (&mode); m_mode = GET_MODE_WIDER_MODE (mode)) | |
2762 { | |
2763 unsigned int unit = GET_MODE_BITSIZE (mode); | |
2764 | |
2765 /* Skip modes that don't have full precision. */ | |
2766 if (unit != GET_MODE_PRECISION (mode)) | |
2767 continue; | |
2768 | |
2769 /* Stop if the mode is too wide to handle efficiently. */ | |
2770 if (unit > MAX_FIXED_MODE_SIZE) | |
2771 break; | |
2772 | |
2773 /* Don't deliver more than one multiword mode; the smallest one | |
2774 should be used. */ | |
2775 if (m_count > 0 && unit > BITS_PER_WORD) | |
2776 break; | |
2777 | |
2778 /* Skip modes that are too small. */ | |
2779 unsigned HOST_WIDE_INT substart = (unsigned HOST_WIDE_INT) m_bitpos % unit; | |
2780 unsigned HOST_WIDE_INT subend = substart + m_bitsize; | |
2781 if (subend > unit) | |
2782 continue; | |
2783 | |
2784 /* Stop if the mode goes outside the bitregion. */ | |
2785 HOST_WIDE_INT start = m_bitpos - substart; | |
2786 if (m_bitregion_start && start < m_bitregion_start) | |
2787 break; | |
2788 HOST_WIDE_INT end = start + unit; | |
2789 if (end > m_bitregion_end + 1) | |
2790 break; | |
2791 | |
2792 /* Stop if the mode requires too much alignment. */ | |
2793 if (GET_MODE_ALIGNMENT (mode) > m_align | |
2794 && targetm.slow_unaligned_access (mode, m_align)) | |
2795 break; | |
2796 | |
2797 *out_mode = mode; | |
2798 m_mode = GET_MODE_WIDER_MODE (mode); | |
2799 m_count++; | |
2800 return true; | |
2801 } | |
2802 return false; | |
2803 } | |
2804 | |
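/* Illustrative sketch, not part of the original source: enumerating
   the modes usable for a 3-bit field at bit offset 5 in an object
   with 32-bit alignment and no explicit bitregion.  */

static void
example_iterate_bitfield_modes (void)
{
  bit_field_mode_iterator iter (3, 5, 0, 0, 32, false);
  scalar_int_mode mode;
  while (iter.next_mode (&mode))
    {
      /* On a 32-bit target this typically yields QImode, then HImode,
         then SImode, stopping once a mode would overrun the inferred
         bitregion, need more alignment than provided, or exceed
         MAX_FIXED_MODE_SIZE.  */
    }
}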
2805 /* Return true if smaller modes are generally preferred for this kind | |
2806 of bitfield. */ | |
2807 | |
2808 bool | |
2809 bit_field_mode_iterator::prefer_smaller_modes () | |
2810 { | |
2811 return (m_volatilep | |
2812 ? targetm.narrow_volatile_bitfield () | |
2813 : !SLOW_BYTE_ACCESS); | |
2814 } | |
2815 | |
0 | 2816 /* Find the best machine mode to use when referencing a bit field of length |
2817 BITSIZE bits starting at BITPOS. | |
2818 | |
111 | 2819 BITREGION_START is the bit position of the first bit in this |
2820 sequence of bit fields. BITREGION_END is the last bit in this | |
2821 sequence. If these two fields are non-zero, we should restrict the | |
2822 memory access to that range. Otherwise, we are allowed to touch | |
2823 any adjacent non bit-fields. | |
2824 | |
2825 The chosen mode must have no more than LARGEST_MODE_BITSIZE bits. | |
2826 INT_MAX is a suitable value for LARGEST_MODE_BITSIZE if the caller | |
2827 doesn't want to apply a specific limit. | |
2828 | |
2829 If no mode meets all these conditions, we return VOIDmode. | |
2830 | |
0 | 2831 The underlying object is known to be aligned to a boundary of ALIGN bits. |
2832 | |
2833 If VOLATILEP is false and SLOW_BYTE_ACCESS is false, we return the | |
2834 smallest mode meeting these conditions. | |
2835 | |
2836 If VOLATILEP is false and SLOW_BYTE_ACCESS is true, we return the | |
2837 largest mode (but a mode no wider than UNITS_PER_WORD) that meets | |
2838 all the conditions. | |
2839 | |
2840 If VOLATILEP is true the narrow_volatile_bitfields target hook is used to | |
2841 decide which of the above modes should be used. */ | |
2842 | |
111 | 2843 bool |
2844 get_best_mode (int bitsize, int bitpos, | |
2845 unsigned HOST_WIDE_INT bitregion_start, | |
2846 unsigned HOST_WIDE_INT bitregion_end, | |
2847 unsigned int align, | |
2848 unsigned HOST_WIDE_INT largest_mode_bitsize, bool volatilep, | |
2849 scalar_int_mode *best_mode) | |
0 | 2850 { |
111 | 2851 bit_field_mode_iterator iter (bitsize, bitpos, bitregion_start, |
2852 bitregion_end, align, volatilep); | |
2853 scalar_int_mode mode; | |
2854 bool found = false; | |
2855 while (iter.next_mode (&mode) | |
2856 /* ??? For historical reasons, reject modes that would normally | |
2857 receive greater alignment, even if unaligned accesses are | |
2858 acceptable. This has both advantages and disadvantages. | |
2859 Removing this check means that something like: | |
2860 | |
2861 struct s { unsigned int x; unsigned int y; }; | |
2862 int f (struct s *s) { return s->x == 0 && s->y == 0; } | |
2863 | |
2864 can be implemented using a single load and compare on | |
2865 64-bit machines that have no alignment restrictions. | |
2866 For example, on powerpc64-linux-gnu, we would generate: | |
2867 | |
2868 ld 3,0(3) | |
2869 cntlzd 3,3 | |
2870 srdi 3,3,6 | |
2871 blr | |
2872 | |
2873 rather than: | |
2874 | |
2875 lwz 9,0(3) | |
2876 cmpwi 7,9,0 | |
2877 bne 7,.L3 | |
2878 lwz 3,4(3) | |
2879 cntlzw 3,3 | |
2880 srwi 3,3,5 | |
2881 extsw 3,3 | |
2882 blr | |
2883 .p2align 4,,15 | |
2884 .L3: | |
2885 li 3,0 | |
2886 blr | |
2887 | |
2888 However, accessing more than one field can make life harder | |
2889 for the gimple optimizers. For example, gcc.dg/vect/bb-slp-5.c | |
2890 has a series of unsigned short copies followed by a series of | |
2891 unsigned short comparisons. With this check, both the copies | |
2892 and comparisons remain 16-bit accesses and FRE is able | |
2893 to eliminate the latter. Without the check, the comparisons | |
2894 can be done using 2 64-bit operations, which FRE isn't able | |
2895 to handle in the same way. | |
2896 | |
2897 Either way, it would probably be worth disabling this check | |
2898 during expand. One particular example where removing the | |
2899 check would help is the get_best_mode call in store_bit_field. | |
2900 If we are given a memory bitregion of 128 bits that is aligned | |
2901 to a 64-bit boundary, and the bitfield we want to modify is | |
2902 in the second half of the bitregion, this check causes | |
2903 store_bitfield to turn the memory into a 64-bit reference | |
2904 to the _first_ half of the region. We later use | |
2905 adjust_bitfield_address to get a reference to the correct half, | |
2906 but doing so looks to adjust_bitfield_address as though we are | |
2907 moving past the end of the original object, so it drops the | |
2908 associated MEM_EXPR and MEM_OFFSET. Removing the check | |
2909 causes store_bit_field to keep a 128-bit memory reference, | |
2910 so that the final bitfield reference still has a MEM_EXPR | |
2911 and MEM_OFFSET. */ | |
2912 && GET_MODE_ALIGNMENT (mode) <= align | |
2913 && GET_MODE_BITSIZE (mode) <= largest_mode_bitsize) | |
0 | 2914 { |
111 | 2915 *best_mode = mode; |
2916 found = true; | |
2917 if (iter.prefer_smaller_modes ()) | |
0 | 2918 break; |
2919 } | |
2920 | |
111 | 2921 return found; |
0 | 2922 } |
2923 | |
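/* Illustrative sketch, not part of the original source: picking an
   access mode for the same hypothetical 3-bit field at bit offset 5.  */

static void
example_get_best_mode (void)
{
  scalar_int_mode best;
  if (get_best_mode (3, 5, 0, 0, 32, INT_MAX, false, &best))
    {
      /* With !SLOW_BYTE_ACCESS smaller modes are preferred, so BEST
         is normally QImode: bits 5..7 fit in the first byte.  */
    }
}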
2924 /* Gets minimal and maximal values for MODE (signed or unsigned depending on | |
2925 SIGN). The returned constants are made to be usable in TARGET_MODE. */ | |
2926 | |
2927 void | |
111 | 2928 get_mode_bounds (scalar_int_mode mode, int sign, |
2929 scalar_int_mode target_mode, | |
0 | 2930 rtx *mmin, rtx *mmax) |
2931 { | |
111 | 2932 unsigned size = GET_MODE_PRECISION (mode); |
0 | 2933 unsigned HOST_WIDE_INT min_val, max_val; |
2934 | |
2935 gcc_assert (size <= HOST_BITS_PER_WIDE_INT); | |
2936 | |
111 | 2937 /* Special case BImode, which has values 0 and STORE_FLAG_VALUE. */ |
2938 if (mode == BImode) | |
0 | 2939 { |
111 | 2940 if (STORE_FLAG_VALUE < 0) |
2941 { | |
2942 min_val = STORE_FLAG_VALUE; | |
2943 max_val = 0; | |
2944 } | |
2945 else | |
2946 { | |
2947 min_val = 0; | |
2948 max_val = STORE_FLAG_VALUE; | |
2949 } | |
2950 } | |
2951 else if (sign) | |
2952 { | |
2953 min_val = -(HOST_WIDE_INT_1U << (size - 1)); | |
2954 max_val = (HOST_WIDE_INT_1U << (size - 1)) - 1; | |
0 | 2955 } |
2956 else | |
2957 { | |
2958 min_val = 0; | |
111 | 2959 max_val = (HOST_WIDE_INT_1U << (size - 1) << 1) - 1; |
0 | 2960 } |
2961 | |
2962 *mmin = gen_int_mode (min_val, target_mode); | |
2963 *mmax = gen_int_mode (max_val, target_mode); | |
2964 } | |
2965 | |
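/* Illustrative sketch, not part of the original source: the
   representable range of signed QImode.  */

static void
example_qimode_bounds (void)
{
  rtx mmin, mmax;
  get_mode_bounds (QImode, /*sign=*/1, QImode, &mmin, &mmax);
  /* MMIN is (const_int -128) and MMAX is (const_int 127).  */
}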
2966 #include "gt-stor-layout.h" |