gcc/cse.c @ 0:a06113de4d67 (first commit)
author: kent <kent@cr.ie.u-ryukyu.ac.jp>
date: Fri, 17 Jul 2009 14:47:48 +0900
children: 58ad6c70ea60
/* Common subexpression elimination for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
/* stdio.h must precede rtl.h for FFS.  */
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "basic-block.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "timevar.h"
#include "except.h"
#include "target.h"
#include "params.h"
#include "rtlhooks-def.h"
#include "tree-pass.h"
#include "df.h"
#include "dbgcnt.h"

/* The basic idea of common subexpression elimination is to go
   through the code, keeping a record of expressions that would
   have the same value at the current scan point, and replacing
   expressions encountered with the cheapest equivalent expression.

   It is too complicated to keep track of the different possibilities
   when control paths merge in this code; so, at each label, we forget all
   that is known and start fresh.  This can be described as processing each
   extended basic block separately.  We have a separate pass to perform
   global CSE.

   Note CSE can turn a conditional or computed jump into a nop or
   an unconditional jump.  When this occurs we arrange to run the jump
   optimizer after CSE to delete the unreachable code.

   We use two data structures to record the equivalent expressions:
   a hash table for most expressions, and a vector of "quantity
   numbers" to record equivalent (pseudo) registers.

   The use of the special data structure for registers is desirable
   because it is faster.  It is possible because register references
   contain a fairly small number, the register number, taken from
   a contiguously allocated series, and two register references are
   identical if they have the same number.  General expressions
   do not have any such thing, so the only way to retrieve the
   information recorded on an expression other than a register
   is to keep it in a hash table.

   Registers and "quantity numbers":

   At the start of each basic block, all of the (hardware and pseudo)
   registers used in the function are given distinct quantity
   numbers to indicate their contents.  During scan, when the code
   copies one register into another, we copy the quantity number.
   When a register is loaded in any other way, we allocate a new
   quantity number to describe the value generated by this operation.
   `REG_QTY (N)' records what quantity register N is currently thought
   of as containing.

   All real quantity numbers are greater than or equal to zero.
   If register N has not been assigned a quantity, `REG_QTY (N)' will
   equal -N - 1, which is always negative.  (A short illustrative
   sketch of this encoding follows this comment block.)

   Quantity numbers below zero do not exist and none of the `qty_table'
   entries should be referenced with a negative index.

   We also maintain a bidirectional chain of registers for each
   quantity number.  The `qty_table' members `first_reg' and `last_reg',
   and `reg_eqv_table' members `next' and `prev' hold these chains.

   The first register in a chain is the one whose lifespan is least local.
   Among equals, it is the one that was seen first.
   We replace any equivalent register with that one.

   If two registers have the same quantity number, it must be true that
   REG expressions with qty_table `mode' must be in the hash table for both
   registers and must be in the same class.

   The converse is not true.  Since hard registers may be referenced in
   any mode, two REG expressions might be equivalent in the hash table
   but not have the same quantity number if the quantity of one of the
   registers does not have the same mode as those expressions.

   Constants and quantity numbers

   When a quantity has a known constant value, that value is stored
   in the appropriate qty_table `const_rtx'.  This is in addition to
   putting the constant in the hash table as is usual for non-regs.

   Whether a reg or a constant is preferred is determined by the configuration
   macro CONST_COSTS and will often depend on the constant value.  In any
   event, expressions containing constants can be simplified, by fold_rtx.

   When a quantity has a known nearly constant value (such as an address
   of a stack slot), that value is stored in the appropriate qty_table
   `const_rtx'.

   Integer constants don't have a machine mode.  However, cse
   determines the intended machine mode from the destination
   of the instruction that moves the constant.  The machine mode
   is recorded in the hash table along with the actual RTL
   constant expression so that different modes are kept separate.

   Other expressions:

   To record known equivalences among expressions in general
   we use a hash table called `table'.  It has a fixed number of buckets
   that contain chains of `struct table_elt' elements for expressions.
   These chains connect the elements whose expressions have the same
   hash codes.

   Other chains through the same elements connect the elements which
   currently have equivalent values.

   Register references in an expression are canonicalized before hashing
   the expression.  This is done using `reg_qty' and qty_table `first_reg'.
   The hash code of a register reference is computed using the quantity
   number, not the register number.

   When the value of an expression changes, it is necessary to remove from the
   hash table not just that expression but all expressions whose values
   could be different as a result.

     1. If the value changing is in memory, except in special cases
     ANYTHING referring to memory could be changed.  That is because
     nobody knows where a pointer does not point.
     The function `invalidate_memory' removes what is necessary.

     The special cases are when the address is constant or is
     a constant plus a fixed register such as the frame pointer
     or a static chain pointer.  When such addresses are stored in,
     we can tell exactly which other such addresses must be invalidated
     due to overlap.  `invalidate' does this.
     All expressions that refer to non-constant
     memory addresses are also invalidated.  `invalidate_memory' does this.

     2. If the value changing is a register, all expressions
     containing references to that register, and only those,
     must be removed.

   Because searching the entire hash table for expressions that contain
   a register is very slow, we try to figure out when it isn't necessary.
   Precisely, this is necessary only when expressions have been
   entered in the hash table using this register, and then the value has
   changed, and then another expression wants to be added to refer to
   the register's new value.  This sequence of circumstances is rare
   within any one basic block.

   `REG_TICK' and `REG_IN_TABLE', accessors for members of
   cse_reg_info, are used to detect this case.  REG_TICK (i) is
   incremented whenever a value is stored in register i.
   REG_IN_TABLE (i) holds -1 if no references to register i have been
   entered in the table; otherwise, it contains the value REG_TICK (i)
   had when the references were entered.  If we want to enter a
   reference and REG_IN_TABLE (i) != REG_TICK (i), we must scan and
   remove old references.  Until we want to enter a new entry, the
   mere fact that the two vectors don't match causes the entries to be
   ignored if anyone tries to match them.

   Registers themselves are entered in the hash table as well as in
   the equivalent-register chains.  However, `REG_TICK' and
   `REG_IN_TABLE' do not apply to expressions which are simple
   register references.  These expressions are removed from the table
   immediately when they become invalid, and this can be done even if
   we do not immediately search for all the expressions that refer to
   the register.

   A CLOBBER rtx in an instruction invalidates its operand for further
   reuse.  A CLOBBER or SET rtx whose operand is a MEM:BLK
   invalidates everything that resides in memory.

   Related expressions:

   Constant expressions that differ only by an additive integer
   are called related.  When a constant expression is put in
   the table, the related expression with no constant term
   is also entered.  These are made to point at each other
   so that it is possible to find out if there exists any
   register equivalent to an expression related to a given expression.  */

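/* A minimal sketch (not part of the original file) of the REG_QTY
   encoding described in the comment above: an unassigned register N maps
   to -N - 1, so a simple sign check distinguishes "has a quantity" from
   "has none".  All names here are hypothetical and exist only for this
   illustration.  */
#if 0
static int example_qty[8];  /* stand-in for per-register REG_QTY storage */

/* Mark register N as having no quantity, mirroring get_cse_reg_info_1.  */
static void
example_reset_reg (unsigned int n)
{
  example_qty[n] = -(int) n - 1;
}

/* Mirrors REGNO_QTY_VALID_P: real quantity numbers are >= 0.  */
static int
example_qty_valid_p (unsigned int n)
{
  return example_qty[n] >= 0;
}
#endif
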
/* Length of qty_table vector.  We know in advance we will not need
   a quantity number this big.  */

static int max_qty;

/* Next quantity number to be allocated.
   This is 1 + the largest number needed so far.  */

static int next_qty;

/* Per-qty information tracking.

   `first_reg' and `last_reg' track the head and tail of the
   chain of registers which currently contain this quantity.

   `mode' contains the machine mode of this quantity.

   `const_rtx' holds the rtx of the constant value of this
   quantity, if known.  A summation of the frame/arg pointer
   and a constant can also be entered here.  When this holds
   a known value, `const_insn' is the insn which stored the
   constant value.

   `comparison_{code,const,qty}' are used to track when a
   comparison between a quantity and some constant or register has
   been passed.  In such a case, we know the results of the comparison
   in case we see it again.  These members record a comparison that
   is known to be true.  `comparison_code' holds the rtx code of such
   a comparison, else it is set to UNKNOWN and the other two
   comparison members are undefined.  `comparison_const' holds
   the constant being compared against, or zero if the comparison
   is not against a constant.  `comparison_qty' holds the quantity
   being compared against when the result is known.  If the comparison
   is not with a register, `comparison_qty' is -1.  */

struct qty_table_elem
{
  rtx const_rtx;
  rtx const_insn;
  rtx comparison_const;
  int comparison_qty;
  unsigned int first_reg, last_reg;
  /* The sizes of these fields should match the sizes of the
     code and mode fields of struct rtx_def (see rtl.h).  */
  ENUM_BITFIELD(rtx_code) comparison_code : 16;
  ENUM_BITFIELD(machine_mode) mode : 8;
};

/* The table of all qtys, indexed by qty number.  */
static struct qty_table_elem *qty_table;

/* Structure used to pass arguments via for_each_rtx to function
   cse_change_cc_mode.  */
struct change_cc_mode_args
{
  rtx insn;
  rtx newreg;
};

#ifdef HAVE_cc0
/* For machines that have a CC0, we do not record its value in the hash
   table since its use is guaranteed to be the insn immediately following
   its definition and any other insn is presumed to invalidate it.

   Instead, we store below the current and last value assigned to CC0.
   If it should happen to be a constant, it is stored in preference
   to the actual assigned value.  In case it is a constant, we store
   the mode in which the constant should be interpreted.  */

static rtx this_insn_cc0, prev_insn_cc0;
static enum machine_mode this_insn_cc0_mode, prev_insn_cc0_mode;
#endif

/* Insn being scanned.  */

static rtx this_insn;
static bool optimize_this_for_speed_p;

/* Index by register number, gives the number of the next (or
   previous) register in the chain of registers sharing the same
   value.

   Or -1 if this register is at the end of the chain.

   If REG_QTY (N) == -N - 1, reg_eqv_table[N].next is undefined.  */

/* Per-register equivalence chain.  */
struct reg_eqv_elem
{
  int next, prev;
};

/* The table of all register equivalence chains.  */
static struct reg_eqv_elem *reg_eqv_table;

struct cse_reg_info
{
  /* The timestamp at which this register is initialized.  */
  unsigned int timestamp;

  /* The quantity number of the register's current contents.  */
  int reg_qty;

  /* The number of times the register has been altered in the current
     basic block.  */
  int reg_tick;

  /* The REG_TICK value at which rtx's containing this register are
     valid in the hash table.  If this does not equal the current
     reg_tick value, such expressions existing in the hash table are
     invalid.  */
  int reg_in_table;

  /* The SUBREG that was set when REG_TICK was last incremented.  Set
     to -1 if the last store was to the whole register, not a subreg.  */
  unsigned int subreg_ticked;
};

/* A table of cse_reg_info indexed by register numbers.  */
static struct cse_reg_info *cse_reg_info_table;

/* The size of the above table.  */
static unsigned int cse_reg_info_table_size;

/* The index of the first entry that has not been initialized.  */
static unsigned int cse_reg_info_table_first_uninitialized;

/* The timestamp at the beginning of the current run of
   cse_extended_basic_block.  We increment this variable at the beginning of
   the current run of cse_extended_basic_block.  The timestamp field of a
   cse_reg_info entry matches the value of this variable if and only
   if the entry has been initialized during the current run of
   cse_extended_basic_block.  */
static unsigned int cse_reg_info_timestamp;

/* A HARD_REG_SET containing all the hard registers for which there is
   currently a REG expression in the hash table.  Note the difference
   from the above variables, which indicate if the REG is mentioned in some
   expression in the table.  */

static HARD_REG_SET hard_regs_in_table;

/* True if CSE has altered the CFG.  */
static bool cse_cfg_altered;

/* True if CSE has altered conditional jump insns in such a way
   that jump optimization should be redone.  */
static bool cse_jumps_altered;

/* True if we put a LABEL_REF into the hash table for an INSN
   without a REG_LABEL_OPERAND; in that case we must rerun jump after
   CSE to put in the note.  */
static bool recorded_label_ref;

/* canon_hash stores 1 in do_not_record
   if it notices a reference to CC0, PC, or some other volatile
   subexpression.  */

static int do_not_record;

/* canon_hash stores 1 in hash_arg_in_memory
   if it notices a reference to memory within the expression being hashed.  */

static int hash_arg_in_memory;

/* The hash table contains buckets which are chains of `struct table_elt's,
   each recording one expression's information.
   That expression is in the `exp' field.

   The canon_exp field contains a canonical (from the point of view of
   alias analysis) version of the `exp' field.

   Those elements with the same hash code are chained in both directions
   through the `next_same_hash' and `prev_same_hash' fields.

   Each set of expressions with equivalent values
   is on a two-way chain through the `next_same_value'
   and `prev_same_value' fields, and all point with
   the `first_same_value' field at the first element in
   that chain.  The chain is in order of increasing cost.
   Each element's cost value is in its `cost' field.

   The `in_memory' field is nonzero for elements that
   involve any reference to memory.  These elements are removed
   whenever a write is done to an unidentified location in memory.
   To be safe, we assume that a memory address is unidentified unless
   the address is either a symbol constant or a constant plus
   the frame pointer or argument pointer.

   The `related_value' field is used to connect related expressions
   (that differ by adding an integer).
   The related expressions are chained in a circular fashion.
   `related_value' is zero for expressions for which this
   chain is not useful.

   The `cost' field stores the cost of this element's expression.
   The `regcost' field stores the value returned by approx_reg_cost for
   this element's expression.

   The `is_const' flag is set if the element is a constant (including
   a fixed address).

   The `flag' field is used as a temporary during some search routines.

   The `mode' field is usually the same as GET_MODE (`exp'), but
   if `exp' is a CONST_INT and has no machine mode then the `mode'
   field is the mode it was being used as.  Each constant is
   recorded separately for each mode it is used with.  */

struct table_elt
{
  rtx exp;
  rtx canon_exp;
  struct table_elt *next_same_hash;
  struct table_elt *prev_same_hash;
  struct table_elt *next_same_value;
  struct table_elt *prev_same_value;
  struct table_elt *first_same_value;
  struct table_elt *related_value;
  int cost;
  int regcost;
  /* The size of this field should match the size
     of the mode field of struct rtx_def (see rtl.h).  */
  ENUM_BITFIELD(machine_mode) mode : 8;
  char in_memory;
  char is_const;
  char flag;
};

/* We don't want a lot of buckets, because we rarely have very many
   things stored in the hash table, and a lot of buckets slows
   down a lot of loops that happen frequently.  */
#define HASH_SHIFT 5
#define HASH_SIZE (1 << HASH_SHIFT)
#define HASH_MASK (HASH_SIZE - 1)

/* Compute hash code of X in mode M.  Special-case the case where X is a
   pseudo register (hard registers may require `do_not_record' to be set).  */

#define HASH(X, M) \
 ((REG_P (X) && REGNO (X) >= FIRST_PSEUDO_REGISTER \
  ? (((unsigned) REG << 7) + (unsigned) REG_QTY (REGNO (X))) \
  : canon_hash (X, M)) & HASH_MASK)

/* Like HASH, but without side-effects.  */
#define SAFE_HASH(X, M) \
 ((REG_P (X) && REGNO (X) >= FIRST_PSEUDO_REGISTER \
  ? (((unsigned) REG << 7) + (unsigned) REG_QTY (REGNO (X))) \
  : safe_hash (X, M)) & HASH_MASK)

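/* Illustrative note (not from the original file): pseudo registers hash by
   quantity number rather than register number, so two pseudos already known
   to be equivalent hash identically.  For example, once (set (reg 101)
   (reg 100)) has been scanned, REG_QTY (100) == REG_QTY (101), and HASH
   applied to either register selects the same bucket, so the shared table
   entry is found without any rehashing.  */
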
/* Determine whether register number N is considered a fixed register for the
   purpose of approximating register costs.
   It is desirable to replace other regs with fixed regs, to reduce need for
   non-fixed hard regs.
   A reg wins if it is either the frame pointer or designated as fixed.  */
#define FIXED_REGNO_P(N) \
  ((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \
   || fixed_regs[N] || global_regs[N])

/* Compute cost of X, as stored in the `cost' field of a table_elt.  Fixed
   hard registers and pointers into the frame are the cheapest with a cost
   of 0.  Next come pseudos with a cost of one and other hard registers with
   a cost of 2.  Aside from these special cases, call `rtx_cost'.  */

#define CHEAP_REGNO(N) \
  (REGNO_PTR_FRAME_P(N) \
   || (HARD_REGISTER_NUM_P (N) \
       && FIXED_REGNO_P (N) && REGNO_REG_CLASS (N) != NO_REGS))

#define COST(X) (REG_P (X) ? 0 : notreg_cost (X, SET))
#define COST_IN(X,OUTER) (REG_P (X) ? 0 : notreg_cost (X, OUTER))

/* Get the number of times this register has been updated in this
   basic block.  */

#define REG_TICK(N) (get_cse_reg_info (N)->reg_tick)

/* Get the point at which REG was recorded in the table.  */

#define REG_IN_TABLE(N) (get_cse_reg_info (N)->reg_in_table)

/* Get the SUBREG set at the last increment to REG_TICK (-1 if not a
   SUBREG).  */

#define SUBREG_TICKED(N) (get_cse_reg_info (N)->subreg_ticked)

/* Get the quantity number for REG.  */

#define REG_QTY(N) (get_cse_reg_info (N)->reg_qty)

/* Determine if the quantity number for register X represents a valid index
   into the qty_table.  */

#define REGNO_QTY_VALID_P(N) (REG_QTY (N) >= 0)

static struct table_elt *table[HASH_SIZE];

/* Chain of `struct table_elt's made so far for this function
   but currently removed from the table.  */

static struct table_elt *free_element_chain;

/* Set to the cost of a constant pool reference if one was found for a
   symbolic constant.  If this was found, it means we should try to
   convert constants into constant pool entries if they don't fit in
   the insn.  */

static int constant_pool_entries_cost;
static int constant_pool_entries_regcost;

/* This data describes a block that will be processed by
   cse_extended_basic_block.  */

struct cse_basic_block_data
{
  /* Total number of SETs in block.  */
  int nsets;
  /* Size of current branch path, if any.  */
  int path_size;
  /* Current path, indicating which basic_blocks will be processed.  */
  struct branch_path
    {
      /* The basic block for this path entry.  */
      basic_block bb;
    } *path;
};

/* Pointers to the live in/live out bitmaps for the boundaries of the
   current EBB.  */
static bitmap cse_ebb_live_in, cse_ebb_live_out;

/* A simple bitmap to track which basic blocks have been visited
   already as part of an already processed extended basic block.  */
static sbitmap cse_visited_basic_blocks;

static bool fixed_base_plus_p (rtx x);
static int notreg_cost (rtx, enum rtx_code);
static int approx_reg_cost_1 (rtx *, void *);
static int approx_reg_cost (rtx);
static int preferable (int, int, int, int);
static void new_basic_block (void);
static void make_new_qty (unsigned int, enum machine_mode);
static void make_regs_eqv (unsigned int, unsigned int);
static void delete_reg_equiv (unsigned int);
static int mention_regs (rtx);
static int insert_regs (rtx, struct table_elt *, int);
static void remove_from_table (struct table_elt *, unsigned);
static void remove_pseudo_from_table (rtx, unsigned);
static struct table_elt *lookup (rtx, unsigned, enum machine_mode);
static struct table_elt *lookup_for_remove (rtx, unsigned, enum machine_mode);
static rtx lookup_as_function (rtx, enum rtx_code);
static struct table_elt *insert (rtx, struct table_elt *, unsigned,
                                 enum machine_mode);
static void merge_equiv_classes (struct table_elt *, struct table_elt *);
static void invalidate (rtx, enum machine_mode);
static bool cse_rtx_varies_p (const_rtx, bool);
static void remove_invalid_refs (unsigned int);
static void remove_invalid_subreg_refs (unsigned int, unsigned int,
                                        enum machine_mode);
static void rehash_using_reg (rtx);
static void invalidate_memory (void);
static void invalidate_for_call (void);
static rtx use_related_value (rtx, struct table_elt *);

static inline unsigned canon_hash (rtx, enum machine_mode);
static inline unsigned safe_hash (rtx, enum machine_mode);
static inline unsigned hash_rtx_string (const char *);

static rtx canon_reg (rtx, rtx);
static enum rtx_code find_comparison_args (enum rtx_code, rtx *, rtx *,
                                           enum machine_mode *,
                                           enum machine_mode *);
static rtx fold_rtx (rtx, rtx);
static rtx equiv_constant (rtx);
static void record_jump_equiv (rtx, bool);
static void record_jump_cond (enum rtx_code, enum machine_mode, rtx, rtx,
                              int);
static void cse_insn (rtx);
static void cse_prescan_path (struct cse_basic_block_data *);
static void invalidate_from_clobbers (rtx);
static rtx cse_process_notes (rtx, rtx, bool *);
static void cse_extended_basic_block (struct cse_basic_block_data *);
static void count_reg_usage (rtx, int *, rtx, int);
static int check_for_label_ref (rtx *, void *);
extern void dump_class (struct table_elt*);
static void get_cse_reg_info_1 (unsigned int regno);
static struct cse_reg_info * get_cse_reg_info (unsigned int regno);
static int check_dependence (rtx *, void *);

static void flush_hash_table (void);
static bool insn_live_p (rtx, int *);
static bool set_live_p (rtx, rtx, int *);
static int cse_change_cc_mode (rtx *, void *);
static void cse_change_cc_mode_insn (rtx, rtx);
static void cse_change_cc_mode_insns (rtx, rtx, rtx);
static enum machine_mode cse_cc_succs (basic_block, basic_block, rtx, rtx,
                                       bool);

#undef RTL_HOOKS_GEN_LOWPART
#define RTL_HOOKS_GEN_LOWPART gen_lowpart_if_possible

static const struct rtl_hooks cse_rtl_hooks = RTL_HOOKS_INITIALIZER;

/* Nonzero if X has the form (PLUS frame-pointer integer).  We check for
   virtual regs here because the simplify_*_operation routines are called
   by integrate.c, which is called before virtual register instantiation.  */

static bool
fixed_base_plus_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case REG:
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx)
        return true;
      if (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])
        return true;
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return true;
      return false;

    case PLUS:
      if (GET_CODE (XEXP (x, 1)) != CONST_INT)
        return false;
      return fixed_base_plus_p (XEXP (x, 0));

    default:
      return false;
    }
}

/* Dump the expressions in the equivalence class indicated by CLASSP.
   This function is used only for debugging.  */
void
dump_class (struct table_elt *classp)
{
  struct table_elt *elt;

  fprintf (stderr, "Equivalence chain for ");
  print_rtl (stderr, classp->exp);
  fprintf (stderr, ": \n");

  for (elt = classp->first_same_value; elt; elt = elt->next_same_value)
    {
      print_rtl (stderr, elt->exp);
      fprintf (stderr, "\n");
    }
}

/* Subroutine of approx_reg_cost; called through for_each_rtx.  */

static int
approx_reg_cost_1 (rtx *xp, void *data)
{
  rtx x = *xp;
  int *cost_p = (int *) data;

  if (x && REG_P (x))
    {
      unsigned int regno = REGNO (x);

      if (! CHEAP_REGNO (regno))
        {
          if (regno < FIRST_PSEUDO_REGISTER)
            {
              if (SMALL_REGISTER_CLASSES)
                return 1;
              *cost_p += 2;
            }
          else
            *cost_p += 1;
        }
    }

  return 0;
}

/* Return an estimate of the cost of the registers used in an rtx.
   This is mostly the number of different REG expressions in the rtx;
   however for some exceptions like fixed registers we use a cost of
   0.  If any other hard register reference occurs, return MAX_COST.  */

static int
approx_reg_cost (rtx x)
{
  int cost = 0;

  if (for_each_rtx (&x, approx_reg_cost_1, (void *) &cost))
    return MAX_COST;

  return cost;
}

/* Return a negative value if an rtx A, whose costs are given by COST_A
   and REGCOST_A, is more desirable than an rtx B.
   Return a positive value if A is less desirable, or 0 if the two are
   equally good.  */
static int
preferable (int cost_a, int regcost_a, int cost_b, int regcost_b)
{
  /* First, get rid of cases involving expressions that are entirely
     unwanted.  */
  if (cost_a != cost_b)
    {
      if (cost_a == MAX_COST)
        return 1;
      if (cost_b == MAX_COST)
        return -1;
    }

  /* Avoid extending lifetimes of hardregs.  */
  if (regcost_a != regcost_b)
    {
      if (regcost_a == MAX_COST)
        return 1;
      if (regcost_b == MAX_COST)
        return -1;
    }

  /* Normal operation costs take precedence.  */
  if (cost_a != cost_b)
    return cost_a - cost_b;
  /* Only if these are identical consider effects on register pressure.  */
  if (regcost_a != regcost_b)
    return regcost_a - regcost_b;
  return 0;
}
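
/* A worked example (not in the original source) of the ordering that
   `preferable' induces: MAX_COST behaves as an infinity that dominates both
   cost fields; otherwise rtx cost is compared first and register cost only
   breaks ties.  The argument values below are invented for illustration.  */
#if 0
  preferable (1, 0, MAX_COST, 0);  /* < 0: B is entirely unwanted.  */
  preferable (2, 5, 4, 0);         /* < 0: lower rtx cost wins even
                                      against a lower register cost.  */
  preferable (3, 2, 3, 7);         /* < 0: rtx costs tie, so the lower
                                      register cost decides.  */
  preferable (3, 7, 3, 7);         /* == 0: equally good.  */
#endif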

/* Internal function, to compute cost when X is not a register; called
   from COST macro to keep it simple.  */

static int
notreg_cost (rtx x, enum rtx_code outer)
{
  return ((GET_CODE (x) == SUBREG
           && REG_P (SUBREG_REG (x))
           && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
           && GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_INT
           && (GET_MODE_SIZE (GET_MODE (x))
               < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
           && subreg_lowpart_p (x)
           && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (x)),
                                     GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))))
          ? 0
          : rtx_cost (x, outer, optimize_this_for_speed_p) * 2);
}


/* Initialize CSE_REG_INFO_TABLE.  */

static void
init_cse_reg_info (unsigned int nregs)
{
  /* Do we need to grow the table?  */
  if (nregs > cse_reg_info_table_size)
    {
      unsigned int new_size;

      if (cse_reg_info_table_size < 2048)
        {
          /* Compute a new size that is a power of 2 and no smaller
             than the larger of NREGS and 64.  */
          new_size = (cse_reg_info_table_size
                      ? cse_reg_info_table_size : 64);

          while (new_size < nregs)
            new_size *= 2;
        }
      else
        {
          /* If we need a big table, allocate just enough to hold
             NREGS registers.  */
          new_size = nregs;
        }

      /* Reallocate the table with NEW_SIZE entries.  */
      if (cse_reg_info_table)
        free (cse_reg_info_table);
      cse_reg_info_table = XNEWVEC (struct cse_reg_info, new_size);
      cse_reg_info_table_size = new_size;
      cse_reg_info_table_first_uninitialized = 0;
    }

  /* Do we have all of the first NREGS entries initialized?  */
  if (cse_reg_info_table_first_uninitialized < nregs)
    {
      unsigned int old_timestamp = cse_reg_info_timestamp - 1;
      unsigned int i;

      /* Put the old timestamp on newly allocated entries so that they
         will all be considered out of date.  We do not touch those
         entries beyond the first NREGS entries to be nice to the
         virtual memory.  */
      for (i = cse_reg_info_table_first_uninitialized; i < nregs; i++)
        cse_reg_info_table[i].timestamp = old_timestamp;

      cse_reg_info_table_first_uninitialized = nregs;
    }
}

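/* An illustrative trace (not in the original file) of the sizing policy
   above, assuming the table starts empty: sizes grow in powers of two from
   a floor of 64 until the table reaches 2048 entries, after which it is
   sized exactly.

     init_cse_reg_info (10)   -> new_size = 64
     init_cse_reg_info (100)  -> new_size = 128   (64 doubled once)
     init_cse_reg_info (1500) -> new_size = 2048  (128 doubled four times)
     init_cse_reg_info (5000) -> new_size = 5000  (table already at 2048)  */
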
/* Given REGNO, initialize the cse_reg_info entry for REGNO.  */

static void
get_cse_reg_info_1 (unsigned int regno)
{
  /* Set TIMESTAMP field to CSE_REG_INFO_TIMESTAMP so that this
     entry will be considered to have been initialized.  */
  cse_reg_info_table[regno].timestamp = cse_reg_info_timestamp;

  /* Initialize the rest of the entry.  */
  cse_reg_info_table[regno].reg_tick = 1;
  cse_reg_info_table[regno].reg_in_table = -1;
  cse_reg_info_table[regno].subreg_ticked = -1;
  cse_reg_info_table[regno].reg_qty = -regno - 1;
}

/* Find a cse_reg_info entry for REGNO.  */

static inline struct cse_reg_info *
get_cse_reg_info (unsigned int regno)
{
  struct cse_reg_info *p = &cse_reg_info_table[regno];

  /* If this entry has not been initialized, go ahead and initialize
     it.  */
  if (p->timestamp != cse_reg_info_timestamp)
    get_cse_reg_info_1 (regno);

  return p;
}

/* Clear the hash table and initialize each register with its own quantity,
   for a new basic block.  */

static void
new_basic_block (void)
{
  int i;

  next_qty = 0;

  /* Invalidate cse_reg_info_table.  */
  cse_reg_info_timestamp++;

  /* Clear out hash table state for this pass.  */
  CLEAR_HARD_REG_SET (hard_regs_in_table);

  /* The per-quantity values used to be initialized here, but it is
     much faster to initialize each as it is made in `make_new_qty'.  */

  for (i = 0; i < HASH_SIZE; i++)
    {
      struct table_elt *first;

      first = table[i];
      if (first != NULL)
        {
          struct table_elt *last = first;

          table[i] = NULL;

          while (last->next_same_hash != NULL)
            last = last->next_same_hash;

          /* Now relink this entire hash chain into
             the free element list.  */

          last->next_same_hash = free_element_chain;
          free_element_chain = first;
        }
    }

#ifdef HAVE_cc0
  prev_insn_cc0 = 0;
#endif
}

/* Say that register REG contains a quantity in mode MODE not in any
   register before and initialize that quantity.  */

static void
make_new_qty (unsigned int reg, enum machine_mode mode)
{
  int q;
  struct qty_table_elem *ent;
  struct reg_eqv_elem *eqv;

  gcc_assert (next_qty < max_qty);

  q = REG_QTY (reg) = next_qty++;
  ent = &qty_table[q];
  ent->first_reg = reg;
  ent->last_reg = reg;
  ent->mode = mode;
  ent->const_rtx = ent->const_insn = NULL_RTX;
  ent->comparison_code = UNKNOWN;

  eqv = &reg_eqv_table[reg];
  eqv->next = eqv->prev = -1;
}

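/* For illustration (not part of the original source): immediately after
   make_new_qty (r, m) the new quantity's chain holds exactly one register,
   so both chain endpoints name R and the per-register links hold the -1
   end-of-chain sentinels:

     REG_QTY (r) == q
     qty_table[q].first_reg == qty_table[q].last_reg == r
     qty_table[q].mode == m
     reg_eqv_table[r].next == reg_eqv_table[r].prev == -1  */
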
/* Make reg NEW equivalent to reg OLD.
   OLD is not changing; NEW is.  */

static void
make_regs_eqv (unsigned int new_reg, unsigned int old_reg)
{
  unsigned int lastr, firstr;
  int q = REG_QTY (old_reg);
  struct qty_table_elem *ent;

  ent = &qty_table[q];

  /* Nothing should become eqv until it has a "non-invalid" qty number.  */
  gcc_assert (REGNO_QTY_VALID_P (old_reg));

  REG_QTY (new_reg) = q;
  firstr = ent->first_reg;
  lastr = ent->last_reg;

  /* Prefer fixed hard registers to anything.  Prefer pseudo regs to other
     hard regs.  Among pseudos, if NEW will live longer than any other reg
     of the same qty, and that is beyond the current basic block,
     make it the new canonical replacement for this qty.  */
  if (! (firstr < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (firstr))
      /* Certain fixed registers might be of the class NO_REGS.  This means
         that not only can they not be allocated by the compiler, but
         they cannot be used in substitutions or canonicalizations
         either.  */
      && (new_reg >= FIRST_PSEUDO_REGISTER || REGNO_REG_CLASS (new_reg) != NO_REGS)
      && ((new_reg < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (new_reg))
          || (new_reg >= FIRST_PSEUDO_REGISTER
              && (firstr < FIRST_PSEUDO_REGISTER
                  || (bitmap_bit_p (cse_ebb_live_out, new_reg)
                      && !bitmap_bit_p (cse_ebb_live_out, firstr))
                  || (bitmap_bit_p (cse_ebb_live_in, new_reg)
                      && !bitmap_bit_p (cse_ebb_live_in, firstr))))))
    {
      reg_eqv_table[firstr].prev = new_reg;
      reg_eqv_table[new_reg].next = firstr;
      reg_eqv_table[new_reg].prev = -1;
      ent->first_reg = new_reg;
    }
  else
    {
      /* If NEW is a hard reg (known to be non-fixed), insert at end.
         Otherwise, insert before any non-fixed hard regs that are at the
         end.  Registers of class NO_REGS cannot be used as an
         equivalent for anything.  */
      while (lastr < FIRST_PSEUDO_REGISTER && reg_eqv_table[lastr].prev >= 0
             && (REGNO_REG_CLASS (lastr) == NO_REGS || ! FIXED_REGNO_P (lastr))
             && new_reg >= FIRST_PSEUDO_REGISTER)
        lastr = reg_eqv_table[lastr].prev;
      reg_eqv_table[new_reg].next = reg_eqv_table[lastr].next;
      if (reg_eqv_table[lastr].next >= 0)
        reg_eqv_table[reg_eqv_table[lastr].next].prev = new_reg;
      else
        qty_table[q].last_reg = new_reg;
      reg_eqv_table[lastr].next = new_reg;
      reg_eqv_table[new_reg].prev = lastr;
    }
}

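/* A concrete scenario (invented for illustration) of the canonical-order
   rule above.  If a fixed hard register such as the frame pointer, pseudo
   100, and a non-fixed hard register 3 all come to share one quantity, the
   chain ends up ordered fixed hard regs, then pseudos, then other hard
   regs:

     first_reg -> frame pointer -> pseudo 100 -> hard reg 3 <- last_reg

   so canonicalization through `first_reg' replaces the other registers
   with the frame pointer where that is valid.  */
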
/* Remove REG from its equivalence class.  */

static void
delete_reg_equiv (unsigned int reg)
{
  struct qty_table_elem *ent;
  int q = REG_QTY (reg);
  int p, n;

  /* If invalid, do nothing.  */
  if (! REGNO_QTY_VALID_P (reg))
    return;

  ent = &qty_table[q];

  p = reg_eqv_table[reg].prev;
  n = reg_eqv_table[reg].next;

  if (n != -1)
    reg_eqv_table[n].prev = p;
  else
    ent->last_reg = p;
  if (p != -1)
    reg_eqv_table[p].next = n;
  else
    ent->first_reg = n;

  REG_QTY (reg) = -reg - 1;
}

/* Remove any invalid expressions from the hash table
   that refer to any of the registers contained in expression X.

   Make sure that newly inserted references to those registers
   as subexpressions will be considered valid.

   mention_regs is not called when a register itself
   is being stored in the table.

   Return 1 if we have done something that may have changed the hash code
   of X.  */

static int
mention_regs (rtx x)
{
  enum rtx_code code;
  int i, j;
  const char *fmt;
  int changed = 0;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno = END_REGNO (x);
      unsigned int i;

      for (i = regno; i < endregno; i++)
        {
          if (REG_IN_TABLE (i) >= 0 && REG_IN_TABLE (i) != REG_TICK (i))
            remove_invalid_refs (i);

          REG_IN_TABLE (i) = REG_TICK (i);
          SUBREG_TICKED (i) = -1;
        }

      return 0;
    }

  /* If this is a SUBREG, we don't want to discard other SUBREGs of the same
     pseudo if they don't use overlapping words.  We handle only pseudos
     here for simplicity.  */
  if (code == SUBREG && REG_P (SUBREG_REG (x))
      && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER)
    {
      unsigned int i = REGNO (SUBREG_REG (x));

      if (REG_IN_TABLE (i) >= 0 && REG_IN_TABLE (i) != REG_TICK (i))
        {
          /* If REG_IN_TABLE (i) differs from REG_TICK (i) by one, and
             the last store to this register really stored into this
             subreg, then remove the memory of this subreg.
             Otherwise, remove any memory of the entire register and
             all its subregs from the table.  */
          if (REG_TICK (i) - REG_IN_TABLE (i) > 1
              || SUBREG_TICKED (i) != REGNO (SUBREG_REG (x)))
            remove_invalid_refs (i);
          else
            remove_invalid_subreg_refs (i, SUBREG_BYTE (x), GET_MODE (x));
        }

      REG_IN_TABLE (i) = REG_TICK (i);
      SUBREG_TICKED (i) = REGNO (SUBREG_REG (x));
      return 0;
    }

  /* If X is a comparison or a COMPARE and either operand is a register
     that does not have a quantity, give it one.  This is so that a later
     call to record_jump_equiv won't cause X to be assigned a different
     hash code and not found in the table after that call.

     It is not necessary to do this here, since rehash_using_reg can
     fix up the table later, but doing this here eliminates the need to
     call that expensive function in the most common case where the only
     use of the register is in the comparison.  */

  if (code == COMPARE || COMPARISON_P (x))
    {
      if (REG_P (XEXP (x, 0))
          && ! REGNO_QTY_VALID_P (REGNO (XEXP (x, 0))))
        if (insert_regs (XEXP (x, 0), NULL, 0))
          {
            rehash_using_reg (XEXP (x, 0));
            changed = 1;
          }

      if (REG_P (XEXP (x, 1))
          && ! REGNO_QTY_VALID_P (REGNO (XEXP (x, 1))))
        if (insert_regs (XEXP (x, 1), NULL, 0))
          {
            rehash_using_reg (XEXP (x, 1));
            changed = 1;
          }
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      changed |= mention_regs (XEXP (x, i));
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
        changed |= mention_regs (XVECEXP (x, i, j));

  return changed;
}

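/* An illustrative timeline (not from the original file) of the staleness
   check at the top of mention_regs, for some register i:

     after new_basic_block ():     REG_TICK (i) == 1, REG_IN_TABLE (i) == -1
     an expr using i is entered:   REG_IN_TABLE (i) = REG_TICK (i) = 1
     a value is stored into i:     REG_TICK (i) becomes 2
     mention_regs sees i again:    REG_IN_TABLE (i) >= 0 and 1 != 2, so
                                   stale references are removed and
                                   REG_IN_TABLE (i) is resynced to 2.  */
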
/* Update the register quantities for inserting X into the hash table
   with a value equivalent to CLASSP.
   (If the class does not contain a REG, it is irrelevant.)
   If MODIFIED is nonzero, X is a destination; it is being modified.
   Note that delete_reg_equiv should be called on a register
   before insert_regs is done on that register with MODIFIED != 0.

   Nonzero value means that elements of reg_qty have changed
   so X's hash code may be different.  */

static int
insert_regs (rtx x, struct table_elt *classp, int modified)
{
  if (REG_P (x))
    {
      unsigned int regno = REGNO (x);
      int qty_valid;

      /* If REGNO is in the equivalence table already but is of the
         wrong mode for that equivalence, don't do anything here.  */

      qty_valid = REGNO_QTY_VALID_P (regno);
      if (qty_valid)
        {
          struct qty_table_elem *ent = &qty_table[REG_QTY (regno)];

          if (ent->mode != GET_MODE (x))
            return 0;
        }

      if (modified || ! qty_valid)
        {
          if (classp)
            for (classp = classp->first_same_value;
                 classp != 0;
                 classp = classp->next_same_value)
              if (REG_P (classp->exp)
                  && GET_MODE (classp->exp) == GET_MODE (x))
                {
                  unsigned c_regno = REGNO (classp->exp);

                  gcc_assert (REGNO_QTY_VALID_P (c_regno));

                  /* Suppose that 5 is hard reg and 100 and 101 are
                     pseudos.  Consider

                     (set (reg:si 100) (reg:si 5))
                     (set (reg:si 5) (reg:si 100))
                     (set (reg:di 101) (reg:di 5))

                     We would now set REG_QTY (101) = REG_QTY (5), but the
                     entry for 5 is in SImode.  When we use this later in
                     copy propagation, we get the register in wrong mode.  */
                  if (qty_table[REG_QTY (c_regno)].mode != GET_MODE (x))
                    continue;

                  make_regs_eqv (regno, c_regno);
                  return 1;
                }

          /* Mention_regs for a SUBREG checks if REG_TICK is exactly one larger
             than REG_IN_TABLE to find out if there was only a single preceding
             invalidation - for the SUBREG - or another one, which would be
             for the full register.  However, if we find here that REG_TICK
             indicates that the register is invalid, it means that it has
             been invalidated in a separate operation.  The SUBREG might be used
             now (then this is a recursive call), or we might use the full REG
             now and a SUBREG of it later.  So bump up REG_TICK so that
             mention_regs will do the right thing.  */
          if (! modified
              && REG_IN_TABLE (regno) >= 0
              && REG_TICK (regno) == REG_IN_TABLE (regno) + 1)
            REG_TICK (regno)++;
          make_new_qty (regno, GET_MODE (x));
          return 1;
        }

      return 0;
    }

  /* If X is a SUBREG, we will likely be inserting the inner register in the
     table.  If that register doesn't have an assigned quantity number at
     this point but does later, the insertion that we will be doing now will
     not be accessible because its hash code will have changed.  So assign
     a quantity number now.  */

  else if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x))
           && ! REGNO_QTY_VALID_P (REGNO (SUBREG_REG (x))))
    {
      insert_regs (SUBREG_REG (x), NULL, 0);
      mention_regs (x);
      return 1;
    }
  else
    return mention_regs (x);
}

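/* A minimal usage sketch (hypothetical, following the protocol documented
   on `insert' below): hash X, let insert_regs assign quantities, and
   recompute the hash if insert_regs reports that quantity numbers changed,
   since pseudo registers hash by quantity number.  */
#if 0
  unsigned hash = HASH (x, mode);
  struct table_elt *elt = lookup (x, hash, mode);

  if (elt == 0)
    {
      if (insert_regs (x, NULL, 0))
        hash = HASH (x, mode);  /* reg_qty changed; the old hash is stale.  */
      elt = insert (x, NULL, hash, mode);
    }
#endif
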
/* Look in or update the hash table.  */

/* Remove table element ELT from use in the table.
   HASH is its hash code, made using the HASH macro.
   It's an argument because often that is known in advance
   and we save much time not recomputing it.  */

static void
remove_from_table (struct table_elt *elt, unsigned int hash)
{
  if (elt == 0)
    return;

  /* Mark this element as removed.  See cse_insn.  */
  elt->first_same_value = 0;

  /* Remove the table element from its equivalence class.  */

  {
    struct table_elt *prev = elt->prev_same_value;
    struct table_elt *next = elt->next_same_value;

    if (next)
      next->prev_same_value = prev;

    if (prev)
      prev->next_same_value = next;
    else
      {
        struct table_elt *newfirst = next;
        while (next)
          {
            next->first_same_value = newfirst;
            next = next->next_same_value;
          }
      }
  }

  /* Remove the table element from its hash bucket.  */

  {
    struct table_elt *prev = elt->prev_same_hash;
    struct table_elt *next = elt->next_same_hash;

    if (next)
      next->prev_same_hash = prev;

    if (prev)
      prev->next_same_hash = next;
    else if (table[hash] == elt)
      table[hash] = next;
    else
      {
        /* This entry is not in the proper hash bucket.  This can happen
           when two classes were merged by `merge_equiv_classes'.  Search
           for the hash bucket that it heads.  This happens only very
           rarely, so the cost is acceptable.  */
        for (hash = 0; hash < HASH_SIZE; hash++)
          if (table[hash] == elt)
            table[hash] = next;
      }
  }

  /* Remove the table element from its related-value circular chain.  */

  if (elt->related_value != 0 && elt->related_value != elt)
    {
      struct table_elt *p = elt->related_value;

      while (p->related_value != elt)
        p = p->related_value;
      p->related_value = elt->related_value;
      if (p->related_value == p)
        p->related_value = 0;
    }

  /* Now add it to the free element chain.  */
  elt->next_same_hash = free_element_chain;
  free_element_chain = elt;
}

/* Same as above, but X is a pseudo-register.  */

static void
remove_pseudo_from_table (rtx x, unsigned int hash)
{
  struct table_elt *elt;

  /* Because a pseudo-register can be referenced in more than one
     mode, we might have to remove more than one table entry.  */
  while ((elt = lookup_for_remove (x, hash, VOIDmode)))
    remove_from_table (elt, hash);
}

/* Look up X in the hash table and return its table element,
   or 0 if X is not in the table.

   MODE is the machine-mode of X, or if X is an integer constant
   with VOIDmode then MODE is the mode with which X will be used.

   Here we are satisfied to find an expression whose tree structure
   looks like X.  */

static struct table_elt *
lookup (rtx x, unsigned int hash, enum machine_mode mode)
{
  struct table_elt *p;

  for (p = table[hash]; p; p = p->next_same_hash)
    if (mode == p->mode && ((x == p->exp && REG_P (x))
                            || exp_equiv_p (x, p->exp, !REG_P (x), false)))
      return p;

  return 0;
}

/* Like `lookup' but don't care whether the table element uses invalid regs.
   Also ignore discrepancies in the machine mode of a register.  */

static struct table_elt *
lookup_for_remove (rtx x, unsigned int hash, enum machine_mode mode)
{
  struct table_elt *p;

  if (REG_P (x))
    {
      unsigned int regno = REGNO (x);

      /* Don't check the machine mode when comparing registers;
         invalidating (REG:SI 0) also invalidates (REG:DF 0).  */
      for (p = table[hash]; p; p = p->next_same_hash)
        if (REG_P (p->exp)
            && REGNO (p->exp) == regno)
          return p;
    }
  else
    {
      for (p = table[hash]; p; p = p->next_same_hash)
        if (mode == p->mode
            && (x == p->exp || exp_equiv_p (x, p->exp, 0, false)))
          return p;
    }

  return 0;
}

/* Look for an expression equivalent to X and with code CODE.
   If one is found, return that expression.  */

static rtx
lookup_as_function (rtx x, enum rtx_code code)
{
  struct table_elt *p
    = lookup (x, SAFE_HASH (x, VOIDmode), GET_MODE (x));

  if (p == 0)
    return 0;

  for (p = p->first_same_value; p; p = p->next_same_value)
    if (GET_CODE (p->exp) == code
        /* Make sure this is a valid entry in the table.  */
        && exp_equiv_p (p->exp, p->exp, 1, false))
      return p->exp;

  return 0;
}

1379 /* Insert X in the hash table, assuming HASH is its hash code | |
1380 and CLASSP is an element of the class it should go in | |
1381 (or 0 if a new class should be made). | |
1382 It is inserted at the proper position to keep the class in | |
1383 the order cheapest first. | |
1384 | |
1385 MODE is the machine-mode of X, or if X is an integer constant | |
1386 with VOIDmode then MODE is the mode with which X will be used. | |
1387 | |
1388 For elements of equal cheapness, the most recent one | |
1389 goes in front, except that the first element in the list | |
1390 remains first unless a cheaper element is added. The order of | |
1391 pseudo-registers does not matter, as canon_reg will be called to | |
1392 find the cheapest when a register is retrieved from the table. | |
1393 | |
1394 The in_memory field in the hash table element is set to 0. | |
1395 The caller must set it nonzero if appropriate. | |
1396 | |
1397 You should call insert_regs (X, CLASSP, MODIFY) before calling here, | |
1398 and if insert_regs returns a nonzero value | |
1399 you must then recompute its hash code before calling here. | |
1400 | |
1401 If necessary, update table showing constant values of quantities. */ | |
1402 | |
1403 #define CHEAPER(X, Y) \ | |
1404 (preferable ((X)->cost, (X)->regcost, (Y)->cost, (Y)->regcost) < 0) | |
1405 | |
1406 static struct table_elt * | |
1407 insert (rtx x, struct table_elt *classp, unsigned int hash, enum machine_mode mode) | |
1408 { | |
1409 struct table_elt *elt; | |
1410 | |
1411 /* If X is a register and we haven't made a quantity for it, | |
1412 something is wrong. */ | |
1413 gcc_assert (!REG_P (x) || REGNO_QTY_VALID_P (REGNO (x))); | |
1414 | |
1415 /* If X is a hard register, show it is being put in the table. */ | |
1416 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER) | |
1417 add_to_hard_reg_set (&hard_regs_in_table, GET_MODE (x), REGNO (x)); | |
1418 | |
1419 /* Put an element for X into the right hash bucket. */ | |
1420 | |
1421 elt = free_element_chain; | |
1422 if (elt) | |
1423 free_element_chain = elt->next_same_hash; | |
1424 else | |
1425 elt = XNEW (struct table_elt); | |
1426 | |
1427 elt->exp = x; | |
1428 elt->canon_exp = NULL_RTX; | |
1429 elt->cost = COST (x); | |
1430 elt->regcost = approx_reg_cost (x); | |
1431 elt->next_same_value = 0; | |
1432 elt->prev_same_value = 0; | |
1433 elt->next_same_hash = table[hash]; | |
1434 elt->prev_same_hash = 0; | |
1435 elt->related_value = 0; | |
1436 elt->in_memory = 0; | |
1437 elt->mode = mode; | |
1438 elt->is_const = (CONSTANT_P (x) || fixed_base_plus_p (x)); | |
1439 | |
1440 if (table[hash]) | |
1441 table[hash]->prev_same_hash = elt; | |
1442 table[hash] = elt; | |
1443 | |
1444 /* Put it into the proper value-class. */ | |
1445 if (classp) | |
1446 { | |
1447 classp = classp->first_same_value; | |
1448 if (CHEAPER (elt, classp)) | |
1449 /* Insert at the head of the class. */ | |
1450 { | |
1451 struct table_elt *p; | |
1452 elt->next_same_value = classp; | |
1453 classp->prev_same_value = elt; | |
1454 elt->first_same_value = elt; | |
1455 | |
1456 for (p = classp; p; p = p->next_same_value) | |
1457 p->first_same_value = elt; | |
1458 } | |
1459 else | |
1460 { | |
1461 /* Insert not at head of the class. */ | |
1462 /* Put it after the last element cheaper than X. */ | |
1463 struct table_elt *p, *next; | |
1464 | |
1465 for (p = classp; (next = p->next_same_value) && CHEAPER (next, elt); | |
1466 p = next); | |
1467 | |
1468 /* Put it after P and before NEXT. */ | |
1469 elt->next_same_value = next; | |
1470 if (next) | |
1471 next->prev_same_value = elt; | |
1472 | |
1473 elt->prev_same_value = p; | |
1474 p->next_same_value = elt; | |
1475 elt->first_same_value = classp; | |
1476 } | |
1477 } | |
1478 else | |
1479 elt->first_same_value = elt; | |
1480 | |
1481 /* If this is a constant being set equivalent to a register or a register | |
1482 being set equivalent to a constant, note the constant equivalence. | |
1483 | |
1484 If this is a constant, it cannot be equivalent to a different constant, | |
1485 and a constant is the only thing that can be cheaper than a register. So | |
1486 we know the register is the head of the class (before the constant was | |
1487 inserted). | |
1488 | |
1489 If this is a register that is not already known equivalent to a | |
1490 constant, we must check the entire class. | |
1491 | |
1492 If this is a register that is already known equivalent to a constant, | |
1493 update the qty's `const_insn' to show that `this_insn' is the latest | |
1494 insn making that quantity equivalent to the constant. */ | |
1495 | |
1496 if (elt->is_const && classp && REG_P (classp->exp) | |
1497 && !REG_P (x)) | |
1498 { | |
1499 int exp_q = REG_QTY (REGNO (classp->exp)); | |
1500 struct qty_table_elem *exp_ent = &qty_table[exp_q]; | |
1501 | |
1502 exp_ent->const_rtx = gen_lowpart (exp_ent->mode, x); | |
1503 exp_ent->const_insn = this_insn; | |
1504 } | |
1505 | |
1506 else if (REG_P (x) | |
1507 && classp | |
1508 && ! qty_table[REG_QTY (REGNO (x))].const_rtx | |
1509 && ! elt->is_const) | |
1510 { | |
1511 struct table_elt *p; | |
1512 | |
1513 for (p = classp; p != 0; p = p->next_same_value) | |
1514 { | |
1515 if (p->is_const && !REG_P (p->exp)) | |
1516 { | |
1517 int x_q = REG_QTY (REGNO (x)); | |
1518 struct qty_table_elem *x_ent = &qty_table[x_q]; | |
1519 | |
1520 x_ent->const_rtx | |
1521 = gen_lowpart (GET_MODE (x), p->exp); | |
1522 x_ent->const_insn = this_insn; | |
1523 break; | |
1524 } | |
1525 } | |
1526 } | |
1527 | |
1528 else if (REG_P (x) | |
1529 && qty_table[REG_QTY (REGNO (x))].const_rtx | |
1530 && GET_MODE (x) == qty_table[REG_QTY (REGNO (x))].mode) | |
1531 qty_table[REG_QTY (REGNO (x))].const_insn = this_insn; | |
1532 | |
1533 /* If this is a constant with symbolic value, | |
1534 and it has a term with an explicit integer value, | |
1535 link it up with related expressions. */ | |
1536 if (GET_CODE (x) == CONST) | |
1537 { | |
1538 rtx subexp = get_related_value (x); | |
1539 unsigned subhash; | |
1540 struct table_elt *subelt, *subelt_prev; | |
1541 | |
1542 if (subexp != 0) | |
1543 { | |
1544 /* Get the integer-free subexpression in the hash table. */ | |
1545 subhash = SAFE_HASH (subexp, mode); | |
1546 subelt = lookup (subexp, subhash, mode); | |
1547 if (subelt == 0) | |
1548 subelt = insert (subexp, NULL, subhash, mode); | |
1549 /* Initialize SUBELT's circular chain if it has none. */ | |
1550 if (subelt->related_value == 0) | |
1551 subelt->related_value = subelt; | |
1552 /* Find the element in the circular chain that precedes SUBELT. */ | |
1553 subelt_prev = subelt; | |
1554 while (subelt_prev->related_value != subelt) | |
1555 subelt_prev = subelt_prev->related_value; | |
1556 /* Put new ELT into SUBELT's circular chain just before SUBELT. | |
1557 This way the element that follows SUBELT is the oldest one. */ | |
1558 elt->related_value = subelt_prev->related_value; | |
1559 subelt_prev->related_value = elt; | |
1560 } | |
1561 } | |
1562 | |
1563 return elt; | |
1564 } | |
1565 | |
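/* Editor's sketch (not from the original source): the ordered insertion
   above, restated on a self-contained toy type.  `struct elem',
   `cheaper' and `class_insert' are invented stand-ins for
   `struct table_elt', the CHEAPER macro and the class-linking part of
   insert (); only the cost field matters for the ordering.  */

#include <stddef.h>

struct elem
{
  int cost;
  struct elem *next_same_value, *prev_same_value, *first_same_value;
};

static int
cheaper (const struct elem *a, const struct elem *b)
{
  return a->cost < b->cost;
}

/* Link ELT into the class headed by CLASSP (or start a new class if
   CLASSP is null), keeping the list sorted cheapest-first.  Return the
   resulting head.  */

static struct elem *
class_insert (struct elem *elt, struct elem *classp)
{
  struct elem *p, *next;

  elt->prev_same_value = NULL;

  if (classp == NULL || cheaper (elt, classp))
    {
      /* ELT becomes the head; every member must repoint
         first_same_value at it.  */
      elt->next_same_value = classp;
      if (classp)
        classp->prev_same_value = elt;
      for (p = elt; p; p = p->next_same_value)
        p->first_same_value = elt;
      return elt;
    }

  /* Walk past every element strictly cheaper than ELT ...  */
  for (p = classp; (next = p->next_same_value) && cheaper (next, elt);
       p = next)
    ;

  /* ... and splice ELT in between P and NEXT.  */
  elt->next_same_value = next;
  if (next)
    next->prev_same_value = elt;
  elt->prev_same_value = p;
  p->next_same_value = elt;
  elt->first_same_value = classp;
  return classp;
}
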
1566 /* Given two equivalence classes, CLASS1 and CLASS2, put all the entries from | |
1567 CLASS2 into CLASS1. This is done when we have reached an insn which makes | |
1568 the two classes equivalent. | |
1569 | |
1570 CLASS1 will be the surviving class; CLASS2 should not be used after this | |
1571 call. | |
1572 | |
1573 Any invalid entries in CLASS2 will not be copied. */ | |
1574 | |
1575 static void | |
1576 merge_equiv_classes (struct table_elt *class1, struct table_elt *class2) | |
1577 { | |
1578 struct table_elt *elt, *next, *new_elt; | |
1579 | |
1580 /* Ensure we start with the head of the classes. */ | |
1581 class1 = class1->first_same_value; | |
1582 class2 = class2->first_same_value; | |
1583 | |
1584 /* If they were already equal, forget it. */ | |
1585 if (class1 == class2) | |
1586 return; | |
1587 | |
1588 for (elt = class2; elt; elt = next) | |
1589 { | |
1590 unsigned int hash; | |
1591 rtx exp = elt->exp; | |
1592 enum machine_mode mode = elt->mode; | |
1593 | |
1594 next = elt->next_same_value; | |
1595 | |
1596 /* Remove old entry, make a new one in CLASS1's class. | |
1597 Don't do this for invalid entries as we cannot find their | |
1598 hash code (it also isn't necessary). */ | |
1599 if (REG_P (exp) || exp_equiv_p (exp, exp, 1, false)) | |
1600 { | |
1601 bool need_rehash = false; | |
1602 | |
1603 hash_arg_in_memory = 0; | |
1604 hash = HASH (exp, mode); | |
1605 | |
1606 if (REG_P (exp)) | |
1607 { | |
1608 need_rehash = REGNO_QTY_VALID_P (REGNO (exp)); | |
1609 delete_reg_equiv (REGNO (exp)); | |
1610 } | |
1611 | |
1612 if (REG_P (exp) && REGNO (exp) >= FIRST_PSEUDO_REGISTER) | |
1613 remove_pseudo_from_table (exp, hash); | |
1614 else | |
1615 remove_from_table (elt, hash); | |
1616 | |
1617 if (insert_regs (exp, class1, 0) || need_rehash) | |
1618 { | |
1619 rehash_using_reg (exp); | |
1620 hash = HASH (exp, mode); | |
1621 } | |
1622 new_elt = insert (exp, class1, hash, mode); | |
1623 new_elt->in_memory = hash_arg_in_memory; | |
1624 } | |
1625 } | |
1626 } | |
1627 | |
1628 /* Flush the entire hash table. */ | |
1629 | |
1630 static void | |
1631 flush_hash_table (void) | |
1632 { | |
1633 int i; | |
1634 struct table_elt *p; | |
1635 | |
1636 for (i = 0; i < HASH_SIZE; i++) | |
1637 for (p = table[i]; p; p = table[i]) | |
1638 { | |
1639 /* Note that invalidate can remove elements | |
1640 after P in the current hash chain. */ | |
1641 if (REG_P (p->exp)) | |
1642 invalidate (p->exp, VOIDmode); | |
1643 else | |
1644 remove_from_table (p, i); | |
1645 } | |
1646 } | |
1647 | |
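/* Editor's sketch (illustrative only): the inner loop above re-reads
   table[i] instead of following p->next_same_hash because invalidate ()
   may unlink an arbitrary set of elements, including P's cached
   successor.  The same drain pattern on a toy bucket; `node', `bucket',
   `discard' and `drain_bucket' are invented names.  */

#include <stdlib.h>

struct node
{
  struct node *next;
};

static struct node *bucket;

/* Unlink N from BUCKET and free it.  In the real pass the analogous
   call (invalidate) may remove other nodes as well.  */

static void
discard (struct node *n)
{
  struct node **pp = &bucket;

  while (*pp != n)
    pp = &(*pp)->next;
  *pp = n->next;
  free (n);
}

static void
drain_bucket (void)
{
  struct node *p;

  /* Re-fetch the head each iteration: a successor pointer saved
     before calling discard () could be dangling afterwards.  */
  for (p = bucket; p; p = bucket)
    discard (p);
}
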
1648 /* Function called for each rtx to check whether a true dependence exists. */ | |
1649 struct check_dependence_data | |
1650 { | |
1651 enum machine_mode mode; | |
1652 rtx exp; | |
1653 rtx addr; | |
1654 }; | |
1655 | |
1656 static int | |
1657 check_dependence (rtx *x, void *data) | |
1658 { | |
1659 struct check_dependence_data *d = (struct check_dependence_data *) data; | |
1660 if (*x && MEM_P (*x)) | |
1661 return canon_true_dependence (d->exp, d->mode, d->addr, *x, | |
1662 cse_rtx_varies_p); | |
1663 else | |
1664 return 0; | |
1665 } | |
1666 | |
1667 /* Remove from the hash table, or mark as invalid, all expressions whose | |
1668 values could be altered by storing in X. X is a register, a subreg, or | |
1669 a memory reference with nonvarying address (because, when a memory | |
1670 reference with a varying address is stored into, all memory references are | |
1671 removed by invalidate_memory so specific invalidation is superfluous). | |
1672 FULL_MODE, if not VOIDmode, indicates that this much should be | |
1673 invalidated instead of just the amount indicated by the mode of X. This | |
1674 is only used for bitfield stores into memory. | |
1675 | |
1676 A nonvarying address may be just a register or just a symbol reference, | |
1677 or it may be either of those plus a numeric offset. */ | |
1678 | |
1679 static void | |
1680 invalidate (rtx x, enum machine_mode full_mode) | |
1681 { | |
1682 int i; | |
1683 struct table_elt *p; | |
1684 rtx addr; | |
1685 | |
1686 switch (GET_CODE (x)) | |
1687 { | |
1688 case REG: | |
1689 { | |
1690 /* If X is a register, dependencies on its contents are recorded | |
1691 through the qty number mechanism. Just change the qty number of | |
1692 the register, mark it as invalid for expressions that refer to it, | |
1693 and remove it itself. */ | |
1694 unsigned int regno = REGNO (x); | |
1695 unsigned int hash = HASH (x, GET_MODE (x)); | |
1696 | |
1697 /* Remove REGNO from any quantity list it might be on and indicate | |
1698 that its value might have changed. If it is a pseudo, remove its | |
1699 entry from the hash table. | |
1700 | |
1701 For a hard register, we do the first two actions above for any | |
1702 additional hard registers corresponding to X. Then, if any of these | |
1703 registers are in the table, we must remove any REG entries that | |
1704 overlap these registers. */ | |
1705 | |
1706 delete_reg_equiv (regno); | |
1707 REG_TICK (regno)++; | |
1708 SUBREG_TICKED (regno) = -1; | |
1709 | |
1710 if (regno >= FIRST_PSEUDO_REGISTER) | |
1711 remove_pseudo_from_table (x, hash); | |
1712 else | |
1713 { | |
1714 HOST_WIDE_INT in_table | |
1715 = TEST_HARD_REG_BIT (hard_regs_in_table, regno); | |
1716 unsigned int endregno = END_HARD_REGNO (x); | |
1717 unsigned int tregno, tendregno, rn; | |
1718 struct table_elt *p, *next; | |
1719 | |
1720 CLEAR_HARD_REG_BIT (hard_regs_in_table, regno); | |
1721 | |
1722 for (rn = regno + 1; rn < endregno; rn++) | |
1723 { | |
1724 in_table |= TEST_HARD_REG_BIT (hard_regs_in_table, rn); | |
1725 CLEAR_HARD_REG_BIT (hard_regs_in_table, rn); | |
1726 delete_reg_equiv (rn); | |
1727 REG_TICK (rn)++; | |
1728 SUBREG_TICKED (rn) = -1; | |
1729 } | |
1730 | |
1731 if (in_table) | |
1732 for (hash = 0; hash < HASH_SIZE; hash++) | |
1733 for (p = table[hash]; p; p = next) | |
1734 { | |
1735 next = p->next_same_hash; | |
1736 | |
1737 if (!REG_P (p->exp) | |
1738 || REGNO (p->exp) >= FIRST_PSEUDO_REGISTER) | |
1739 continue; | |
1740 | |
1741 tregno = REGNO (p->exp); | |
1742 tendregno = END_HARD_REGNO (p->exp); | |
1743 if (tendregno > regno && tregno < endregno) | |
1744 remove_from_table (p, hash); | |
1745 } | |
1746 } | |
1747 } | |
1748 return; | |
1749 | |
1750 case SUBREG: | |
1751 invalidate (SUBREG_REG (x), VOIDmode); | |
1752 return; | |
1753 | |
1754 case PARALLEL: | |
1755 for (i = XVECLEN (x, 0) - 1; i >= 0; --i) | |
1756 invalidate (XVECEXP (x, 0, i), VOIDmode); | |
1757 return; | |
1758 | |
1759 case EXPR_LIST: | |
1760 /* This is part of a disjoint return value; extract the location in | |
1761 question ignoring the offset. */ | |
1762 invalidate (XEXP (x, 0), VOIDmode); | |
1763 return; | |
1764 | |
1765 case MEM: | |
1766 addr = canon_rtx (get_addr (XEXP (x, 0))); | |
1767 /* Calculate the canonical version of X here so that | |
1768 true_dependence doesn't generate new RTL for X on each call. */ | |
1769 x = canon_rtx (x); | |
1770 | |
1771 /* Remove all hash table elements that refer to overlapping pieces of | |
1772 memory. */ | |
1773 if (full_mode == VOIDmode) | |
1774 full_mode = GET_MODE (x); | |
1775 | |
1776 for (i = 0; i < HASH_SIZE; i++) | |
1777 { | |
1778 struct table_elt *next; | |
1779 | |
1780 for (p = table[i]; p; p = next) | |
1781 { | |
1782 next = p->next_same_hash; | |
1783 if (p->in_memory) | |
1784 { | |
1785 struct check_dependence_data d; | |
1786 | |
1787 /* Just canonicalize the expression once; | |
1788 otherwise each time we call invalidate | |
1789 true_dependence will canonicalize the | |
1790 expression again. */ | |
1791 if (!p->canon_exp) | |
1792 p->canon_exp = canon_rtx (p->exp); | |
1793 d.exp = x; | |
1794 d.addr = addr; | |
1795 d.mode = full_mode; | |
1796 if (for_each_rtx (&p->canon_exp, check_dependence, &d)) | |
1797 remove_from_table (p, i); | |
1798 } | |
1799 } | |
1800 } | |
1801 return; | |
1802 | |
1803 default: | |
1804 gcc_unreachable (); | |
1805 } | |
1806 } | |
1807 | |
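/* Editor's note (illustration; the function name is invented): the
   removal test above is a half-open interval overlap check.  An entry
   occupying hard regs [TREGNO, TENDREGNO) overlaps the invalidated
   regs [REGNO, ENDREGNO) exactly when each range starts before the
   other one ends:  */

static int
hard_reg_ranges_overlap_p (unsigned int tregno, unsigned int tendregno,
                           unsigned int regno, unsigned int endregno)
{
  return tendregno > regno && tregno < endregno;
}

/* E.g. an entry spanning regs [2, 4) is removed when regs [3, 5) are
   invalidated (4 > 3 && 2 < 5 holds), but survives an invalidation of
   [4, 6) (4 > 4 fails).  */
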
1808 /* Remove all expressions that refer to register REGNO, | |
1809 since they are already invalid, and we are about to | |
1810 mark that register valid again and don't want the old | |
1811 expressions to reappear as valid. */ | |
1812 | |
1813 static void | |
1814 remove_invalid_refs (unsigned int regno) | |
1815 { | |
1816 unsigned int i; | |
1817 struct table_elt *p, *next; | |
1818 | |
1819 for (i = 0; i < HASH_SIZE; i++) | |
1820 for (p = table[i]; p; p = next) | |
1821 { | |
1822 next = p->next_same_hash; | |
1823 if (!REG_P (p->exp) | |
1824 && refers_to_regno_p (regno, regno + 1, p->exp, (rtx *) 0)) | |
1825 remove_from_table (p, i); | |
1826 } | |
1827 } | |
1828 | |
1829 /* Likewise for a subreg with subreg_reg REGNO, subreg_byte OFFSET, | |
1830 and mode MODE. */ | |
1831 static void | |
1832 remove_invalid_subreg_refs (unsigned int regno, unsigned int offset, | |
1833 enum machine_mode mode) | |
1834 { | |
1835 unsigned int i; | |
1836 struct table_elt *p, *next; | |
1837 unsigned int end = offset + (GET_MODE_SIZE (mode) - 1); | |
1838 | |
1839 for (i = 0; i < HASH_SIZE; i++) | |
1840 for (p = table[i]; p; p = next) | |
1841 { | |
1842 rtx exp = p->exp; | |
1843 next = p->next_same_hash; | |
1844 | |
1845 if (!REG_P (exp) | |
1846 && (GET_CODE (exp) != SUBREG | |
1847 || !REG_P (SUBREG_REG (exp)) | |
1848 || REGNO (SUBREG_REG (exp)) != regno | |
1849 || (((SUBREG_BYTE (exp) | |
1850 + (GET_MODE_SIZE (GET_MODE (exp)) - 1)) >= offset) | |
1851 && SUBREG_BYTE (exp) <= end)) | |
1852 && refers_to_regno_p (regno, regno + 1, p->exp, (rtx *) 0)) | |
1853 remove_from_table (p, i); | |
1854 } | |
1855 } | |
1856 | |
1857 /* Recompute the hash codes of any valid entries in the hash table that | |
1858 reference X, if X is a register, or SUBREG_REG (X) if X is a SUBREG. | |
1859 | |
1860 This is called when we make a jump equivalence. */ | |
1861 | |
1862 static void | |
1863 rehash_using_reg (rtx x) | |
1864 { | |
1865 unsigned int i; | |
1866 struct table_elt *p, *next; | |
1867 unsigned hash; | |
1868 | |
1869 if (GET_CODE (x) == SUBREG) | |
1870 x = SUBREG_REG (x); | |
1871 | |
1872 /* If X is not a register or if the register is known not to be in any | |
1873 valid entries in the table, we have no work to do. */ | |
1874 | |
1875 if (!REG_P (x) | |
1876 || REG_IN_TABLE (REGNO (x)) < 0 | |
1877 || REG_IN_TABLE (REGNO (x)) != REG_TICK (REGNO (x))) | |
1878 return; | |
1879 | |
1880 /* Scan all hash chains looking for valid entries that mention X. | |
1881 If we find one and it is in the wrong hash chain, move it. */ | |
1882 | |
1883 for (i = 0; i < HASH_SIZE; i++) | |
1884 for (p = table[i]; p; p = next) | |
1885 { | |
1886 next = p->next_same_hash; | |
1887 if (reg_mentioned_p (x, p->exp) | |
1888 && exp_equiv_p (p->exp, p->exp, 1, false) | |
1889 && i != (hash = SAFE_HASH (p->exp, p->mode))) | |
1890 { | |
1891 if (p->next_same_hash) | |
1892 p->next_same_hash->prev_same_hash = p->prev_same_hash; | |
1893 | |
1894 if (p->prev_same_hash) | |
1895 p->prev_same_hash->next_same_hash = p->next_same_hash; | |
1896 else | |
1897 table[i] = p->next_same_hash; | |
1898 | |
1899 p->next_same_hash = table[hash]; | |
1900 p->prev_same_hash = 0; | |
1901 if (table[hash]) | |
1902 table[hash]->prev_same_hash = p; | |
1903 table[hash] = p; | |
1904 } | |
1905 } | |
1906 } | |
1907 | |
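/* Editor's sketch: the pointer surgery above is a plain move of a node
   between doubly-linked hash chains.  Restated on a toy node type (all
   names invented):  */

struct hnode
{
  struct hnode *next_same_hash, *prev_same_hash;
};

#define TOY_HASH_SIZE 32
static struct hnode *toy_table[TOY_HASH_SIZE];

static void
move_to_bucket (struct hnode *p, unsigned int from, unsigned int to)
{
  /* Unlink P from its current chain; only the head lacks a
     predecessor, so that case updates the bucket slot itself.  */
  if (p->next_same_hash)
    p->next_same_hash->prev_same_hash = p->prev_same_hash;
  if (p->prev_same_hash)
    p->prev_same_hash->next_same_hash = p->next_same_hash;
  else
    toy_table[from] = p->next_same_hash;

  /* Push P on the front of the destination chain.  */
  p->next_same_hash = toy_table[to];
  p->prev_same_hash = 0;
  if (toy_table[to])
    toy_table[to]->prev_same_hash = p;
  toy_table[to] = p;
}
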
1908 /* Remove from the hash table any expression that is a call-clobbered | |
1909 register. Also update their TICK values. */ | |
1910 | |
1911 static void | |
1912 invalidate_for_call (void) | |
1913 { | |
1914 unsigned int regno, endregno; | |
1915 unsigned int i; | |
1916 unsigned hash; | |
1917 struct table_elt *p, *next; | |
1918 int in_table = 0; | |
1919 | |
1920 /* Go through all the hard registers. For each that is clobbered in | |
1921 a CALL_INSN, remove the register from quantity chains and update | |
1922 reg_tick if defined. Also see if any of these registers is currently | |
1923 in the table. */ | |
1924 | |
1925 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) | |
1926 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno)) | |
1927 { | |
1928 delete_reg_equiv (regno); | |
1929 if (REG_TICK (regno) >= 0) | |
1930 { | |
1931 REG_TICK (regno)++; | |
1932 SUBREG_TICKED (regno) = -1; | |
1933 } | |
1934 | |
1935 in_table |= (TEST_HARD_REG_BIT (hard_regs_in_table, regno) != 0); | |
1936 } | |
1937 | |
1938 /* In the case where we have no call-clobbered hard registers in the | |
1939 table, we are done. Otherwise, scan the table and remove any | |
1940 entry that overlaps a call-clobbered register. */ | |
1941 | |
1942 if (in_table) | |
1943 for (hash = 0; hash < HASH_SIZE; hash++) | |
1944 for (p = table[hash]; p; p = next) | |
1945 { | |
1946 next = p->next_same_hash; | |
1947 | |
1948 if (!REG_P (p->exp) | |
1949 || REGNO (p->exp) >= FIRST_PSEUDO_REGISTER) | |
1950 continue; | |
1951 | |
1952 regno = REGNO (p->exp); | |
1953 endregno = END_HARD_REGNO (p->exp); | |
1954 | |
1955 for (i = regno; i < endregno; i++) | |
1956 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)) | |
1957 { | |
1958 remove_from_table (p, hash); | |
1959 break; | |
1960 } | |
1961 } | |
1962 } | |
1963 | |
1964 /* Given an expression X of type CONST, | |
1965 and ELT which is its table entry (or 0 if it | |
1966 is not in the hash table), | |
1967 return an alternate expression for X as a register plus integer. | |
1968 If none can be found, return 0. */ | |
1969 | |
1970 static rtx | |
1971 use_related_value (rtx x, struct table_elt *elt) | |
1972 { | |
1973 struct table_elt *relt = 0; | |
1974 struct table_elt *p, *q; | |
1975 HOST_WIDE_INT offset; | |
1976 | |
1977 /* First, is there anything related known? | |
1978 If we have a table element, we can tell from that. | |
1979 Otherwise, must look it up. */ | |
1980 | |
1981 if (elt != 0 && elt->related_value != 0) | |
1982 relt = elt; | |
1983 else if (elt == 0 && GET_CODE (x) == CONST) | |
1984 { | |
1985 rtx subexp = get_related_value (x); | |
1986 if (subexp != 0) | |
1987 relt = lookup (subexp, | |
1988 SAFE_HASH (subexp, GET_MODE (subexp)), | |
1989 GET_MODE (subexp)); | |
1990 } | |
1991 | |
1992 if (relt == 0) | |
1993 return 0; | |
1994 | |
1995 /* Search all related table entries for one that has an | |
1996 equivalent register. */ | |
1997 | |
1998 p = relt; | |
1999 while (1) | |
2000 { | |
2001 /* This loop is strange in that it is executed in two different cases. | |
2002 The first is when X is already in the table. Then it is searching | |
2003 the RELATED_VALUE list of X's class (RELT). The second case is when | |
2004 X is not in the table. Then RELT points to a class for the related | |
2005 value. | |
2006 | |
2007 Ensure that, whatever case we are in, we ignore classes that have | |
2008 the same value as X. */ | |
2009 | |
2010 if (rtx_equal_p (x, p->exp)) | |
2011 q = 0; | |
2012 else | |
2013 for (q = p->first_same_value; q; q = q->next_same_value) | |
2014 if (REG_P (q->exp)) | |
2015 break; | |
2016 | |
2017 if (q) | |
2018 break; | |
2019 | |
2020 p = p->related_value; | |
2021 | |
2022 /* We went all the way around, so there is nothing to be found. | |
2023 Alternatively, perhaps RELT was in the table for some other reason | |
2024 and it has no related values recorded. */ | |
2025 if (p == relt || p == 0) | |
2026 break; | |
2027 } | |
2028 | |
2029 if (q == 0) | |
2030 return 0; | |
2031 | |
2032 offset = (get_integer_term (x) - get_integer_term (p->exp)); | |
2033 /* Note: OFFSET may be 0 if P->exp and X are related by commutativity. */ | |
2034 return plus_constant (q->exp, offset); | |
2035 } | |
2036 | |
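/* Editor's sketch: related_value threads a singly-linked *circular*
   list through table entries, so insertion needs no back pointer and
   an empty ring is just a self-loop.  A minimal model (`rnode' and
   `ring_insert' are invented names):  */

struct rnode
{
  struct rnode *related;	/* next element on the ring, or 0 */
};

/* Put ELT on BASE's ring just before BASE, creating the ring first if
   necessary; the element after BASE therefore stays the oldest.  */

static void
ring_insert (struct rnode *base, struct rnode *elt)
{
  struct rnode *prev = base;

  if (base->related == 0)
    base->related = base;	/* one-element ring */
  while (prev->related != base)	/* find BASE's predecessor */
    prev = prev->related;
  elt->related = prev->related;	/* i.e. BASE */
  prev->related = elt;
}
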
2037 | |
2038 /* Hash a string. Just add its bytes up. */ | |
2039 static inline unsigned | |
2040 hash_rtx_string (const char *ps) | |
2041 { | |
2042 unsigned hash = 0; | |
2043 const unsigned char *p = (const unsigned char *) ps; | |
2044 | |
2045 if (p) | |
2046 while (*p) | |
2047 hash += *p++; | |
2048 | |
2049 return hash; | |
2050 } | |
2051 | |
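/* Editor's note: the byte sum above is order-insensitive, so e.g.
   "ab" and "ba" collide (97 + 98 == 195 either way).  That is fine
   here: string hashes only feed a larger combined hash, and a
   collision merely costs an extra exp_equiv_p comparison.  A
   self-contained restatement (`byte_sum' is an invented name):  */

static unsigned
byte_sum (const char *s)
{
  unsigned hash = 0;

  while (s && *s)
    hash += (unsigned char) *s++;
  return hash;
}

/* byte_sum ("ab") == byte_sum ("ba") == 195; byte_sum (0) == 0.  */
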
2052 /* Same as hash_rtx, but call CB on each rtx if it is not NULL. | |
2053 When the callback returns true, we continue with the new rtx. */ | |
2054 | |
2055 unsigned | |
2056 hash_rtx_cb (const_rtx x, enum machine_mode mode, | |
2057 int *do_not_record_p, int *hash_arg_in_memory_p, | |
2058 bool have_reg_qty, hash_rtx_callback_function cb) | |
2059 { | |
2060 int i, j; | |
2061 unsigned hash = 0; | |
2062 enum rtx_code code; | |
2063 const char *fmt; | |
2064 enum machine_mode newmode; | |
2065 rtx newx; | |
2066 | |
2067 /* Used to turn recursion into iteration. We can't rely on GCC's | |
2068 tail-recursion elimination since we need to keep accumulating values | |
2069 in HASH. */ | |
2070 repeat: | |
2071 if (x == 0) | |
2072 return hash; | |
2073 | |
2074 /* Invoke the callback first. */ | |
2075 if (cb != NULL | |
2076 && ((*cb) (x, mode, &newx, &newmode))) | |
2077 { | |
2078 hash += hash_rtx_cb (newx, newmode, do_not_record_p, | |
2079 hash_arg_in_memory_p, have_reg_qty, cb); | |
2080 return hash; | |
2081 } | |
2082 | |
2083 code = GET_CODE (x); | |
2084 switch (code) | |
2085 { | |
2086 case REG: | |
2087 { | |
2088 unsigned int regno = REGNO (x); | |
2089 | |
2090 if (do_not_record_p && !reload_completed) | |
2091 { | |
2092 /* On some machines, we can't record any non-fixed hard register, | |
2093 because extending its life will cause reload problems. We | |
2094 consider ap, fp, sp, gp to be fixed for this purpose. | |
2095 | |
2096 We also consider CCmode registers to be fixed for this purpose; | |
2097 failure to do so leads to failure to simplify conditionals | |
2098 of the 0<100 type. | |
2099 | |
2100 On all machines, we can't record any global registers. | |
2101 Nor should we record any register that is in a small | |
2102 class, as defined by CLASS_LIKELY_SPILLED_P. */ | |
2103 bool record; | |
2104 | |
2105 if (regno >= FIRST_PSEUDO_REGISTER) | |
2106 record = true; | |
2107 else if (x == frame_pointer_rtx | |
2108 || x == hard_frame_pointer_rtx | |
2109 || x == arg_pointer_rtx | |
2110 || x == stack_pointer_rtx | |
2111 || x == pic_offset_table_rtx) | |
2112 record = true; | |
2113 else if (global_regs[regno]) | |
2114 record = false; | |
2115 else if (fixed_regs[regno]) | |
2116 record = true; | |
2117 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC) | |
2118 record = true; | |
2119 else if (SMALL_REGISTER_CLASSES) | |
2120 record = false; | |
2121 else if (CLASS_LIKELY_SPILLED_P (REGNO_REG_CLASS (regno))) | |
2122 record = false; | |
2123 else | |
2124 record = true; | |
2125 | |
2126 if (!record) | |
2127 { | |
2128 *do_not_record_p = 1; | |
2129 return 0; | |
2130 } | |
2131 } | |
2132 | |
2133 hash += ((unsigned int) REG << 7); | |
2134 hash += (have_reg_qty ? (unsigned) REG_QTY (regno) : regno); | |
2135 return hash; | |
2136 } | |
2137 | |
2138 /* We handle SUBREG of a REG specially because the underlying | |
2139 reg changes its hash value with every value change; we don't | |
2140 want to have to forget unrelated subregs when one subreg changes. */ | |
2141 case SUBREG: | |
2142 { | |
2143 if (REG_P (SUBREG_REG (x))) | |
2144 { | |
2145 hash += (((unsigned int) SUBREG << 7) | |
2146 + REGNO (SUBREG_REG (x)) | |
2147 + (SUBREG_BYTE (x) / UNITS_PER_WORD)); | |
2148 return hash; | |
2149 } | |
2150 break; | |
2151 } | |
2152 | |
2153 case CONST_INT: | |
2154 hash += (((unsigned int) CONST_INT << 7) + (unsigned int) mode | |
2155 + (unsigned int) INTVAL (x)); | |
2156 return hash; | |
2157 | |
2158 case CONST_DOUBLE: | |
2159 /* This is like the general case, except that it only counts | |
2160 the integers representing the constant. */ | |
2161 hash += (unsigned int) code + (unsigned int) GET_MODE (x); | |
2162 if (GET_MODE (x) != VOIDmode) | |
2163 hash += real_hash (CONST_DOUBLE_REAL_VALUE (x)); | |
2164 else | |
2165 hash += ((unsigned int) CONST_DOUBLE_LOW (x) | |
2166 + (unsigned int) CONST_DOUBLE_HIGH (x)); | |
2167 return hash; | |
2168 | |
2169 case CONST_FIXED: | |
2170 hash += (unsigned int) code + (unsigned int) GET_MODE (x); | |
2171 hash += fixed_hash (CONST_FIXED_VALUE (x)); | |
2172 return hash; | |
2173 | |
2174 case CONST_VECTOR: | |
2175 { | |
2176 int units; | |
2177 rtx elt; | |
2178 | |
2179 units = CONST_VECTOR_NUNITS (x); | |
2180 | |
2181 for (i = 0; i < units; ++i) | |
2182 { | |
2183 elt = CONST_VECTOR_ELT (x, i); | |
2184 hash += hash_rtx_cb (elt, GET_MODE (elt), | |
2185 do_not_record_p, hash_arg_in_memory_p, | |
2186 have_reg_qty, cb); | |
2187 } | |
2188 | |
2189 return hash; | |
2190 } | |
2191 | |
2192 /* Assume there is only one rtx object for any given label. */ | |
2193 case LABEL_REF: | |
2194 /* We don't hash on the address of the CODE_LABEL to avoid bootstrap | |
2195 differences and differences between each stage's debugging dumps. */ | |
2196 hash += (((unsigned int) LABEL_REF << 7) | |
2197 + CODE_LABEL_NUMBER (XEXP (x, 0))); | |
2198 return hash; | |
2199 | |
2200 case SYMBOL_REF: | |
2201 { | |
2202 /* Don't hash on the symbol's address to avoid bootstrap differences. | |
2203 Different hash values may cause expressions to be recorded in | |
2204 different orders and thus different registers to be used in the | |
2205 final assembler. This also avoids differences in the dump files | |
2206 between various stages. */ | |
2207 unsigned int h = 0; | |
2208 const unsigned char *p = (const unsigned char *) XSTR (x, 0); | |
2209 | |
2210 while (*p) | |
2211 h += (h << 7) + *p++; /* ??? revisit */ | |
2212 | |
2213 hash += ((unsigned int) SYMBOL_REF << 7) + h; | |
2214 return hash; | |
2215 } | |
2216 | |
2217 case MEM: | |
2218 /* We don't record if marked volatile or if BLKmode since we don't | |
2219 know the size of the move. */ | |
2220 if (do_not_record_p && (MEM_VOLATILE_P (x) || GET_MODE (x) == BLKmode)) | |
2221 { | |
2222 *do_not_record_p = 1; | |
2223 return 0; | |
2224 } | |
2225 if (hash_arg_in_memory_p && !MEM_READONLY_P (x)) | |
2226 *hash_arg_in_memory_p = 1; | |
2227 | |
2228 /* Now that we have already found this special case, | |
2229 might as well speed it up as much as possible. */ | |
2230 hash += (unsigned) MEM; | |
2231 x = XEXP (x, 0); | |
2232 goto repeat; | |
2233 | |
2234 case USE: | |
2235 /* A USE that mentions non-volatile memory needs special | |
2236 handling since the MEM may be BLKmode which normally | |
2237 prevents an entry from being made. Pure calls are | |
2238 marked by a USE which mentions BLKmode memory. | |
2239 See calls.c:emit_call_1. */ | |
2240 if (MEM_P (XEXP (x, 0)) | |
2241 && ! MEM_VOLATILE_P (XEXP (x, 0))) | |
2242 { | |
2243 hash += (unsigned) USE; | |
2244 x = XEXP (x, 0); | |
2245 | |
2246 if (hash_arg_in_memory_p && !MEM_READONLY_P (x)) | |
2247 *hash_arg_in_memory_p = 1; | |
2248 | |
2249 /* Now that we have already found this special case, | |
2250 might as well speed it up as much as possible. */ | |
2251 hash += (unsigned) MEM; | |
2252 x = XEXP (x, 0); | |
2253 goto repeat; | |
2254 } | |
2255 break; | |
2256 | |
2257 case PRE_DEC: | |
2258 case PRE_INC: | |
2259 case POST_DEC: | |
2260 case POST_INC: | |
2261 case PRE_MODIFY: | |
2262 case POST_MODIFY: | |
2263 case PC: | |
2264 case CC0: | |
2265 case CALL: | |
2266 case UNSPEC_VOLATILE: | |
2267 if (do_not_record_p) | |
2268 { | |
2269 *do_not_record_p = 1; | |
2270 return 0; | |
2271 } | |
2272 else | |
2273 return hash; | |
2274 | |
2275 case ASM_OPERANDS: | |
2276 if (do_not_record_p && MEM_VOLATILE_P (x)) | |
2277 { | |
2278 *do_not_record_p = 1; | |
2279 return 0; | |
2280 } | |
2281 else | |
2282 { | |
2283 /* We don't want to take the filename and line into account. */ | |
2284 hash += (unsigned) code + (unsigned) GET_MODE (x) | |
2285 + hash_rtx_string (ASM_OPERANDS_TEMPLATE (x)) | |
2286 + hash_rtx_string (ASM_OPERANDS_OUTPUT_CONSTRAINT (x)) | |
2287 + (unsigned) ASM_OPERANDS_OUTPUT_IDX (x); | |
2288 | |
2289 if (ASM_OPERANDS_INPUT_LENGTH (x)) | |
2290 { | |
2291 for (i = 1; i < ASM_OPERANDS_INPUT_LENGTH (x); i++) | |
2292 { | |
2293 hash += (hash_rtx_cb (ASM_OPERANDS_INPUT (x, i), | |
2294 GET_MODE (ASM_OPERANDS_INPUT (x, i)), | |
2295 do_not_record_p, hash_arg_in_memory_p, | |
2296 have_reg_qty, cb) | |
2297 + hash_rtx_string | |
2298 (ASM_OPERANDS_INPUT_CONSTRAINT (x, i))); | |
2299 } | |
2300 | |
2301 hash += hash_rtx_string (ASM_OPERANDS_INPUT_CONSTRAINT (x, 0)); | |
2302 x = ASM_OPERANDS_INPUT (x, 0); | |
2303 mode = GET_MODE (x); | |
2304 goto repeat; | |
2305 } | |
2306 | |
2307 return hash; | |
2308 } | |
2309 break; | |
2310 | |
2311 default: | |
2312 break; | |
2313 } | |
2314 | |
2315 i = GET_RTX_LENGTH (code) - 1; | |
2316 hash += (unsigned) code + (unsigned) GET_MODE (x); | |
2317 fmt = GET_RTX_FORMAT (code); | |
2318 for (; i >= 0; i--) | |
2319 { | |
2320 switch (fmt[i]) | |
2321 { | |
2322 case 'e': | |
2323 /* If we are about to do the last recursive call | |
2324 needed at this level, change it into iteration. | |
2325 This function is called enough to be worth it. */ | |
2326 if (i == 0) | |
2327 { | |
2328 x = XEXP (x, i); | |
2329 goto repeat; | |
2330 } | |
2331 | |
2332 hash += hash_rtx_cb (XEXP (x, i), 0, do_not_record_p, | |
2333 hash_arg_in_memory_p, | |
2334 have_reg_qty, cb); | |
2335 break; | |
2336 | |
2337 case 'E': | |
2338 for (j = 0; j < XVECLEN (x, i); j++) | |
2339 hash += hash_rtx_cb (XVECEXP (x, i, j), 0, do_not_record_p, | |
2340 hash_arg_in_memory_p, | |
2341 have_reg_qty, cb); | |
2342 break; | |
2343 | |
2344 case 's': | |
2345 hash += hash_rtx_string (XSTR (x, i)); | |
2346 break; | |
2347 | |
2348 case 'i': | |
2349 hash += (unsigned int) XINT (x, i); | |
2350 break; | |
2351 | |
2352 case '0': case 't': | |
2353 /* Unused. */ | |
2354 break; | |
2355 | |
2356 default: | |
2357 gcc_unreachable (); | |
2358 } | |
2359 } | |
2360 | |
2361 return hash; | |
2362 } | |
2363 | |
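/* Editor's sketch: the `repeat:' label above hand-codes a tail call --
   the last operand is hashed by looping rather than recursing, since
   HASH keeps accumulating and the compiler's own tail-call
   optimization cannot be assumed.  The same shape on a toy binary
   tree (invented names):  */

struct toy_tree
{
  int value;
  struct toy_tree *left, *right;
};

static unsigned
toy_tree_hash (const struct toy_tree *t)
{
  unsigned hash = 0;

 repeat:
  if (t == 0)
    return hash;
  hash += (unsigned) t->value;
  hash += toy_tree_hash (t->left);  /* true recursion on first child */
  t = t->right;			    /* iteration on the last child */
  goto repeat;
}

/* A degenerate right-leaning chain of N nodes thus uses O(1) stack
   instead of O(N).  */
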
2364 /* Hash an rtx. We are careful to make sure the value is never negative. | |
2365 Equivalent registers hash identically. | |
2366 MODE is used in hashing for CONST_INTs only; | |
2367 otherwise the mode of X is used. | |
2368 | |
2369 Store 1 in DO_NOT_RECORD_P if any subexpression is volatile. | |
2370 | |
2371 If HASH_ARG_IN_MEMORY_P is not NULL, store 1 in it if X contains | |
2372 a MEM rtx which does not have the RTX_UNCHANGING_P bit set. | |
2373 | |
2374 Note that cse_insn knows that the hash code of a MEM expression | |
2375 is just (int) MEM plus the hash code of the address. */ | |
2376 | |
2377 unsigned | |
2378 hash_rtx (const_rtx x, enum machine_mode mode, int *do_not_record_p, | |
2379 int *hash_arg_in_memory_p, bool have_reg_qty) | |
2380 { | |
2381 return hash_rtx_cb (x, mode, do_not_record_p, | |
2382 hash_arg_in_memory_p, have_reg_qty, NULL); | |
2383 } | |
2384 | |
2385 /* Hash an rtx X for cse via hash_rtx. | |
2386 Stores 1 in do_not_record if any subexpression is volatile. | |
2387 Stores 1 in hash_arg_in_memory if X contains a mem rtx which | |
2388 does not have the RTX_UNCHANGING_P bit set. */ | |
2389 | |
2390 static inline unsigned | |
2391 canon_hash (rtx x, enum machine_mode mode) | |
2392 { | |
2393 return hash_rtx (x, mode, &do_not_record, &hash_arg_in_memory, true); | |
2394 } | |
2395 | |
2396 /* Like canon_hash but with no side effects, i.e. do_not_record | |
2397 and hash_arg_in_memory are not changed. */ | |
2398 | |
2399 static inline unsigned | |
2400 safe_hash (rtx x, enum machine_mode mode) | |
2401 { | |
2402 int dummy_do_not_record; | |
2403 return hash_rtx (x, mode, &dummy_do_not_record, NULL, true); | |
2404 } | |
2405 | |
2406 /* Return 1 iff X and Y would canonicalize into the same thing, | |
2407 without actually constructing the canonicalization of either one. | |
2408 If VALIDATE is nonzero, | |
2409 we assume X is an expression being processed from the rtl | |
2410 and Y was found in the hash table. We check register refs | |
2411 in Y for being marked as valid. | |
2412 | |
2413 If FOR_GCSE is true, we compare X and Y for equivalence for GCSE. */ | |
2414 | |
2415 int | |
2416 exp_equiv_p (const_rtx x, const_rtx y, int validate, bool for_gcse) | |
2417 { | |
2418 int i, j; | |
2419 enum rtx_code code; | |
2420 const char *fmt; | |
2421 | |
2422 /* Note: it is incorrect to assume an expression is equivalent to itself | |
2423 if VALIDATE is nonzero. */ | |
2424 if (x == y && !validate) | |
2425 return 1; | |
2426 | |
2427 if (x == 0 || y == 0) | |
2428 return x == y; | |
2429 | |
2430 code = GET_CODE (x); | |
2431 if (code != GET_CODE (y)) | |
2432 return 0; | |
2433 | |
2434 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */ | |
2435 if (GET_MODE (x) != GET_MODE (y)) | |
2436 return 0; | |
2437 | |
2438 switch (code) | |
2439 { | |
2440 case PC: | |
2441 case CC0: | |
2442 case CONST_INT: | |
2443 case CONST_DOUBLE: | |
2444 case CONST_FIXED: | |
2445 return x == y; | |
2446 | |
2447 case LABEL_REF: | |
2448 return XEXP (x, 0) == XEXP (y, 0); | |
2449 | |
2450 case SYMBOL_REF: | |
2451 return XSTR (x, 0) == XSTR (y, 0); | |
2452 | |
2453 case REG: | |
2454 if (for_gcse) | |
2455 return REGNO (x) == REGNO (y); | |
2456 else | |
2457 { | |
2458 unsigned int regno = REGNO (y); | |
2459 unsigned int i; | |
2460 unsigned int endregno = END_REGNO (y); | |
2461 | |
2462 /* If the quantities are not the same, the expressions are not | |
2463 equivalent. If they are and we are not to validate, they | |
2464 are equivalent. Otherwise, ensure all regs are up-to-date. */ | |
2465 | |
2466 if (REG_QTY (REGNO (x)) != REG_QTY (regno)) | |
2467 return 0; | |
2468 | |
2469 if (! validate) | |
2470 return 1; | |
2471 | |
2472 for (i = regno; i < endregno; i++) | |
2473 if (REG_IN_TABLE (i) != REG_TICK (i)) | |
2474 return 0; | |
2475 | |
2476 return 1; | |
2477 } | |
2478 | |
2479 case MEM: | |
2480 if (for_gcse) | |
2481 { | |
2482 /* A volatile mem should not be considered equivalent to any | |
2483 other. */ | |
2484 if (MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y)) | |
2485 return 0; | |
2486 | |
2487 /* Can't merge two expressions in different alias sets, since we | |
2488 can decide that the expression is transparent in a block when | |
2489 it isn't, due to it being set with the different alias set. | |
2490 | |
2491 Also, can't merge two expressions with different MEM_ATTRS. | |
2492 They could e.g. be two different entities allocated into the | |
2493 same space on the stack (see e.g. PR25130). In that case, the | |
2494 MEM addresses can be the same, even though the two MEMs are | |
2495 absolutely not equivalent. | |
2496 | |
2497 But because all MEM attributes really should be the same for | |
2498 equivalent MEMs, we just use the invariant that MEMs that have | |
2499 the same attributes share the same mem_attrs data structure. */ | |
2500 if (MEM_ATTRS (x) != MEM_ATTRS (y)) | |
2501 return 0; | |
2502 } | |
2503 break; | |
2504 | |
2505 /* For commutative operations, check both orders. */ | |
2506 case PLUS: | |
2507 case MULT: | |
2508 case AND: | |
2509 case IOR: | |
2510 case XOR: | |
2511 case NE: | |
2512 case EQ: | |
2513 return ((exp_equiv_p (XEXP (x, 0), XEXP (y, 0), | |
2514 validate, for_gcse) | |
2515 && exp_equiv_p (XEXP (x, 1), XEXP (y, 1), | |
2516 validate, for_gcse)) | |
2517 || (exp_equiv_p (XEXP (x, 0), XEXP (y, 1), | |
2518 validate, for_gcse) | |
2519 && exp_equiv_p (XEXP (x, 1), XEXP (y, 0), | |
2520 validate, for_gcse))); | |
2521 | |
2522 case ASM_OPERANDS: | |
2523 /* We don't use the generic code below because we want to | |
2524 disregard filename and line numbers. */ | |
2525 | |
2526 /* A volatile asm isn't equivalent to any other. */ | |
2527 if (MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y)) | |
2528 return 0; | |
2529 | |
2530 if (GET_MODE (x) != GET_MODE (y) | |
2531 || strcmp (ASM_OPERANDS_TEMPLATE (x), ASM_OPERANDS_TEMPLATE (y)) | |
2532 || strcmp (ASM_OPERANDS_OUTPUT_CONSTRAINT (x), | |
2533 ASM_OPERANDS_OUTPUT_CONSTRAINT (y)) | |
2534 || ASM_OPERANDS_OUTPUT_IDX (x) != ASM_OPERANDS_OUTPUT_IDX (y) | |
2535 || ASM_OPERANDS_INPUT_LENGTH (x) != ASM_OPERANDS_INPUT_LENGTH (y)) | |
2536 return 0; | |
2537 | |
2538 if (ASM_OPERANDS_INPUT_LENGTH (x)) | |
2539 { | |
2540 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--) | |
2541 if (! exp_equiv_p (ASM_OPERANDS_INPUT (x, i), | |
2542 ASM_OPERANDS_INPUT (y, i), | |
2543 validate, for_gcse) | |
2544 || strcmp (ASM_OPERANDS_INPUT_CONSTRAINT (x, i), | |
2545 ASM_OPERANDS_INPUT_CONSTRAINT (y, i))) | |
2546 return 0; | |
2547 } | |
2548 | |
2549 return 1; | |
2550 | |
2551 default: | |
2552 break; | |
2553 } | |
2554 | |
2555 /* Compare the elements. If any pair of corresponding elements | |
2556 fails to match, return 0 for the whole thing. */ | |
2557 | |
2558 fmt = GET_RTX_FORMAT (code); | |
2559 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
2560 { | |
2561 switch (fmt[i]) | |
2562 { | |
2563 case 'e': | |
2564 if (! exp_equiv_p (XEXP (x, i), XEXP (y, i), | |
2565 validate, for_gcse)) | |
2566 return 0; | |
2567 break; | |
2568 | |
2569 case 'E': | |
2570 if (XVECLEN (x, i) != XVECLEN (y, i)) | |
2571 return 0; | |
2572 for (j = 0; j < XVECLEN (x, i); j++) | |
2573 if (! exp_equiv_p (XVECEXP (x, i, j), XVECEXP (y, i, j), | |
2574 validate, for_gcse)) | |
2575 return 0; | |
2576 break; | |
2577 | |
2578 case 's': | |
2579 if (strcmp (XSTR (x, i), XSTR (y, i))) | |
2580 return 0; | |
2581 break; | |
2582 | |
2583 case 'i': | |
2584 if (XINT (x, i) != XINT (y, i)) | |
2585 return 0; | |
2586 break; | |
2587 | |
2588 case 'w': | |
2589 if (XWINT (x, i) != XWINT (y, i)) | |
2590 return 0; | |
2591 break; | |
2592 | |
2593 case '0': | |
2594 case 't': | |
2595 break; | |
2596 | |
2597 default: | |
2598 gcc_unreachable (); | |
2599 } | |
2600 } | |
2601 | |
2602 return 1; | |
2603 } | |
2604 | |
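/* Editor's sketch: the PLUS/MULT/AND/IOR/XOR/NE/EQ case above accepts
   a match in either operand order.  Stripped of rtx details (the
   function name is invented):  */

static int
comm_pair_equal_p (int x0, int x1, int y0, int y1)
{
  return (x0 == y0 && x1 == y1) || (x0 == y1 && x1 == y0);
}

/* Since hash_rtx hashes operands in their written order, commutative
   twins can still land in different buckets; fold_rtx's call to
   canonicalize_change_group (below) is what usually makes the orders
   agree before the table is consulted.  */
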
2605 /* Return 1 if X has a value that can vary even between two | |
2606 executions of the program. 0 means X can be compared reliably | |
2607 against certain constants or near-constants. */ | |
2608 | |
2609 static bool | |
2610 cse_rtx_varies_p (const_rtx x, bool from_alias) | |
2611 { | |
2612 /* We need not check for X and the equivalence class being of the same | |
2613 mode because if X is equivalent to a constant in some mode, it | |
2614 doesn't vary in any mode. */ | |
2615 | |
2616 if (REG_P (x) | |
2617 && REGNO_QTY_VALID_P (REGNO (x))) | |
2618 { | |
2619 int x_q = REG_QTY (REGNO (x)); | |
2620 struct qty_table_elem *x_ent = &qty_table[x_q]; | |
2621 | |
2622 if (GET_MODE (x) == x_ent->mode | |
2623 && x_ent->const_rtx != NULL_RTX) | |
2624 return 0; | |
2625 } | |
2626 | |
2627 if (GET_CODE (x) == PLUS | |
2628 && GET_CODE (XEXP (x, 1)) == CONST_INT | |
2629 && REG_P (XEXP (x, 0)) | |
2630 && REGNO_QTY_VALID_P (REGNO (XEXP (x, 0)))) | |
2631 { | |
2632 int x0_q = REG_QTY (REGNO (XEXP (x, 0))); | |
2633 struct qty_table_elem *x0_ent = &qty_table[x0_q]; | |
2634 | |
2635 if ((GET_MODE (XEXP (x, 0)) == x0_ent->mode) | |
2636 && x0_ent->const_rtx != NULL_RTX) | |
2637 return 0; | |
2638 } | |
2639 | |
2640 /* This can happen as the result of virtual register instantiation, if | |
2641 the initial constant is too large to be a valid address. This gives | |
2642 us a three instruction sequence, load large offset into a register, | |
2643 load fp minus a constant into a register, then a MEM which is the | |
2644 sum of the two `constant' registers. */ | |
2645 if (GET_CODE (x) == PLUS | |
2646 && REG_P (XEXP (x, 0)) | |
2647 && REG_P (XEXP (x, 1)) | |
2648 && REGNO_QTY_VALID_P (REGNO (XEXP (x, 0))) | |
2649 && REGNO_QTY_VALID_P (REGNO (XEXP (x, 1)))) | |
2650 { | |
2651 int x0_q = REG_QTY (REGNO (XEXP (x, 0))); | |
2652 int x1_q = REG_QTY (REGNO (XEXP (x, 1))); | |
2653 struct qty_table_elem *x0_ent = &qty_table[x0_q]; | |
2654 struct qty_table_elem *x1_ent = &qty_table[x1_q]; | |
2655 | |
2656 if ((GET_MODE (XEXP (x, 0)) == x0_ent->mode) | |
2657 && x0_ent->const_rtx != NULL_RTX | |
2658 && (GET_MODE (XEXP (x, 1)) == x1_ent->mode) | |
2659 && x1_ent->const_rtx != NULL_RTX) | |
2660 return 0; | |
2661 } | |
2662 | |
2663 return rtx_varies_p (x, from_alias); | |
2664 } | |
2665 | |
2666 /* Subroutine of canon_reg. Pass *XLOC through canon_reg, and validate | |
2667 the result if necessary. INSN is as for canon_reg. */ | |
2668 | |
2669 static void | |
2670 validate_canon_reg (rtx *xloc, rtx insn) | |
2671 { | |
2672 if (*xloc) | |
2673 { | |
2674 rtx new_rtx = canon_reg (*xloc, insn); | |
2675 | |
2676 /* If replacing pseudo with hard reg or vice versa, ensure the | |
2677 insn remains valid. Likewise if the insn has MATCH_DUPs. */ | |
2678 gcc_assert (insn && new_rtx); | |
2679 validate_change (insn, xloc, new_rtx, 1); | |
2680 } | |
2681 } | |
2682 | |
2683 /* Canonicalize an expression: | |
2684 replace each register reference inside it | |
2685 with the "oldest" equivalent register. | |
2686 | |
2687 If INSN is nonzero validate_change is used to ensure that INSN remains valid | |
2688 after we make our substitution. The calls are made with IN_GROUP nonzero | |
2689 so apply_change_group must be called upon the outermost return from this | |
2690 function (unless INSN is zero). The result of apply_change_group can | |
2691 generally be discarded since the changes we are making are optional. */ | |
2692 | |
2693 static rtx | |
2694 canon_reg (rtx x, rtx insn) | |
2695 { | |
2696 int i; | |
2697 enum rtx_code code; | |
2698 const char *fmt; | |
2699 | |
2700 if (x == 0) | |
2701 return x; | |
2702 | |
2703 code = GET_CODE (x); | |
2704 switch (code) | |
2705 { | |
2706 case PC: | |
2707 case CC0: | |
2708 case CONST: | |
2709 case CONST_INT: | |
2710 case CONST_DOUBLE: | |
2711 case CONST_FIXED: | |
2712 case CONST_VECTOR: | |
2713 case SYMBOL_REF: | |
2714 case LABEL_REF: | |
2715 case ADDR_VEC: | |
2716 case ADDR_DIFF_VEC: | |
2717 return x; | |
2718 | |
2719 case REG: | |
2720 { | |
2721 int first; | |
2722 int q; | |
2723 struct qty_table_elem *ent; | |
2724 | |
2725 /* Never replace a hard reg, because hard regs can appear | |
2726 in more than one machine mode, and we must preserve the mode | |
2727 of each occurrence. Also, some hard regs appear in | |
2728 MEMs that are shared and mustn't be altered. Don't try to | |
2729 replace any reg that maps to a reg of class NO_REGS. */ | |
2730 if (REGNO (x) < FIRST_PSEUDO_REGISTER | |
2731 || ! REGNO_QTY_VALID_P (REGNO (x))) | |
2732 return x; | |
2733 | |
2734 q = REG_QTY (REGNO (x)); | |
2735 ent = &qty_table[q]; | |
2736 first = ent->first_reg; | |
2737 return (first >= FIRST_PSEUDO_REGISTER ? regno_reg_rtx[first] | |
2738 : REGNO_REG_CLASS (first) == NO_REGS ? x | |
2739 : gen_rtx_REG (ent->mode, first)); | |
2740 } | |
2741 | |
2742 default: | |
2743 break; | |
2744 } | |
2745 | |
2746 fmt = GET_RTX_FORMAT (code); | |
2747 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
2748 { | |
2749 int j; | |
2750 | |
2751 if (fmt[i] == 'e') | |
2752 validate_canon_reg (&XEXP (x, i), insn); | |
2753 else if (fmt[i] == 'E') | |
2754 for (j = 0; j < XVECLEN (x, i); j++) | |
2755 validate_canon_reg (&XVECEXP (x, i, j), insn); | |
2756 } | |
2757 | |
2758 return x; | |
2759 } | |
2760 | |
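/* Editor's sketch: for pseudos the canonicalization above is just two
   array lookups -- register to quantity, quantity to its oldest
   member -- so expressions over equal values become textually
   identical before they are hashed.  Toy model (invented names):  */

#define TOY_NREGS 1024
static int toy_reg_qty[TOY_NREGS];	/* register number -> quantity */
static int toy_qty_first[TOY_NREGS];	/* quantity -> oldest register */

static int
toy_canon_regno (int regno)
{
  return toy_qty_first[toy_reg_qty[regno]];
}
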
2761 /* Given an operation (CODE, *PARG1, *PARG2), where code is a comparison | |
2762 operation (EQ, NE, GT, etc.), follow it back through the hash table | |
2763 to find what values are actually being compared. | |
2764 | |
2765 *PARG1 and *PARG2 are updated to contain the rtx representing the values | |
2766 actually being compared. For example, if *PARG1 was (cc0) and *PARG2 | |
2767 was (const_int 0), *PARG1 and *PARG2 will be set to the objects that were | |
2768 compared to produce cc0. | |
2769 | |
2770 The return value is the comparison operator: either CODE itself | |
2771 or the code corresponding to the inverse of the comparison. */ | |
2772 | |
2773 static enum rtx_code | |
2774 find_comparison_args (enum rtx_code code, rtx *parg1, rtx *parg2, | |
2775 enum machine_mode *pmode1, enum machine_mode *pmode2) | |
2776 { | |
2777 rtx arg1, arg2; | |
2778 | |
2779 arg1 = *parg1, arg2 = *parg2; | |
2780 | |
2781 /* If ARG2 is const0_rtx, see what ARG1 is equivalent to. */ | |
2782 | |
2783 while (arg2 == CONST0_RTX (GET_MODE (arg1))) | |
2784 { | |
2785 /* Set nonzero when we find something of interest. */ | |
2786 rtx x = 0; | |
2787 int reverse_code = 0; | |
2788 struct table_elt *p = 0; | |
2789 | |
2790 /* If arg1 is a COMPARE, extract the comparison arguments from it. | |
2791 On machines with CC0, this is the only case that can occur, since | |
2792 fold_rtx will return the COMPARE or item being compared with zero | |
2793 when given CC0. */ | |
2794 | |
2795 if (GET_CODE (arg1) == COMPARE && arg2 == const0_rtx) | |
2796 x = arg1; | |
2797 | |
2798 /* If ARG1 is a comparison operator and CODE is testing for | |
2799 STORE_FLAG_VALUE, get the inner arguments. */ | |
2800 | |
2801 else if (COMPARISON_P (arg1)) | |
2802 { | |
2803 #ifdef FLOAT_STORE_FLAG_VALUE | |
2804 REAL_VALUE_TYPE fsfv; | |
2805 #endif | |
2806 | |
2807 if (code == NE | |
2808 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_INT | |
2809 && code == LT && STORE_FLAG_VALUE == -1) | |
2810 #ifdef FLOAT_STORE_FLAG_VALUE | |
2811 || (SCALAR_FLOAT_MODE_P (GET_MODE (arg1)) | |
2812 && (fsfv = FLOAT_STORE_FLAG_VALUE (GET_MODE (arg1)), | |
2813 REAL_VALUE_NEGATIVE (fsfv))) | |
2814 #endif | |
2815 ) | |
2816 x = arg1; | |
2817 else if (code == EQ | |
2818 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_INT | |
2819 && code == GE && STORE_FLAG_VALUE == -1) | |
2820 #ifdef FLOAT_STORE_FLAG_VALUE | |
2821 || (SCALAR_FLOAT_MODE_P (GET_MODE (arg1)) | |
2822 && (fsfv = FLOAT_STORE_FLAG_VALUE (GET_MODE (arg1)), | |
2823 REAL_VALUE_NEGATIVE (fsfv))) | |
2824 #endif | |
2825 ) | |
2826 x = arg1, reverse_code = 1; | |
2827 } | |
2828 | |
2829 /* ??? We could also check for | |
2830 | |
2831 (ne (and (eq (...) (const_int 1))) (const_int 0)) | |
2832 | |
2833 and related forms, but let's wait until we see them occurring. */ | |
2834 | |
2835 if (x == 0) | |
2836 /* Look up ARG1 in the hash table and see if it has an equivalence | |
2837 that lets us see what is being compared. */ | |
2838 p = lookup (arg1, SAFE_HASH (arg1, GET_MODE (arg1)), GET_MODE (arg1)); | |
2839 if (p) | |
2840 { | |
2841 p = p->first_same_value; | |
2842 | |
2843 /* If what we compare is already known to be constant, that is as | |
2844 good as it gets. | |
2845 We need to break the loop in this case, because otherwise we | |
2846 can have an infinite loop when looking at a reg that is known | |
2847 to be a constant which is the same as a comparison of a reg | |
2848 against zero which appears later in the insn stream, which in | |
2849 turn is constant and the same as the comparison of the first reg | |
2850 against zero... */ | |
2851 if (p->is_const) | |
2852 break; | |
2853 } | |
2854 | |
2855 for (; p; p = p->next_same_value) | |
2856 { | |
2857 enum machine_mode inner_mode = GET_MODE (p->exp); | |
2858 #ifdef FLOAT_STORE_FLAG_VALUE | |
2859 REAL_VALUE_TYPE fsfv; | |
2860 #endif | |
2861 | |
2862 /* If the entry isn't valid, skip it. */ | |
2863 if (! exp_equiv_p (p->exp, p->exp, 1, false)) | |
2864 continue; | |
2865 | |
2866 if (GET_CODE (p->exp) == COMPARE | |
2867 /* Another possibility is that this machine has a compare insn | |
2868 that includes the comparison code. In that case, ARG1 would | |
2869 be equivalent to a comparison operation that would set ARG1 to | |
2870 either STORE_FLAG_VALUE or zero. If this is an NE operation, | |
2871 ORIG_CODE is the actual comparison being done; if it is an EQ, | |
2872 we must reverse ORIG_CODE. On machines with a negative value | |
2873 for STORE_FLAG_VALUE, also look at LT and GE operations. */ | |
2874 || ((code == NE | |
2875 || (code == LT | |
2876 && GET_MODE_CLASS (inner_mode) == MODE_INT | |
2877 && (GET_MODE_BITSIZE (inner_mode) | |
2878 <= HOST_BITS_PER_WIDE_INT) | |
2879 && (STORE_FLAG_VALUE | |
2880 & ((HOST_WIDE_INT) 1 | |
2881 << (GET_MODE_BITSIZE (inner_mode) - 1)))) | |
2882 #ifdef FLOAT_STORE_FLAG_VALUE | |
2883 || (code == LT | |
2884 && SCALAR_FLOAT_MODE_P (inner_mode) | |
2885 && (fsfv = FLOAT_STORE_FLAG_VALUE (GET_MODE (arg1)), | |
2886 REAL_VALUE_NEGATIVE (fsfv))) | |
2887 #endif | |
2888 ) | |
2889 && COMPARISON_P (p->exp))) | |
2890 { | |
2891 x = p->exp; | |
2892 break; | |
2893 } | |
2894 else if ((code == EQ | |
2895 || (code == GE | |
2896 && GET_MODE_CLASS (inner_mode) == MODE_INT | |
2897 && (GET_MODE_BITSIZE (inner_mode) | |
2898 <= HOST_BITS_PER_WIDE_INT) | |
2899 && (STORE_FLAG_VALUE | |
2900 & ((HOST_WIDE_INT) 1 | |
2901 << (GET_MODE_BITSIZE (inner_mode) - 1)))) | |
2902 #ifdef FLOAT_STORE_FLAG_VALUE | |
2903 || (code == GE | |
2904 && SCALAR_FLOAT_MODE_P (inner_mode) | |
2905 && (fsfv = FLOAT_STORE_FLAG_VALUE (GET_MODE (arg1)), | |
2906 REAL_VALUE_NEGATIVE (fsfv))) | |
2907 #endif | |
2908 ) | |
2909 && COMPARISON_P (p->exp)) | |
2910 { | |
2911 reverse_code = 1; | |
2912 x = p->exp; | |
2913 break; | |
2914 } | |
2915 | |
2916 /* If this is a non-trapping address, e.g. fp + constant, the | |
2917 equivalent is a better operand since it may let us predict | |
2918 the value of the comparison. */ | |
2919 else if (!rtx_addr_can_trap_p (p->exp)) | |
2920 { | |
2921 arg1 = p->exp; | |
2922 continue; | |
2923 } | |
2924 } | |
2925 | |
2926 /* If we didn't find a useful equivalence for ARG1, we are done. | |
2927 Otherwise, set up for the next iteration. */ | |
2928 if (x == 0) | |
2929 break; | |
2930 | |
2931 /* If we need to reverse the comparison, make sure the reversal is | |
2932 possible -- we can't necessarily infer the value of GE from LT | |
2933 with floating-point operands. */ | |
2934 if (reverse_code) | |
2935 { | |
2936 enum rtx_code reversed = reversed_comparison_code (x, NULL_RTX); | |
2937 if (reversed == UNKNOWN) | |
2938 break; | |
2939 else | |
2940 code = reversed; | |
2941 } | |
2942 else if (COMPARISON_P (x)) | |
2943 code = GET_CODE (x); | |
2944 arg1 = XEXP (x, 0), arg2 = XEXP (x, 1); | |
2945 } | |
2946 | |
2947 /* Return our results. Return the modes from before fold_rtx, because | |
2948 fold_rtx might produce a const_int, whose VOIDmode would lose them. */ | |
2949 *pmode1 = GET_MODE (arg1), *pmode2 = GET_MODE (arg2); | |
2950 *parg1 = fold_rtx (arg1, 0), *parg2 = fold_rtx (arg2, 0); | |
2951 | |
2952 return code; | |
2953 } | |
2954 | |
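/* Editor's worked example (register numbers invented): suppose an
   earlier insn recorded
       r70 = (lt:SI r65 r66)
   with STORE_FLAG_VALUE == 1, and we now fold the test
       (ne r70 (const_int 0)).
   The loop above finds (lt r65 r66) in r70's equivalence class, so the
   arguments become r65/r66 and CODE becomes LT.  For
       (eq r70 (const_int 0))
   the same entry matches the reverse_code branch instead, and the
   result is the inverse comparison GE -- but only when
   reversed_comparison_code confirms the reversal is valid, which it
   may not be for IEEE floating point.  */
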
2955 /* If X is a nontrivial arithmetic operation on an argument for which | |
2956 a constant value can be determined, return the result of operating | |
2957 on that value, as a constant. Otherwise, return X, possibly with | |
2958 one or more operands changed to a forward-propagated constant. | |
2959 | |
2960 If X is a register whose contents are known, we do NOT return | |
2961 those contents here; equiv_constant is called to perform that task. | |
2962 For SUBREGs and MEMs, we do that both here and in equiv_constant. | |
2963 | |
2964 INSN is the insn that we may be modifying. If it is 0, make a copy | |
2965 of X before modifying it. */ | |
2966 | |
2967 static rtx | |
2968 fold_rtx (rtx x, rtx insn) | |
2969 { | |
2970 enum rtx_code code; | |
2971 enum machine_mode mode; | |
2972 const char *fmt; | |
2973 int i; | |
2974 rtx new_rtx = 0; | |
2975 int changed = 0; | |
2976 | |
2977 /* Operands of X. */ | |
2978 rtx folded_arg0; | |
2979 rtx folded_arg1; | |
2980 | |
2981 /* Constant equivalents of first three operands of X; | |
2982 0 when no such equivalent is known. */ | |
2983 rtx const_arg0; | |
2984 rtx const_arg1; | |
2985 rtx const_arg2; | |
2986 | |
2987 /* The mode of the first operand of X. We need this for sign and zero | |
2988 extends. */ | |
2989 enum machine_mode mode_arg0; | |
2990 | |
2991 if (x == 0) | |
2992 return x; | |
2993 | |
2994 /* Try to perform some initial simplifications on X. */ | |
2995 code = GET_CODE (x); | |
2996 switch (code) | |
2997 { | |
2998 case MEM: | |
2999 case SUBREG: | |
3000 if ((new_rtx = equiv_constant (x)) != NULL_RTX) | |
3001 return new_rtx; | |
3002 return x; | |
3003 | |
3004 case CONST: | |
3005 case CONST_INT: | |
3006 case CONST_DOUBLE: | |
3007 case CONST_FIXED: | |
3008 case CONST_VECTOR: | |
3009 case SYMBOL_REF: | |
3010 case LABEL_REF: | |
3011 case REG: | |
3012 case PC: | |
3013 /* No use simplifying an EXPR_LIST, | |
3014 since EXPR_LISTs are used only for lists of args | |
3015 in a function call's REG_EQUAL note. */ | |
3016 case EXPR_LIST: | |
3017 return x; | |
3018 | |
3019 #ifdef HAVE_cc0 | |
3020 case CC0: | |
3021 return prev_insn_cc0; | |
3022 #endif | |
3023 | |
3024 case ASM_OPERANDS: | |
3025 if (insn) | |
3026 { | |
3027 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--) | |
3028 validate_change (insn, &ASM_OPERANDS_INPUT (x, i), | |
3029 fold_rtx (ASM_OPERANDS_INPUT (x, i), insn), 0); | |
3030 } | |
3031 return x; | |
3032 | |
3033 #ifdef NO_FUNCTION_CSE | |
3034 case CALL: | |
3035 if (CONSTANT_P (XEXP (XEXP (x, 0), 0))) | |
3036 return x; | |
3037 break; | |
3038 #endif | |
3039 | |
3040 /* Anything else goes through the loop below. */ | |
3041 default: | |
3042 break; | |
3043 } | |
3044 | |
3045 mode = GET_MODE (x); | |
3046 const_arg0 = 0; | |
3047 const_arg1 = 0; | |
3048 const_arg2 = 0; | |
3049 mode_arg0 = VOIDmode; | |
3050 | |
3051 /* Try folding our operands. | |
3052 Then see which ones have constant values known. */ | |
3053 | |
3054 fmt = GET_RTX_FORMAT (code); | |
3055 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
3056 if (fmt[i] == 'e') | |
3057 { | |
3058 rtx folded_arg = XEXP (x, i), const_arg; | |
3059 enum machine_mode mode_arg = GET_MODE (folded_arg); | |
3060 | |
3061 switch (GET_CODE (folded_arg)) | |
3062 { | |
3063 case MEM: | |
3064 case REG: | |
3065 case SUBREG: | |
3066 const_arg = equiv_constant (folded_arg); | |
3067 break; | |
3068 | |
3069 case CONST: | |
3070 case CONST_INT: | |
3071 case SYMBOL_REF: | |
3072 case LABEL_REF: | |
3073 case CONST_DOUBLE: | |
3074 case CONST_FIXED: | |
3075 case CONST_VECTOR: | |
3076 const_arg = folded_arg; | |
3077 break; | |
3078 | |
3079 #ifdef HAVE_cc0 | |
3080 case CC0: | |
3081 folded_arg = prev_insn_cc0; | |
3082 mode_arg = prev_insn_cc0_mode; | |
3083 const_arg = equiv_constant (folded_arg); | |
3084 break; | |
3085 #endif | |
3086 | |
3087 default: | |
3088 folded_arg = fold_rtx (folded_arg, insn); | |
3089 const_arg = equiv_constant (folded_arg); | |
3090 break; | |
3091 } | |
3092 | |
3093 /* For the first three operands, see if the operand | |
3094 is constant or equivalent to a constant. */ | |
3095 switch (i) | |
3096 { | |
3097 case 0: | |
3098 folded_arg0 = folded_arg; | |
3099 const_arg0 = const_arg; | |
3100 mode_arg0 = mode_arg; | |
3101 break; | |
3102 case 1: | |
3103 folded_arg1 = folded_arg; | |
3104 const_arg1 = const_arg; | |
3105 break; | |
3106 case 2: | |
3107 const_arg2 = const_arg; | |
3108 break; | |
3109 } | |
3110 | |
3111 /* Pick the least expensive of the argument and an equivalent constant | |
3112 argument. */ | |
3113 if (const_arg != 0 | |
3114 && const_arg != folded_arg | |
3115 && COST_IN (const_arg, code) <= COST_IN (folded_arg, code) | |
3116 | |
3117 /* It's not safe to substitute the operand of a conversion | |
3118 operator with a constant, as the conversion's identity | |
3119 depends upon the mode of its operand. This optimization | |
3120 is handled by the call to simplify_unary_operation. */ | |
3121 && (GET_RTX_CLASS (code) != RTX_UNARY | |
3122 || GET_MODE (const_arg) == mode_arg0 | |
3123 || (code != ZERO_EXTEND | |
3124 && code != SIGN_EXTEND | |
3125 && code != TRUNCATE | |
3126 && code != FLOAT_TRUNCATE | |
3127 && code != FLOAT_EXTEND | |
3128 && code != FLOAT | |
3129 && code != FIX | |
3130 && code != UNSIGNED_FLOAT | |
3131 && code != UNSIGNED_FIX))) | |
3132 folded_arg = const_arg; | |
3133 | |
3134 if (folded_arg == XEXP (x, i)) | |
3135 continue; | |
3136 | |
3137 if (insn == NULL_RTX && !changed) | |
3138 x = copy_rtx (x); | |
3139 changed = 1; | |
3140 validate_unshare_change (insn, &XEXP (x, i), folded_arg, 1); | |
3141 } | |
3142 | |
3143 if (changed) | |
3144 { | |
3145 /* Canonicalize X if necessary, and keep const_argN and folded_argN | |
3146 consistent with the order in X. */ | |
3147 if (canonicalize_change_group (insn, x)) | |
3148 { | |
3149 rtx tem; | |
3150 tem = const_arg0, const_arg0 = const_arg1, const_arg1 = tem; | |
3151 tem = folded_arg0, folded_arg0 = folded_arg1, folded_arg1 = tem; | |
3152 } | |
3153 | |
3154 apply_change_group (); | |
3155 } | |
3156 | |
3157 /* If X is an arithmetic operation, see if we can simplify it. */ | |
3158 | |
3159 switch (GET_RTX_CLASS (code)) | |
3160 { | |
3161 case RTX_UNARY: | |
3162 { | |
3163 /* We can't simplify extension ops unless we know the | |
3164 original mode. */ | |
3165 if ((code == ZERO_EXTEND || code == SIGN_EXTEND) | |
3166 && mode_arg0 == VOIDmode) | |
3167 break; | |
3168 | |
3169 new_rtx = simplify_unary_operation (code, mode, | |
3170 const_arg0 ? const_arg0 : folded_arg0, | |
3171 mode_arg0); | |
3172 } | |
3173 break; | |
3174 | |
3175 case RTX_COMPARE: | |
3176 case RTX_COMM_COMPARE: | |
3177 /* See what items are actually being compared and set FOLDED_ARG[01] | |
3178 to those values and CODE to the actual comparison code. If any are | |
3179 constant, set CONST_ARG0 and CONST_ARG1 appropriately. We needn't | |
3180 do anything if both operands are already known to be constant. */ | |
3181 | |
3182 /* ??? Vector mode comparisons are not supported yet. */ | |
3183 if (VECTOR_MODE_P (mode)) | |
3184 break; | |
3185 | |
3186 if (const_arg0 == 0 || const_arg1 == 0) | |
3187 { | |
3188 struct table_elt *p0, *p1; | |
3189 rtx true_rtx, false_rtx; | |
3190 enum machine_mode mode_arg1; | |
3191 | |
3192 if (SCALAR_FLOAT_MODE_P (mode)) | |
3193 { | |
3194 #ifdef FLOAT_STORE_FLAG_VALUE | |
3195 true_rtx = (CONST_DOUBLE_FROM_REAL_VALUE | |
3196 (FLOAT_STORE_FLAG_VALUE (mode), mode)); | |
3197 #else | |
3198 true_rtx = NULL_RTX; | |
3199 #endif | |
3200 false_rtx = CONST0_RTX (mode); | |
3201 } | |
3202 else | |
3203 { | |
3204 true_rtx = const_true_rtx; | |
3205 false_rtx = const0_rtx; | |
3206 } | |
3207 | |
3208 code = find_comparison_args (code, &folded_arg0, &folded_arg1, | |
3209 &mode_arg0, &mode_arg1); | |
3210 | |
3211 /* If the mode is VOIDmode or a MODE_CC mode, we don't know | |
3212 what kinds of things are being compared, so we can't do | |
3213 anything with this comparison. */ | |
3214 | |
3215 if (mode_arg0 == VOIDmode || GET_MODE_CLASS (mode_arg0) == MODE_CC) | |
3216 break; | |
3217 | |
3218 const_arg0 = equiv_constant (folded_arg0); | |
3219 const_arg1 = equiv_constant (folded_arg1); | |
3220 | |
3221 /* If we do not now have two constants being compared, see | |
3222 if we can nevertheless deduce some things about the | |
3223 comparison. */ | |
3224 if (const_arg0 == 0 || const_arg1 == 0) | |
3225 { | |
3226 if (const_arg1 != NULL) | |
3227 { | |
3228 rtx cheapest_simplification; | |
3229 int cheapest_cost; | |
3230 rtx simp_result; | |
3231 struct table_elt *p; | |
3232 | |
3233 /* See if we can find an equivalent of folded_arg0 | |
3234 that gets us a cheaper expression, possibly a | |
3235 constant through simplifications. */ | |
3236 p = lookup (folded_arg0, SAFE_HASH (folded_arg0, mode_arg0), | |
3237 mode_arg0); | |
3238 | |
3239 if (p != NULL) | |
3240 { | |
3241 cheapest_simplification = x; | |
3242 cheapest_cost = COST (x); | |
3243 | |
3244 for (p = p->first_same_value; p != NULL; p = p->next_same_value) | |
3245 { | |
3246 int cost; | |
3247 | |
3248 /* If the entry isn't valid, skip it. */ | |
3249 if (! exp_equiv_p (p->exp, p->exp, 1, false)) | |
3250 continue; | |
3251 | |
3252 /* Try to simplify using this equivalence. */ | |
3253 simp_result | |
3254 = simplify_relational_operation (code, mode, | |
3255 mode_arg0, | |
3256 p->exp, | |
3257 const_arg1); | |
3258 | |
3259 if (simp_result == NULL) | |
3260 continue; | |
3261 | |
3262 cost = COST (simp_result); | |
3263 if (cost < cheapest_cost) | |
3264 { | |
3265 cheapest_cost = cost; | |
3266 cheapest_simplification = simp_result; | |
3267 } | |
3268 } | |
3269 | |
3270 /* If we have a cheaper expression now, use that | |
3271 and try folding it further, from the top. */ | |
3272 if (cheapest_simplification != x) | |
3273 return fold_rtx (copy_rtx (cheapest_simplification), | |
3274 insn); | |
3275 } | |
3276 } | |
3277 | |
3278 /* See if the two operands are the same. */ | |
3279 | |
3280 if ((REG_P (folded_arg0) | |
3281 && REG_P (folded_arg1) | |
3282 && (REG_QTY (REGNO (folded_arg0)) | |
3283 == REG_QTY (REGNO (folded_arg1)))) | |
3284 || ((p0 = lookup (folded_arg0, | |
3285 SAFE_HASH (folded_arg0, mode_arg0), | |
3286 mode_arg0)) | |
3287 && (p1 = lookup (folded_arg1, | |
3288 SAFE_HASH (folded_arg1, mode_arg0), | |
3289 mode_arg0)) | |
3290 && p0->first_same_value == p1->first_same_value)) | |
3291 folded_arg1 = folded_arg0; | |
3292 | |
3293 /* If FOLDED_ARG0 is a register, see if the comparison we are | |
3294 doing now is either the same as we did before or the reverse | |
3295 (we only check the reverse if not floating-point). */ | |
3296 else if (REG_P (folded_arg0)) | |
3297 { | |
3298 int qty = REG_QTY (REGNO (folded_arg0)); | |
3299 | |
3300 if (REGNO_QTY_VALID_P (REGNO (folded_arg0))) | |
3301 { | |
3302 struct qty_table_elem *ent = &qty_table[qty]; | |
3303 | |
3304 if ((comparison_dominates_p (ent->comparison_code, code) | |
3305 || (! FLOAT_MODE_P (mode_arg0) | |
3306 && comparison_dominates_p (ent->comparison_code, | |
3307 reverse_condition (code)))) | |
3308 && (rtx_equal_p (ent->comparison_const, folded_arg1) | |
3309 || (const_arg1 | |
3310 && rtx_equal_p (ent->comparison_const, | |
3311 const_arg1)) | |
3312 || (REG_P (folded_arg1) | |
3313 && (REG_QTY (REGNO (folded_arg1)) == ent->comparison_qty)))) | |
3314 { | |
3315 if (comparison_dominates_p (ent->comparison_code, code)) | |
3316 { | |
3317 if (true_rtx) | |
3318 return true_rtx; | |
3319 else | |
3320 break; | |
3321 } | |
3322 else | |
3323 return false_rtx; | |
3324 } | |
3325 } | |
3326 } | |
3327 } | |
3328 } | |
3329 | |
3330 /* If we are comparing against zero, see if the first operand is | |
3331 equivalent to an IOR with a constant. If so, we may be able to | |
3332 determine the result of this comparison. */ | |
3333 if (const_arg1 == const0_rtx && !const_arg0) | |
3334 { | |
3335 rtx y = lookup_as_function (folded_arg0, IOR); | |
3336 rtx inner_const; | |
3337 | |
3338 if (y != 0 | |
3339 && (inner_const = equiv_constant (XEXP (y, 1))) != 0 | |
3340 && GET_CODE (inner_const) == CONST_INT | |
3341 && INTVAL (inner_const) != 0) | |
3342 folded_arg0 = gen_rtx_IOR (mode_arg0, XEXP (y, 0), inner_const); | |
3343 } | |
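   /* As a sketch (operands hypothetical): if (reg:SI 70) is known
      equivalent to (ior:SI (reg:SI 71) (const_int 4)), bit 2 of its
      value is certainly set, so comparing (reg:SI 70) against
      (const_int 0) can only yield "not equal"; rewriting folded_arg0
      as the IOR lets simplify_relational_operation below see that.  */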
3344 | |
3345 { | |
3346 rtx op0 = const_arg0 ? const_arg0 : folded_arg0; | |
3347 rtx op1 = const_arg1 ? const_arg1 : folded_arg1; | |
3348 new_rtx = simplify_relational_operation (code, mode, mode_arg0, op0, op1); | |
3349 } | |
3350 break; | |
3351 | |
3352 case RTX_BIN_ARITH: | |
3353 case RTX_COMM_ARITH: | |
3354 switch (code) | |
3355 { | |
3356 case PLUS: | |
3357 /* If the second operand is a LABEL_REF, see if the first is a MINUS | |
3358 with that LABEL_REF as its second operand. If so, the result is | |
3359 the first operand of that MINUS. This handles switches with an | |
3360 ADDR_DIFF_VEC table. */ | |
3361 if (const_arg1 && GET_CODE (const_arg1) == LABEL_REF) | |
3362 { | |
3363 rtx y | |
3364 = GET_CODE (folded_arg0) == MINUS ? folded_arg0 | |
3365 : lookup_as_function (folded_arg0, MINUS); | |
3366 | |
3367 if (y != 0 && GET_CODE (XEXP (y, 1)) == LABEL_REF | |
3368 && XEXP (XEXP (y, 1), 0) == XEXP (const_arg1, 0)) | |
3369 return XEXP (y, 0); | |
3370 | |
3371 /* Now try for a CONST of a MINUS like the above. */ | |
3372 if ((y = (GET_CODE (folded_arg0) == CONST ? folded_arg0 | |
3373 : lookup_as_function (folded_arg0, CONST))) != 0 | |
3374 && GET_CODE (XEXP (y, 0)) == MINUS | |
3375 && GET_CODE (XEXP (XEXP (y, 0), 1)) == LABEL_REF | |
3376 && XEXP (XEXP (XEXP (y, 0), 1), 0) == XEXP (const_arg1, 0)) | |
3377 return XEXP (XEXP (y, 0), 0); | |
3378 } | |
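	    /* A sketch of the ADDR_DIFF_VEC case (labels hypothetical): a
	       dispatch table entry stores (minus (label_ref L3)
	       (label_ref L1)) and the computed jump adds (label_ref L1)
	       back, so folding
	         (plus (minus (label_ref L3) (label_ref L1)) (label_ref L1))
	       returns (label_ref L3) directly.  */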
3379 | |
3380 /* Likewise if the operands are in the other order. */ | |
3381 if (const_arg0 && GET_CODE (const_arg0) == LABEL_REF) | |
3382 { | |
3383 rtx y | |
3384 = GET_CODE (folded_arg1) == MINUS ? folded_arg1 | |
3385 : lookup_as_function (folded_arg1, MINUS); | |
3386 | |
3387 if (y != 0 && GET_CODE (XEXP (y, 1)) == LABEL_REF | |
3388 && XEXP (XEXP (y, 1), 0) == XEXP (const_arg0, 0)) | |
3389 return XEXP (y, 0); | |
3390 | |
3391 /* Now try for a CONST of a MINUS like the above. */ | |
3392 if ((y = (GET_CODE (folded_arg1) == CONST ? folded_arg1 | |
3393 : lookup_as_function (folded_arg1, CONST))) != 0 | |
3394 && GET_CODE (XEXP (y, 0)) == MINUS | |
3395 && GET_CODE (XEXP (XEXP (y, 0), 1)) == LABEL_REF | |
3396 && XEXP (XEXP (XEXP (y, 0), 1), 0) == XEXP (const_arg0, 0)) | |
3397 return XEXP (XEXP (y, 0), 0); | |
3398 } | |
3399 | |
3400 /* If second operand is a register equivalent to a negative | |
3401 CONST_INT, see if we can find a register equivalent to the | |
3402 positive constant. Make a MINUS if so. Don't do this for | |
3403 a non-negative constant since we might then alternate between | |
3404 choosing positive and negative constants. Having the positive | |
3405 constant previously-used is the more common case. Be sure | |
3406 the resulting constant is non-negative; if const_arg1 were | |
3407 the smallest negative number this would overflow: depending | |
3408 on the mode, this would either just be the same value (and | |
3409 hence not save anything) or be incorrect. */ | |
3410 if (const_arg1 != 0 && GET_CODE (const_arg1) == CONST_INT | |
3411 && INTVAL (const_arg1) < 0 | |
3412 /* This used to test | |
3413 | |
3414 -INTVAL (const_arg1) >= 0 | |
3415 | |
3416 But the Sun V5.0 compilers mis-compiled that test. So | |
3417 instead we test for the problematic value in a more direct | |
3418 manner and hope the Sun compilers get it correct. */ | |
3419 && INTVAL (const_arg1) != | |
3420 ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)) | |
3421 && REG_P (folded_arg1)) | |
3422 { | |
3423 rtx new_const = GEN_INT (-INTVAL (const_arg1)); | |
3424 struct table_elt *p | |
3425 = lookup (new_const, SAFE_HASH (new_const, mode), mode); | |
3426 | |
3427 if (p) | |
3428 for (p = p->first_same_value; p; p = p->next_same_value) | |
3429 if (REG_P (p->exp)) | |
3430 return simplify_gen_binary (MINUS, mode, folded_arg0, | |
3431 canon_reg (p->exp, NULL_RTX)); | |
3432 } | |
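	    /* For instance (registers hypothetical): for
	       (plus:SI (reg:SI 80) (const_int -4)), if some (reg:SI 81)
	       is already known to hold (const_int 4), we return
	       (minus:SI (reg:SI 80) (reg:SI 81)), reusing the
	       previously-seen positive constant.  */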
3433 goto from_plus; | |
3434 | |
3435 case MINUS: | |
3436 /* If we have (MINUS Y C), see if Y is known to be (PLUS Z C2). | |
3437 If so, produce (PLUS Z C2-C). */ | |
3438 if (const_arg1 != 0 && GET_CODE (const_arg1) == CONST_INT) | |
3439 { | |
3440 rtx y = lookup_as_function (XEXP (x, 0), PLUS); | |
3441 if (y && GET_CODE (XEXP (y, 1)) == CONST_INT) | |
3442 return fold_rtx (plus_constant (copy_rtx (y), | |
3443 -INTVAL (const_arg1)), | |
3444 NULL_RTX); | |
3445 } | |
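	    /* E.g. (operands hypothetical): if Y is known to be
	       (plus:SI (reg:SI 90) (const_int 7)) and we see
	       (minus:SI Y (const_int 3)), this folds to
	       (plus:SI (reg:SI 90) (const_int 4)), i.e. C2-C = 7-3.  */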
3446 | |
3447 /* Fall through. */ | |
3448 | |
3449 from_plus: | |
3450 case SMIN: case SMAX: case UMIN: case UMAX: | |
3451 case IOR: case AND: case XOR: | |
3452 case MULT: | |
3453 case ASHIFT: case LSHIFTRT: case ASHIFTRT: | |
3454 /* If we have (<op> <reg> <const_int>) for an associative OP and REG | |
3455 is known to be of similar form, we may be able to replace the | |
3456 operation with a combined operation. This may eliminate the | |
3457 intermediate operation if every use is simplified in this way. | |
3458 Note that the similar optimization done by combine.c only works | |
3459 if the intermediate operation's result has only one reference. */ | |
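	    /* A sketch of that combination (registers hypothetical): if
	       (reg:SI 95) is known equivalent to (ashift:SI (reg:SI 94)
	       (const_int 3)), then (ashift:SI (reg:SI 95) (const_int 2))
	       becomes (ashift:SI (reg:SI 94) (const_int 5)); the
	       intermediate shift can die if all its uses simplify this
	       way.  */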
3460 | |
3461 if (REG_P (folded_arg0) | |
3462 && const_arg1 && GET_CODE (const_arg1) == CONST_INT) | |
3463 { | |
3464 int is_shift | |
3465 = (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT); | |
3466 rtx y, inner_const, new_const; | |
3467 rtx canon_const_arg1 = const_arg1; | |
3468 enum rtx_code associate_code; | |
3469 | |
3470 if (is_shift | |
3471 && (INTVAL (const_arg1) >= GET_MODE_BITSIZE (mode) | |
3472 || INTVAL (const_arg1) < 0)) | |
3473 { | |
3474 if (SHIFT_COUNT_TRUNCATED) | |
3475 canon_const_arg1 = GEN_INT (INTVAL (const_arg1) | |
3476 & (GET_MODE_BITSIZE (mode) | |
3477 - 1)); | |
3478 else | |
3479 break; | |
3480 } | |
3481 | |
3482 y = lookup_as_function (folded_arg0, code); | |
3483 if (y == 0) | |
3484 break; | |
3485 | |
3486 /* If we have compiled a statement like | |
3487 "if (x == (x & mask1))", and now are looking at | |
3488 "x & mask2", we will have a case where the first operand | |
3489 of Y is the same as our first operand. Unless we detect | |
3490 this case, an infinite loop will result. */ | |
3491 if (XEXP (y, 0) == folded_arg0) | |
3492 break; | |
3493 | |
3494 inner_const = equiv_constant (fold_rtx (XEXP (y, 1), 0)); | |
3495 if (!inner_const || GET_CODE (inner_const) != CONST_INT) | |
3496 break; | |
3497 | |
3498 /* Don't associate these operations if they are a PLUS with the | |
3499 same constant and it is a power of two. These might be doable | |
3500 with a pre- or post-increment. Similarly for two subtracts of | |
3501 identical powers of two with post decrement. */ | |
3502 | |
3503 if (code == PLUS && const_arg1 == inner_const | |
3504 && ((HAVE_PRE_INCREMENT | |
3505 && exact_log2 (INTVAL (const_arg1)) >= 0) | |
3506 || (HAVE_POST_INCREMENT | |
3507 && exact_log2 (INTVAL (const_arg1)) >= 0) | |
3508 || (HAVE_PRE_DECREMENT | |
3509 && exact_log2 (- INTVAL (const_arg1)) >= 0) | |
3510 || (HAVE_POST_DECREMENT | |
3511 && exact_log2 (- INTVAL (const_arg1)) >= 0))) | |
3512 break; | |
3513 | |
3514 /* ??? Vector mode shifts by scalar | |
3515 shift operand are not supported yet. */ | |
3516 if (is_shift && VECTOR_MODE_P (mode)) | |
3517 break; | |
3518 | |
3519 if (is_shift | |
3520 && (INTVAL (inner_const) >= GET_MODE_BITSIZE (mode) | |
3521 || INTVAL (inner_const) < 0)) | |
3522 { | |
3523 if (SHIFT_COUNT_TRUNCATED) | |
3524 inner_const = GEN_INT (INTVAL (inner_const) | |
3525 & (GET_MODE_BITSIZE (mode) - 1)); | |
3526 else | |
3527 break; | |
3528 } | |
3529 | |
3530 /* Compute the code used to compose the constants. For example, | |
3531 A-C1-C2 is A-(C1 + C2), so if CODE == MINUS, we want PLUS. */ | |
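	    /* Concretely (operands hypothetical): A-C1-C2 composes with
	       PLUS, so (minus:SI (minus:SI A (const_int 4)) (const_int 3))
	       uses new_const = 4+3 and becomes (minus:SI A (const_int 7));
	       shift counts likewise add even though the outer code is a
	       shift.  */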
3532 | |
3533 associate_code = (is_shift || code == MINUS ? PLUS : code); | |
3534 | |
3535 new_const = simplify_binary_operation (associate_code, mode, | |
3536 canon_const_arg1, | |
3537 inner_const); | |
3538 | |
3539 if (new_const == 0) | |
3540 break; | |
3541 | |
3542 /* If we are associating shift operations, don't let this | |
3543 produce a shift of the size of the object or larger. | |
3544 This could occur when we follow a sign-extend by a right | |
3545 shift on a machine that does a sign-extend as a pair | |
3546 of shifts. */ | |
3547 | |
3548 if (is_shift | |
3549 && GET_CODE (new_const) == CONST_INT | |
3550 && INTVAL (new_const) >= GET_MODE_BITSIZE (mode)) | |
3551 { | |
3552 /* As an exception, we can turn an ASHIFTRT of this | |
3553 form into a shift of the number of bits - 1. */ | |
3554 if (code == ASHIFTRT) | |
3555 new_const = GEN_INT (GET_MODE_BITSIZE (mode) - 1); | |
3556 else if (!side_effects_p (XEXP (y, 0))) | |
3557 return CONST0_RTX (mode); | |
3558 else | |
3559 break; | |
3560 } | |
3561 | |
3562 y = copy_rtx (XEXP (y, 0)); | |
3563 | |
3564 /* If Y contains our first operand (the most common way this | |
3565 can happen is if Y is a MEM), we would go into an infinite | |
3566 loop if we tried to fold it. So don't in that case. */ | |
3567 | |
3568 if (! reg_mentioned_p (folded_arg0, y)) | |
3569 y = fold_rtx (y, insn); | |
3570 | |
3571 return simplify_gen_binary (code, mode, y, new_const); | |
3572 } | |
3573 break; | |
3574 | |
3575 case DIV: case UDIV: | |
3576 /* ??? The associative optimization performed immediately above is | |
3577 also possible for DIV and UDIV using associate_code of MULT. | |
3578 However, we would need extra code to verify that the | |
3579 multiplication does not overflow, that is, there is no overflow | |
3580 in the calculation of new_const. */ | |
3581 break; | |
3582 | |
3583 default: | |
3584 break; | |
3585 } | |
3586 | |
3587 new_rtx = simplify_binary_operation (code, mode, | |
3588 const_arg0 ? const_arg0 : folded_arg0, | |
3589 const_arg1 ? const_arg1 : folded_arg1); | |
3590 break; | |
3591 | |
3592 case RTX_OBJ: | |
3593 /* (lo_sum (high X) X) is simply X. */ | |
3594 if (code == LO_SUM && const_arg0 != 0 | |
3595 && GET_CODE (const_arg0) == HIGH | |
3596 && rtx_equal_p (XEXP (const_arg0, 0), const_arg1)) | |
3597 return const_arg1; | |
3598 break; | |
3599 | |
3600 case RTX_TERNARY: | |
3601 case RTX_BITFIELD_OPS: | |
3602 new_rtx = simplify_ternary_operation (code, mode, mode_arg0, | |
3603 const_arg0 ? const_arg0 : folded_arg0, | |
3604 const_arg1 ? const_arg1 : folded_arg1, | |
3605 const_arg2 ? const_arg2 : XEXP (x, 2)); | |
3606 break; | |
3607 | |
3608 default: | |
3609 break; | |
3610 } | |
3611 | |
3612 return new_rtx ? new_rtx : x; | |
3613 } | |
3614 | |
3615 /* Return a constant value currently equivalent to X. | |
3616 Return 0 if we don't know one. */ | |
3617 | |
3618 static rtx | |
3619 equiv_constant (rtx x) | |
3620 { | |
3621 if (REG_P (x) | |
3622 && REGNO_QTY_VALID_P (REGNO (x))) | |
3623 { | |
3624 int x_q = REG_QTY (REGNO (x)); | |
3625 struct qty_table_elem *x_ent = &qty_table[x_q]; | |
3626 | |
3627 if (x_ent->const_rtx) | |
3628 x = gen_lowpart (GET_MODE (x), x_ent->const_rtx); | |
3629 } | |
3630 | |
3631 if (x == 0 || CONSTANT_P (x)) | |
3632 return x; | |
3633 | |
3634 if (GET_CODE (x) == SUBREG) | |
3635 { | |
3636 enum machine_mode mode = GET_MODE (x); | |
3637 enum machine_mode imode = GET_MODE (SUBREG_REG (x)); | |
3638 rtx new_rtx; | |
3639 | |
3640 /* See if we previously assigned a constant value to this SUBREG. */ | |
3641 if ((new_rtx = lookup_as_function (x, CONST_INT)) != 0 | |
3642 || (new_rtx = lookup_as_function (x, CONST_DOUBLE)) != 0 | |
3643 || (new_rtx = lookup_as_function (x, CONST_FIXED)) != 0) | |
3644 return new_rtx; | |
3645 | |
3646 /* If we didn't and if doing so makes sense, see if we previously | |
3647 assigned a constant value to the enclosing word mode SUBREG. */ | |
3648 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE (word_mode) | |
3649 && GET_MODE_SIZE (word_mode) < GET_MODE_SIZE (imode)) | |
3650 { | |
3651 int byte = SUBREG_BYTE (x) - subreg_lowpart_offset (mode, word_mode); | |
3652 if (byte >= 0 && (byte % UNITS_PER_WORD) == 0) | |
3653 { | |
3654 rtx y = gen_rtx_SUBREG (word_mode, SUBREG_REG (x), byte); | |
3655 new_rtx = lookup_as_function (y, CONST_INT); | |
3656 if (new_rtx) | |
3657 return gen_lowpart (mode, new_rtx); | |
3658 } | |
3659 } | |
3660 | |
3661 /* Otherwise see if we already have a constant for the inner REG. */ | |
3662 if (REG_P (SUBREG_REG (x)) | |
3663 && (new_rtx = equiv_constant (SUBREG_REG (x))) != 0) | |
3664 return simplify_subreg (mode, new_rtx, imode, SUBREG_BYTE (x)); | |
3665 | |
3666 return 0; | |
3667 } | |
3668 | |
3669 /* If X is a MEM, see if it is a constant-pool reference, or look it up in | |
3670 the hash table in case its value was seen before. */ | |
3671 | |
3672 if (MEM_P (x)) | |
3673 { | |
3674 struct table_elt *elt; | |
3675 | |
3676 x = avoid_constant_pool_reference (x); | |
3677 if (CONSTANT_P (x)) | |
3678 return x; | |
3679 | |
3680 elt = lookup (x, SAFE_HASH (x, GET_MODE (x)), GET_MODE (x)); | |
3681 if (elt == 0) | |
3682 return 0; | |
3683 | |
3684 for (elt = elt->first_same_value; elt; elt = elt->next_same_value) | |
3685 if (elt->is_const && CONSTANT_P (elt->exp)) | |
3686 return elt->exp; | |
3687 } | |
3688 | |
3689 return 0; | |
3690 } | |
3691 | |
3692 /* Given INSN, a jump insn, TAKEN indicates whether we are following the | |
3693 "taken" branch. | |
3694 | |
3695 In certain cases, this can cause us to add an equivalence. For example, | |
3696 if we are following the taken case of | |
3697 if (i == 2) | |
3698 we can add the fact that `i' and `2' are now equivalent. | |
3699 | |
3700 In any case, we can record that this comparison was passed. If the same | |
3701 comparison is seen later, we will know its value. */ | |
3702 | |
3703 static void | |
3704 record_jump_equiv (rtx insn, bool taken) | |
3705 { | |
3706 int cond_known_true; | |
3707 rtx op0, op1; | |
3708 rtx set; | |
3709 enum machine_mode mode, mode0, mode1; | |
3710 int reversed_nonequality = 0; | |
3711 enum rtx_code code; | |
3712 | |
3713 /* Ensure this is the right kind of insn. */ | |
3714 gcc_assert (any_condjump_p (insn)); | |
3715 | |
3716 set = pc_set (insn); | |
3717 | |
3718 /* See if this jump condition is known true or false. */ | |
3719 if (taken) | |
3720 cond_known_true = (XEXP (SET_SRC (set), 2) == pc_rtx); | |
3721 else | |
3722 cond_known_true = (XEXP (SET_SRC (set), 1) == pc_rtx); | |
3723 | |
3724 /* Get the type of comparison being done and the operands being compared. | |
3725 If we had to reverse a non-equality condition, record that fact so we | |
3726 know that it isn't valid for floating-point. */ | |
3727 code = GET_CODE (XEXP (SET_SRC (set), 0)); | |
3728 op0 = fold_rtx (XEXP (XEXP (SET_SRC (set), 0), 0), insn); | |
3729 op1 = fold_rtx (XEXP (XEXP (SET_SRC (set), 0), 1), insn); | |
3730 | |
3731 code = find_comparison_args (code, &op0, &op1, &mode0, &mode1); | |
3732 if (! cond_known_true) | |
3733 { | |
3734 code = reversed_comparison_code_parts (code, op0, op1, insn); | |
3735 | |
3736 /* Don't remember if we can't find the inverse. */ | |
3737 if (code == UNKNOWN) | |
3738 return; | |
3739 } | |
3740 | |
3741 /* The mode is the mode of the non-constant. */ | |
3742 mode = mode0; | |
3743 if (mode1 != VOIDmode) | |
3744 mode = mode1; | |
3745 | |
3746 record_jump_cond (code, mode, op0, op1, reversed_nonequality); | |
3747 } | |
3748 | |
3749 /* Yet another form of subreg creation. In this case, we want something in | |
3750 MODE, and we should assume OP has MODE iff it is naturally modeless. */ | |
3751 | |
3752 static rtx | |
3753 record_jump_cond_subreg (enum machine_mode mode, rtx op) | |
3754 { | |
3755 enum machine_mode op_mode = GET_MODE (op); | |
3756 if (op_mode == mode || op_mode == VOIDmode) | |
3757 return op; | |
3758 return lowpart_subreg (mode, op, op_mode); | |
3759 } | |
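/* For instance, asking for SImode (operands hypothetical): given
   (reg:DI 100) this returns (subreg:SI (reg:DI 100) ...) via
   lowpart_subreg, while a modeless (const_int 5) comes back
   unchanged, being treated as already having the requested mode.  */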
3760 | |
3761 /* We know that comparison CODE applied to OP0 and OP1 in MODE is true. | |
3762 REVERSED_NONEQUALITY is nonzero if CODE had to be swapped. | |
3763 Make any useful entries we can with that information. Called from | |
3764 above function and called recursively. */ | |
3765 | |
3766 static void | |
3767 record_jump_cond (enum rtx_code code, enum machine_mode mode, rtx op0, | |
3768 rtx op1, int reversed_nonequality) | |
3769 { | |
3770 unsigned op0_hash, op1_hash; | |
3771 int op0_in_memory, op1_in_memory; | |
3772 struct table_elt *op0_elt, *op1_elt; | |
3773 | |
3774 /* If OP0 and OP1 are known equal, and either is a paradoxical SUBREG, | |
3775 we know that they are also equal in the smaller mode (this is also | |
3776 true for all smaller modes whether or not there is a SUBREG, but | |
3777 is not worth testing for with no SUBREG). */ | |
3778 | |
3779 /* Note that GET_MODE (op0) may not equal MODE. */ | |
3780 if (code == EQ && GET_CODE (op0) == SUBREG | |
3781 && (GET_MODE_SIZE (GET_MODE (op0)) | |
3782 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0))))) | |
3783 { | |
3784 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0)); | |
3785 rtx tem = record_jump_cond_subreg (inner_mode, op1); | |
3786 if (tem) | |
3787 record_jump_cond (code, mode, SUBREG_REG (op0), tem, | |
3788 reversed_nonequality); | |
3789 } | |
3790 | |
3791 if (code == EQ && GET_CODE (op1) == SUBREG | |
3792 && (GET_MODE_SIZE (GET_MODE (op1)) | |
3793 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1))))) | |
3794 { | |
3795 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op1)); | |
3796 rtx tem = record_jump_cond_subreg (inner_mode, op0); | |
3797 if (tem) | |
3798 record_jump_cond (code, mode, SUBREG_REG (op1), tem, | |
3799 reversed_nonequality); | |
3800 } | |
3801 | |
3802 /* Similarly, if this is an NE comparison, and either is a SUBREG | |
3803 making a smaller mode, we know the whole thing is also NE. */ | |
3804 | |
3805 /* Note that GET_MODE (op0) may not equal MODE; | |
3806 if we test MODE instead, we can get an infinite recursion | |
3807 alternating between two modes each wider than MODE. */ | |
3808 | |
3809 if (code == NE && GET_CODE (op0) == SUBREG | |
3810 && subreg_lowpart_p (op0) | |
3811 && (GET_MODE_SIZE (GET_MODE (op0)) | |
3812 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0))))) | |
3813 { | |
3814 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0)); | |
3815 rtx tem = record_jump_cond_subreg (inner_mode, op1); | |
3816 if (tem) | |
3817 record_jump_cond (code, mode, SUBREG_REG (op0), tem, | |
3818 reversed_nonequality); | |
3819 } | |
3820 | |
3821 if (code == NE && GET_CODE (op1) == SUBREG | |
3822 && subreg_lowpart_p (op1) | |
3823 && (GET_MODE_SIZE (GET_MODE (op1)) | |
3824 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1))))) | |
3825 { | |
3826 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op1)); | |
3827 rtx tem = record_jump_cond_subreg (inner_mode, op0); | |
3828 if (tem) | |
3829 record_jump_cond (code, mode, SUBREG_REG (op1), tem, | |
3830 reversed_nonequality); | |
3831 } | |
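  /* To make the SUBREG cases concrete (operands hypothetical): from
     (eq (subreg:DI (reg:SI 110) 0) (reg:DI 111)) we also record that
     (reg:SI 110) equals the SImode low part of (reg:DI 111); from
     (ne (subreg:QI (reg:SI 112) 0) x) we also record that
     (reg:SI 112) differs from the SImode version of x, since
     differing low parts imply differing wholes.  */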
3832 | |
3833 /* Hash both operands. */ | |
3834 | |
3835 do_not_record = 0; | |
3836 hash_arg_in_memory = 0; | |
3837 op0_hash = HASH (op0, mode); | |
3838 op0_in_memory = hash_arg_in_memory; | |
3839 | |
3840 if (do_not_record) | |
3841 return; | |
3842 | |
3843 do_not_record = 0; | |
3844 hash_arg_in_memory = 0; | |
3845 op1_hash = HASH (op1, mode); | |
3846 op1_in_memory = hash_arg_in_memory; | |
3847 | |
3848 if (do_not_record) | |
3849 return; | |
3850 | |
3851 /* Look up both operands. */ | |
3852 op0_elt = lookup (op0, op0_hash, mode); | |
3853 op1_elt = lookup (op1, op1_hash, mode); | |
3854 | |
3855 /* If both operands are already equivalent or if they are not in the | |
3856 table but are identical, do nothing. */ | |
3857 if ((op0_elt != 0 && op1_elt != 0 | |
3858 && op0_elt->first_same_value == op1_elt->first_same_value) | |
3859 || op0 == op1 || rtx_equal_p (op0, op1)) | |
3860 return; | |
3861 | |
3862 /* If we aren't setting two things equal, all we can do is save this | |
3863 comparison. Similarly if this is floating-point. In the latter | |
3864 case, OP1 might be zero and both -0.0 and 0.0 are equal to it. | |
3865 If we record the equality, we might inadvertently delete code | |
3866 whose intent was to change -0 to +0. */ | |
3867 | |
3868 if (code != EQ || FLOAT_MODE_P (GET_MODE (op0))) | |
3869 { | |
3870 struct qty_table_elem *ent; | |
3871 int qty; | |
3872 | |
3873 /* If we reversed a floating-point comparison, if OP0 is not a | |
3874 register, or if OP1 is neither a register nor a constant, we can't | |
3875 do anything. */ | |
3876 | |
3877 if (!REG_P (op1)) | |
3878 op1 = equiv_constant (op1); | |
3879 | |
3880 if ((reversed_nonequality && FLOAT_MODE_P (mode)) | |
3881 || !REG_P (op0) || op1 == 0) | |
3882 return; | |
3883 | |
3884 /* Put OP0 in the hash table if it isn't already. This gives it a | |
3885 new quantity number. */ | |
3886 if (op0_elt == 0) | |
3887 { | |
3888 if (insert_regs (op0, NULL, 0)) | |
3889 { | |
3890 rehash_using_reg (op0); | |
3891 op0_hash = HASH (op0, mode); | |
3892 | |
3893 /* If OP0 is contained in OP1, this changes its hash code | |
3894 as well. Faster to rehash than to check, except | |
3895 for the simple case of a constant. */ | |
3896 if (! CONSTANT_P (op1)) | |
3897 op1_hash = HASH (op1, mode); | |
3898 } | |
3899 | |
3900 op0_elt = insert (op0, NULL, op0_hash, mode); | |
3901 op0_elt->in_memory = op0_in_memory; | |
3902 } | |
3903 | |
3904 qty = REG_QTY (REGNO (op0)); | |
3905 ent = &qty_table[qty]; | |
3906 | |
3907 ent->comparison_code = code; | |
3908 if (REG_P (op1)) | |
3909 { | |
3910 /* Look it up again--in case op0 and op1 are the same. */ | |
3911 op1_elt = lookup (op1, op1_hash, mode); | |
3912 | |
3913 /* Put OP1 in the hash table so it gets a new quantity number. */ | |
3914 if (op1_elt == 0) | |
3915 { | |
3916 if (insert_regs (op1, NULL, 0)) | |
3917 { | |
3918 rehash_using_reg (op1); | |
3919 op1_hash = HASH (op1, mode); | |
3920 } | |
3921 | |
3922 op1_elt = insert (op1, NULL, op1_hash, mode); | |
3923 op1_elt->in_memory = op1_in_memory; | |
3924 } | |
3925 | |
3926 ent->comparison_const = NULL_RTX; | |
3927 ent->comparison_qty = REG_QTY (REGNO (op1)); | |
3928 } | |
3929 else | |
3930 { | |
3931 ent->comparison_const = op1; | |
3932 ent->comparison_qty = -1; | |
3933 } | |
3934 | |
3935 return; | |
3936 } | |
3937 | |
3938 /* If either side is still missing an equivalence, make it now, | |
3939 then merge the equivalences. */ | |
3940 | |
3941 if (op0_elt == 0) | |
3942 { | |
3943 if (insert_regs (op0, NULL, 0)) | |
3944 { | |
3945 rehash_using_reg (op0); | |
3946 op0_hash = HASH (op0, mode); | |
3947 } | |
3948 | |
3949 op0_elt = insert (op0, NULL, op0_hash, mode); | |
3950 op0_elt->in_memory = op0_in_memory; | |
3951 } | |
3952 | |
3953 if (op1_elt == 0) | |
3954 { | |
3955 if (insert_regs (op1, NULL, 0)) | |
3956 { | |
3957 rehash_using_reg (op1); | |
3958 op1_hash = HASH (op1, mode); | |
3959 } | |
3960 | |
3961 op1_elt = insert (op1, NULL, op1_hash, mode); | |
3962 op1_elt->in_memory = op1_in_memory; | |
3963 } | |
3964 | |
3965 merge_equiv_classes (op0_elt, op1_elt); | |
3966 } | |
3967 | |
3968 /* CSE processing for one instruction. | |
3969 First simplify sources and addresses of all assignments | |
3970 in the instruction, using previously-computed equivalent values. | |
3971 Then install the new sources and destinations in the table | |
3972 of available values. */ | |
3973 | |
3974 /* Data on one SET contained in the instruction. */ | |
3975 | |
3976 struct set | |
3977 { | |
3978 /* The SET rtx itself. */ | |
3979 rtx rtl; | |
3980 /* The SET_SRC of the rtx (the original value, if it is changing). */ | |
3981 rtx src; | |
3982 /* The hash-table element for the SET_SRC of the SET. */ | |
3983 struct table_elt *src_elt; | |
3984 /* Hash value for the SET_SRC. */ | |
3985 unsigned src_hash; | |
3986 /* Hash value for the SET_DEST. */ | |
3987 unsigned dest_hash; | |
3988 /* The SET_DEST, with SUBREG, etc., stripped. */ | |
3989 rtx inner_dest; | |
3990 /* Nonzero if the SET_SRC is in memory. */ | |
3991 char src_in_memory; | |
3992 /* Nonzero if the SET_SRC contains something | |
3993 whose value cannot be predicted and understood. */ | |
3994 char src_volatile; | |
3995 /* Original machine mode, in case it becomes a CONST_INT. | |
3996 The size of this field should match the size of the mode | |
3997 field of struct rtx_def (see rtl.h). */ | |
3998 ENUM_BITFIELD(machine_mode) mode : 8; | |
3999 /* A constant equivalent for SET_SRC, if any. */ | |
4000 rtx src_const; | |
4001 /* Hash value of constant equivalent for SET_SRC. */ | |
4002 unsigned src_const_hash; | |
4003 /* Table entry for constant equivalent for SET_SRC, if any. */ | |
4004 struct table_elt *src_const_elt; | |
4005 /* Table entry for the destination address. */ | |
4006 struct table_elt *dest_addr_elt; | |
4007 }; | |
4008 | |
4009 static void | |
4010 cse_insn (rtx insn) | |
4011 { | |
4012 rtx x = PATTERN (insn); | |
4013 int i; | |
4014 rtx tem; | |
4015 int n_sets = 0; | |
4016 | |
4017 rtx src_eqv = 0; | |
4018 struct table_elt *src_eqv_elt = 0; | |
4019 int src_eqv_volatile = 0; | |
4020 int src_eqv_in_memory = 0; | |
4021 unsigned src_eqv_hash = 0; | |
4022 | |
4023 struct set *sets = (struct set *) 0; | |
4024 | |
4025 this_insn = insn; | |
4026 #ifdef HAVE_cc0 | |
4027 /* Records what this insn does to set CC0. */ | |
4028 this_insn_cc0 = 0; | |
4029 this_insn_cc0_mode = VOIDmode; | |
4030 #endif | |
4031 | |
4032 /* Find all the SETs and CLOBBERs in this instruction. | |
4033 Record all the SETs in the array `sets' and count them. | |
4034 Also determine whether there is a CLOBBER that invalidates | |
4035 all memory references, or all references at varying addresses. */ | |
4036 | |
4037 if (CALL_P (insn)) | |
4038 { | |
4039 for (tem = CALL_INSN_FUNCTION_USAGE (insn); tem; tem = XEXP (tem, 1)) | |
4040 { | |
4041 if (GET_CODE (XEXP (tem, 0)) == CLOBBER) | |
4042 invalidate (SET_DEST (XEXP (tem, 0)), VOIDmode); | |
4043 XEXP (tem, 0) = canon_reg (XEXP (tem, 0), insn); | |
4044 } | |
4045 } | |
4046 | |
4047 if (GET_CODE (x) == SET) | |
4048 { | |
4049 sets = XALLOCA (struct set); | |
4050 sets[0].rtl = x; | |
4051 | |
4052 /* Ignore SETs that are unconditional jumps. | |
4053 They never need cse processing, so this does not hurt. | |
4054 The reason is not efficiency but rather | |
4055 so that we can test at the end for instructions | |
4056 that have been simplified to unconditional jumps | |
4057 and not be misled by unchanged instructions | |
4058 that were unconditional jumps to begin with. */ | |
4059 if (SET_DEST (x) == pc_rtx | |
4060 && GET_CODE (SET_SRC (x)) == LABEL_REF) | |
4061 ; | |
4062 | |
4063 /* Don't count call-insns, (set (reg 0) (call ...)), as a set. | |
4064 The hard function value register is used only once, to copy to | |
4065 someplace else, so it isn't worth cse'ing (and on 80386 is unsafe)! | |
4066 Ensure we invalidate the destination register. On the 80386 no | |
4067 other code would invalidate it since it is a fixed_reg. | |
4068 We need not check the return of apply_change_group; see canon_reg. */ | |
4069 | |
4070 else if (GET_CODE (SET_SRC (x)) == CALL) | |
4071 { | |
4072 canon_reg (SET_SRC (x), insn); | |
4073 apply_change_group (); | |
4074 fold_rtx (SET_SRC (x), insn); | |
4075 invalidate (SET_DEST (x), VOIDmode); | |
4076 } | |
4077 else | |
4078 n_sets = 1; | |
4079 } | |
4080 else if (GET_CODE (x) == PARALLEL) | |
4081 { | |
4082 int lim = XVECLEN (x, 0); | |
4083 | |
4084 sets = XALLOCAVEC (struct set, lim); | |
4085 | |
4086 /* Find all regs explicitly clobbered in this insn, | |
4087 and ensure they are not replaced with any other regs | |
4088 elsewhere in this insn. | |
4089 When a reg that is clobbered is also used for input, | |
4090 we should presume that that is for a reason, | |
4091 and we should not substitute some other register | |
4092 which is not supposed to be clobbered. | |
4093 Therefore, this loop cannot be merged into the one below | |
4094 because a CALL may precede a CLOBBER and refer to the | |
4095 value clobbered. We must not let a canonicalization do | |
4096 anything in that case. */ | |
4097 for (i = 0; i < lim; i++) | |
4098 { | |
4099 rtx y = XVECEXP (x, 0, i); | |
4100 if (GET_CODE (y) == CLOBBER) | |
4101 { | |
4102 rtx clobbered = XEXP (y, 0); | |
4103 | |
4104 if (REG_P (clobbered) | |
4105 || GET_CODE (clobbered) == SUBREG) | |
4106 invalidate (clobbered, VOIDmode); | |
4107 else if (GET_CODE (clobbered) == STRICT_LOW_PART | |
4108 || GET_CODE (clobbered) == ZERO_EXTRACT) | |
4109 invalidate (XEXP (clobbered, 0), GET_MODE (clobbered)); | |
4110 } | |
4111 } | |
4112 | |
4113 for (i = 0; i < lim; i++) | |
4114 { | |
4115 rtx y = XVECEXP (x, 0, i); | |
4116 if (GET_CODE (y) == SET) | |
4117 { | |
4118 /* As above, we ignore unconditional jumps and call-insns and | |
4119 ignore the result of apply_change_group. */ | |
4120 if (GET_CODE (SET_SRC (y)) == CALL) | |
4121 { | |
4122 canon_reg (SET_SRC (y), insn); | |
4123 apply_change_group (); | |
4124 fold_rtx (SET_SRC (y), insn); | |
4125 invalidate (SET_DEST (y), VOIDmode); | |
4126 } | |
4127 else if (SET_DEST (y) == pc_rtx | |
4128 && GET_CODE (SET_SRC (y)) == LABEL_REF) | |
4129 ; | |
4130 else | |
4131 sets[n_sets++].rtl = y; | |
4132 } | |
4133 else if (GET_CODE (y) == CLOBBER) | |
4134 { | |
4135 /* If we clobber memory, canon the address. | |
4136 This does nothing when a register is clobbered | |
4137 because we have already invalidated the reg. */ | |
4138 if (MEM_P (XEXP (y, 0))) | |
4139 canon_reg (XEXP (y, 0), insn); | |
4140 } | |
4141 else if (GET_CODE (y) == USE | |
4142 && ! (REG_P (XEXP (y, 0)) | |
4143 && REGNO (XEXP (y, 0)) < FIRST_PSEUDO_REGISTER)) | |
4144 canon_reg (y, insn); | |
4145 else if (GET_CODE (y) == CALL) | |
4146 { | |
4147 /* The result of apply_change_group can be ignored; see | |
4148 canon_reg. */ | |
4149 canon_reg (y, insn); | |
4150 apply_change_group (); | |
4151 fold_rtx (y, insn); | |
4152 } | |
4153 } | |
4154 } | |
4155 else if (GET_CODE (x) == CLOBBER) | |
4156 { | |
4157 if (MEM_P (XEXP (x, 0))) | |
4158 canon_reg (XEXP (x, 0), insn); | |
4159 } | |
4160 | |
4161 /* Canonicalize a USE of a pseudo register or memory location. */ | |
4162 else if (GET_CODE (x) == USE | |
4163 && ! (REG_P (XEXP (x, 0)) | |
4164 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER)) | |
4165 canon_reg (XEXP (x, 0), insn); | |
4166 else if (GET_CODE (x) == CALL) | |
4167 { | |
4168 /* The result of apply_change_group can be ignored; see canon_reg. */ | |
4169 canon_reg (x, insn); | |
4170 apply_change_group (); | |
4171 fold_rtx (x, insn); | |
4172 } | |
4173 | |
4174 /* Store the equivalent value in SRC_EQV, if different, or if the DEST | |
4175 is a STRICT_LOW_PART. The latter condition is necessary because SRC_EQV | |
4176 is handled specially for this case, and if it isn't set, then there will | |
4177 be no equivalence for the destination. */ | |
4178 if (n_sets == 1 && REG_NOTES (insn) != 0 | |
4179 && (tem = find_reg_note (insn, REG_EQUAL, NULL_RTX)) != 0 | |
4180 && (! rtx_equal_p (XEXP (tem, 0), SET_SRC (sets[0].rtl)) | |
4181 || GET_CODE (SET_DEST (sets[0].rtl)) == STRICT_LOW_PART)) | |
4182 { | |
4183 /* The result of apply_change_group can be ignored; see canon_reg. */ | |
4184 canon_reg (XEXP (tem, 0), insn); | |
4185 apply_change_group (); | |
4186 src_eqv = fold_rtx (XEXP (tem, 0), insn); | |
4187 XEXP (tem, 0) = copy_rtx (src_eqv); | |
4188 df_notes_rescan (insn); | |
4189 } | |
4190 | |
4191 /* Canonicalize sources and addresses of destinations. | |
4192 We do this in a separate pass to avoid problems when a MATCH_DUP is | |
4193 present in the insn pattern. In that case, we want to ensure that | |
4194 we don't break the duplicate nature of the pattern. So we will replace | |
4195 both operands at the same time. Otherwise, we would fail to find an | |
4196 equivalent substitution in the loop calling validate_change below. | |
4197 | |
4198 We used to suppress canonicalization of DEST if it appears in SRC, | |
4199 but we don't do this any more. */ | |
4200 | |
4201 for (i = 0; i < n_sets; i++) | |
4202 { | |
4203 rtx dest = SET_DEST (sets[i].rtl); | |
4204 rtx src = SET_SRC (sets[i].rtl); | |
4205 rtx new_rtx = canon_reg (src, insn); | |
4206 | |
4207 validate_change (insn, &SET_SRC (sets[i].rtl), new_rtx, 1); | |
4208 | |
4209 if (GET_CODE (dest) == ZERO_EXTRACT) | |
4210 { | |
4211 validate_change (insn, &XEXP (dest, 1), | |
4212 canon_reg (XEXP (dest, 1), insn), 1); | |
4213 validate_change (insn, &XEXP (dest, 2), | |
4214 canon_reg (XEXP (dest, 2), insn), 1); | |
4215 } | |
4216 | |
4217 while (GET_CODE (dest) == SUBREG | |
4218 || GET_CODE (dest) == ZERO_EXTRACT | |
4219 || GET_CODE (dest) == STRICT_LOW_PART) | |
4220 dest = XEXP (dest, 0); | |
4221 | |
4222 if (MEM_P (dest)) | |
4223 canon_reg (dest, insn); | |
4224 } | |
4225 | |
4226 /* Now that we have done all the replacements, we can apply the change | |
4227 group and see if they all work. Note that this will cause some | |
4228 canonicalizations that would have worked individually not to be applied | |
4229 because some other canonicalization didn't work, but this should not | |
4230 occur often. | |
4231 | |
4232 The result of apply_change_group can be ignored; see canon_reg. */ | |
4233 | |
4234 apply_change_group (); | |
4235 | |
4236 /* Set sets[i].src_elt to the class each source belongs to. | |
4237 Detect assignments from or to volatile things | |
4238 and set sets[i] to zero so they will be ignored | |
4239 in the rest of this function. | |
4240 | |
4241 Nothing in this loop changes the hash table or the register chains. */ | |
4242 | |
4243 for (i = 0; i < n_sets; i++) | |
4244 { | |
4245 rtx src, dest; | |
4246 rtx src_folded; | |
4247 struct table_elt *elt = 0, *p; | |
4248 enum machine_mode mode; | |
4249 rtx src_eqv_here; | |
4250 rtx src_const = 0; | |
4251 rtx src_related = 0; | |
4252 struct table_elt *src_const_elt = 0; | |
4253 int src_cost = MAX_COST; | |
4254 int src_eqv_cost = MAX_COST; | |
4255 int src_folded_cost = MAX_COST; | |
4256 int src_related_cost = MAX_COST; | |
4257 int src_elt_cost = MAX_COST; | |
4258 int src_regcost = MAX_COST; | |
4259 int src_eqv_regcost = MAX_COST; | |
4260 int src_folded_regcost = MAX_COST; | |
4261 int src_related_regcost = MAX_COST; | |
4262 int src_elt_regcost = MAX_COST; | |
4263 /* Set nonzero if we need to call force_const_mem on the | |
4264 contents of src_folded before using it. */ | |
4265 int src_folded_force_flag = 0; | |
4266 | |
4267 dest = SET_DEST (sets[i].rtl); | |
4268 src = SET_SRC (sets[i].rtl); | |
4269 | |
4270 /* If SRC is a constant that has no machine mode, | |
4271 hash it with the destination's machine mode. | |
4272 This way we can keep different modes separate. */ | |
4273 | |
4274 mode = GET_MODE (src) == VOIDmode ? GET_MODE (dest) : GET_MODE (src); | |
4275 sets[i].mode = mode; | |
4276 | |
4277 if (src_eqv) | |
4278 { | |
4279 enum machine_mode eqvmode = mode; | |
4280 if (GET_CODE (dest) == STRICT_LOW_PART) | |
4281 eqvmode = GET_MODE (SUBREG_REG (XEXP (dest, 0))); | |
4282 do_not_record = 0; | |
4283 hash_arg_in_memory = 0; | |
4284 src_eqv_hash = HASH (src_eqv, eqvmode); | |
4285 | |
4286 /* Find the equivalence class for the equivalent expression. */ | |
4287 | |
4288 if (!do_not_record) | |
4289 src_eqv_elt = lookup (src_eqv, src_eqv_hash, eqvmode); | |
4290 | |
4291 src_eqv_volatile = do_not_record; | |
4292 src_eqv_in_memory = hash_arg_in_memory; | |
4293 } | |
4294 | |
4295 /* If this is a STRICT_LOW_PART assignment, src_eqv corresponds to the | |
4296 value of the INNER register, not the destination. So it is not | |
4297 a valid substitution for the source. But save it for later. */ | |
4298 if (GET_CODE (dest) == STRICT_LOW_PART) | |
4299 src_eqv_here = 0; | |
4300 else | |
4301 src_eqv_here = src_eqv; | |
4302 | |
4303 /* Simplify any foldable subexpressions in SRC. Then get the fully- | |
4304 simplified result, which may not necessarily be valid. */ | |
4305 src_folded = fold_rtx (src, insn); | |
4306 | |
4307 #if 0 | |
4308 /* ??? This caused bad code to be generated for the m68k port with -O2. | |
4309 Suppose src is (CONST_INT -1), and that after truncation src_folded | |
4310 is (CONST_INT 3). Suppose src_folded is then used for src_const. | |
4311 At the end we will add src and src_const to the same equivalence | |
4312 class. We now have 3 and -1 on the same equivalence class. This | |
4313 causes later instructions to be mis-optimized. */ | |
4314 /* If storing a constant in a bitfield, pre-truncate the constant | |
4315 so we will be able to record it later. */ | |
4316 if (GET_CODE (SET_DEST (sets[i].rtl)) == ZERO_EXTRACT) | |
4317 { | |
4318 rtx width = XEXP (SET_DEST (sets[i].rtl), 1); | |
4319 | |
4320 if (GET_CODE (src) == CONST_INT | |
4321 && GET_CODE (width) == CONST_INT | |
4322 && INTVAL (width) < HOST_BITS_PER_WIDE_INT | |
4323 && (INTVAL (src) & ((HOST_WIDE_INT) (-1) << INTVAL (width)))) | |
4324 src_folded | |
4325 = GEN_INT (INTVAL (src) & (((HOST_WIDE_INT) 1 | |
4326 << INTVAL (width)) - 1)); | |
4327 } | |
4328 #endif | |
4329 | |
4330 /* Compute SRC's hash code, and also notice if it | |
4331 should not be recorded at all. In that case, | |
4332 prevent any further processing of this assignment. */ | |
4333 do_not_record = 0; | |
4334 hash_arg_in_memory = 0; | |
4335 | |
4336 sets[i].src = src; | |
4337 sets[i].src_hash = HASH (src, mode); | |
4338 sets[i].src_volatile = do_not_record; | |
4339 sets[i].src_in_memory = hash_arg_in_memory; | |
4340 | |
4341 /* If SRC is a MEM, there is a REG_EQUIV note for SRC, and DEST is | |
4342 a pseudo, do not record SRC. Using SRC as a replacement for | |
4343 anything else will be incorrect in that situation. Note that | |
4344 this usually occurs only for stack slots, in which case all the | |
4345 RTL would be referring to SRC, so we don't lose any optimization | |
4346 opportunities by not having SRC in the hash table. */ | |
4347 | |
4348 if (MEM_P (src) | |
4349 && find_reg_note (insn, REG_EQUIV, NULL_RTX) != 0 | |
4350 && REG_P (dest) | |
4351 && REGNO (dest) >= FIRST_PSEUDO_REGISTER) | |
4352 sets[i].src_volatile = 1; | |
4353 | |
4354 #if 0 | |
4355 /* It is no longer clear why we used to do this, but it doesn't | |
4356 appear to still be needed. So let's try without it since this | |
4357 code hurts cse'ing widened ops. */ | |
4358 /* If source is a paradoxical subreg (such as QI treated as an SI), | |
4359 treat it as volatile. It may do the work of an SI in one context | |
4360 where the extra bits are not being used, but cannot replace an SI | |
4361 in general. */ | |
4362 if (GET_CODE (src) == SUBREG | |
4363 && (GET_MODE_SIZE (GET_MODE (src)) | |
4364 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))) | |
4365 sets[i].src_volatile = 1; | |
4366 #endif | |
4367 | |
4368 /* Locate all possible equivalent forms for SRC. Try to replace | |
4369 SRC in the insn with each cheaper equivalent. | |
4370 | |
4371 We have the following types of equivalents: SRC itself, a folded | |
4372 version, a value given in a REG_EQUAL note, or a value related | |
4373 to a constant. | |
4374 | |
4375 Each of these equivalents may be part of an additional class | |
4376 of equivalents (if more than one is in the table, they must be in | |
4377 the same class; we check for this). | |
4378 | |
4379 If the source is volatile, we don't do any table lookups. | |
4380 | |
4381 We note any constant equivalent for possible later use in a | |
4382 REG_NOTE. */ | |
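      /* A sketch of the candidate set (operands hypothetical): for
	 src = (plus:SI (reg:SI 120) (const_int 0)), src_folded is simply
	 (reg:SI 120); src_eqv comes from any REG_EQUAL note on the insn;
	 and src_related, computed below, may be a register already
	 holding a value related to a constant equivalent of SRC.  */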
4383 | |
4384 if (!sets[i].src_volatile) | |
4385 elt = lookup (src, sets[i].src_hash, mode); | |
4386 | |
4387 sets[i].src_elt = elt; | |
4388 | |
4389 if (elt && src_eqv_here && src_eqv_elt) | |
4390 { | |
4391 if (elt->first_same_value != src_eqv_elt->first_same_value) | |
4392 { | |
4393 /* The REG_EQUAL is indicating that two formerly distinct | |
4394 classes are now equivalent. So merge them. */ | |
4395 merge_equiv_classes (elt, src_eqv_elt); | |
4396 src_eqv_hash = HASH (src_eqv, elt->mode); | |
4397 src_eqv_elt = lookup (src_eqv, src_eqv_hash, elt->mode); | |
4398 } | |
4399 | |
4400 src_eqv_here = 0; | |
4401 } | |
4402 | |
4403 else if (src_eqv_elt) | |
4404 elt = src_eqv_elt; | |
4405 | |
4406 /* Try to find a constant somewhere and record it in `src_const'. | |
4407 Record its table element, if any, in `src_const_elt'. Look in | |
4408 any known equivalences first. (If the constant is not in the | |
4409 table, also set `sets[i].src_const_hash'). */ | |
4410 if (elt) | |
4411 for (p = elt->first_same_value; p; p = p->next_same_value) | |
4412 if (p->is_const) | |
4413 { | |
4414 src_const = p->exp; | |
4415 src_const_elt = elt; | |
4416 break; | |
4417 } | |
4418 | |
4419 if (src_const == 0 | |
4420 && (CONSTANT_P (src_folded) | |
4421 /* Consider (minus (label_ref L1) (label_ref L2)) as | |
4422 "constant" here so we will record it. This allows us | |
4423 to fold switch statements when an ADDR_DIFF_VEC is used. */ | |
4424 || (GET_CODE (src_folded) == MINUS | |
4425 && GET_CODE (XEXP (src_folded, 0)) == LABEL_REF | |
4426 && GET_CODE (XEXP (src_folded, 1)) == LABEL_REF))) | |
4427 src_const = src_folded, src_const_elt = elt; | |
4428 else if (src_const == 0 && src_eqv_here && CONSTANT_P (src_eqv_here)) | |
4429 src_const = src_eqv_here, src_const_elt = src_eqv_elt; | |
4430 | |
4431 /* If we don't know if the constant is in the table, get its | |
4432 hash code and look it up. */ | |
4433 if (src_const && src_const_elt == 0) | |
4434 { | |
4435 sets[i].src_const_hash = HASH (src_const, mode); | |
4436 src_const_elt = lookup (src_const, sets[i].src_const_hash, mode); | |
4437 } | |
4438 | |
4439 sets[i].src_const = src_const; | |
4440 sets[i].src_const_elt = src_const_elt; | |
4441 | |
4442 /* If the constant and our source are both in the table, mark them as | |
4443 equivalent. Otherwise, if a constant is in the table but the source | |
4444 isn't, set ELT to it. */ | |
4445 if (src_const_elt && elt | |
4446 && src_const_elt->first_same_value != elt->first_same_value) | |
4447 merge_equiv_classes (elt, src_const_elt); | |
4448 else if (src_const_elt && elt == 0) | |
4449 elt = src_const_elt; | |
4450 | |
4451 /* See if there is a register linearly related to a constant | |
4452 equivalent of SRC. */ | |
4453 if (src_const | |
4454 && (GET_CODE (src_const) == CONST | |
4455 || (src_const_elt && src_const_elt->related_value != 0))) | |
4456 { | |
4457 src_related = use_related_value (src_const, src_const_elt); | |
4458 if (src_related) | |
4459 { | |
4460 struct table_elt *src_related_elt | |
4461 = lookup (src_related, HASH (src_related, mode), mode); | |
4462 if (src_related_elt && elt) | |
4463 { | |
4464 if (elt->first_same_value | |
4465 != src_related_elt->first_same_value) | |
4466 /* This can occur when we previously saw a CONST | |
4467 involving a SYMBOL_REF and then see the SYMBOL_REF | |
4468 twice. Merge the involved classes. */ | |
4469 merge_equiv_classes (elt, src_related_elt); | |
4470 | |
4471 src_related = 0; | |
4472 src_related_elt = 0; | |
4473 } | |
4474 else if (src_related_elt && elt == 0) | |
4475 elt = src_related_elt; | |
4476 } | |
4477 } | |
4478 | |
4479 /* See if we have a CONST_INT that is already in a register in a | |
4480 wider mode. */ | |
4481 | |
4482 if (src_const && src_related == 0 && GET_CODE (src_const) == CONST_INT | |
4483 && GET_MODE_CLASS (mode) == MODE_INT | |
4484 && GET_MODE_BITSIZE (mode) < BITS_PER_WORD) | |
4485 { | |
4486 enum machine_mode wider_mode; | |
4487 | |
4488 for (wider_mode = GET_MODE_WIDER_MODE (mode); | |
4489 wider_mode != VOIDmode | |
4490 && GET_MODE_BITSIZE (wider_mode) <= BITS_PER_WORD | |
4491 && src_related == 0; | |
4492 wider_mode = GET_MODE_WIDER_MODE (wider_mode)) | |
4493 { | |
4494 struct table_elt *const_elt | |
4495 = lookup (src_const, HASH (src_const, wider_mode), wider_mode); | |
4496 | |
4497 if (const_elt == 0) | |
4498 continue; | |
4499 | |
4500 for (const_elt = const_elt->first_same_value; | |
4501 const_elt; const_elt = const_elt->next_same_value) | |
4502 if (REG_P (const_elt->exp)) | |
4503 { | |
4504 src_related = gen_lowpart (mode, const_elt->exp); | |
4505 break; | |
4506 } | |
4507 } | |
4508 } | |
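      /* For example (operands hypothetical): needing (const_int 3) in
	 HImode when (reg:SI 130) is already known to hold (const_int 3)
	 in SImode, we take the HImode low part of (reg:SI 130) rather
	 than materializing the constant again.  */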
4509 | |
4510 /* Another possibility is that we have an AND with a constant in | |
4511 a mode narrower than a word. If so, it might have been generated | |
4512 as part of an "if" which would narrow the AND. If we already | |
4513 have done the AND in a wider mode, we can use a SUBREG of that | |
4514 value. */ | |
4515 | |
4516 if (flag_expensive_optimizations && ! src_related | |
4517 && GET_CODE (src) == AND && GET_CODE (XEXP (src, 1)) == CONST_INT | |
4518 && GET_MODE_SIZE (mode) < UNITS_PER_WORD) | |
4519 { | |
4520 enum machine_mode tmode; | |
4521 rtx new_and = gen_rtx_AND (VOIDmode, NULL_RTX, XEXP (src, 1)); | |
4522 | |
4523 for (tmode = GET_MODE_WIDER_MODE (mode); | |
4524 GET_MODE_SIZE (tmode) <= UNITS_PER_WORD; | |
4525 tmode = GET_MODE_WIDER_MODE (tmode)) | |
4526 { | |
4527 rtx inner = gen_lowpart (tmode, XEXP (src, 0)); | |
4528 struct table_elt *larger_elt; | |
4529 | |
4530 if (inner) | |
4531 { | |
4532 PUT_MODE (new_and, tmode); | |
4533 XEXP (new_and, 0) = inner; | |
4534 larger_elt = lookup (new_and, HASH (new_and, tmode), tmode); | |
4535 if (larger_elt == 0) | |
4536 continue; | |
4537 | |
4538 for (larger_elt = larger_elt->first_same_value; | |
4539 larger_elt; larger_elt = larger_elt->next_same_value) | |
4540 if (REG_P (larger_elt->exp)) | |
4541 { | |
4542 src_related | |
4543 = gen_lowpart (mode, larger_elt->exp); | |
4544 break; | |
4545 } | |
4546 | |
4547 if (src_related) | |
4548 break; | |
4549 } | |
4550 } | |
4551 } | |
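      /* Sketch (operands hypothetical): for src = (and:QI (reg:QI 140)
	 (const_int 127)), if the same AND was already done in SImode on
	 a widened copy of reg 140 and lives in (reg:SI 141), the QImode
	 low part of (reg:SI 141) is exactly the value we want.  */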
4552 | |
4553 #ifdef LOAD_EXTEND_OP | |
4554 /* See if a MEM has already been loaded with a widening operation; | |
4555 if it has, we can use a subreg of that. Many CISC machines | |
4556 also have such operations, but this is only likely to be | |
4557 beneficial on these machines. */ | |
4558 | |
4559 if (flag_expensive_optimizations && src_related == 0 | |
4560 && (GET_MODE_SIZE (mode) < UNITS_PER_WORD) | |
4561 && GET_MODE_CLASS (mode) == MODE_INT | |
4562 && MEM_P (src) && ! do_not_record | |
4563 && LOAD_EXTEND_OP (mode) != UNKNOWN) | |
4564 { | |
4565 struct rtx_def memory_extend_buf; | |
4566 rtx memory_extend_rtx = &memory_extend_buf; | |
4567 enum machine_mode tmode; | |
4568 | |
4569 /* Set what we are trying to extend and the operation it might | |
4570 have been extended with. */ | |
4571 memset (memory_extend_rtx, 0, sizeof (*memory_extend_rtx)); | |
4572 PUT_CODE (memory_extend_rtx, LOAD_EXTEND_OP (mode)); | |
4573 XEXP (memory_extend_rtx, 0) = src; | |
4574 | |
4575 for (tmode = GET_MODE_WIDER_MODE (mode); | |
4576 GET_MODE_SIZE (tmode) <= UNITS_PER_WORD; | |
4577 tmode = GET_MODE_WIDER_MODE (tmode)) | |
4578 { | |
4579 struct table_elt *larger_elt; | |
4580 | |
4581 PUT_MODE (memory_extend_rtx, tmode); | |
4582 larger_elt = lookup (memory_extend_rtx, | |
4583 HASH (memory_extend_rtx, tmode), tmode); | |
4584 if (larger_elt == 0) | |
4585 continue; | |
4586 | |
4587 for (larger_elt = larger_elt->first_same_value; | |
4588 larger_elt; larger_elt = larger_elt->next_same_value) | |
4589 if (REG_P (larger_elt->exp)) | |
4590 { | |
4591 src_related = gen_lowpart (mode, larger_elt->exp); | |
4592 break; | |
4593 } | |
4594 | |
4595 if (src_related) | |
4596 break; | |
4597 } | |
4598 } | |
4599 #endif /* LOAD_EXTEND_OP */ | |
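      /* E.g. on a hypothetical target where LOAD_EXTEND_OP (QImode) is
	 ZERO_EXTEND: after (set (reg:SI 150) (zero_extend:SI (mem:QI A)))
	 a later plain (mem:QI A) can be replaced by the QImode low part
	 of (reg:SI 150), avoiding the second memory reference.  */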
4600 | |
4601 if (src == src_folded) | |
4602 src_folded = 0; | |
4603 | |
4604 /* At this point, ELT, if nonzero, points to a class of expressions | |
4605 equivalent to the source of this SET and SRC, SRC_EQV, SRC_FOLDED, | |
4606 and SRC_RELATED, if nonzero, each contain additional equivalent | |
4607 expressions. Prune these latter expressions by deleting expressions | |
4608 already in the equivalence class. | |
4609 | |
4610 Check for an equivalent identical to the destination. If found, | |
4611 this is the preferred equivalent since it will likely lead to | |
4612 elimination of the insn. Indicate this by placing it in | |
4613 `src_related'. */ | |
4614 | |
4615 if (elt) | |
4616 elt = elt->first_same_value; | |
4617 for (p = elt; p; p = p->next_same_value) | |
4618 { | |
4619 enum rtx_code code = GET_CODE (p->exp); | |
4620 | |
4621 /* If the expression is not valid, ignore it. Then we do not | |
4622 have to check for validity below. In most cases, we can use | |
4623 `rtx_equal_p', since canonicalization has already been done. */ | |
4624 if (code != REG && ! exp_equiv_p (p->exp, p->exp, 1, false)) | |
4625 continue; | |
4626 | |
4627 /* Also skip paradoxical subregs, unless that's what we're | |
4628 looking for. */ | |
4629 if (code == SUBREG | |
4630 && (GET_MODE_SIZE (GET_MODE (p->exp)) | |
4631 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (p->exp)))) | |
4632 && ! (src != 0 | |
4633 && GET_CODE (src) == SUBREG | |
4634 && GET_MODE (src) == GET_MODE (p->exp) | |
4635 && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))) | |
4636 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (p->exp)))))) | |
4637 continue; | |
4638 | |
4639 if (src && GET_CODE (src) == code && rtx_equal_p (src, p->exp)) | |
4640 src = 0; | |
4641 else if (src_folded && GET_CODE (src_folded) == code | |
4642 && rtx_equal_p (src_folded, p->exp)) | |
4643 src_folded = 0; | |
4644 else if (src_eqv_here && GET_CODE (src_eqv_here) == code | |
4645 && rtx_equal_p (src_eqv_here, p->exp)) | |
4646 src_eqv_here = 0; | |
4647 else if (src_related && GET_CODE (src_related) == code | |
4648 && rtx_equal_p (src_related, p->exp)) | |
4649 src_related = 0; | |
4650 | |
4651 /* If this is the same as the destination of the insn, we want | |
4652 to prefer it. Copy it to src_related. The code below will | |
4653 then give it a negative cost. */ | |
4654 if (GET_CODE (dest) == code && rtx_equal_p (p->exp, dest)) | |
4655 src_related = dest; | |
4656 } | |
4657 | |
4658 /* Find the cheapest valid equivalent, trying all the available | |
4659 possibilities. Prefer items not in the hash table to ones | |
4660 that are when they are equal cost. Note that we can never | |
4661 worsen an insn as the current contents will also succeed. | |
4662 If we find an equivalent identical to the destination, use it as best, | |
4663 since this insn will probably be eliminated in that case. */ | |
4664 if (src) | |
4665 { | |
4666 if (rtx_equal_p (src, dest)) | |
4667 src_cost = src_regcost = -1; | |
4668 else | |
4669 { | |
4670 src_cost = COST (src); | |
4671 src_regcost = approx_reg_cost (src); | |
4672 } | |
4673 } | |
4674 | |
4675 if (src_eqv_here) | |
4676 { | |
4677 if (rtx_equal_p (src_eqv_here, dest)) | |
4678 src_eqv_cost = src_eqv_regcost = -1; | |
4679 else | |
4680 { | |
4681 src_eqv_cost = COST (src_eqv_here); | |
4682 src_eqv_regcost = approx_reg_cost (src_eqv_here); | |
4683 } | |
4684 } | |
4685 | |
4686 if (src_folded) | |
4687 { | |
4688 if (rtx_equal_p (src_folded, dest)) | |
4689 src_folded_cost = src_folded_regcost = -1; | |
4690 else | |
4691 { | |
4692 src_folded_cost = COST (src_folded); | |
4693 src_folded_regcost = approx_reg_cost (src_folded); | |
4694 } | |
4695 } | |
4696 | |
4697 if (src_related) | |
4698 { | |
4699 if (rtx_equal_p (src_related, dest)) | |
4700 src_related_cost = src_related_regcost = -1; | |
4701 else | |
4702 { | |
4703 src_related_cost = COST (src_related); | |
4704 src_related_regcost = approx_reg_cost (src_related); | |
4705 } | |
4706 } | |
4707 | |
4708 /* If this was an indirect jump insn, a known label will really be | |
4709 cheaper even though it looks more expensive. */ | |
4710 if (dest == pc_rtx && src_const && GET_CODE (src_const) == LABEL_REF) | |
4711 src_folded = src_const, src_folded_cost = src_folded_regcost = -1; | |
4712 | |
4713 /* Terminate loop when replacement made. This must terminate since | |
4714 the current contents will be tested and will always be valid. */ | |
4715 while (1) | |
4716 { | |
4717 rtx trial; | |
4718 | |
4719 /* Skip invalid entries. */ | |
4720 while (elt && !REG_P (elt->exp) | |
4721 && ! exp_equiv_p (elt->exp, elt->exp, 1, false)) | |
4722 elt = elt->next_same_value; | |
4723 | |
4724 /* A paradoxical subreg would be bad here: it'll be the right | |
4725 size, but later may be adjusted so that the upper bits aren't | |
4726 what we want. So reject it. */ | |
4727 if (elt != 0 | |
4728 && GET_CODE (elt->exp) == SUBREG | |
4729 && (GET_MODE_SIZE (GET_MODE (elt->exp)) | |
4730 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (elt->exp)))) | |
4731 /* It is okay, though, if the rtx we're trying to match | |
4732 will ignore any of the bits we can't predict. */ | |
4733 && ! (src != 0 | |
4734 && GET_CODE (src) == SUBREG | |
4735 && GET_MODE (src) == GET_MODE (elt->exp) | |
4736 && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))) | |
4737 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (elt->exp)))))) | |
4738 { | |
4739 elt = elt->next_same_value; | |
4740 continue; | |
4741 } | |
4742 | |
4743 if (elt) | |
4744 { | |
4745 src_elt_cost = elt->cost; | |
4746 src_elt_regcost = elt->regcost; | |
4747 } | |
4748 | |
4749 /* Find cheapest and skip it for the next time. For items | |
4750 of equal cost, use this order: | |
4751 src_folded, src, src_eqv, src_related and hash table entry. */ | |
4752 if (src_folded | |
4753 && preferable (src_folded_cost, src_folded_regcost, | |
4754 src_cost, src_regcost) <= 0 | |
4755 && preferable (src_folded_cost, src_folded_regcost, | |
4756 src_eqv_cost, src_eqv_regcost) <= 0 | |
4757 && preferable (src_folded_cost, src_folded_regcost, | |
4758 src_related_cost, src_related_regcost) <= 0 | |
4759 && preferable (src_folded_cost, src_folded_regcost, | |
4760 src_elt_cost, src_elt_regcost) <= 0) | |
4761 { | |
4762 trial = src_folded, src_folded_cost = MAX_COST; | |
4763 if (src_folded_force_flag) | |
4764 { | |
4765 rtx forced = force_const_mem (mode, trial); | |
4766 if (forced) | |
4767 trial = forced; | |
4768 } | |
4769 } | |
4770 else if (src | |
4771 && preferable (src_cost, src_regcost, | |
4772 src_eqv_cost, src_eqv_regcost) <= 0 | |
4773 && preferable (src_cost, src_regcost, | |
4774 src_related_cost, src_related_regcost) <= 0 | |
4775 && preferable (src_cost, src_regcost, | |
4776 src_elt_cost, src_elt_regcost) <= 0) | |
4777 trial = src, src_cost = MAX_COST; | |
4778 else if (src_eqv_here | |
4779 && preferable (src_eqv_cost, src_eqv_regcost, | |
4780 src_related_cost, src_related_regcost) <= 0 | |
4781 && preferable (src_eqv_cost, src_eqv_regcost, | |
4782 src_elt_cost, src_elt_regcost) <= 0) | |
4783 trial = src_eqv_here, src_eqv_cost = MAX_COST; | |
4784 else if (src_related | |
4785 && preferable (src_related_cost, src_related_regcost, | |
4786 src_elt_cost, src_elt_regcost) <= 0) | |
4787 trial = src_related, src_related_cost = MAX_COST; | |
4788 else | |
4789 { | |
4790 trial = elt->exp; | |
4791 elt = elt->next_same_value; | |
4792 src_elt_cost = MAX_COST; | |
4793 } | |
4794 | |
4795 /* Avoid creation of overlapping memory moves. */ | |
4796 if (MEM_P (trial) && MEM_P (SET_DEST (sets[i].rtl))) | |
4797 { | |
4798 rtx src, dest; | |
4799 | |
4800 /* BLKmode moves are not handled by cse anyway. */ | |
4801 if (GET_MODE (trial) == BLKmode) | |
4802 break; | |
4803 | |
4804 src = canon_rtx (trial); | |
4805 dest = canon_rtx (SET_DEST (sets[i].rtl)); | |
4806 | |
4807 if (!MEM_P (src) || !MEM_P (dest) | |
4808 || !nonoverlapping_memrefs_p (src, dest)) | |
4809 break; | |
4810 } | |
4811 | |
4812 /* We don't normally have an insn matching (set (pc) (pc)), so | |
4813 check for this separately here. We will delete such an | |
4814 insn below. | |
4815 | |
4816 For other cases such as a table jump or conditional jump | |
4817 where we know the ultimate target, go ahead and replace the | |
4818 operand. While that may not make a valid insn, we will | |
4819 reemit the jump below (and also insert any necessary | |
4820 barriers). */ | |
4821 if (n_sets == 1 && dest == pc_rtx | |
4822 && (trial == pc_rtx | |
4823 || (GET_CODE (trial) == LABEL_REF | |
4824 && ! condjump_p (insn)))) | |
4825 { | |
4826 /* Don't substitute non-local labels; this confuses the CFG. */ | |
4827 if (GET_CODE (trial) == LABEL_REF | |
4828 && LABEL_REF_NONLOCAL_P (trial)) | |
4829 continue; | |
4830 | |
4831 SET_SRC (sets[i].rtl) = trial; | |
4832 cse_jumps_altered = true; | |
4833 break; | |
4834 } | |
4835 | |
4836 /* Reject certain invalid forms of CONST that we create. */ | |
4837 else if (CONSTANT_P (trial) | |
4838 && GET_CODE (trial) == CONST | |
4839 /* Reject cases that will cause decode_rtx_const to | |
4840 die. On the alpha when simplifying a switch, we | |
4841 get (const (truncate (minus (label_ref) | |
4842 (label_ref)))). */ | |
4843 && (GET_CODE (XEXP (trial, 0)) == TRUNCATE | |
4844 /* Likewise on IA-64, except without the | |
4845 truncate. */ | |
4846 || (GET_CODE (XEXP (trial, 0)) == MINUS | |
4847 && GET_CODE (XEXP (XEXP (trial, 0), 0)) == LABEL_REF | |
4848 && GET_CODE (XEXP (XEXP (trial, 0), 1)) == LABEL_REF))) | |
4849 /* Do nothing for this case. */ | |
4850 ; | |
4851 | |
4852 /* Look for a substitution that makes a valid insn. */ | |
4853 else if (validate_unshare_change | |
4854 (insn, &SET_SRC (sets[i].rtl), trial, 0)) | |
4855 { | |
4856 rtx new_rtx = canon_reg (SET_SRC (sets[i].rtl), insn); | |
4857 | |
4858 /* The result of apply_change_group can be ignored; see | |
4859 canon_reg. */ | |
4860 | |
4861 validate_change (insn, &SET_SRC (sets[i].rtl), new_rtx, 1); | |
4862 apply_change_group (); | |
4863 | |
4864 break; | |
4865 } | |
4866 | |
4867 /* If we previously found constant pool entries for | |
4868 constants and this is a constant, try making a | |
4869 pool entry. Put it in src_folded unless we have already done | |
4870 this, since that is where it likely came from. */ | |
4871 | |
4872 else if (constant_pool_entries_cost | |
4873 && CONSTANT_P (trial) | |
4874 && (src_folded == 0 | |
4875 || (!MEM_P (src_folded) | |
4876 && ! src_folded_force_flag)) | |
4877 && GET_MODE_CLASS (mode) != MODE_CC | |
4878 && mode != VOIDmode) | |
4879 { | |
4880 src_folded_force_flag = 1; | |
4881 src_folded = trial; | |
4882 src_folded_cost = constant_pool_entries_cost; | |
4883 src_folded_regcost = constant_pool_entries_regcost; | |
4884 } | |
4885 } | |
4886 | |
4887 src = SET_SRC (sets[i].rtl); | |
4888 | |
4889 /* In general, it is good to have a SET with SET_SRC == SET_DEST. | |
4890 However, there is an important exception: If both are registers | |
4891 that are not the head of their equivalence class, replace SET_SRC | |
4892 with the head of the class. If we do not do this, we will have | |
4893 both registers live over a portion of the basic block. This way, | |
4894 their lifetimes will likely abut instead of overlapping. */ | |
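/* An illustrative case (register numbers are hypothetical): if reg 100 is
   the head of the equivalence class and this insn is
   (set (reg 101) (reg 101)), rewriting the source to (reg 100) lets the
   lifetime of reg 101 begin here and abut that of reg 100 rather than
   overlap it.  */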
4895 if (REG_P (dest) | |
4896 && REGNO_QTY_VALID_P (REGNO (dest))) | |
4897 { | |
4898 int dest_q = REG_QTY (REGNO (dest)); | |
4899 struct qty_table_elem *dest_ent = &qty_table[dest_q]; | |
4900 | |
4901 if (dest_ent->mode == GET_MODE (dest) | |
4902 && dest_ent->first_reg != REGNO (dest) | |
4903 && REG_P (src) && REGNO (src) == REGNO (dest) | |
4904 /* Don't do this if the original insn had a hard reg as | |
4905 SET_SRC or SET_DEST. */ | |
4906 && (!REG_P (sets[i].src) | |
4907 || REGNO (sets[i].src) >= FIRST_PSEUDO_REGISTER) | |
4908 && (!REG_P (dest) || REGNO (dest) >= FIRST_PSEUDO_REGISTER)) | |
4909 /* We can't call canon_reg here because it won't do anything if | |
4910 SRC is a hard register. */ | |
4911 { | |
4912 int src_q = REG_QTY (REGNO (src)); | |
4913 struct qty_table_elem *src_ent = &qty_table[src_q]; | |
4914 int first = src_ent->first_reg; | |
4915 rtx new_src | |
4916 = (first >= FIRST_PSEUDO_REGISTER | |
4917 ? regno_reg_rtx[first] : gen_rtx_REG (GET_MODE (src), first)); | |
4918 | |
4919 /* We must use validate_change even for this, because this | |
4920 might be a special no-op instruction, suitable only to | |
4921 tag notes onto. */ | |
4922 if (validate_change (insn, &SET_SRC (sets[i].rtl), new_src, 0)) | |
4923 { | |
4924 src = new_src; | |
4925 /* If we had a constant that is cheaper than what we are now | |
4926 setting SRC to, use that constant. We ignored it when we | |
4927 thought we could make this into a no-op. */ | |
4928 if (src_const && COST (src_const) < COST (src) | |
4929 && validate_change (insn, &SET_SRC (sets[i].rtl), | |
4930 src_const, 0)) | |
4931 src = src_const; | |
4932 } | |
4933 } | |
4934 } | |
4935 | |
4936 /* If we made a change, recompute SRC values. */ | |
4937 if (src != sets[i].src) | |
4938 { | |
4939 do_not_record = 0; | |
4940 hash_arg_in_memory = 0; | |
4941 sets[i].src = src; | |
4942 sets[i].src_hash = HASH (src, mode); | |
4943 sets[i].src_volatile = do_not_record; | |
4944 sets[i].src_in_memory = hash_arg_in_memory; | |
4945 sets[i].src_elt = lookup (src, sets[i].src_hash, mode); | |
4946 } | |
4947 | |
4948 /* If this is a single SET, we are setting a register, and we have an | |
4949 equivalent constant, we want to add a REG_EQUAL note. We don't want | |
4950 to write a REG_EQUAL note for a constant pseudo since verifying that | |
4951 that pseudo hasn't been eliminated is a pain. Such a note also | |
4952 won't help anything. | |
4953 | |
4954 Avoid a REG_EQUAL note for (CONST (MINUS (LABEL_REF) (LABEL_REF))) | |
4955 which can be created for a reference to a compile time computable | |
4956 entry in a jump table. */ | |
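/* For illustration (values are hypothetical): given
   (set (reg 100) (plus (reg 101) (const_int 4))) where reg 101 is known
   to hold 8, src_const is (const_int 12) and the insn receives a
   (REG_EQUAL (const_int 12)) note.  */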
4957 | |
4958 if (n_sets == 1 && src_const && REG_P (dest) | |
4959 && !REG_P (src_const) | |
4960 && ! (GET_CODE (src_const) == CONST | |
4961 && GET_CODE (XEXP (src_const, 0)) == MINUS | |
4962 && GET_CODE (XEXP (XEXP (src_const, 0), 0)) == LABEL_REF | |
4963 && GET_CODE (XEXP (XEXP (src_const, 0), 1)) == LABEL_REF)) | |
4964 { | |
4965 /* We only want a REG_EQUAL note if src_const != src. */ | |
4966 if (! rtx_equal_p (src, src_const)) | |
4967 { | |
4968 /* Make sure that the rtx is not shared. */ | |
4969 src_const = copy_rtx (src_const); | |
4970 | |
4971 /* Record the actual constant value in a REG_EQUAL note, | |
4972 making a new one if one does not already exist. */ | |
4973 set_unique_reg_note (insn, REG_EQUAL, src_const); | |
4974 df_notes_rescan (insn); | |
4975 } | |
4976 } | |
4977 | |
4978 /* Now deal with the destination. */ | |
4979 do_not_record = 0; | |
4980 | |
4981 /* Look within any ZERO_EXTRACT to the MEM or REG within it. */ | |
4982 while (GET_CODE (dest) == SUBREG | |
4983 || GET_CODE (dest) == ZERO_EXTRACT | |
4984 || GET_CODE (dest) == STRICT_LOW_PART) | |
4985 dest = XEXP (dest, 0); | |
4986 | |
4987 sets[i].inner_dest = dest; | |
4988 | |
4989 if (MEM_P (dest)) | |
4990 { | |
4991 #ifdef PUSH_ROUNDING | |
4992 /* Stack pushes invalidate the stack pointer. */ | |
4993 rtx addr = XEXP (dest, 0); | |
4994 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC | |
4995 && XEXP (addr, 0) == stack_pointer_rtx) | |
4996 invalidate (stack_pointer_rtx, VOIDmode); | |
4997 #endif | |
4998 dest = fold_rtx (dest, insn); | |
4999 } | |
5000 | |
5001 /* Compute the hash code of the destination now, | |
5002 before the effects of this instruction are recorded, | |
5003 since the register values used in the address computation | |
5004 are those before this instruction. */ | |
5005 sets[i].dest_hash = HASH (dest, mode); | |
5006 | |
5007 /* Don't enter a bit-field in the hash table | |
5008 because the value in it after the store | |
5009 may not equal what was stored, due to truncation. */ | |
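/* For example, storing (const_int 0x1ff) into an 8-bit ZERO_EXTRACT
   leaves only 0xff in the field, so recording 0x1ff as the field's value
   would be wrong; the width test below records a constant only when it
   fits in the field.  */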
5010 | |
5011 if (GET_CODE (SET_DEST (sets[i].rtl)) == ZERO_EXTRACT) | |
5012 { | |
5013 rtx width = XEXP (SET_DEST (sets[i].rtl), 1); | |
5014 | |
5015 if (src_const != 0 && GET_CODE (src_const) == CONST_INT | |
5016 && GET_CODE (width) == CONST_INT | |
5017 && INTVAL (width) < HOST_BITS_PER_WIDE_INT | |
5018 && ! (INTVAL (src_const) | |
5019 & ((HOST_WIDE_INT) (-1) << INTVAL (width)))) | |
5020 /* Exception: if the value is constant, | |
5021 and it won't be truncated, record it. */ | |
5022 ; | |
5023 else | |
5024 { | |
5025 /* This is chosen so that the destination will be invalidated | |
5026 but no new value will be recorded. | |
5027 We must invalidate because sometimes constant | |
5028 values can be recorded for bitfields. */ | |
5029 sets[i].src_elt = 0; | |
5030 sets[i].src_volatile = 1; | |
5031 src_eqv = 0; | |
5032 src_eqv_elt = 0; | |
5033 } | |
5034 } | |
5035 | |
5036 /* If there is only one set in a JUMP_INSN and it is now a no-op, we can | |
5037 delete the insn. */ | |
5038 else if (n_sets == 1 && dest == pc_rtx && src == pc_rtx) | |
5039 { | |
5040 /* One less use of the label this insn used to jump to. */ | |
5041 delete_insn_and_edges (insn); | |
5042 cse_jumps_altered = true; | |
5043 /* No more processing for this set. */ | |
5044 sets[i].rtl = 0; | |
5045 } | |
5046 | |
5047 /* If this SET is now setting PC to a label, we know it used to | |
5048 be a conditional or computed branch. */ | |
5049 else if (dest == pc_rtx && GET_CODE (src) == LABEL_REF | |
5050 && !LABEL_REF_NONLOCAL_P (src)) | |
5051 { | |
5052 /* We reemit the jump in as many cases as possible just in | |
5053 case the form of an unconditional jump is significantly | |
5054 different from that of a computed jump or conditional jump. | |
5055 | |
5056 If this insn has multiple sets, then reemitting the | |
5057 jump is nontrivial. So instead we just force rerecognition | |
5058 and hope for the best. */ | |
5059 if (n_sets == 1) | |
5060 { | |
5061 rtx new_rtx, note; | |
5062 | |
5063 new_rtx = emit_jump_insn_before (gen_jump (XEXP (src, 0)), insn); | |
5064 JUMP_LABEL (new_rtx) = XEXP (src, 0); | |
5065 LABEL_NUSES (XEXP (src, 0))++; | |
5066 | |
5067 /* Make sure to copy over REG_NON_LOCAL_GOTO. */ | |
5068 note = find_reg_note (insn, REG_NON_LOCAL_GOTO, 0); | |
5069 if (note) | |
5070 { | |
5071 XEXP (note, 1) = NULL_RTX; | |
5072 REG_NOTES (new_rtx) = note; | |
5073 } | |
5074 | |
5075 delete_insn_and_edges (insn); | |
5076 insn = new_rtx; | |
5077 } | |
5078 else | |
5079 INSN_CODE (insn) = -1; | |
5080 | |
5081 /* Do not bother deleting any unreachable code; let jump do it. */ | |
5082 cse_jumps_altered = true; | |
5083 sets[i].rtl = 0; | |
5084 } | |
5085 | |
5086 /* If destination is volatile, invalidate it and then do no further | |
5087 processing for this assignment. */ | |
5088 | |
5089 else if (do_not_record) | |
5090 { | |
5091 if (REG_P (dest) || GET_CODE (dest) == SUBREG) | |
5092 invalidate (dest, VOIDmode); | |
5093 else if (MEM_P (dest)) | |
5094 invalidate (dest, VOIDmode); | |
5095 else if (GET_CODE (dest) == STRICT_LOW_PART | |
5096 || GET_CODE (dest) == ZERO_EXTRACT) | |
5097 invalidate (XEXP (dest, 0), GET_MODE (dest)); | |
5098 sets[i].rtl = 0; | |
5099 } | |
5100 | |
5101 if (sets[i].rtl != 0 && dest != SET_DEST (sets[i].rtl)) | |
5102 sets[i].dest_hash = HASH (SET_DEST (sets[i].rtl), mode); | |
5103 | |
5104 #ifdef HAVE_cc0 | |
5105 /* If setting CC0, record what it was set to, or a constant, if it | |
5106 is equivalent to a constant. If it is being set to a floating-point | |
5107 value, make a COMPARE with the appropriate constant of 0. If we | |
5108 don't do this, later code can interpret this as a test against | |
5109 const0_rtx, which can cause problems if we try to put it into an | |
5110 insn as a floating-point operand. */ | |
5111 if (dest == cc0_rtx) | |
5112 { | |
5113 this_insn_cc0 = src_const && mode != VOIDmode ? src_const : src; | |
5114 this_insn_cc0_mode = mode; | |
5115 if (FLOAT_MODE_P (mode)) | |
5116 this_insn_cc0 = gen_rtx_COMPARE (VOIDmode, this_insn_cc0, | |
5117 CONST0_RTX (mode)); | |
5118 } | |
5119 #endif | |
5120 } | |
5121 | |
5122 /* Now enter all non-volatile source expressions in the hash table | |
5123 if they are not already present. | |
5124 Record their equivalence classes in src_elt. | |
5125 This way we can insert the corresponding destinations into | |
5126 the same classes even if the actual sources are no longer in them | |
5127 (having been invalidated). */ | |
5128 | |
5129 if (src_eqv && src_eqv_elt == 0 && sets[0].rtl != 0 && ! src_eqv_volatile | |
5130 && ! rtx_equal_p (src_eqv, SET_DEST (sets[0].rtl))) | |
5131 { | |
5132 struct table_elt *elt; | |
5133 struct table_elt *classp = sets[0].src_elt; | |
5134 rtx dest = SET_DEST (sets[0].rtl); | |
5135 enum machine_mode eqvmode = GET_MODE (dest); | |
5136 | |
5137 if (GET_CODE (dest) == STRICT_LOW_PART) | |
5138 { | |
5139 eqvmode = GET_MODE (SUBREG_REG (XEXP (dest, 0))); | |
5140 classp = 0; | |
5141 } | |
5142 if (insert_regs (src_eqv, classp, 0)) | |
5143 { | |
5144 rehash_using_reg (src_eqv); | |
5145 src_eqv_hash = HASH (src_eqv, eqvmode); | |
5146 } | |
5147 elt = insert (src_eqv, classp, src_eqv_hash, eqvmode); | |
5148 elt->in_memory = src_eqv_in_memory; | |
5149 src_eqv_elt = elt; | |
5150 | |
5151 /* Check to see if src_eqv_elt is the same as a set source which | |
5152 does not yet have an elt, and if so set the elt of the set source | |
5153 to src_eqv_elt. */ | |
5154 for (i = 0; i < n_sets; i++) | |
5155 if (sets[i].rtl && sets[i].src_elt == 0 | |
5156 && rtx_equal_p (SET_SRC (sets[i].rtl), src_eqv)) | |
5157 sets[i].src_elt = src_eqv_elt; | |
5158 } | |
5159 | |
5160 for (i = 0; i < n_sets; i++) | |
5161 if (sets[i].rtl && ! sets[i].src_volatile | |
5162 && ! rtx_equal_p (SET_SRC (sets[i].rtl), SET_DEST (sets[i].rtl))) | |
5163 { | |
5164 if (GET_CODE (SET_DEST (sets[i].rtl)) == STRICT_LOW_PART) | |
5165 { | |
5166 /* REG_EQUAL in setting a STRICT_LOW_PART | |
5167 gives an equivalent for the entire destination register, | |
5168 not just for the subreg being stored in now. | |
5169 This is a more interesting equivalence, so we arrange later | |
5170 to treat the entire reg as the destination. */ | |
5171 sets[i].src_elt = src_eqv_elt; | |
5172 sets[i].src_hash = src_eqv_hash; | |
5173 } | |
5174 else | |
5175 { | |
5176 /* Insert source and constant equivalent into hash table, if not | |
5177 already present. */ | |
5178 struct table_elt *classp = src_eqv_elt; | |
5179 rtx src = sets[i].src; | |
5180 rtx dest = SET_DEST (sets[i].rtl); | |
5181 enum machine_mode mode | |
5182 = GET_MODE (src) == VOIDmode ? GET_MODE (dest) : GET_MODE (src); | |
5183 | |
5184 /* It's possible that we have a source value known to be | |
5185 constant but don't have a REG_EQUAL note on the insn. | |
5186 Lack of a note will mean src_eqv_elt will be NULL. This | |
5187 can happen where we've generated a SUBREG to access a | |
5188 CONST_INT that is already in a register in a wider mode. | |
5189 Ensure that the source expression is put in the proper | |
5190 constant class. */ | |
5191 if (!classp) | |
5192 classp = sets[i].src_const_elt; | |
5193 | |
5194 if (sets[i].src_elt == 0) | |
5195 { | |
5196 struct table_elt *elt; | |
5197 | |
5198 /* Note that these insert_regs calls cannot remove | |
5199 any of the src_elt's, because they would have failed to | |
5200 match if not still valid. */ | |
5201 if (insert_regs (src, classp, 0)) | |
5202 { | |
5203 rehash_using_reg (src); | |
5204 sets[i].src_hash = HASH (src, mode); | |
5205 } | |
5206 elt = insert (src, classp, sets[i].src_hash, mode); | |
5207 elt->in_memory = sets[i].src_in_memory; | |
5208 sets[i].src_elt = classp = elt; | |
5209 } | |
5210 if (sets[i].src_const && sets[i].src_const_elt == 0 | |
5211 && src != sets[i].src_const | |
5212 && ! rtx_equal_p (sets[i].src_const, src)) | |
5213 sets[i].src_elt = insert (sets[i].src_const, classp, | |
5214 sets[i].src_const_hash, mode); | |
5215 } | |
5216 } | |
5217 else if (sets[i].src_elt == 0) | |
5218 /* If we did not insert the source into the hash table (e.g., it was | |
5219 volatile), note the equivalence class for the REG_EQUAL value, if any, | |
5220 so that the destination goes into that class. */ | |
5221 sets[i].src_elt = src_eqv_elt; | |
5222 | |
5223 /* Record destination addresses in the hash table. This allows us to | |
5224 check if they are invalidated by other sets. */ | |
5225 for (i = 0; i < n_sets; i++) | |
5226 { | |
5227 if (sets[i].rtl) | |
5228 { | |
5229 rtx x = sets[i].inner_dest; | |
5230 struct table_elt *elt; | |
5231 enum machine_mode mode; | |
5232 unsigned hash; | |
5233 | |
5234 if (MEM_P (x)) | |
5235 { | |
5236 x = XEXP (x, 0); | |
5237 mode = GET_MODE (x); | |
5238 hash = HASH (x, mode); | |
5239 elt = lookup (x, hash, mode); | |
5240 if (!elt) | |
5241 { | |
5242 if (insert_regs (x, NULL, 0)) | |
5243 { | |
5244 rtx dest = SET_DEST (sets[i].rtl); | |
5245 | |
5246 rehash_using_reg (x); | |
5247 hash = HASH (x, mode); | |
5248 sets[i].dest_hash = HASH (dest, GET_MODE (dest)); | |
5249 } | |
5250 elt = insert (x, NULL, hash, mode); | |
5251 } | |
5252 | |
5253 sets[i].dest_addr_elt = elt; | |
5254 } | |
5255 else | |
5256 sets[i].dest_addr_elt = NULL; | |
5257 } | |
5258 } | |
5259 | |
5260 invalidate_from_clobbers (x); | |
5261 | |
5262 /* Some registers are invalidated by subroutine calls. Memory is | |
5263 invalidated by non-constant calls. */ | |
5264 | |
5265 if (CALL_P (insn)) | |
5266 { | |
5267 if (!(RTL_CONST_OR_PURE_CALL_P (insn))) | |
5268 invalidate_memory (); | |
5269 invalidate_for_call (); | |
5270 } | |
5271 | |
5272 /* Now invalidate everything set by this instruction. | |
5273 If a SUBREG or other funny destination is being set, | |
5274 sets[i].rtl is still nonzero, so here we invalidate the reg | |
5275 a part of which is being set. */ | |
5276 | |
5277 for (i = 0; i < n_sets; i++) | |
5278 if (sets[i].rtl) | |
5279 { | |
5280 /* We can't use the inner dest, because the mode associated with | |
5281 a ZERO_EXTRACT is significant. */ | |
5282 rtx dest = SET_DEST (sets[i].rtl); | |
5283 | |
5284 /* Needed for registers to remove the register from its | |
5285 previous quantity's chain. | |
5286 Needed for memory if this is a nonvarying address, unless | |
5287 we have just done an invalidate_memory that covers even those. */ | |
5288 if (REG_P (dest) || GET_CODE (dest) == SUBREG) | |
5289 invalidate (dest, VOIDmode); | |
5290 else if (MEM_P (dest)) | |
5291 invalidate (dest, VOIDmode); | |
5292 else if (GET_CODE (dest) == STRICT_LOW_PART | |
5293 || GET_CODE (dest) == ZERO_EXTRACT) | |
5294 invalidate (XEXP (dest, 0), GET_MODE (dest)); | |
5295 } | |
5296 | |
5297 /* A volatile ASM invalidates everything. */ | |
5298 if (NONJUMP_INSN_P (insn) | |
5299 && GET_CODE (PATTERN (insn)) == ASM_OPERANDS | |
5300 && MEM_VOLATILE_P (PATTERN (insn))) | |
5301 flush_hash_table (); | |
5302 | |
5303 /* Don't cse over a call to setjmp; on some machines (e.g. VAX) | |
5304 the regs restored by the longjmp come from a later time | |
5305 than the setjmp. */ | |
5306 if (CALL_P (insn) && find_reg_note (insn, REG_SETJMP, NULL)) | |
5307 { | |
5308 flush_hash_table (); | |
5309 goto done; | |
5310 } | |
5311 | |
5312 /* Make sure registers mentioned in destinations | |
5313 are safe for use in an expression to be inserted. | |
5314 This removes from the hash table | |
5315 any invalid entry that refers to one of these registers. | |
5316 | |
5317 We don't care about the return value from mention_regs because | |
5318 we are going to hash the SET_DEST values unconditionally. */ | |
5319 | |
5320 for (i = 0; i < n_sets; i++) | |
5321 { | |
5322 if (sets[i].rtl) | |
5323 { | |
5324 rtx x = SET_DEST (sets[i].rtl); | |
5325 | |
5326 if (!REG_P (x)) | |
5327 mention_regs (x); | |
5328 else | |
5329 { | |
5330 /* We used to rely on all references to a register becoming | |
5331 inaccessible when a register changes to a new quantity, | |
5332 since that changes the hash code. However, that is not | |
5333 safe, since after HASH_SIZE new quantities we get a | |
5334 hash 'collision' of a register with its own invalid | |
5335 entries. And since SUBREGs have been changed not to | |
5336 change their hash code with the hash code of the register, | |
5337 it wouldn't work any longer at all. So we have to check | |
5338 for any invalid references lying around now. | |
5339 This code is similar to the REG case in mention_regs, | |
5340 but it knows that reg_tick has been incremented, and | |
5341 it leaves reg_in_table as -1. */ | |
5342 unsigned int regno = REGNO (x); | |
5343 unsigned int endregno = END_REGNO (x); | |
5344 unsigned int i; | |
5345 | |
5346 for (i = regno; i < endregno; i++) | |
5347 { | |
5348 if (REG_IN_TABLE (i) >= 0) | |
5349 { | |
5350 remove_invalid_refs (i); | |
5351 REG_IN_TABLE (i) = -1; | |
5352 } | |
5353 } | |
5354 } | |
5355 } | |
5356 } | |
5357 | |
5358 /* We may have just removed some of the src_elt's from the hash table. | |
5359 So replace each one with the current head of the same class. | |
5360 Also check if destination addresses have been removed. */ | |
5361 | |
5362 for (i = 0; i < n_sets; i++) | |
5363 if (sets[i].rtl) | |
5364 { | |
5365 if (sets[i].dest_addr_elt | |
5366 && sets[i].dest_addr_elt->first_same_value == 0) | |
5367 { | |
5368 /* The elt was removed, which means this destination is not | |
5369 valid after this instruction. */ | |
5370 sets[i].rtl = NULL_RTX; | |
5371 } | |
5372 else if (sets[i].src_elt && sets[i].src_elt->first_same_value == 0) | |
5373 /* If elt was removed, find current head of same class, | |
5374 or 0 if nothing remains of that class. */ | |
5375 { | |
5376 struct table_elt *elt = sets[i].src_elt; | |
5377 | |
5378 while (elt && elt->prev_same_value) | |
5379 elt = elt->prev_same_value; | |
5380 | |
5381 while (elt && elt->first_same_value == 0) | |
5382 elt = elt->next_same_value; | |
5383 sets[i].src_elt = elt ? elt->first_same_value : 0; | |
5384 } | |
5385 } | |
5386 | |
5387 /* Now insert the destinations into their equivalence classes. */ | |
5388 | |
5389 for (i = 0; i < n_sets; i++) | |
5390 if (sets[i].rtl) | |
5391 { | |
5392 rtx dest = SET_DEST (sets[i].rtl); | |
5393 struct table_elt *elt; | |
5394 | |
5395 /* Don't record value if we are not supposed to risk allocating | |
5396 floating-point values in registers that might be wider than | |
5397 memory. */ | |
5398 if ((flag_float_store | |
5399 && MEM_P (dest) | |
5400 && FLOAT_MODE_P (GET_MODE (dest))) | |
5401 /* Don't record BLKmode values, because we don't know their | |
5402 size, and can't be sure that other BLKmode values | |
5403 have the same or smaller size. */ | |
5404 || GET_MODE (dest) == BLKmode | |
5405 /* If we didn't put a REG_EQUAL value or a source into the hash | |
5406 table, there is no point in recording DEST. */ | |
5407 || sets[i].src_elt == 0 | |
5408 /* If DEST is a paradoxical SUBREG and SRC is a ZERO_EXTEND | |
5409 or SIGN_EXTEND, don't record DEST since it can cause | |
5410 some tracking to be wrong. | |
5411 | |
5412 ??? Think about this more later. */ | |
5413 || (GET_CODE (dest) == SUBREG | |
5414 && (GET_MODE_SIZE (GET_MODE (dest)) | |
5415 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))) | |
5416 && (GET_CODE (sets[i].src) == SIGN_EXTEND | |
5417 || GET_CODE (sets[i].src) == ZERO_EXTEND))) | |
5418 continue; | |
5419 | |
5420 /* STRICT_LOW_PART isn't part of the value BEING set, | |
5421 and neither is the SUBREG inside it. | |
5422 Note that in this case SETS[I].SRC_ELT is really SRC_EQV_ELT. */ | |
5423 if (GET_CODE (dest) == STRICT_LOW_PART) | |
5424 dest = SUBREG_REG (XEXP (dest, 0)); | |
5425 | |
5426 if (REG_P (dest) || GET_CODE (dest) == SUBREG) | |
5427 /* Registers must also be inserted into chains for quantities. */ | |
5428 if (insert_regs (dest, sets[i].src_elt, 1)) | |
5429 { | |
5430 /* If `insert_regs' changes something, the hash code must be | |
5431 recalculated. */ | |
5432 rehash_using_reg (dest); | |
5433 sets[i].dest_hash = HASH (dest, GET_MODE (dest)); | |
5434 } | |
5435 | |
5436 elt = insert (dest, sets[i].src_elt, | |
5437 sets[i].dest_hash, GET_MODE (dest)); | |
5438 | |
5439 elt->in_memory = (MEM_P (sets[i].inner_dest) | |
5440 && !MEM_READONLY_P (sets[i].inner_dest)); | |
5441 | |
5442 /* If we have (set (subreg:m1 (reg:m2 foo) 0) (bar:m1)), M1 is no | |
5443 narrower than M2, and both M1 and M2 are the same number of words, | |
5444 we are also doing (set (reg:m2 foo) (subreg:m2 (bar:m1) 0)) so | |
5445 make that equivalence as well. | |
5446 | |
5447 However, BAR may have equivalences for which gen_lowpart | |
5448 will produce a simpler value than gen_lowpart applied to | |
5449 BAR (e.g., if BAR was ZERO_EXTENDed from M2), so we will scan all | |
5450 BAR's equivalences. If we don't get a simplified form, make | |
5451 the SUBREG. It will not be used in an equivalence, but will | |
5452 cause two similar assignments to be detected. | |
5453 | |
5454 Note the loop below will find SUBREG_REG (DEST) since we have | |
5455 already entered SRC and DEST of the SET in the table. */ | |
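/* A concrete sketch (hypothetical modes and pseudos, little-endian byte
   numbering): for (set (subreg:SI (reg:HI 100) 0) (reg:SI 101)) on a
   32-bit-word target, we also want the equivalence
   (set (reg:HI 100) (subreg:HI (reg:SI 101) 0)); the BYTES_BIG_ENDIAN
   code below adjusts the subreg byte for the other byte order.  */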
5456 | |
5457 if (GET_CODE (dest) == SUBREG | |
5458 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))) - 1) | |
5459 / UNITS_PER_WORD) | |
5460 == (GET_MODE_SIZE (GET_MODE (dest)) - 1) / UNITS_PER_WORD) | |
5461 && (GET_MODE_SIZE (GET_MODE (dest)) | |
5462 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))) | |
5463 && sets[i].src_elt != 0) | |
5464 { | |
5465 enum machine_mode new_mode = GET_MODE (SUBREG_REG (dest)); | |
5466 struct table_elt *elt, *classp = 0; | |
5467 | |
5468 for (elt = sets[i].src_elt->first_same_value; elt; | |
5469 elt = elt->next_same_value) | |
5470 { | |
5471 rtx new_src = 0; | |
5472 unsigned src_hash; | |
5473 struct table_elt *src_elt; | |
5474 int byte = 0; | |
5475 | |
5476 /* Ignore invalid entries. */ | |
5477 if (!REG_P (elt->exp) | |
5478 && ! exp_equiv_p (elt->exp, elt->exp, 1, false)) | |
5479 continue; | |
5480 | |
5481 /* We may have already been playing subreg games. If the | |
5482 mode is already correct for the destination, use it. */ | |
5483 if (GET_MODE (elt->exp) == new_mode) | |
5484 new_src = elt->exp; | |
5485 else | |
5486 { | |
5487 /* Calculate big endian correction for the SUBREG_BYTE. | |
5488 We have already checked that M1 (GET_MODE (dest)) | |
5489 is not narrower than M2 (new_mode). */ | |
5490 if (BYTES_BIG_ENDIAN) | |
5491 byte = (GET_MODE_SIZE (GET_MODE (dest)) | |
5492 - GET_MODE_SIZE (new_mode)); | |
5493 | |
5494 new_src = simplify_gen_subreg (new_mode, elt->exp, | |
5495 GET_MODE (dest), byte); | |
5496 } | |
5497 | |
5498 /* The call to simplify_gen_subreg fails if the value | |
5499 is VOIDmode and we can't do any simplification, e.g. | |
5500 for EXPR_LISTs denoting function call results. | |
5501 It is invalid to construct a SUBREG with a VOIDmode | |
5502 SUBREG_REG, hence a zero new_src means we can't do | |
5503 this substitution. */ | |
5504 if (! new_src) | |
5505 continue; | |
5506 | |
5507 src_hash = HASH (new_src, new_mode); | |
5508 src_elt = lookup (new_src, src_hash, new_mode); | |
5509 | |
5510 /* Put the new source in the hash table if it isn't | |
5511 there already. */ | |
5512 if (src_elt == 0) | |
5513 { | |
5514 if (insert_regs (new_src, classp, 0)) | |
5515 { | |
5516 rehash_using_reg (new_src); | |
5517 src_hash = HASH (new_src, new_mode); | |
5518 } | |
5519 src_elt = insert (new_src, classp, src_hash, new_mode); | |
5520 src_elt->in_memory = elt->in_memory; | |
5521 } | |
5522 else if (classp && classp != src_elt->first_same_value) | |
5523 /* Show that two things that we've seen before are | |
5524 actually the same. */ | |
5525 merge_equiv_classes (src_elt, classp); | |
5526 | |
5527 classp = src_elt->first_same_value; | |
5528 /* Ignore invalid entries. */ | |
5529 while (classp | |
5530 && !REG_P (classp->exp) | |
5531 && ! exp_equiv_p (classp->exp, classp->exp, 1, false)) | |
5532 classp = classp->next_same_value; | |
5533 } | |
5534 } | |
5535 } | |
5536 | |
5537 /* Special handling for (set REG0 REG1) where REG0 is the | |
5538 "cheapest", cheaper than REG1. After cse, REG1 will probably not | |
5539 be used in the sequel, so (if easily done) change this insn to | |
5540 (set REG1 REG0) and replace REG1 with REG0 in the previous insn | |
5541 that computed their value. Then REG1 will become a dead store | |
5542 and won't cloud the situation for later optimizations. | |
5543 | |
5544 Do not make this change if REG1 is a hard register, because it will | |
5545 then be used in the sequel and we may be changing a two-operand insn | |
5546 into a three-operand insn. | |
5547 | |
5548 Also do not do this if we are operating on a copy of INSN. */ | |
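/* A sketch with hypothetical pseudos, REG0 = reg 100 (cheapest),
   REG1 = reg 101:
       (set (reg 101) (plus ...))   (set (reg 100) (reg 101))
   becomes
       (set (reg 100) (plus ...))   (set (reg 101) (reg 100))
   so the copy into reg 101 is now a likely-dead store.  */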
5549 | |
5550 if (n_sets == 1 && sets[0].rtl && REG_P (SET_DEST (sets[0].rtl)) | |
5551 && NEXT_INSN (PREV_INSN (insn)) == insn | |
5552 && REG_P (SET_SRC (sets[0].rtl)) | |
5553 && REGNO (SET_SRC (sets[0].rtl)) >= FIRST_PSEUDO_REGISTER | |
5554 && REGNO_QTY_VALID_P (REGNO (SET_SRC (sets[0].rtl)))) | |
5555 { | |
5556 int src_q = REG_QTY (REGNO (SET_SRC (sets[0].rtl))); | |
5557 struct qty_table_elem *src_ent = &qty_table[src_q]; | |
5558 | |
5559 if (src_ent->first_reg == REGNO (SET_DEST (sets[0].rtl))) | |
5560 { | |
5561 /* Scan for the previous nonnote insn, but stop at a basic | |
5562 block boundary. */ | |
5563 rtx prev = insn; | |
5564 rtx bb_head = BB_HEAD (BLOCK_FOR_INSN (insn)); | |
5565 do | |
5566 { | |
5567 prev = PREV_INSN (prev); | |
5568 } | |
5569 while (prev != bb_head && NOTE_P (prev)); | |
5570 | |
5571 /* Do not swap the registers around if the previous instruction | |
5572 attaches a REG_EQUIV note to REG1. | |
5573 | |
5574 ??? It's not entirely clear whether we can transfer a REG_EQUIV | |
5575 from the pseudo that originally shadowed an incoming argument | |
5576 to another register. Some uses of REG_EQUIV might rely on it | |
5577 being attached to REG1 rather than REG0. | |
5578 | |
5579 This section previously turned the REG_EQUIV into a REG_EQUAL | |
5580 note. We cannot do that because REG_EQUIV may provide an | |
5581 uninitialized stack slot when REG_PARM_STACK_SPACE is used. */ | |
5582 if (NONJUMP_INSN_P (prev) | |
5583 && GET_CODE (PATTERN (prev)) == SET | |
5584 && SET_DEST (PATTERN (prev)) == SET_SRC (sets[0].rtl) | |
5585 && ! find_reg_note (prev, REG_EQUIV, NULL_RTX)) | |
5586 { | |
5587 rtx dest = SET_DEST (sets[0].rtl); | |
5588 rtx src = SET_SRC (sets[0].rtl); | |
5589 rtx note; | |
5590 | |
5591 validate_change (prev, &SET_DEST (PATTERN (prev)), dest, 1); | |
5592 validate_change (insn, &SET_DEST (sets[0].rtl), src, 1); | |
5593 validate_change (insn, &SET_SRC (sets[0].rtl), dest, 1); | |
5594 apply_change_group (); | |
5595 | |
5596 /* If INSN has a REG_EQUAL note, and this note mentions | |
5597 REG0, then we must delete it, because the value in | |
5598 REG0 has changed. If the note's value is REG1, we must | |
5599 also delete it because that is now this insn's dest. */ | |
5600 note = find_reg_note (insn, REG_EQUAL, NULL_RTX); | |
5601 if (note != 0 | |
5602 && (reg_mentioned_p (dest, XEXP (note, 0)) | |
5603 || rtx_equal_p (src, XEXP (note, 0)))) | |
5604 remove_note (insn, note); | |
5605 } | |
5606 } | |
5607 } | |
5608 | |
5609 done:; | |
5610 } | |
5611 | |
5612 /* Remove from the hash table all expressions that reference memory. */ | |
5613 | |
5614 static void | |
5615 invalidate_memory (void) | |
5616 { | |
5617 int i; | |
5618 struct table_elt *p, *next; | |
5619 | |
5620 for (i = 0; i < HASH_SIZE; i++) | |
5621 for (p = table[i]; p; p = next) | |
5622 { | |
5623 next = p->next_same_hash; | |
5624 if (p->in_memory) | |
5625 remove_from_table (p, i); | |
5626 } | |
5627 } | |
5628 | |
5629 /* Perform invalidation on the basis of everything about an insn | |
5630 except for invalidating the actual places that are SET in it. | |
5631 This includes the places CLOBBERed, and anything that might | |
5632 alias with something that is SET or CLOBBERed. | |
5633 | |
5634 X is the pattern of the insn. */ | |
5635 | |
5636 static void | |
5637 invalidate_from_clobbers (rtx x) | |
5638 { | |
5639 if (GET_CODE (x) == CLOBBER) | |
5640 { | |
5641 rtx ref = XEXP (x, 0); | |
5642 if (ref) | |
5643 { | |
5644 if (REG_P (ref) || GET_CODE (ref) == SUBREG | |
5645 || MEM_P (ref)) | |
5646 invalidate (ref, VOIDmode); | |
5647 else if (GET_CODE (ref) == STRICT_LOW_PART | |
5648 || GET_CODE (ref) == ZERO_EXTRACT) | |
5649 invalidate (XEXP (ref, 0), GET_MODE (ref)); | |
5650 } | |
5651 } | |
5652 else if (GET_CODE (x) == PARALLEL) | |
5653 { | |
5654 int i; | |
5655 for (i = XVECLEN (x, 0) - 1; i >= 0; i--) | |
5656 { | |
5657 rtx y = XVECEXP (x, 0, i); | |
5658 if (GET_CODE (y) == CLOBBER) | |
5659 { | |
5660 rtx ref = XEXP (y, 0); | |
5661 if (REG_P (ref) || GET_CODE (ref) == SUBREG | |
5662 || MEM_P (ref)) | |
5663 invalidate (ref, VOIDmode); | |
5664 else if (GET_CODE (ref) == STRICT_LOW_PART | |
5665 || GET_CODE (ref) == ZERO_EXTRACT) | |
5666 invalidate (XEXP (ref, 0), GET_MODE (ref)); | |
5667 } | |
5668 } | |
5669 } | |
5670 } | |
5671 | |
5672 /* Process X, part of the REG_NOTES of an insn. Look at any REG_EQUAL notes | |
5673 and replace any registers in them with either an equivalent constant | |
5674 or the canonical form of the register. If we are inside an address, | |
5675 only do this if the address remains valid. | |
5676 | |
5677 OBJECT is 0 except when within a MEM in which case it is the MEM. | |
5678 | |
5679 Return the replacement for X. */ | |
5680 | |
5681 static rtx | |
5682 cse_process_notes_1 (rtx x, rtx object, bool *changed) | |
5683 { | |
5684 enum rtx_code code = GET_CODE (x); | |
5685 const char *fmt = GET_RTX_FORMAT (code); | |
5686 int i; | |
5687 | |
5688 switch (code) | |
5689 { | |
5690 case CONST_INT: | |
5691 case CONST: | |
5692 case SYMBOL_REF: | |
5693 case LABEL_REF: | |
5694 case CONST_DOUBLE: | |
5695 case CONST_FIXED: | |
5696 case CONST_VECTOR: | |
5697 case PC: | |
5698 case CC0: | |
5699 case LO_SUM: | |
5700 return x; | |
5701 | |
5702 case MEM: | |
5703 validate_change (x, &XEXP (x, 0), | |
5704 cse_process_notes (XEXP (x, 0), x, changed), 0); | |
5705 return x; | |
5706 | |
5707 case EXPR_LIST: | |
5708 case INSN_LIST: | |
5709 if (REG_NOTE_KIND (x) == REG_EQUAL) | |
5710 XEXP (x, 0) = cse_process_notes (XEXP (x, 0), NULL_RTX, changed); | |
5711 if (XEXP (x, 1)) | |
5712 XEXP (x, 1) = cse_process_notes (XEXP (x, 1), NULL_RTX, changed); | |
5713 return x; | |
5714 | |
5715 case SIGN_EXTEND: | |
5716 case ZERO_EXTEND: | |
5717 case SUBREG: | |
5718 { | |
5719 rtx new_rtx = cse_process_notes (XEXP (x, 0), object, changed); | |
5720 /* We don't substitute VOIDmode constants into these rtx, | |
5721 since they would impede folding. */ | |
5722 if (GET_MODE (new_rtx) != VOIDmode) | |
5723 validate_change (object, &XEXP (x, 0), new_rtx, 0); | |
5724 return x; | |
5725 } | |
5726 | |
5727 case REG: | |
5728 i = REG_QTY (REGNO (x)); | |
5729 | |
5730 /* Return a constant or a constant register. */ | |
5731 if (REGNO_QTY_VALID_P (REGNO (x))) | |
5732 { | |
5733 struct qty_table_elem *ent = &qty_table[i]; | |
5734 | |
5735 if (ent->const_rtx != NULL_RTX | |
5736 && (CONSTANT_P (ent->const_rtx) | |
5737 || REG_P (ent->const_rtx))) | |
5738 { | |
5739 rtx new_rtx = gen_lowpart (GET_MODE (x), ent->const_rtx); | |
5740 if (new_rtx) | |
5741 return copy_rtx (new_rtx); | |
5742 } | |
5743 } | |
5744 | |
5745 /* Otherwise, canonicalize this register. */ | |
5746 return canon_reg (x, NULL_RTX); | |
5747 | |
5748 default: | |
5749 break; | |
5750 } | |
5751 | |
5752 for (i = 0; i < GET_RTX_LENGTH (code); i++) | |
5753 if (fmt[i] == 'e') | |
5754 validate_change (object, &XEXP (x, i), | |
5755 cse_process_notes (XEXP (x, i), object, changed), 0); | |
5756 | |
5757 return x; | |
5758 } | |
5759 | |
5760 static rtx | |
5761 cse_process_notes (rtx x, rtx object, bool *changed) | |
5762 { | |
5763 rtx new_rtx = cse_process_notes_1 (x, object, changed); | |
5764 if (new_rtx != x) | |
5765 *changed = true; | |
5766 return new_rtx; | |
5767 } | |
5768 | |
5769 | |
5770 /* Find a path in the CFG, starting with FIRST_BB to perform CSE on. | |
5771 | |
5772 DATA is a pointer to a struct cse_basic_block_data, which is used to | |
5773 describe the path. | |
5774 It is filled with a queue of basic blocks, starting with FIRST_BB | |
5775 and following a trace through the CFG. | |
5776 | |
5777 If all paths starting at FIRST_BB have been followed, or no new path | |
5778 starting at FIRST_BB can be constructed, this function returns FALSE. | |
5779 Otherwise, DATA->path is filled and the function returns TRUE indicating | |
5780 that a path to follow was found. | |
5781 | |
5782 If FOLLOW_JUMPS is false, the maximum path length is 1 and the only | |
5783 block in the path will be FIRST_BB. */ | |
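/* (cse_main below calls this function repeatedly for each starting block,
   processing each returned path with cse_extended_basic_block, until no
   further path starting at FIRST_BB can be found.)  */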
5784 | |
5785 static bool | |
5786 cse_find_path (basic_block first_bb, struct cse_basic_block_data *data, | |
5787 int follow_jumps) | |
5788 { | |
5789 basic_block bb; | |
5790 edge e; | |
5791 int path_size; | |
5792 | |
5793 SET_BIT (cse_visited_basic_blocks, first_bb->index); | |
5794 | |
5795 /* See if there is a previous path. */ | |
5796 path_size = data->path_size; | |
5797 | |
5798 /* There is a previous path. Make sure it started with FIRST_BB. */ | |
5799 if (path_size) | |
5800 gcc_assert (data->path[0].bb == first_bb); | |
5801 | |
5802 /* There was only one basic block in the last path. Clear the path and | |
5803 return, so that paths starting at another basic block can be tried. */ | |
5804 if (path_size == 1) | |
5805 { | |
5806 path_size = 0; | |
5807 goto done; | |
5808 } | |
5809 | |
5810 /* If the path was empty from the beginning, construct a new path. */ | |
5811 if (path_size == 0) | |
5812 data->path[path_size++].bb = first_bb; | |
5813 else | |
5814 { | |
5815 /* Otherwise, path_size must be equal to or greater than 2, because | |
5816 a previous path exists that is at least two basic blocks long. | |
5817 | |
5818 Update the previous branch path, if any. If the last branch was | |
5819 previously along the branch edge, take the fallthrough edge now. */ | |
5820 while (path_size >= 2) | |
5821 { | |
5822 basic_block last_bb_in_path, previous_bb_in_path; | |
5823 edge e; | |
5824 | |
5825 --path_size; | |
5826 last_bb_in_path = data->path[path_size].bb; | |
5827 previous_bb_in_path = data->path[path_size - 1].bb; | |
5828 | |
5829 /* If we previously followed a path along the branch edge, try | |
5830 the fallthru edge now. */ | |
5831 if (EDGE_COUNT (previous_bb_in_path->succs) == 2 | |
5832 && any_condjump_p (BB_END (previous_bb_in_path)) | |
5833 && (e = find_edge (previous_bb_in_path, last_bb_in_path)) | |
5834 && e == BRANCH_EDGE (previous_bb_in_path)) | |
5835 { | |
5836 bb = FALLTHRU_EDGE (previous_bb_in_path)->dest; | |
5837 if (bb != EXIT_BLOCK_PTR | |
5838 && single_pred_p (bb) | |
5839 /* We used to assert here that we would only see blocks | |
5840 that we have not visited yet. But we may end up | |
5841 visiting basic blocks twice if the CFG has changed | |
5842 in this run of cse_main, because when the CFG changes | |
5843 the topological sort of the CFG also changes. A basic | |
5844 block that previously had more than two predecessors | |
5845 may now have a single predecessor, and become part of | |
5846 a path that starts at another basic block. | |
5847 | |
5848 We still want to visit each basic block only once, so | |
5849 halt the path here if we have already visited BB. */ | |
5850 && !TEST_BIT (cse_visited_basic_blocks, bb->index)) | |
5851 { | |
5852 SET_BIT (cse_visited_basic_blocks, bb->index); | |
5853 data->path[path_size++].bb = bb; | |
5854 break; | |
5855 } | |
5856 } | |
5857 | |
5858 data->path[path_size].bb = NULL; | |
5859 } | |
5860 | |
5861 /* If only one block remains in the path, bail. */ | |
5862 if (path_size == 1) | |
5863 { | |
5864 path_size = 0; | |
5865 goto done; | |
5866 } | |
5867 } | |
5868 | |
5869 /* Extend the path if possible. */ | |
5870 if (follow_jumps) | |
5871 { | |
5872 bb = data->path[path_size - 1].bb; | |
5873 while (bb && path_size < PARAM_VALUE (PARAM_MAX_CSE_PATH_LENGTH)) | |
5874 { | |
5875 if (single_succ_p (bb)) | |
5876 e = single_succ_edge (bb); | |
5877 else if (EDGE_COUNT (bb->succs) == 2 | |
5878 && any_condjump_p (BB_END (bb))) | |
5879 { | |
5880 /* First try to follow the branch. If that doesn't lead | |
5881 to a useful path, follow the fallthru edge. */ | |
5882 e = BRANCH_EDGE (bb); | |
5883 if (!single_pred_p (e->dest)) | |
5884 e = FALLTHRU_EDGE (bb); | |
5885 } | |
5886 else | |
5887 e = NULL; | |
5888 | |
5889 if (e && e->dest != EXIT_BLOCK_PTR | |
5890 && single_pred_p (e->dest) | |
5891 /* Avoid visiting basic blocks twice. The large comment | |
5892 above explains why this can happen. */ | |
5893 && !TEST_BIT (cse_visited_basic_blocks, e->dest->index)) | |
5894 { | |
5895 basic_block bb2 = e->dest; | |
5896 SET_BIT (cse_visited_basic_blocks, bb2->index); | |
5897 data->path[path_size++].bb = bb2; | |
5898 bb = bb2; | |
5899 } | |
5900 else | |
5901 bb = NULL; | |
5902 } | |
5903 } | |
5904 | |
5905 done: | |
5906 data->path_size = path_size; | |
5907 return path_size != 0; | |
5908 } | |
5909 | |
5910 /* Dump the path in DATA to file F. NSETS is the number of sets | |
5911 in the path. */ | |
5912 | |
5913 static void | |
5914 cse_dump_path (struct cse_basic_block_data *data, int nsets, FILE *f) | |
5915 { | |
5916 int path_entry; | |
5917 | |
5918 fprintf (f, ";; Following path with %d sets: ", nsets); | |
5919 for (path_entry = 0; path_entry < data->path_size; path_entry++) | |
5920 fprintf (f, "%d ", (data->path[path_entry].bb)->index); | |
5921 fputc ('\n', f); | |
5922 fflush (f); | |
5923 } | |
5924 | |
5925 | |
5926 /* Return true if BB has exception handling successor edges. */ | |
5927 | |
5928 static bool | |
5929 have_eh_succ_edges (basic_block bb) | |
5930 { | |
5931 edge e; | |
5932 edge_iterator ei; | |
5933 | |
5934 FOR_EACH_EDGE (e, ei, bb->succs) | |
5935 if (e->flags & EDGE_EH) | |
5936 return true; | |
5937 | |
5938 return false; | |
5939 } | |
5940 | |
5941 | |
5942 /* Scan to the end of the path described by DATA. Record in DATA->nsets | |
5943 an estimate of the total number of SETs of all insns in the path. */ | |
5944 | |
5945 static void | |
5946 cse_prescan_path (struct cse_basic_block_data *data) | |
5947 { | |
5948 int nsets = 0; | |
5949 int path_size = data->path_size; | |
5950 int path_entry; | |
5951 | |
5952 /* Scan to end of each basic block in the path. */ | |
5953 for (path_entry = 0; path_entry < path_size; path_entry++) | |
5954 { | |
5955 basic_block bb; | |
5956 rtx insn; | |
5957 | |
5958 bb = data->path[path_entry].bb; | |
5959 | |
5960 FOR_BB_INSNS (bb, insn) | |
5961 { | |
5962 if (!INSN_P (insn)) | |
5963 continue; | |
5964 | |
5965 /* A PARALLEL can have lots of SETs in it, | |
5966 especially if it is really an ASM_OPERANDS. */ | |
5967 if (GET_CODE (PATTERN (insn)) == PARALLEL) | |
5968 nsets += XVECLEN (PATTERN (insn), 0); | |
5969 else | |
5970 nsets += 1; | |
5971 } | |
5972 } | |
5973 | |
5974 data->nsets = nsets; | |
5975 } | |
5976 | |
5977 /* Process a single extended basic block described by EBB_DATA. */ | |
5978 | |
5979 static void | |
5980 cse_extended_basic_block (struct cse_basic_block_data *ebb_data) | |
5981 { | |
5982 int path_size = ebb_data->path_size; | |
5983 int path_entry; | |
5984 int num_insns = 0; | |
5985 | |
5986 /* Allocate the space needed by qty_table. */ | |
5987 qty_table = XNEWVEC (struct qty_table_elem, max_qty); | |
5988 | |
5989 new_basic_block (); | |
5990 cse_ebb_live_in = df_get_live_in (ebb_data->path[0].bb); | |
5991 cse_ebb_live_out = df_get_live_out (ebb_data->path[path_size - 1].bb); | |
5992 for (path_entry = 0; path_entry < path_size; path_entry++) | |
5993 { | |
5994 basic_block bb; | |
5995 rtx insn; | |
5996 | |
5997 bb = ebb_data->path[path_entry].bb; | |
5998 | |
5999 /* Invalidate recorded information for eh regs if there is an EH | |
6000 edge pointing to that bb. */ | |
6001 if (bb_has_eh_pred (bb)) | |
6002 { | |
6003 df_ref *def_rec; | |
6004 | |
6005 for (def_rec = df_get_artificial_defs (bb->index); *def_rec; def_rec++) | |
6006 { | |
6007 df_ref def = *def_rec; | |
6008 if (DF_REF_FLAGS (def) & DF_REF_AT_TOP) | |
6009 invalidate (DF_REF_REG (def), GET_MODE (DF_REF_REG (def))); | |
6010 } | |
6011 } | |
6012 | |
6013 FOR_BB_INSNS (bb, insn) | |
6014 { | |
6015 optimize_this_for_speed_p = optimize_bb_for_speed_p (bb); | |
6016 /* If we have processed PARAM_MAX_CSE_INSNS insns (1,000 by default), | |
6017 flush the hash table to avoid extreme quadratic behavior. We must | |
6018 not include NOTEs in the count since there may be more of them when generating | |
6019 debugging information. If we clear the table at different | |
6020 times, code generated with -g -O might be different from code | |
6021 generated with -O but not -g. | |
6022 | |
6023 FIXME: This is a real kludge and needs to be done some other | |
6024 way. */ | |
6025 if (INSN_P (insn) | |
6026 && num_insns++ > PARAM_VALUE (PARAM_MAX_CSE_INSNS)) | |
6027 { | |
6028 flush_hash_table (); | |
6029 num_insns = 0; | |
6030 } | |
6031 | |
6032 if (INSN_P (insn)) | |
6033 { | |
6034 /* Process notes first so we have all notes in canonical forms | |
6035 when looking for duplicate operations. */ | |
6036 if (REG_NOTES (insn)) | |
6037 { | |
6038 bool changed = false; | |
6039 REG_NOTES (insn) = cse_process_notes (REG_NOTES (insn), | |
6040 NULL_RTX, &changed); | |
6041 if (changed) | |
6042 df_notes_rescan (insn); | |
6043 } | |
6044 | |
6045 cse_insn (insn); | |
6046 | |
6047 /* If we haven't already found an insn where we added a LABEL_REF, | |
6048 check this one. */ | |
6049 if (INSN_P (insn) && !recorded_label_ref | |
6050 && for_each_rtx (&PATTERN (insn), check_for_label_ref, | |
6051 (void *) insn)) | |
6052 recorded_label_ref = true; | |
6053 | |
6054 #ifdef HAVE_cc0 | |
6055 /* If the previous insn set CC0 and this insn no longer | |
6056 references CC0, delete the previous insn. Here we use the | |
6057 fact that nothing expects CC0 to be valid over an insn, | |
6058 which is true until the final pass. */ | |
6059 { | |
6060 rtx prev_insn, tem; | |
6061 | |
6062 prev_insn = PREV_INSN (insn); | |
6063 if (prev_insn && NONJUMP_INSN_P (prev_insn) | |
6064 && (tem = single_set (prev_insn)) != 0 | |
6065 && SET_DEST (tem) == cc0_rtx | |
6066 && ! reg_mentioned_p (cc0_rtx, PATTERN (insn))) | |
6067 delete_insn (prev_insn); | |
6068 } | |
6069 | |
6070 /* If this insn is not the last insn in the basic block, | |
6071 it will be PREV_INSN(insn) in the next iteration. If | |
6072 we recorded any CC0-related information for this insn, | |
6073 remember it. */ | |
6074 if (insn != BB_END (bb)) | |
6075 { | |
6076 prev_insn_cc0 = this_insn_cc0; | |
6077 prev_insn_cc0_mode = this_insn_cc0_mode; | |
6078 } | |
6079 #endif | |
6080 } | |
6081 } | |
6082 | |
6083 /* With non-call exceptions, we are not always able to update | |
6084 the CFG properly inside cse_insn. So clean up possibly | |
6085 redundant EH edges here. */ | |
6086 if (flag_non_call_exceptions && have_eh_succ_edges (bb)) | |
6087 cse_cfg_altered |= purge_dead_edges (bb); | |
6088 | |
6089 /* If we changed a conditional jump, we may have terminated | |
6090 the path we are following. Check that by verifying that | |
6091 the edge we would take still exists. If the edge does | |
6092 not exist anymore, purge the remainder of the path. | |
6093 Note that this will cause us to return to the caller. */ | |
6094 if (path_entry < path_size - 1) | |
6095 { | |
6096 basic_block next_bb = ebb_data->path[path_entry + 1].bb; | |
6097 if (!find_edge (bb, next_bb)) | |
6098 { | |
6099 do | |
6100 { | |
6101 path_size--; | |
6102 | |
6103 /* If we truncate the path, we must also reset the | |
6104 visited bit on the remaining blocks in the path, | |
6105 or we will never visit them at all. */ | |
6106 RESET_BIT (cse_visited_basic_blocks, | |
6107 ebb_data->path[path_size].bb->index); | |
6108 ebb_data->path[path_size].bb = NULL; | |
6109 } | |
6110 while (path_size - 1 != path_entry); | |
6111 ebb_data->path_size = path_size; | |
6112 } | |
6113 } | |
6114 | |
6115 /* If this is a conditional jump insn, record any known | |
6116 equivalences due to the condition being tested. */ | |
6117 insn = BB_END (bb); | |
6118 if (path_entry < path_size - 1 | |
6119 && JUMP_P (insn) | |
6120 && single_set (insn) | |
6121 && any_condjump_p (insn)) | |
6122 { | |
6123 basic_block next_bb = ebb_data->path[path_entry + 1].bb; | |
6124 bool taken = (next_bb == BRANCH_EDGE (bb)->dest); | |
6125 record_jump_equiv (insn, taken); | |
6126 } | |
6127 | |
6128 #ifdef HAVE_cc0 | |
6129 /* Clear the recorded CC0 value; it can't provide | |
6130 useful information across basic block boundaries. */ | |
6131 prev_insn_cc0 = 0; | |
6132 #endif | |
6133 } | |
6134 | |
6135 gcc_assert (next_qty <= max_qty); | |
6136 | |
6137 free (qty_table); | |
6138 } | |
6139 | |
6140 | |
6141 /* Perform cse on the instructions of a function. | |
6142 F is the first instruction. | |
6143 NREGS is one plus the highest pseudo-reg number used in the function. | |
6144 | |
6145 Return 2 if jump optimizations should be redone due to simplifications | |
6146 in conditional jump instructions. | |
6147 Return 1 if the CFG should be cleaned up because it has been modified. | |
6148 Return 0 otherwise. */ | |
6149 | |
6150 int | |
6151 cse_main (rtx f ATTRIBUTE_UNUSED, int nregs) | |
6152 { | |
6153 struct cse_basic_block_data ebb_data; | |
6154 basic_block bb; | |
6155 int *rc_order = XNEWVEC (int, last_basic_block); | |
6156 int i, n_blocks; | |
6157 | |
6158 df_set_flags (DF_LR_RUN_DCE); | |
6159 df_analyze (); | |
6160 df_set_flags (DF_DEFER_INSN_RESCAN); | |
6161 | |
6162 reg_scan (get_insns (), max_reg_num ()); | |
6163 init_cse_reg_info (nregs); | |
6164 | |
6165 ebb_data.path = XNEWVEC (struct branch_path, | |
6166 PARAM_VALUE (PARAM_MAX_CSE_PATH_LENGTH)); | |
6167 | |
6168 cse_cfg_altered = false; | |
6169 cse_jumps_altered = false; | |
6170 recorded_label_ref = false; | |
6171 constant_pool_entries_cost = 0; | |
6172 constant_pool_entries_regcost = 0; | |
6173 ebb_data.path_size = 0; | |
6174 ebb_data.nsets = 0; | |
6175 rtl_hooks = cse_rtl_hooks; | |
6176 | |
6177 init_recog (); | |
6178 init_alias_analysis (); | |
6179 | |
6180 reg_eqv_table = XNEWVEC (struct reg_eqv_elem, nregs); | |
6181 | |
6182 /* Set up the table of already visited basic blocks. */ | |
6183 cse_visited_basic_blocks = sbitmap_alloc (last_basic_block); | |
6184 sbitmap_zero (cse_visited_basic_blocks); | |
6185 | |
6186 /* Loop over basic blocks in reverse completion order (RPO), | |
6187 excluding the ENTRY and EXIT blocks. */ | |
6188 n_blocks = pre_and_rev_post_order_compute (NULL, rc_order, false); | |
6189 i = 0; | |
6190 while (i < n_blocks) | |
6191 { | |
6192 /* Find the first block in the RPO queue that we have not yet | |
6193 processed. */ | |
6194 do | |
6195 { | |
6196 bb = BASIC_BLOCK (rc_order[i++]); | |
6197 } | |
6198 while (TEST_BIT (cse_visited_basic_blocks, bb->index) | |
6199 && i < n_blocks); | |
6200 | |
6201 /* Find all paths starting with BB, and process them. */ | |
6202 while (cse_find_path (bb, &ebb_data, flag_cse_follow_jumps)) | |
6203 { | |
6204 /* Pre-scan the path. */ | |
6205 cse_prescan_path (&ebb_data); | |
6206 | |
6207 /* If this path has no sets, skip it. */ | |
6208 if (ebb_data.nsets == 0) | |
6209 continue; | |
6210 | |
6211 /* Get a reasonable estimate for the maximum number of qty's | |
6212 needed for this path. For this, we take the number of sets | |
6213 and multiply that by MAX_RECOG_OPERANDS. */ | |
6214 max_qty = ebb_data.nsets * MAX_RECOG_OPERANDS; | |
6215 | |
6216 /* Dump the path we're about to process. */ | |
6217 if (dump_file) | |
6218 cse_dump_path (&ebb_data, ebb_data.nsets, dump_file); | |
6219 | |
6220 cse_extended_basic_block (&ebb_data); | |
6221 } | |
6222 } | |
6223 | |
6224 /* Clean up. */ | |
6225 end_alias_analysis (); | |
6226 free (reg_eqv_table); | |
6227 free (ebb_data.path); | |
6228 sbitmap_free (cse_visited_basic_blocks); | |
6229 free (rc_order); | |
6230 rtl_hooks = general_rtl_hooks; | |
6231 | |
6232 if (cse_jumps_altered || recorded_label_ref) | |
6233 return 2; | |
6234 else if (cse_cfg_altered) | |
6235 return 1; | |
6236 else | |
6237 return 0; | |
6238 } | |
6239 | |
6240 /* Called via for_each_rtx to see if an insn is using a LABEL_REF for | |
6241 which there isn't a REG_LABEL_OPERAND note. | |
6242 Return one if so. DATA is the insn. */ | |
6243 | |
6244 static int | |
6245 check_for_label_ref (rtx *rtl, void *data) | |
6246 { | |
6247 rtx insn = (rtx) data; | |
6248 | |
6249 /* If this insn uses a LABEL_REF and there isn't a REG_LABEL_OPERAND | |
6250 note for it, we must rerun jump since it needs to place the note. If | |
6251 this is a LABEL_REF for a CODE_LABEL that isn't in the insn chain, | |
6252 don't do this since no REG_LABEL_OPERAND will be added. */ | |
6253 return (GET_CODE (*rtl) == LABEL_REF | |
6254 && ! LABEL_REF_NONLOCAL_P (*rtl) | |
6255 && (!JUMP_P (insn) | |
6256 || !label_is_jump_target_p (XEXP (*rtl, 0), insn)) | |
6257 && LABEL_P (XEXP (*rtl, 0)) | |
6258 && INSN_UID (XEXP (*rtl, 0)) != 0 | |
6259 && ! find_reg_note (insn, REG_LABEL_OPERAND, XEXP (*rtl, 0))); | |
6260 } | |
6261 | |
6262 /* Count the number of times registers are used (not set) in X. | |
6263 COUNTS is an array in which we accumulate the count, INCR is how much | |
6264 we count each register usage. | |
6265 | |
6266 Don't count a usage of DEST, which is the SET_DEST of a SET which | |
6267 contains X in its SET_SRC. This is because such a SET does not | |
6268 modify the liveness of DEST. | |
6269 DEST is set to pc_rtx for a trapping insn, which means that we must count | |
6270 uses of a SET_DEST regardless because the insn can't be deleted here. */ | |
6271 | |
6272 static void | |
6273 count_reg_usage (rtx x, int *counts, rtx dest, int incr) | |
6274 { | |
6275 enum rtx_code code; | |
6276 rtx note; | |
6277 const char *fmt; | |
6278 int i, j; | |
6279 | |
6280 if (x == 0) | |
6281 return; | |
6282 | |
6283 switch (code = GET_CODE (x)) | |
6284 { | |
6285 case REG: | |
6286 if (x != dest) | |
6287 counts[REGNO (x)] += incr; | |
6288 return; | |
6289 | |
6290 case PC: | |
6291 case CC0: | |
6292 case CONST: | |
6293 case CONST_INT: | |
6294 case CONST_DOUBLE: | |
6295 case CONST_FIXED: | |
6296 case CONST_VECTOR: | |
6297 case SYMBOL_REF: | |
6298 case LABEL_REF: | |
6299 return; | |
6300 | |
6301 case CLOBBER: | |
6302 /* If we are clobbering a MEM, mark any registers inside the address | |
6303 as being used. */ | |
6304 if (MEM_P (XEXP (x, 0))) | |
6305 count_reg_usage (XEXP (XEXP (x, 0), 0), counts, NULL_RTX, incr); | |
6306 return; | |
6307 | |
6308 case SET: | |
6309 /* Unless we are setting a REG, count everything in SET_DEST. */ | |
6310 if (!REG_P (SET_DEST (x))) | |
6311 count_reg_usage (SET_DEST (x), counts, NULL_RTX, incr); | |
6312 count_reg_usage (SET_SRC (x), counts, | |
6313 dest ? dest : SET_DEST (x), | |
6314 incr); | |
6315 return; | |
6316 | |
6317 case CALL_INSN: | |
6318 case INSN: | |
6319 case JUMP_INSN: | |
6320 /* We expect dest to be NULL_RTX here. If the insn may trap, mark | |
6321 this fact by setting DEST to pc_rtx. */ | |
6322 if (flag_non_call_exceptions && may_trap_p (PATTERN (x))) | |
6323 dest = pc_rtx; | |
6324 if (code == CALL_INSN) | |
6325 count_reg_usage (CALL_INSN_FUNCTION_USAGE (x), counts, dest, incr); | |
6326 count_reg_usage (PATTERN (x), counts, dest, incr); | |
6327 | |
6328 /* Things used in a REG_EQUAL note aren't dead since the loop | |
6329 optimizer may try to use them. */ | |
6330 | |
6331 note = find_reg_equal_equiv_note (x); | |
6332 if (note) | |
6333 { | |
6334 rtx eqv = XEXP (note, 0); | |
6335 | |
6336 if (GET_CODE (eqv) == EXPR_LIST) | |
6337 /* This REG_EQUAL note describes the result of a function call. | |
6338 Process all the arguments. */ | |
6339 do | |
6340 { | |
6341 count_reg_usage (XEXP (eqv, 0), counts, dest, incr); | |
6342 eqv = XEXP (eqv, 1); | |
6343 } | |
6344 while (eqv && GET_CODE (eqv) == EXPR_LIST); | |
6345 else | |
6346 count_reg_usage (eqv, counts, dest, incr); | |
6347 } | |
6348 return; | |
6349 | |
6350 case EXPR_LIST: | |
6351 if (REG_NOTE_KIND (x) == REG_EQUAL | |
6352 || (REG_NOTE_KIND (x) != REG_NONNEG && GET_CODE (XEXP (x, 0)) == USE) | |
6353 /* FUNCTION_USAGE expression lists may include (CLOBBER (mem /u)), | |
6354 involving registers in the address. */ | |
6355 || GET_CODE (XEXP (x, 0)) == CLOBBER) | |
6356 count_reg_usage (XEXP (x, 0), counts, NULL_RTX, incr); | |
6357 | |
6358 count_reg_usage (XEXP (x, 1), counts, NULL_RTX, incr); | |
6359 return; | |
6360 | |
6361 case ASM_OPERANDS: | |
6362 /* If the asm is volatile, then this insn cannot be deleted, | |
6363 and so the inputs *must* be live. */ | |
6364 if (MEM_VOLATILE_P (x)) | |
6365 dest = NULL_RTX; | |
6366 /* Iterate over just the inputs, not the constraints as well. */ | |
6367 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--) | |
6368 count_reg_usage (ASM_OPERANDS_INPUT (x, i), counts, dest, incr); | |
6369 return; | |
6370 | |
6371 case INSN_LIST: | |
6372 gcc_unreachable (); | |
6373 | |
6374 default: | |
6375 break; | |
6376 } | |
6377 | |
6378 fmt = GET_RTX_FORMAT (code); | |
6379 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
6380 { | |
6381 if (fmt[i] == 'e') | |
6382 count_reg_usage (XEXP (x, i), counts, dest, incr); | |
6383 else if (fmt[i] == 'E') | |
6384 for (j = XVECLEN (x, i) - 1; j >= 0; j--) | |
6385 count_reg_usage (XVECEXP (x, i, j), counts, dest, incr); | |
6386 } | |
6387 } | |
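
/* A worked example of the DEST exclusion documented above.  For a
   self-referencing set such as (set (reg 100) (plus (reg 100) (reg 101))),
   only the use of (reg 101) is counted: the occurrence of (reg 100) inside
   SET_SRC is the same rtx object as the SET_DEST (as it would be for real
   pseudos, which are shared via regno_reg_rtx), so the pointer test against
   DEST excludes it.  The sketch assumes the three-argument gen_rtx_SET macro
   of this RTL API vintage and is illustrative only.  */
#if 0
  int counts[128] = { 0 };
  rtx r100 = gen_rtx_REG (SImode, 100);
  rtx r101 = gen_rtx_REG (SImode, 101);
  rtx set = gen_rtx_SET (VOIDmode, r100,
			 gen_rtx_PLUS (SImode, r100, r101));

  count_reg_usage (set, counts, NULL_RTX, 1);
  /* Now counts[100] == 0 and counts[101] == 1.  */
#endif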
6388 | |
6389 /* Return true if SET is live. */ | |
6390 static bool | |
6391 set_live_p (rtx set, rtx insn ATTRIBUTE_UNUSED, /* Only used with HAVE_cc0. */ | |
6392 int *counts) | |
6393 { | |
6394 #ifdef HAVE_cc0 | |
6395 rtx tem; | |
6396 #endif | |
6397 | |
6398 if (set_noop_p (set)) | |
6399 ; | |
6400 | |
6401 #ifdef HAVE_cc0 | |
6402 else if (GET_CODE (SET_DEST (set)) == CC0 | |
6403 && !side_effects_p (SET_SRC (set)) | |
6404 && ((tem = next_nonnote_insn (insn)) == 0 | |
6405 || !INSN_P (tem) | |
6406 || !reg_referenced_p (cc0_rtx, PATTERN (tem)))) | |
6407 return false; | |
6408 #endif | |
6409 else if (!REG_P (SET_DEST (set)) | |
6410 || REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER | |
6411 || counts[REGNO (SET_DEST (set))] != 0 | |
6412 || side_effects_p (SET_SRC (set))) | |
6413 return true; | |
6414 return false; | |
6415 } | |
6416 | |
6417 /* Return true if INSN is live. */ | |
6418 | |
6419 static bool | |
6420 insn_live_p (rtx insn, int *counts) | |
6421 { | |
6422 int i; | |
6423 if (flag_non_call_exceptions && may_trap_p (PATTERN (insn))) | |
6424 return true; | |
6425 else if (GET_CODE (PATTERN (insn)) == SET) | |
6426 return set_live_p (PATTERN (insn), insn, counts); | |
6427 else if (GET_CODE (PATTERN (insn)) == PARALLEL) | |
6428 { | |
6429 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--) | |
6430 { | |
6431 rtx elt = XVECEXP (PATTERN (insn), 0, i); | |
6432 | |
6433 if (GET_CODE (elt) == SET) | |
6434 { | |
6435 if (set_live_p (elt, insn, counts)) | |
6436 return true; | |
6437 } | |
6438 else if (GET_CODE (elt) != CLOBBER && GET_CODE (elt) != USE) | |
6439 return true; | |
6440 } | |
6441 return false; | |
6442 } | |
6443 else | |
6444 return true; | |
6445 } | |
6446 | |
6447 /* Scan all the insns and delete any that are dead; i.e., they store a register | |
6448 that is never used or they copy a register to itself. | |
6449 | |
6450 This is used to remove insns made obviously dead by cse, the loop | |
6451 optimizer or other optimizations. It improves the heuristics in the | |
6452 loop optimizer, since it won't try to move dead invariants out of loops | |
6453 or create givs for dead quantities. The remaining passes are also sped up. */ | |
6454 | |
6455 int | |
6456 delete_trivially_dead_insns (rtx insns, int nreg) | |
6457 { | |
6458 int *counts; | |
6459 rtx insn, prev; | |
6460 int ndead = 0; | |
6461 | |
6462 timevar_push (TV_DELETE_TRIVIALLY_DEAD); | |
6463 /* First count the number of times each register is used. */ | |
6464 counts = XCNEWVEC (int, nreg); | |
6465 for (insn = insns; insn; insn = NEXT_INSN (insn)) | |
6466 if (INSN_P (insn)) | |
6467 count_reg_usage (insn, counts, NULL_RTX, 1); | |
6468 | |
6469 /* Go from the last insn to the first and delete insns that only set unused | |
6470 registers or copy a register to itself. As we delete an insn, remove | |
6471 usage counts for registers it uses. | |
6472 | |
6473 The first jump optimization pass may leave a real insn as the last | |
6474 insn in the function. We must not skip that insn or we may end | |
6475 up deleting code that is not really dead. */ | |
6476 for (insn = get_last_insn (); insn; insn = prev) | |
6477 { | |
6478 int live_insn = 0; | |
6479 | |
6480 prev = PREV_INSN (insn); | |
6481 if (!INSN_P (insn)) | |
6482 continue; | |
6483 | |
6484 live_insn = insn_live_p (insn, counts); | |
6485 | |
6486 /* If this is a dead insn, delete it and remove the usage counts | |
6487 of the registers it uses. */ | |
6488 | |
6489 if (! live_insn && dbg_cnt (delete_trivial_dead)) | |
6490 { | |
6491 count_reg_usage (insn, counts, NULL_RTX, -1); | |
6492 delete_insn_and_edges (insn); | |
6493 ndead++; | |
6494 } | |
6495 } | |
6496 | |
6497 if (dump_file && ndead) | |
6498 fprintf (dump_file, "Deleted %i trivially dead insns\n", | |
6499 ndead); | |
6500 /* Clean up. */ | |
6501 free (counts); | |
6502 timevar_pop (TV_DELETE_TRIVIALLY_DEAD); | |
6503 return ndead; | |
6504 } | |
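
/* Worked example (illustrative only) of why the walk above goes backward:
   given

       insn 1:  (set (reg 200) (const_int 4))
       insn 2:  (set (reg 201) (reg 200))

   where (reg 201) has no further uses, insn 2 is visited first and deleted,
   which drops the usage count of (reg 200) to zero, so insn 1 is deleted on
   the same pass.  A single forward walk would have passed insn 1 while its
   count was still nonzero and missed it.  */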
6505 | |
6506 /* This function is called via for_each_rtx. The argument, NEWREG, is | |
6507 a condition code register with the desired mode. If we are looking | |
6508 at the same register in a different mode, replace it with | |
6509 NEWREG. */ | |
6510 | |
6511 static int | |
6512 cse_change_cc_mode (rtx *loc, void *data) | |
6513 { | |
6514 struct change_cc_mode_args* args = (struct change_cc_mode_args*)data; | |
6515 | |
6516 if (*loc | |
6517 && REG_P (*loc) | |
6518 && REGNO (*loc) == REGNO (args->newreg) | |
6519 && GET_MODE (*loc) != GET_MODE (args->newreg)) | |
6520 { | |
6521 validate_change (args->insn, loc, args->newreg, 1); | |
6522 | |
6523 return -1; | |
6524 } | |
6525 return 0; | |
6526 } | |
6527 | |
6528 /* Change the mode of any reference to the register REGNO (NEWREG) to | |
6529 GET_MODE (NEWREG) in INSN. */ | |
6530 | |
6531 static void | |
6532 cse_change_cc_mode_insn (rtx insn, rtx newreg) | |
6533 { | |
6534 struct change_cc_mode_args args; | |
6535 int success; | |
6536 | |
6537 if (!INSN_P (insn)) | |
6538 return; | |
6539 | |
6540 args.insn = insn; | |
6541 args.newreg = newreg; | |
6542 | |
6543 for_each_rtx (&PATTERN (insn), cse_change_cc_mode, &args); | |
6544 for_each_rtx (®_NOTES (insn), cse_change_cc_mode, &args); | |
6545 | |
6546 /* If the following assertion is triggered, there is most probably | |
6547 something wrong with the cc_modes_compatible back end function. | |
6548 CC modes can be considered compatible only if the insn, with its mode | |
6549 replaced by any of the compatible modes, can still be recognized. */ | |
6550 success = apply_change_group (); | |
6551 gcc_assert (success); | |
6552 } | |
6553 | |
6554 /* Change the mode of any reference to the register REGNO (NEWREG) to | |
6555 GET_MODE (NEWREG), starting at START. Stop before END. Stop at | |
6556 any instruction which modifies NEWREG. */ | |
6557 | |
6558 static void | |
6559 cse_change_cc_mode_insns (rtx start, rtx end, rtx newreg) | |
6560 { | |
6561 rtx insn; | |
6562 | |
6563 for (insn = start; insn != end; insn = NEXT_INSN (insn)) | |
6564 { | |
6565 if (! INSN_P (insn)) | |
6566 continue; | |
6567 | |
6568 if (reg_set_p (newreg, insn)) | |
6569 return; | |
6570 | |
6571 cse_change_cc_mode_insn (insn, newreg); | |
6572 } | |
6573 } | |
6574 | |
6575 /* BB is a basic block which finishes with CC_REG as a condition code | |
6576 register which is set to CC_SRC. Look through the successors of BB | |
6577 to find blocks which have a single predecessor (i.e., this one), | |
6578 and look through those blocks for an assignment to CC_REG which is | |
6579 equivalent to CC_SRC. CAN_CHANGE_MODE indicates whether we are | |
6580 permitted to change the mode of CC_SRC to a compatible mode. This | |
6581 returns VOIDmode if no equivalent assignments were found. | |
6582 Otherwise it returns the mode which CC_SRC should wind up with. | |
6583 ORIG_BB should be the same as BB in the outermost cse_cc_succs call, | |
6584 but is passed unmodified down to recursive calls in order to prevent | |
6585 endless recursion. | |
6586 | |
6587 The main complexity in this function is handling the mode issues. | |
6588 We may have more than one duplicate which we can eliminate, and we | |
6589 try to find a mode which will work for multiple duplicates. */ | |
6590 | |
6591 static enum machine_mode | |
6592 cse_cc_succs (basic_block bb, basic_block orig_bb, rtx cc_reg, rtx cc_src, | |
6593 bool can_change_mode) | |
6594 { | |
6595 bool found_equiv; | |
6596 enum machine_mode mode; | |
6597 unsigned int insn_count; | |
6598 edge e; | |
6599 rtx insns[2]; | |
6600 enum machine_mode modes[2]; | |
6601 rtx last_insns[2]; | |
6602 unsigned int i; | |
6603 rtx newreg; | |
6604 edge_iterator ei; | |
6605 | |
6606 /* We expect to have two successors. Look at both before picking | |
6607 the final mode for the comparison. If we have more successors | |
6608 (i.e., some sort of table jump, although that seems unlikely), | |
6609 then we require all beyond the first two to use the same | |
6610 mode. */ | |
6611 | |
6612 found_equiv = false; | |
6613 mode = GET_MODE (cc_src); | |
6614 insn_count = 0; | |
6615 FOR_EACH_EDGE (e, ei, bb->succs) | |
6616 { | |
6617 rtx insn; | |
6618 rtx end; | |
6619 | |
6620 if (e->flags & EDGE_COMPLEX) | |
6621 continue; | |
6622 | |
6623 if (EDGE_COUNT (e->dest->preds) != 1 | |
6624 || e->dest == EXIT_BLOCK_PTR | |
6625 /* Avoid endless recursion on unreachable blocks. */ | |
6626 || e->dest == orig_bb) | |
6627 continue; | |
6628 | |
6629 end = NEXT_INSN (BB_END (e->dest)); | |
6630 for (insn = BB_HEAD (e->dest); insn != end; insn = NEXT_INSN (insn)) | |
6631 { | |
6632 rtx set; | |
6633 | |
6634 if (! INSN_P (insn)) | |
6635 continue; | |
6636 | |
6637 /* If CC_SRC is modified, we have to stop looking for | |
6638 something which uses it. */ | |
6639 if (modified_in_p (cc_src, insn)) | |
6640 break; | |
6641 | |
6642 /* Check whether INSN sets CC_REG to CC_SRC. */ | |
6643 set = single_set (insn); | |
6644 if (set | |
6645 && REG_P (SET_DEST (set)) | |
6646 && REGNO (SET_DEST (set)) == REGNO (cc_reg)) | |
6647 { | |
6648 bool found; | |
6649 enum machine_mode set_mode; | |
6650 enum machine_mode comp_mode; | |
6651 | |
6652 found = false; | |
6653 set_mode = GET_MODE (SET_SRC (set)); | |
6654 comp_mode = set_mode; | |
6655 if (rtx_equal_p (cc_src, SET_SRC (set))) | |
6656 found = true; | |
6657 else if (GET_CODE (cc_src) == COMPARE | |
6658 && GET_CODE (SET_SRC (set)) == COMPARE | |
6659 && mode != set_mode | |
6660 && rtx_equal_p (XEXP (cc_src, 0), | |
6661 XEXP (SET_SRC (set), 0)) | |
6662 && rtx_equal_p (XEXP (cc_src, 1), | |
6663 XEXP (SET_SRC (set), 1))) | |
6664 | |
6665 { | |
6666 comp_mode = targetm.cc_modes_compatible (mode, set_mode); | |
6667 if (comp_mode != VOIDmode | |
6668 && (can_change_mode || comp_mode == mode)) | |
6669 found = true; | |
6670 } | |
6671 | |
6672 if (found) | |
6673 { | |
6674 found_equiv = true; | |
6675 if (insn_count < ARRAY_SIZE (insns)) | |
6676 { | |
6677 insns[insn_count] = insn; | |
6678 modes[insn_count] = set_mode; | |
6679 last_insns[insn_count] = end; | |
6680 ++insn_count; | |
6681 | |
6682 if (mode != comp_mode) | |
6683 { | |
6684 gcc_assert (can_change_mode); | |
6685 mode = comp_mode; | |
6686 | |
6687 /* The modified insn will be re-recognized later. */ | |
6688 PUT_MODE (cc_src, mode); | |
6689 } | |
6690 } | |
6691 else | |
6692 { | |
6693 if (set_mode != mode) | |
6694 { | |
6695 /* We found a matching expression in the | |
6696 wrong mode, but we don't have room to | |
6697 store it in the array. Punt. This case | |
6698 should be rare. */ | |
6699 break; | |
6700 } | |
6701 /* INSN sets CC_REG to a value equal to CC_SRC | |
6702 with the right mode. We can simply delete | |
6703 it. */ | |
6704 delete_insn (insn); | |
6705 } | |
6706 | |
6707 /* We found an instruction to delete. Keep looking, | |
6708 in the hopes of finding a three-way jump. */ | |
6709 continue; | |
6710 } | |
6711 | |
6712 /* We found an instruction which sets the condition | |
6713 code, so don't look any farther. */ | |
6714 break; | |
6715 } | |
6716 | |
6717 /* If INSN sets CC_REG in some other way, don't look any | |
6718 farther. */ | |
6719 if (reg_set_p (cc_reg, insn)) | |
6720 break; | |
6721 } | |
6722 | |
6723 /* If we fell off the bottom of the block, we can keep looking | |
6724 through successors. We pass CAN_CHANGE_MODE as false because | |
6725 we aren't prepared to handle compatibility between the | |
6726 further blocks and this block. */ | |
6727 if (insn == end) | |
6728 { | |
6729 enum machine_mode submode; | |
6730 | |
6731 submode = cse_cc_succs (e->dest, orig_bb, cc_reg, cc_src, false); | |
6732 if (submode != VOIDmode) | |
6733 { | |
6734 gcc_assert (submode == mode); | |
6735 found_equiv = true; | |
6736 can_change_mode = false; | |
6737 } | |
6738 } | |
6739 } | |
6740 | |
6741 if (! found_equiv) | |
6742 return VOIDmode; | |
6743 | |
6744 /* Now INSN_COUNT is the number of instructions we found which set | |
6745 CC_REG to a value equivalent to CC_SRC. The instructions are in | |
6746 INSNS. The modes used by those instructions are in MODES. */ | |
6747 | |
6748 newreg = NULL_RTX; | |
6749 for (i = 0; i < insn_count; ++i) | |
6750 { | |
6751 if (modes[i] != mode) | |
6752 { | |
6753 /* We need to change the mode of CC_REG in INSNS[i] and | |
6754 subsequent instructions. */ | |
6755 if (! newreg) | |
6756 { | |
6757 if (GET_MODE (cc_reg) == mode) | |
6758 newreg = cc_reg; | |
6759 else | |
6760 newreg = gen_rtx_REG (mode, REGNO (cc_reg)); | |
6761 } | |
6762 cse_change_cc_mode_insns (NEXT_INSN (insns[i]), last_insns[i], | |
6763 newreg); | |
6764 } | |
6765 | |
6766 delete_insn_and_edges (insns[i]); | |
6767 } | |
6768 | |
6769 return mode; | |
6770 } | |
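
/* Worked example (illustrative only), using i386-style CC modes: if BB
   ends with

       (set (reg:CCZ flags) (compare:CCZ (reg:SI a) (const_int 0)))
       (jump_insn ...)                ;; conditional jump on flags

   and its sole-predecessor successor recomputes

       (set (reg:CCGC flags) (compare:CCGC (reg:SI a) (const_int 0)))

   then the loop above asks targetm.cc_modes_compatible for a mode usable
   by both comparisons, rewrites the surviving comparison (and later uses
   of the flags register) to that mode if needed, and deletes the duplicate
   set in the successor block.  */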
6771 | |
6772 /* If we have a fixed condition code register (or two), walk through | |
6773 the instructions and try to eliminate duplicate assignments. */ | |
6774 | |
6775 static void | |
6776 cse_condition_code_reg (void) | |
6777 { | |
6778 unsigned int cc_regno_1; | |
6779 unsigned int cc_regno_2; | |
6780 rtx cc_reg_1; | |
6781 rtx cc_reg_2; | |
6782 basic_block bb; | |
6783 | |
6784 if (! targetm.fixed_condition_code_regs (&cc_regno_1, &cc_regno_2)) | |
6785 return; | |
6786 | |
6787 cc_reg_1 = gen_rtx_REG (CCmode, cc_regno_1); | |
6788 if (cc_regno_2 != INVALID_REGNUM) | |
6789 cc_reg_2 = gen_rtx_REG (CCmode, cc_regno_2); | |
6790 else | |
6791 cc_reg_2 = NULL_RTX; | |
6792 | |
6793 FOR_EACH_BB (bb) | |
6794 { | |
6795 rtx last_insn; | |
6796 rtx cc_reg; | |
6797 rtx insn; | |
6798 rtx cc_src_insn; | |
6799 rtx cc_src; | |
6800 enum machine_mode mode; | |
6801 enum machine_mode orig_mode; | |
6802 | |
6803 /* Look for blocks which end with a conditional jump based on a | |
6804 condition code register. Then look for the instruction which | |
6805 sets the condition code register. Then look through the | |
6806 successor blocks for instructions which set the condition | |
6807 code register to the same value. There are other possible | |
6808 uses of the condition code register, but these are by far the | |
6809 most common and the ones which we are most likely to be able | |
6810 to optimize. */ | |
6811 | |
6812 last_insn = BB_END (bb); | |
6813 if (!JUMP_P (last_insn)) | |
6814 continue; | |
6815 | |
6816 if (reg_referenced_p (cc_reg_1, PATTERN (last_insn))) | |
6817 cc_reg = cc_reg_1; | |
6818 else if (cc_reg_2 && reg_referenced_p (cc_reg_2, PATTERN (last_insn))) | |
6819 cc_reg = cc_reg_2; | |
6820 else | |
6821 continue; | |
6822 | |
6823 cc_src_insn = NULL_RTX; | |
6824 cc_src = NULL_RTX; | |
6825 for (insn = PREV_INSN (last_insn); | |
6826 insn && insn != PREV_INSN (BB_HEAD (bb)); | |
6827 insn = PREV_INSN (insn)) | |
6828 { | |
6829 rtx set; | |
6830 | |
6831 if (! INSN_P (insn)) | |
6832 continue; | |
6833 set = single_set (insn); | |
6834 if (set | |
6835 && REG_P (SET_DEST (set)) | |
6836 && REGNO (SET_DEST (set)) == REGNO (cc_reg)) | |
6837 { | |
6838 cc_src_insn = insn; | |
6839 cc_src = SET_SRC (set); | |
6840 break; | |
6841 } | |
6842 else if (reg_set_p (cc_reg, insn)) | |
6843 break; | |
6844 } | |
6845 | |
6846 if (! cc_src_insn) | |
6847 continue; | |
6848 | |
6849 if (modified_between_p (cc_src, cc_src_insn, NEXT_INSN (last_insn))) | |
6850 continue; | |
6851 | |
6852 /* Now CC_REG is a condition code register used for a | |
6853 conditional jump at the end of the block, and CC_SRC, in | |
6854 CC_SRC_INSN, is the value to which that condition code | |
6855 register is set, and CC_SRC is still meaningful at the end of | |
6856 the basic block. */ | |
6857 | |
6858 orig_mode = GET_MODE (cc_src); | |
6859 mode = cse_cc_succs (bb, bb, cc_reg, cc_src, true); | |
6860 if (mode != VOIDmode) | |
6861 { | |
6862 gcc_assert (mode == GET_MODE (cc_src)); | |
6863 if (mode != orig_mode) | |
6864 { | |
6865 rtx newreg = gen_rtx_REG (mode, REGNO (cc_reg)); | |
6866 | |
6867 cse_change_cc_mode_insn (cc_src_insn, newreg); | |
6868 | |
6869 /* Do the same in the following insns that use the | |
6870 current value of CC_REG within BB. */ | |
6871 cse_change_cc_mode_insns (NEXT_INSN (cc_src_insn), | |
6872 NEXT_INSN (last_insn), | |
6873 newreg); | |
6874 } | |
6875 } | |
6876 } | |
6877 } | |
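
/* cse_condition_code_reg only runs on targets that implement the
   fixed_condition_code_regs target hook.  A hedged sketch of what such a
   hook looks like for a hypothetical target with a single flags register;
   CC_REGNUM stands in for whatever hard register macro the target uses.  */
#if 0
static bool
example_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
{
  *p1 = CC_REGNUM;		/* The target's flags register.  */
  *p2 = INVALID_REGNUM;		/* No second condition code register.  */
  return true;
}
#endif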
6878 | |
6879 | |
6880 /* Perform common subexpression elimination. Nonzero value from | |
6881 `cse_main' means that jumps were simplified and some code may now | |
6882 be unreachable, so do jump optimization again. */ | |
6883 static bool | |
6884 gate_handle_cse (void) | |
6885 { | |
6886 return optimize > 0; | |
6887 } | |
6888 | |
6889 static unsigned int | |
6890 rest_of_handle_cse (void) | |
6891 { | |
6892 int tem; | |
6893 | |
6894 if (dump_file) | |
6895 dump_flow_info (dump_file, dump_flags); | |
6896 | |
6897 tem = cse_main (get_insns (), max_reg_num ()); | |
6898 | |
6899 /* If we are not running more CSE passes, then we are no longer | |
6900 expecting CSE to be run. But always rerun it in a cheap mode. */ | |
6901 cse_not_expected = !flag_rerun_cse_after_loop && !flag_gcse; | |
6902 | |
6903 if (tem == 2) | |
6904 { | |
6905 timevar_push (TV_JUMP); | |
6906 rebuild_jump_labels (get_insns ()); | |
6907 cleanup_cfg (0); | |
6908 timevar_pop (TV_JUMP); | |
6909 } | |
6910 else if (tem == 1 || optimize > 1) | |
6911 cleanup_cfg (0); | |
6912 | |
6913 return 0; | |
6914 } | |
6915 | |
6916 struct rtl_opt_pass pass_cse = | |
6917 { | |
6918 { | |
6919 RTL_PASS, | |
6920 "cse1", /* name */ | |
6921 gate_handle_cse, /* gate */ | |
6922 rest_of_handle_cse, /* execute */ | |
6923 NULL, /* sub */ | |
6924 NULL, /* next */ | |
6925 0, /* static_pass_number */ | |
6926 TV_CSE, /* tv_id */ | |
6927 0, /* properties_required */ | |
6928 0, /* properties_provided */ | |
6929 0, /* properties_destroyed */ | |
6930 0, /* todo_flags_start */ | |
6931 TODO_df_finish | TODO_verify_rtl_sharing | | |
6932 TODO_dump_func | | |
6933 TODO_ggc_collect | | |
6934 TODO_verify_flow, /* todo_flags_finish */ | |
6935 } | |
6936 }; | |
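
/* The descriptor above only takes effect once it is linked into the pass
   list.  A hedged, abridged sketch of how the CSE passes are chained in
   init_optimization_passes (passes.c) in this vintage of GCC; the exact
   neighboring passes are omitted here.  */
#if 0
  NEXT_PASS (pass_cse);
  /* ... loop and other RTL optimizations ... */
  NEXT_PASS (pass_cse2);
#endif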
6937 | |
6938 | |
6939 static bool | |
6940 gate_handle_cse2 (void) | |
6941 { | |
6942 return optimize > 0 && flag_rerun_cse_after_loop; | |
6943 } | |
6944 | |
6945 /* Run second CSE pass after loop optimizations. */ | |
6946 static unsigned int | |
6947 rest_of_handle_cse2 (void) | |
6948 { | |
6949 int tem; | |
6950 | |
6951 if (dump_file) | |
6952 dump_flow_info (dump_file, dump_flags); | |
6953 | |
6954 tem = cse_main (get_insns (), max_reg_num ()); | |
6955 | |
6956 /* Run a pass to eliminate duplicated assignments to condition code | |
6957 registers. We have to run this after bypass_jumps, because it | |
6958 makes it harder for that pass to determine whether a jump can be | |
6959 bypassed safely. */ | |
6960 cse_condition_code_reg (); | |
6961 | |
6962 delete_trivially_dead_insns (get_insns (), max_reg_num ()); | |
6963 | |
6964 if (tem == 2) | |
6965 { | |
6966 timevar_push (TV_JUMP); | |
6967 rebuild_jump_labels (get_insns ()); | |
6968 cleanup_cfg (0); | |
6969 timevar_pop (TV_JUMP); | |
6970 } | |
6971 else if (tem == 1) | |
6972 cleanup_cfg (0); | |
6973 | |
6974 cse_not_expected = 1; | |
6975 return 0; | |
6976 } | |
6977 | |
6978 | |
6979 struct rtl_opt_pass pass_cse2 = | |
6980 { | |
6981 { | |
6982 RTL_PASS, | |
6983 "cse2", /* name */ | |
6984 gate_handle_cse2, /* gate */ | |
6985 rest_of_handle_cse2, /* execute */ | |
6986 NULL, /* sub */ | |
6987 NULL, /* next */ | |
6988 0, /* static_pass_number */ | |
6989 TV_CSE2, /* tv_id */ | |
6990 0, /* properties_required */ | |
6991 0, /* properties_provided */ | |
6992 0, /* properties_destroyed */ | |
6993 0, /* todo_flags_start */ | |
6994 TODO_df_finish | TODO_verify_rtl_sharing | | |
6995 TODO_dump_func | | |
6996 TODO_ggc_collect | | |
6997 TODO_verify_flow /* todo_flags_finish */ | |
6998 } | |
6999 }; | |
7000 |