gcc/tree-loop-distribution.c @ 0:a06113de4d67

first commit

author    kent <kent@cr.ie.u-ryukyu.ac.jp>
date      Fri, 17 Jul 2009 14:47:48 +0900
parents
children  77e2b8dfacca

/* Loop distribution.
   Copyright (C) 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
   Contributed by Georges-Andre Silber <Georges-Andre.Silber@ensmp.fr>
   and Sebastian Pop <sebastian.pop@amd.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This pass performs loop distribution: for example, the loop

   |DO I = 2, N
   |    A(I) = B(I) + C
   |    D(I) = A(I-1)*E
   |ENDDO

   is transformed to

   |DOALL I = 2, N
   |   A(I) = B(I) + C
   |ENDDO
   |
   |DOALL I = 2, N
   |   D(I) = A(I-1)*E
   |ENDDO

   This pass uses an RDG, Reduced Dependence Graph built on top of the
   data dependence relations.  The RDG is then topologically sorted to
   obtain a map of information producers/consumers based on which it
   generates the new loops.  */
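
/* For illustration only (a sketch, not the literal output of the
   pass): the same transformation expressed in C.

   |for (i = 2; i <= n; i++)              |for (i = 2; i <= n; i++)
   |  {                                   |  a[i] = b[i] + c;
   |    a[i] = b[i] + c;         becomes  |
   |    d[i] = a[i-1] * e;                |for (i = 2; i <= n; i++)
   |  }                                   |  d[i] = a[i-1] * e;

   Each resulting loop carries no cross-iteration dependence of its
   own, so both can be run as parallel (DOALL) loops.  */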

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "target.h"

#include "rtl.h"
#include "basic-block.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "timevar.h"
#include "cfgloop.h"
#include "expr.h"
#include "optabs.h"
#include "tree-chrec.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "tree-pass.h"
#include "lambda.h"
#include "langhooks.h"
#include "tree-vectorizer.h"

/* If bit I is not set, the statement corresponding to vertex I has
   already been generated and should not be generated again.  The set
   bits form the subgraph of remaining important computations that is
   passed to the DFS algorithm, so that the same stores are not
   included several times in different loops.  */
static bitmap remaining_stmts;

/* A node of the RDG is marked in this bitmap when it has as a
   predecessor a node that writes to memory.  */
static bitmap upstream_mem_writes;

/* TODOs we need to run after the pass.  */
static unsigned int todo;

/* Update the PHI nodes of NEW_LOOP.  NEW_LOOP is a duplicate of
   ORIG_LOOP.  */

static void
update_phis_for_loop_copy (struct loop *orig_loop, struct loop *new_loop)
{
  tree new_ssa_name;
  gimple_stmt_iterator si_new, si_orig;
  edge orig_loop_latch = loop_latch_edge (orig_loop);
  edge orig_entry_e = loop_preheader_edge (orig_loop);
  edge new_loop_entry_e = loop_preheader_edge (new_loop);

  /* Scan the phis in the headers of the old and new loops
     (they are organized in exactly the same order).  */
  for (si_new = gsi_start_phis (new_loop->header),
       si_orig = gsi_start_phis (orig_loop->header);
       !gsi_end_p (si_new) && !gsi_end_p (si_orig);
       gsi_next (&si_new), gsi_next (&si_orig))
    {
      tree def;
      gimple phi_new = gsi_stmt (si_new);
      gimple phi_orig = gsi_stmt (si_orig);

      /* Add the first phi argument for the phi in NEW_LOOP (the one
         associated with the entry of NEW_LOOP).  */
      def = PHI_ARG_DEF_FROM_EDGE (phi_orig, orig_entry_e);
      add_phi_arg (phi_new, def, new_loop_entry_e);

      /* Add the second phi argument for the phi in NEW_LOOP (the one
         associated with the latch of NEW_LOOP).  */
      def = PHI_ARG_DEF_FROM_EDGE (phi_orig, orig_loop_latch);

      if (TREE_CODE (def) == SSA_NAME)
        {
          new_ssa_name = get_current_def (def);

          if (!new_ssa_name)
            /* This only happens if there are no definitions inside the
               loop.  Use the phi_result in this case.  */
            new_ssa_name = PHI_RESULT (phi_new);
        }
      else
        /* Could be an integer.  */
        new_ssa_name = def;

      add_phi_arg (phi_new, new_ssa_name, loop_latch_edge (new_loop));
    }
}
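
/* For illustration (hypothetical SSA names): a header PHI of
   ORIG_LOOP such as

   |i_1 = PHI <0 (preheader), i_7 (latch)>

   has its counterpart in NEW_LOOP completed with the preheader
   argument 0 and, for the latch argument, get_current_def (i_7);
   when i_7 has no definition inside the loop, the PHI result of the
   new node is used instead, as in the code above.  */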

/* Return a copy of LOOP placed before LOOP.  */

static struct loop *
copy_loop_before (struct loop *loop)
{
  struct loop *res;
  edge preheader = loop_preheader_edge (loop);

  if (!single_exit (loop))
    return NULL;

  initialize_original_copy_tables ();
  res = slpeel_tree_duplicate_loop_to_edge_cfg (loop, preheader);
  free_original_copy_tables ();

  if (!res)
    return NULL;

  update_phis_for_loop_copy (loop, res);
  rename_variables_in_loop (res);

  return res;
}

/* Creates an empty basic block after LOOP.  */

static void
create_bb_after_loop (struct loop *loop)
{
  edge exit = single_exit (loop);

  if (!exit)
    return;

  split_edge (exit);
}

/* Generate code for PARTITION from the code in LOOP.  The loop is
   copied when COPY_P is true.  All the statements not flagged in the
   PARTITION bitmap are removed from the loop or from its copy.  The
   statements are indexed in sequence inside a basic block, and the
   basic blocks of a loop are taken in dom order.  Returns true when
   the code gen succeeded.  */

static bool
generate_loops_for_partition (struct loop *loop, bitmap partition, bool copy_p)
{
  unsigned i, x;
  gimple_stmt_iterator bsi;
  basic_block *bbs;

  if (copy_p)
    {
      loop = copy_loop_before (loop);
      create_preheader (loop, CP_SIMPLE_PREHEADERS);
      create_bb_after_loop (loop);
    }

  if (loop == NULL)
    return false;

  /* Remove stmts not in the PARTITION bitmap.  The order in which we
     visit the phi nodes and the statements is exactly as in
     stmts_from_loop.  */
  bbs = get_loop_body_in_dom_order (loop);

  for (x = 0, i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi);)
        if (!bitmap_bit_p (partition, x++))
          remove_phi_node (&bsi, true);
        else
          gsi_next (&bsi);

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi);)
        if (gimple_code (gsi_stmt (bsi)) != GIMPLE_LABEL
            && !bitmap_bit_p (partition, x++))
          gsi_remove (&bsi, false);
        else
          gsi_next (&bsi);

      mark_virtual_ops_in_bb (bb);
    }

  free (bbs);
  return true;
}

/* Build the size argument for a memory builtin: the number of
   iterations NB_ITER times the size in bytes of one element of the
   reference OP, gimplified into STMT_LIST.  */

static inline tree
build_size_arg (tree nb_iter, tree op, gimple_seq *stmt_list)
{
  tree nb_bytes;
  gimple_seq stmts = NULL;

  nb_bytes = fold_build2 (MULT_EXPR, TREE_TYPE (nb_iter),
                          nb_iter, TYPE_SIZE_UNIT (TREE_TYPE (op)));
  nb_bytes = force_gimple_operand (nb_bytes, &stmts, true, NULL);
  gimple_seq_add_seq (stmt_list, stmts);

  return nb_bytes;
}
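
/* For instance (illustrative types and values): with NB_ITER equal
   to some n and OP a reference to an `int' array element on a target
   where TYPE_SIZE_UNIT (int) is 4 bytes, the returned expression is
   equivalent to

   |nb_bytes = n * 4;

   gimplified into STMT_LIST.  */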

/* Generate a call to memset.  Return true when the operation succeeded.  */

static bool
generate_memset_zero (gimple stmt, tree op0, tree nb_iter,
                      gimple_stmt_iterator bsi)
{
  tree t, addr_base;
  tree nb_bytes = NULL;
  bool res = false;
  gimple_seq stmts = NULL, stmt_list = NULL;
  gimple fn_call;
  tree mem, fndecl, fntype, fn;
  gimple_stmt_iterator i;
  ssa_op_iter iter;
  struct data_reference *dr = XCNEW (struct data_reference);

  DR_STMT (dr) = stmt;
  DR_REF (dr) = op0;
  if (!dr_analyze_innermost (dr))
    goto end;

  /* Test for a positive stride, iterating over every element.  */
  if (integer_zerop (fold_build2 (MINUS_EXPR, integer_type_node, DR_STEP (dr),
                                  TYPE_SIZE_UNIT (TREE_TYPE (op0)))))
    {
      tree offset = fold_convert (sizetype,
                                  size_binop (PLUS_EXPR,
                                              DR_OFFSET (dr),
                                              DR_INIT (dr)));
      addr_base = fold_build2 (POINTER_PLUS_EXPR,
                               TREE_TYPE (DR_BASE_ADDRESS (dr)),
                               DR_BASE_ADDRESS (dr), offset);
    }

  /* Test for a negative stride, iterating over every element.  */
  else if (integer_zerop (fold_build2 (PLUS_EXPR, integer_type_node,
                                       TYPE_SIZE_UNIT (TREE_TYPE (op0)),
                                       DR_STEP (dr))))
    {
      nb_bytes = build_size_arg (nb_iter, op0, &stmt_list);
      addr_base = size_binop (PLUS_EXPR, DR_OFFSET (dr), DR_INIT (dr));
      addr_base = fold_build2 (MINUS_EXPR, sizetype, addr_base, nb_bytes);
      addr_base = force_gimple_operand (addr_base, &stmts, true, NULL);
      gimple_seq_add_seq (&stmt_list, stmts);

      addr_base = fold_build2 (POINTER_PLUS_EXPR,
                               TREE_TYPE (DR_BASE_ADDRESS (dr)),
                               DR_BASE_ADDRESS (dr), addr_base);
    }
  else
    goto end;

  mem = force_gimple_operand (addr_base, &stmts, true, NULL);
  gimple_seq_add_seq (&stmt_list, stmts);

  fndecl = implicit_built_in_decls[BUILT_IN_MEMSET];
  fntype = TREE_TYPE (fndecl);
  fn = build1 (ADDR_EXPR, build_pointer_type (fntype), fndecl);

  if (!nb_bytes)
    nb_bytes = build_size_arg (nb_iter, op0, &stmt_list);
  fn_call = gimple_build_call (fn, 3, mem, integer_zero_node, nb_bytes);
  gimple_seq_add_stmt (&stmt_list, fn_call);

  for (i = gsi_start (stmt_list); !gsi_end_p (i); gsi_next (&i))
    {
      gimple s = gsi_stmt (i);
      update_stmt_if_modified (s);

      FOR_EACH_SSA_TREE_OPERAND (t, s, iter, SSA_OP_VIRTUAL_DEFS)
        {
          if (TREE_CODE (t) == SSA_NAME)
            t = SSA_NAME_VAR (t);
          mark_sym_for_renaming (t);
        }
    }

  /* Also mark the uses of the VDEFs of STMT for renaming.  */
  FOR_EACH_SSA_TREE_OPERAND (t, stmt, iter, SSA_OP_VIRTUAL_DEFS)
    {
      if (TREE_CODE (t) == SSA_NAME)
        {
          gimple s;
          imm_use_iterator imm_iter;

          FOR_EACH_IMM_USE_STMT (s, imm_iter, t)
            update_stmt (s);

          t = SSA_NAME_VAR (t);
        }
      mark_sym_for_renaming (t);
    }

  gsi_insert_seq_after (&bsi, stmt_list, GSI_CONTINUE_LINKING);
  res = true;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "generated memset zero\n");

  todo |= TODO_rebuild_alias;

 end:
  free_data_ref (dr);
  return res;
}
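
/* Sketch of the rewrite done above, at the source level (the actual
   transformation is performed on GIMPLE):

   |for (i = 0; i < n; i++)
   |  a[i] = 0;

   becomes

   |__builtin_memset (&a[0], 0, n * sizeof (a[0]));

   For a negative stride, the code above instead subtracts NB_BYTES
   from the initial offset to form the base address of the call.  */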

/* Propagate the phi nodes of basic block B to their uses and remove
   them.  */

static void
prop_phis (basic_block b)
{
  gimple_stmt_iterator psi;
  gimple_seq phis = phi_nodes (b);

  for (psi = gsi_start (phis); !gsi_end_p (psi); )
    {
      gimple phi = gsi_stmt (psi);
      tree def = gimple_phi_result (phi), use = gimple_phi_arg_def (phi, 0);

      gcc_assert (gimple_phi_num_args (phi) == 1);

      if (!is_gimple_reg (def))
        {
          imm_use_iterator iter;
          use_operand_p use_p;
          gimple stmt;

          FOR_EACH_IMM_USE_STMT (stmt, iter, def)
            FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
              SET_USE (use_p, use);
        }
      else
        replace_uses_by (def, use);

      remove_phi_node (&psi, true);
    }
}
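
/* For illustration (hypothetical SSA names): after the loop body is
   removed, a single-argument PHI such as

   |x_3 = PHI <x_5 (e)>

   is degenerate: every use of x_3 is replaced by x_5 and the PHI
   node is deleted.  */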

/* Tries to generate a builtin function for the instructions of LOOP
   pointed to by the bits set in PARTITION.  Returns true when the
   operation succeeded.  */

static bool
generate_builtin (struct loop *loop, bitmap partition, bool copy_p)
{
  bool res = false;
  unsigned i, x = 0;
  basic_block *bbs;
  gimple write = NULL;
  tree op0, op1;
  gimple_stmt_iterator bsi;
  tree nb_iter = number_of_exit_cond_executions (loop);

  if (!nb_iter || nb_iter == chrec_dont_know)
    return false;

  bbs = get_loop_body_in_dom_order (loop);

  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
        x++;

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
        {
          gimple stmt = gsi_stmt (bsi);

          if (bitmap_bit_p (partition, x++)
              && is_gimple_assign (stmt)
              && !is_gimple_reg (gimple_assign_lhs (stmt)))
            {
              /* Don't generate the builtins when there is more than
                 one memory write.  */
              if (write != NULL)
                goto end;

              write = stmt;
            }
        }
    }

  if (!write)
    goto end;

  op0 = gimple_assign_lhs (write);
  op1 = gimple_assign_rhs1 (write);

  if (!(TREE_CODE (op0) == ARRAY_REF
        || TREE_CODE (op0) == INDIRECT_REF))
    goto end;

  /* The new statements will be placed before LOOP.  */
  bsi = gsi_last_bb (loop_preheader_edge (loop)->src);

  if (gimple_assign_rhs_code (write) == INTEGER_CST
      && (integer_zerop (op1) || real_zerop (op1)))
    res = generate_memset_zero (write, op0, nb_iter, bsi);

  /* If this is the last partition for which we generate code, we have
     to destroy the loop.  */
  if (res && !copy_p)
    {
      unsigned nbbs = loop->num_nodes;
      basic_block src = loop_preheader_edge (loop)->src;
      basic_block dest = single_exit (loop)->dest;
      prop_phis (dest);
      make_edge (src, dest, EDGE_FALLTHRU);
      cancel_loop_tree (loop);

      for (i = 0; i < nbbs; i++)
        delete_basic_block (bbs[i]);

      set_immediate_dominator (CDI_DOMINATORS, dest,
                               recompute_dominator (CDI_DOMINATORS, dest));
    }

 end:
  free (bbs);
  return res;
}

/* Generates code for PARTITION.  For simple loops, this function can
   generate a built-in.  */

static bool
generate_code_for_partition (struct loop *loop, bitmap partition, bool copy_p)
{
  if (generate_builtin (loop, partition, copy_p))
    return true;

  return generate_loops_for_partition (loop, partition, copy_p);
}

/* Returns true if the node V of RDG cannot be recomputed.  */

static bool
rdg_cannot_recompute_vertex_p (struct graph *rdg, int v)
{
  if (RDG_MEM_WRITE_STMT (rdg, v))
    return true;

  return false;
}

/* Returns true when the vertex V has already been generated in the
   current partition (V is in PROCESSED), or when V belongs to another
   partition and cannot be recomputed (V is not in REMAINING_STMTS).  */

static inline bool
already_processed_vertex_p (bitmap processed, int v)
{
  return (bitmap_bit_p (processed, v)
          || !bitmap_bit_p (remaining_stmts, v));
}

/* Returns NULL when there is no anti-dependence among the successors
   of vertex V, otherwise returns the edge carrying the
   anti-dependence.  */

static struct graph_edge *
has_anti_dependence (struct vertex *v)
{
  struct graph_edge *e;

  if (v->succ)
    for (e = v->succ; e; e = e->succ_next)
      if (RDGE_TYPE (e) == anti_dd)
        return e;

  return NULL;
}
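
/* For illustration: an anti-dependence is a read followed by a write
   to the same location, as in

   |t = a[i];      <- read of a[i]
   |a[i] = s;      <- write anti-dependent on the read

   Splitting the two statements into different loops would let the
   write overtake the read, so such vertices are kept in the same
   partition (see rdg_flag_uses below).  */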

/* Returns true when V has a predecessor marked in
   UPSTREAM_MEM_WRITES that is not itself a memory write.  */

static bool
predecessor_has_mem_write (struct graph *rdg, struct vertex *v)
{
  struct graph_edge *e;

  if (v->pred)
    for (e = v->pred; e; e = e->pred_next)
      if (bitmap_bit_p (upstream_mem_writes, e->src)
          /* Don't consider flow channels: a write to memory followed
             by a read from memory.  These channels allow the RDG to
             be split into different partitions.  */
          && !RDG_MEM_WRITE_STMT (rdg, e->src))
        return true;

  return false;
}

/* Initializes the UPSTREAM_MEM_WRITES bitmap following the
   information from RDG.  */

static void
mark_nodes_having_upstream_mem_writes (struct graph *rdg)
{
  int v, x;
  bitmap seen = BITMAP_ALLOC (NULL);

  for (v = rdg->n_vertices - 1; v >= 0; v--)
    if (!bitmap_bit_p (seen, v))
      {
        unsigned i;
        VEC (int, heap) *nodes = VEC_alloc (int, heap, 3);
        bool has_upstream_mem_write_p = false;

        graphds_dfs (rdg, &v, 1, &nodes, false, NULL);

        for (i = 0; VEC_iterate (int, nodes, i, x); i++)
          {
            if (bitmap_bit_p (seen, x))
              continue;

            bitmap_set_bit (seen, x);

            if (RDG_MEM_WRITE_STMT (rdg, x)
                || predecessor_has_mem_write (rdg, &(rdg->vertices[x]))
                /* In an anti-dependence the read should occur before
                   the write; this is why both the read and the write
                   should be placed in the same partition.  */
                || has_anti_dependence (&(rdg->vertices[x])))
              {
                has_upstream_mem_write_p = true;
                bitmap_set_bit (upstream_mem_writes, x);
              }
          }

        VEC_free (int, heap, nodes);
      }
}

/* Returns true when vertex U has a memory write node as a
   predecessor in RDG.  */

static bool
has_upstream_mem_writes (int u)
{
  return bitmap_bit_p (upstream_mem_writes, u);
}

static void rdg_flag_vertex_and_dependent (struct graph *, int, bitmap, bitmap,
                                           bitmap, bool *);

/* Flag all the uses of U.  */

static void
rdg_flag_all_uses (struct graph *rdg, int u, bitmap partition, bitmap loops,
                   bitmap processed, bool *part_has_writes)
{
  struct graph_edge *e;

  for (e = rdg->vertices[u].succ; e; e = e->succ_next)
    if (!bitmap_bit_p (processed, e->dest))
      {
        rdg_flag_vertex_and_dependent (rdg, e->dest, partition, loops,
                                       processed, part_has_writes);
        rdg_flag_all_uses (rdg, e->dest, partition, loops, processed,
                           part_has_writes);
      }
}

/* Flag the uses of U, stopping the traversal according to the
   information in UPSTREAM_MEM_WRITES.  */

static void
rdg_flag_uses (struct graph *rdg, int u, bitmap partition, bitmap loops,
               bitmap processed, bool *part_has_writes)
{
  ssa_op_iter iter;
  use_operand_p use_p;
  struct vertex *x = &(rdg->vertices[u]);
  gimple stmt = RDGV_STMT (x);
  struct graph_edge *anti_dep = has_anti_dependence (x);

  /* Keep in the same partition the destination of an anti-dependence,
     because this is a store to the exact same location.  Putting this
     in another partition is bad for cache locality.  */
  if (anti_dep)
    {
      int v = anti_dep->dest;

      if (!already_processed_vertex_p (processed, v))
        rdg_flag_vertex_and_dependent (rdg, v, partition, loops,
                                       processed, part_has_writes);
    }

  if (gimple_code (stmt) != GIMPLE_PHI)
    {
      FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_VIRTUAL_USES)
        {
          tree use = USE_FROM_PTR (use_p);

          if (TREE_CODE (use) == SSA_NAME)
            {
              gimple def_stmt = SSA_NAME_DEF_STMT (use);
              int v = rdg_vertex_for_stmt (rdg, def_stmt);

              if (v >= 0
                  && !already_processed_vertex_p (processed, v))
                rdg_flag_vertex_and_dependent (rdg, v, partition, loops,
                                               processed, part_has_writes);
            }
        }
    }

  if (is_gimple_assign (stmt) && has_upstream_mem_writes (u))
    {
      tree op0 = gimple_assign_lhs (stmt);

      /* Scalar channels don't have enough space for transmitting data
         between tasks, unless we add more storage by privatizing.  */
      if (is_gimple_reg (op0))
        {
          use_operand_p use_p;
          imm_use_iterator iter;

          FOR_EACH_IMM_USE_FAST (use_p, iter, op0)
            {
              int v = rdg_vertex_for_stmt (rdg, USE_STMT (use_p));

              if (!already_processed_vertex_p (processed, v))
                rdg_flag_vertex_and_dependent (rdg, v, partition, loops,
                                               processed, part_has_writes);
            }
        }
    }
}
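
/* For illustration (hypothetical statements): with an upstream
   memory write, a scalar definition and its use, as in

   |s_4 = a[i];       <- vertex U, scalar (register) LHS
   |b[i] = s_4 + 1;   <- use of s_4

   cannot be separated without privatizing s_4, so the code above
   pulls the use into the same partition as U.  */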

/* Flag V from RDG as part of PARTITION, and also flag its loop number
   in LOOPS.  */

static void
rdg_flag_vertex (struct graph *rdg, int v, bitmap partition, bitmap loops,
                 bool *part_has_writes)
{
  struct loop *loop;

  if (bitmap_bit_p (partition, v))
    return;

  loop = loop_containing_stmt (RDG_STMT (rdg, v));
  bitmap_set_bit (loops, loop->num);
  bitmap_set_bit (partition, v);

  if (rdg_cannot_recompute_vertex_p (rdg, v))
    {
      *part_has_writes = true;
      bitmap_clear_bit (remaining_stmts, v);
    }
}

/* Flag in the bitmap PARTITION the vertex V and all its predecessors.
   Also flag their loop number in LOOPS.  */

static void
rdg_flag_vertex_and_dependent (struct graph *rdg, int v, bitmap partition,
                               bitmap loops, bitmap processed,
                               bool *part_has_writes)
{
  unsigned i;
  VEC (int, heap) *nodes = VEC_alloc (int, heap, 3);
  int x;

  bitmap_set_bit (processed, v);
  rdg_flag_uses (rdg, v, partition, loops, processed, part_has_writes);
  graphds_dfs (rdg, &v, 1, &nodes, false, remaining_stmts);
  rdg_flag_vertex (rdg, v, partition, loops, part_has_writes);

  for (i = 0; VEC_iterate (int, nodes, i, x); i++)
    if (!already_processed_vertex_p (processed, x))
      rdg_flag_vertex_and_dependent (rdg, x, partition, loops, processed,
                                     part_has_writes);

  VEC_free (int, heap, nodes);
}

/* Initialize CONDS with all the condition statements from the basic
   blocks of LOOP.  */

static void
collect_condition_stmts (struct loop *loop, VEC (gimple, heap) **conds)
{
  unsigned i;
  edge e;
  VEC (edge, heap) *exits = get_loop_exit_edges (loop);

  for (i = 0; VEC_iterate (edge, exits, i, e); i++)
    {
      gimple cond = last_stmt (e->src);

      if (cond)
        VEC_safe_push (gimple, heap, *conds, cond);
    }

  VEC_free (edge, heap, exits);
}

/* Add to PARTITION all the exit condition statements for LOOPS
   together with all their dependent statements determined from
   RDG.  */

static void
rdg_flag_loop_exits (struct graph *rdg, bitmap loops, bitmap partition,
                     bitmap processed, bool *part_has_writes)
{
  unsigned i;
  bitmap_iterator bi;
  VEC (gimple, heap) *conds = VEC_alloc (gimple, heap, 3);

  EXECUTE_IF_SET_IN_BITMAP (loops, 0, i, bi)
    collect_condition_stmts (get_loop (i), &conds);

  while (!VEC_empty (gimple, conds))
    {
      gimple cond = VEC_pop (gimple, conds);
      int v = rdg_vertex_for_stmt (rdg, cond);
      bitmap new_loops = BITMAP_ALLOC (NULL);

      if (!already_processed_vertex_p (processed, v))
        rdg_flag_vertex_and_dependent (rdg, v, partition, new_loops, processed,
                                       part_has_writes);

      EXECUTE_IF_SET_IN_BITMAP (new_loops, 0, i, bi)
        if (!bitmap_bit_p (loops, i))
          {
            bitmap_set_bit (loops, i);
            collect_condition_stmts (get_loop (i), &conds);
          }

      BITMAP_FREE (new_loops);
    }
}

/* Flag all the nodes of RDG containing memory accesses that could
   potentially belong to arrays already accessed in the current
   PARTITION.  */

static void
rdg_flag_similar_memory_accesses (struct graph *rdg, bitmap partition,
                                  bitmap loops, bitmap processed,
                                  VEC (int, heap) **other_stores)
{
  bool foo;
  unsigned i, n;
  int j, k, kk;
  bitmap_iterator ii;
  struct graph_edge *e;

  EXECUTE_IF_SET_IN_BITMAP (partition, 0, i, ii)
    if (RDG_MEM_WRITE_STMT (rdg, i)
        || RDG_MEM_READS_STMT (rdg, i))
      {
        for (j = 0; j < rdg->n_vertices; j++)
          if (!bitmap_bit_p (processed, j)
              && (RDG_MEM_WRITE_STMT (rdg, j)
                  || RDG_MEM_READS_STMT (rdg, j))
              && rdg_has_similar_memory_accesses (rdg, i, j))
            {
              /* Flag first the node J itself, and all the nodes that
                 are needed to compute J.  */
              rdg_flag_vertex_and_dependent (rdg, j, partition, loops,
                                             processed, &foo);

              /* When J is a read, we want to coalesce in the same
                 PARTITION all the nodes that are using J: this is
                 needed for better cache locality.  */
              rdg_flag_all_uses (rdg, j, partition, loops, processed, &foo);

              /* Remove from OTHER_STORES the vertex that we flagged.  */
              if (RDG_MEM_WRITE_STMT (rdg, j))
                for (k = 0; VEC_iterate (int, *other_stores, k, kk); k++)
                  if (kk == j)
                    {
                      VEC_unordered_remove (int, *other_stores, k);
                      break;
                    }
            }

        /* If the node I has more than one use, keep all its uses in
           the same PARTITION.  */
        for (n = 0, e = rdg->vertices[i].succ; e; e = e->succ_next, n++);

        if (n > 1)
          rdg_flag_all_uses (rdg, i, partition, loops, processed, &foo);
      }
}

/* Returns a bitmap in which all the statements needed for computing
   the strongly connected component C of the RDG are flagged, also
   including the loop exit conditions.  */

static bitmap
build_rdg_partition_for_component (struct graph *rdg, rdgc c,
                                   bool *part_has_writes,
                                   VEC (int, heap) **other_stores)
{
  int i, v;
  bitmap partition = BITMAP_ALLOC (NULL);
  bitmap loops = BITMAP_ALLOC (NULL);
  bitmap processed = BITMAP_ALLOC (NULL);

  for (i = 0; VEC_iterate (int, c->vertices, i, v); i++)
    if (!already_processed_vertex_p (processed, v))
      rdg_flag_vertex_and_dependent (rdg, v, partition, loops, processed,
                                     part_has_writes);

  /* Also iterate on the array of stores not in the starting vertices,
     and determine those vertices that have some memory affinity with
     the current nodes in the component: these are stores to the same
     arrays, i.e. we're taking care of cache locality.  */
  rdg_flag_similar_memory_accesses (rdg, partition, loops, processed,
                                    other_stores);

  rdg_flag_loop_exits (rdg, loops, partition, processed, part_has_writes);

  BITMAP_FREE (processed);
  BITMAP_FREE (loops);
  return partition;
}

/* Free memory for COMPONENTS.  */

static void
free_rdg_components (VEC (rdgc, heap) *components)
{
  int i;
  rdgc x;

  for (i = 0; VEC_iterate (rdgc, components, i, x); i++)
    {
      VEC_free (int, heap, x->vertices);
      free (x);
    }
}

/* Build the COMPONENTS vector with the strongly connected components
   of RDG in which the STARTING_VERTICES occur.  */

static void
rdg_build_components (struct graph *rdg, VEC (int, heap) *starting_vertices,
                      VEC (rdgc, heap) **components)
{
  int i, v;
  bitmap saved_components = BITMAP_ALLOC (NULL);
  int n_components = graphds_scc (rdg, NULL);
  VEC (int, heap) **all_components = XNEWVEC (VEC (int, heap) *, n_components);

  for (i = 0; i < n_components; i++)
    all_components[i] = VEC_alloc (int, heap, 3);

  for (i = 0; i < rdg->n_vertices; i++)
    VEC_safe_push (int, heap, all_components[rdg->vertices[i].component], i);

  for (i = 0; VEC_iterate (int, starting_vertices, i, v); i++)
    {
      int c = rdg->vertices[v].component;

      if (!bitmap_bit_p (saved_components, c))
        {
          rdgc x = XCNEW (struct rdg_component);
          x->num = c;
          x->vertices = all_components[c];

          VEC_safe_push (rdgc, heap, *components, x);
          bitmap_set_bit (saved_components, c);
        }
    }

  for (i = 0; i < n_components; i++)
    if (!bitmap_bit_p (saved_components, i))
      VEC_free (int, heap, all_components[i]);

  free (all_components);
  BITMAP_FREE (saved_components);
}

/* Aggregate several components into a useful partition that is
   registered in the PARTITIONS vector.  Partitions will be
   distributed in different loops.  */

static void
rdg_build_partitions (struct graph *rdg, VEC (rdgc, heap) *components,
                      VEC (int, heap) **other_stores,
                      VEC (bitmap, heap) **partitions, bitmap processed)
{
  int i;
  rdgc x;
  bitmap partition = BITMAP_ALLOC (NULL);

  for (i = 0; VEC_iterate (rdgc, components, i, x); i++)
    {
      bitmap np;
      bool part_has_writes = false;
      int v = VEC_index (int, x->vertices, 0);

      if (bitmap_bit_p (processed, v))
        continue;

      np = build_rdg_partition_for_component (rdg, x, &part_has_writes,
                                              other_stores);
      bitmap_ior_into (partition, np);
      bitmap_ior_into (processed, np);
      BITMAP_FREE (np);

      if (part_has_writes)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "ldist useful partition:\n");
              dump_bitmap (dump_file, partition);
            }

          VEC_safe_push (bitmap, heap, *partitions, partition);
          partition = BITMAP_ALLOC (NULL);
        }
    }

  /* Add the nodes from the RDG that were not marked as processed, and
     that are used outside the current loop.  These are scalar
     computations that are not yet part of previous partitions.  */
  for (i = 0; i < rdg->n_vertices; i++)
    if (!bitmap_bit_p (processed, i)
        && rdg_defs_used_in_other_loops_p (rdg, i))
      VEC_safe_push (int, heap, *other_stores, i);

  /* If there are still statements left in the OTHER_STORES array,
     create other components and partitions with these stores and
     their dependences.  */
  if (VEC_length (int, *other_stores) > 0)
    {
      VEC (rdgc, heap) *comps = VEC_alloc (rdgc, heap, 3);
      VEC (int, heap) *foo = VEC_alloc (int, heap, 3);

      rdg_build_components (rdg, *other_stores, &comps);
      rdg_build_partitions (rdg, comps, &foo, partitions, processed);

      VEC_free (int, heap, foo);
      free_rdg_components (comps);
    }

  /* If there is something left in the last partition, save it.  */
  if (bitmap_count_bits (partition) > 0)
    VEC_safe_push (bitmap, heap, *partitions, partition);
  else
    BITMAP_FREE (partition);
}

/* Dump to FILE the PARTITIONS.  */

static void
dump_rdg_partitions (FILE *file, VEC (bitmap, heap) *partitions)
{
  int i;
  bitmap partition;

  for (i = 0; VEC_iterate (bitmap, partitions, i, partition); i++)
    debug_bitmap_file (file, partition);
}

/* Debug PARTITIONS.  */
extern void debug_rdg_partitions (VEC (bitmap, heap) *);

void
debug_rdg_partitions (VEC (bitmap, heap) *partitions)
{
  dump_rdg_partitions (stderr, partitions);
}

/* Returns the number of read and write operations in the RDG.  */

static int
number_of_rw_in_rdg (struct graph *rdg)
{
  int i, res = 0;

  for (i = 0; i < rdg->n_vertices; i++)
    {
      if (RDG_MEM_WRITE_STMT (rdg, i))
        ++res;

      if (RDG_MEM_READS_STMT (rdg, i))
        ++res;
    }

  return res;
}

/* Returns the number of read and write operations in a PARTITION of
   the RDG.  */

static int
number_of_rw_in_partition (struct graph *rdg, bitmap partition)
{
  int res = 0;
  unsigned i;
  bitmap_iterator ii;

  EXECUTE_IF_SET_IN_BITMAP (partition, 0, i, ii)
    {
      if (RDG_MEM_WRITE_STMT (rdg, i))
        ++res;

      if (RDG_MEM_READS_STMT (rdg, i))
        ++res;
    }

  return res;
}

/* Returns true when one of the PARTITIONS contains all the read or
   write operations of RDG.  */

static bool
partition_contains_all_rw (struct graph *rdg, VEC (bitmap, heap) *partitions)
{
  int i;
  bitmap partition;
  int nrw = number_of_rw_in_rdg (rdg);

  for (i = 0; VEC_iterate (bitmap, partitions, i, partition); i++)
    if (nrw == number_of_rw_in_partition (rdg, partition))
      return true;

  return false;
}

/* Generate code from STARTING_VERTICES in RDG.  Returns the number of
   distributed loops.  */

static int
ldist_gen (struct loop *loop, struct graph *rdg,
           VEC (int, heap) *starting_vertices)
{
  int i, nbp;
  VEC (rdgc, heap) *components = VEC_alloc (rdgc, heap, 3);
  VEC (bitmap, heap) *partitions = VEC_alloc (bitmap, heap, 3);
  VEC (int, heap) *other_stores = VEC_alloc (int, heap, 3);
  bitmap partition, processed = BITMAP_ALLOC (NULL);

  remaining_stmts = BITMAP_ALLOC (NULL);
  upstream_mem_writes = BITMAP_ALLOC (NULL);

  for (i = 0; i < rdg->n_vertices; i++)
    {
      bitmap_set_bit (remaining_stmts, i);

      /* Save in OTHER_STORES all the memory writes that are not in
         STARTING_VERTICES.  */
      if (RDG_MEM_WRITE_STMT (rdg, i))
        {
          int v;
          unsigned j;
          bool found = false;

          for (j = 0; VEC_iterate (int, starting_vertices, j, v); j++)
            if (i == v)
              {
                found = true;
                break;
              }

          if (!found)
            VEC_safe_push (int, heap, other_stores, i);
        }
    }

  mark_nodes_having_upstream_mem_writes (rdg);
  rdg_build_components (rdg, starting_vertices, &components);
  rdg_build_partitions (rdg, components, &other_stores, &partitions,
                        processed);
  BITMAP_FREE (processed);
  nbp = VEC_length (bitmap, partitions);

  if (nbp <= 1
      || partition_contains_all_rw (rdg, partitions))
    goto ldist_done;

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_rdg_partitions (dump_file, partitions);

  for (i = 0; VEC_iterate (bitmap, partitions, i, partition); i++)
    if (!generate_code_for_partition (loop, partition, i < nbp - 1))
      goto ldist_done;

  rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
  update_ssa (TODO_update_ssa_only_virtuals | TODO_update_ssa);

 ldist_done:

  BITMAP_FREE (remaining_stmts);
  BITMAP_FREE (upstream_mem_writes);

  for (i = 0; VEC_iterate (bitmap, partitions, i, partition); i++)
    BITMAP_FREE (partition);

  VEC_free (int, heap, other_stores);
  VEC_free (bitmap, heap, partitions);
  free_rdg_components (components);
  return nbp;
}

/* Distributes the code from LOOP in such a way that producer
   statements are placed before consumer statements.  When STMTS is
   NULL, performs the maximal distribution; when STMTS is not NULL,
   tries to separate only these statements from the LOOP's body.
   Returns the number of distributed loops.  */

static int
distribute_loop (struct loop *loop, VEC (gimple, heap) *stmts)
{
  int res = 0;
  struct graph *rdg;
  gimple s;
  unsigned i;
  VEC (int, heap) *vertices;

  if (loop->num_nodes > 2)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "FIXME: Loop %d not distributed: it has more than two basic blocks.\n",
                 loop->num);

      return res;
    }

  rdg = build_rdg (loop);

  if (!rdg)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "FIXME: Loop %d not distributed: failed to build the RDG.\n",
                 loop->num);

      return res;
    }

  vertices = VEC_alloc (int, heap, 3);

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_rdg (dump_file, rdg);

  for (i = 0; VEC_iterate (gimple, stmts, i, s); i++)
    {
      int v = rdg_vertex_for_stmt (rdg, s);

      if (v >= 0)
        {
          VEC_safe_push (int, heap, vertices, v);

          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file,
                     "ldist asked to generate code for vertex %d\n", v);
        }
    }

  res = ldist_gen (loop, rdg, vertices);
  VEC_free (int, heap, vertices);
  free_rdg (rdg);

  return res;
}

/* Distribute all loops in the current function.  */

static unsigned int
tree_loop_distribution (void)
{
  struct loop *loop;
  loop_iterator li;
  int nb_generated_loops = 0;

  todo = 0;

  FOR_EACH_LOOP (li, loop, 0)
    {
      VEC (gimple, heap) *work_list = VEC_alloc (gimple, heap, 3);

      /* With the following working list, we're asking distribute_loop
         to separate the stores of the loop: when dependences allow,
         it will end up with one store per loop.  */
      stores_from_loop (loop, &work_list);

      /* A simple heuristic for cache locality is to not split stores
         to the same array.  Without this call, an unrolled loop would
         be split into as many loops as its unroll factor, each loop
         storing to the same array.  */
      remove_similar_memory_refs (&work_list);

      nb_generated_loops = distribute_loop (loop, work_list);

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          if (nb_generated_loops > 1)
            fprintf (dump_file, "Loop %d distributed: split to %d loops.\n",
                     loop->num, nb_generated_loops);
          else
            fprintf (dump_file, "Loop %d is the same.\n", loop->num);
        }

      verify_loop_structure ();

      VEC_free (gimple, heap, work_list);
    }

  return todo;
}

static bool
gate_tree_loop_distribution (void)
{
  return flag_tree_loop_distribution != 0;
}

struct gimple_opt_pass pass_loop_distribution =
{
 {
  GIMPLE_PASS,
  "ldist",                      /* name */
  gate_tree_loop_distribution,  /* gate */
  tree_loop_distribution,       /* execute */
  NULL,                         /* sub */
  NULL,                         /* next */
  0,                            /* static_pass_number */
  TV_TREE_LOOP_DISTRIBUTION,    /* tv_id */
  PROP_cfg | PROP_ssa,          /* properties_required */
  0,                            /* properties_provided */
  0,                            /* properties_destroyed */
  0,                            /* todo_flags_start */
  TODO_dump_func | TODO_verify_loops /* todo_flags_finish */
 }
};
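
/* Usage sketch (informational, assuming the standard GCC option
   naming): the gate above tests flag_tree_loop_distribution, set by
   -ftree-loop-distribution, and the "ldist" pass name selects the
   dump requested with -fdump-tree-ldist-details, e.g.

   |gcc -O2 -ftree-loop-distribution -fdump-tree-ldist-details f.c  */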