comparison gcc/tree-vect-patterns.c @ 0:a06113de4d67

first commit
author kent <kent@cr.ie.u-ryukyu.ac.jp>
date Fri, 17 Jul 2009 14:47:48 +0900
parents
children 77e2b8dfacca
1 /* Analysis Utilities for Loop Vectorization.
2 Copyright (C) 2006, 2007, 2008 Free Software Foundation, Inc.
3 Contributed by Dorit Nuzman <dorit@il.ibm.com>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "ggc.h"
26 #include "tree.h"
27
28 #include "target.h"
29 #include "basic-block.h"
30 #include "diagnostic.h"
31 #include "tree-flow.h"
32 #include "tree-dump.h"
33 #include "timevar.h"
34 #include "cfgloop.h"
35 #include "expr.h"
36 #include "optabs.h"
37 #include "params.h"
38 #include "tree-data-ref.h"
39 #include "tree-vectorizer.h"
40 #include "recog.h"
41 #include "toplev.h"
42
43 /* Function prototypes */
44 static void vect_pattern_recog_1
45 (gimple (* ) (gimple, tree *, tree *), gimple_stmt_iterator);
46 static bool widened_name_p (tree, gimple, tree *, gimple *);
47
48 /* Pattern recognition functions */
49 static gimple vect_recog_widen_sum_pattern (gimple, tree *, tree *);
50 static gimple vect_recog_widen_mult_pattern (gimple, tree *, tree *);
51 static gimple vect_recog_dot_prod_pattern (gimple, tree *, tree *);
52 static gimple vect_recog_pow_pattern (gimple, tree *, tree *);
53 static vect_recog_func_ptr vect_vect_recog_func_ptrs[NUM_PATTERNS] = {
54 vect_recog_widen_mult_pattern,
55 vect_recog_widen_sum_pattern,
56 vect_recog_dot_prod_pattern,
57 vect_recog_pow_pattern};
58
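/* Note (illustrative addition, not part of the original file): the table
   above is what drives vect_pattern_recog at the end of this file.  A
   hypothetical new recognizer would follow the same signature, for example

     static gimple
     vect_recog_my_pattern (gimple last_stmt, tree *type_in, tree *type_out);

   and would be appended to vect_vect_recog_func_ptrs, with NUM_PATTERNS
   (presumably defined next to vect_recog_func_ptr in tree-vectorizer.h)
   increased to match.  */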
59
60 /* Function widened_name_p
61
62 Check whether NAME, an ssa-name used in USE_STMT,
63 is a result of a type-promotion, such that:
64 DEF_STMT: NAME = NOP (name0)
65 where the type of name0 (HALF_TYPE) is at most half the size of the type of NAME.
66 */
67
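/* Illustrative example (an addition, not in the original file): given a loop
   body containing

     unsigned char c;
     unsigned int i;
     ...
     i = (unsigned int) c;          DEF_STMT: i = NOP (c)
     ... = ... i ...;               USE_STMT

   widened_name_p (i, use_stmt, &half_type, &def_stmt) is expected to succeed
   with *HALF_TYPE == unsigned char and *DEF_STMT set to the conversion,
   because both types are integral, have the same signedness, and the
   precision of NAME is at least twice that of name0.  */
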
68 static bool
69 widened_name_p (tree name, gimple use_stmt, tree *half_type, gimple *def_stmt)
70 {
71 tree dummy;
72 gimple dummy_gimple;
73 loop_vec_info loop_vinfo;
74 stmt_vec_info stmt_vinfo;
75 tree type = TREE_TYPE (name);
76 tree oprnd0;
77 enum vect_def_type dt;
78 tree def;
79
80 stmt_vinfo = vinfo_for_stmt (use_stmt);
81 loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
82
83 if (!vect_is_simple_use (name, loop_vinfo, def_stmt, &def, &dt))
84 return false;
85
86 if (dt != vect_loop_def
87 && dt != vect_invariant_def && dt != vect_constant_def)
88 return false;
89
90 if (! *def_stmt)
91 return false;
92
93 if (!is_gimple_assign (*def_stmt))
94 return false;
95
96 if (gimple_assign_rhs_code (*def_stmt) != NOP_EXPR)
97 return false;
98
99 oprnd0 = gimple_assign_rhs1 (*def_stmt);
100
101 *half_type = TREE_TYPE (oprnd0);
102 if (!INTEGRAL_TYPE_P (type) || !INTEGRAL_TYPE_P (*half_type)
103 || (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (*half_type))
104 || (TYPE_PRECISION (type) < (TYPE_PRECISION (*half_type) * 2)))
105 return false;
106
107 if (!vect_is_simple_use (oprnd0, loop_vinfo, &dummy_gimple, &dummy, &dt))
108 return false;
109
110 return true;
111 }
112
113 /* Helper that returns a new SSA temporary of type TYPE for use in a pattern stmt STMT.
114 If STMT is NULL, the caller must set SSA_NAME_DEF_STMT for the returned SSA var. */
115
116 static tree
117 vect_recog_temp_ssa_var (tree type, gimple stmt)
118 {
119 tree var = create_tmp_var (type, "patt");
120
121 add_referenced_var (var);
122 var = make_ssa_name (var, stmt);
123 return var;
124 }
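
/* Usage sketch (illustrative addition): the recognizers below call this with
   STMT == NULL and attach the definition afterwards, e.g.

     var = vect_recog_temp_ssa_var (type, NULL);
     pattern_stmt = gimple_build_assign_with_ops (WIDEN_SUM_EXPR, var,
                                                  oprnd0, oprnd1);
     SSA_NAME_DEF_STMT (var) = pattern_stmt;

   which discharges the obligation described above.  */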
125
126 /* Function vect_recog_dot_prod_pattern
127
128 Try to find the following pattern:
129
130 type x_t, y_t;
131 TYPE1 prod;
132 TYPE2 sum = init;
133 loop:
134 sum_0 = phi <init, sum_1>
135 S1 x_t = ...
136 S2 y_t = ...
137 S3 x_T = (TYPE1) x_t;
138 S4 y_T = (TYPE1) y_t;
139 S5 prod = x_T * y_T;
140 [S6 prod = (TYPE2) prod; #optional]
141 S7 sum_1 = prod + sum_0;
142
143 where 'TYPE1' is exactly double the size of type 'type', and 'TYPE2' is the
144 same size as 'TYPE1' or bigger. This is a special case of a reduction
145 computation.
146
147 Input:
148
149 * LAST_STMT: A stmt from which the pattern search begins. In the example,
150 when this function is called with S7, the pattern {S3,S4,S5,S6,S7} will be
151 detected.
152
153 Output:
154
155 * TYPE_IN: The type of the input arguments to the pattern.
156
157 * TYPE_OUT: The type of the output of this pattern.
158
159 * Return value: A new stmt that will be used to replace the sequence of
160 stmts that constitute the pattern. In this case it will be:
161 WIDEN_DOT_PRODUCT <x_t, y_t, sum_0>
162
163 Note: The dot-prod idiom is a widening reduction pattern that is
164 vectorized without preserving all the intermediate results. It
165 produces only N/2 (widened) results (by summing up pairs of
166 intermediate results) rather than all N results. Therefore, we
167 cannot allow this pattern when we want to get all the results and in
168 the correct order (as is the case when this computation is in an
169 inner-loop nested in an outer-loop that is being vectorized). */
170
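/* Illustrative source-level example (an addition, not in the original file):
   a loop of the kind this recognizer targets, assuming 16-bit elements and a
   32-bit accumulator:

     short a[N], b[N];
     int i, sum = 0;
     for (i = 0; i < N; i++)
       sum += a[i] * b[i];

   Here 'type' is short and TYPE1 == TYPE2 == int, so S3..S7 collapse into
   DOT_PROD_EXPR <a[i], b[i], sum_0>.  */
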
171 static gimple
172 vect_recog_dot_prod_pattern (gimple last_stmt, tree *type_in, tree *type_out)
173 {
174 gimple stmt;
175 tree oprnd0, oprnd1;
176 tree oprnd00, oprnd01;
177 stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
178 tree type, half_type;
179 gimple pattern_stmt;
180 tree prod_type;
181 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
182 struct loop *loop = LOOP_VINFO_LOOP (loop_info);
183 tree var, rhs;
184
185 if (!is_gimple_assign (last_stmt))
186 return NULL;
187
188 type = gimple_expr_type (last_stmt);
189
190 /* Look for the following pattern
191 DX = (TYPE1) X;
192 DY = (TYPE1) Y;
193 DPROD = DX * DY;
194 DDPROD = (TYPE2) DPROD;
195 sum_1 = DDPROD + sum_0;
196 In which
197 - DX is double the size of X
198 - DY is double the size of Y
199 - DX, DY, DPROD all have the same type
200 - sum is the same size as DPROD or bigger
201 - sum has been recognized as a reduction variable.
202
203 This is equivalent to:
204 DPROD = X w* Y; #widen mult
205 sum_1 = DPROD w+ sum_0; #widen summation
206 or
207 DPROD = X w* Y; #widen mult
208 sum_1 = DPROD + sum_0; #summation
209 */
210
211 /* Starting from LAST_STMT, follow the defs of its uses in search
212 of the above pattern. */
213
214 if (gimple_assign_rhs_code (last_stmt) != PLUS_EXPR)
215 return NULL;
216
217 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
218 {
219 /* Has been detected as widening-summation? */
220
221 stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
222 type = gimple_expr_type (stmt);
223 if (gimple_assign_rhs_code (stmt) != WIDEN_SUM_EXPR)
224 return NULL;
225 oprnd0 = gimple_assign_rhs1 (stmt);
226 oprnd1 = gimple_assign_rhs2 (stmt);
227 half_type = TREE_TYPE (oprnd0);
228 }
229 else
230 {
231 gimple def_stmt;
232
233 if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def)
234 return NULL;
235 oprnd0 = gimple_assign_rhs1 (last_stmt);
236 oprnd1 = gimple_assign_rhs2 (last_stmt);
237 if (TYPE_MAIN_VARIANT (TREE_TYPE (oprnd0)) != TYPE_MAIN_VARIANT (type)
238 || TYPE_MAIN_VARIANT (TREE_TYPE (oprnd1)) != TYPE_MAIN_VARIANT (type))
239 return NULL;
240 stmt = last_stmt;
241
242 if (widened_name_p (oprnd0, stmt, &half_type, &def_stmt))
243 {
244 stmt = def_stmt;
245 oprnd0 = gimple_assign_rhs1 (stmt);
246 }
247 else
248 half_type = type;
249 }
250
251 /* So far so good. Since last_stmt was detected as a (summation) reduction,
252 we know that oprnd1 is the reduction variable (defined by a loop-header
253 phi), and oprnd0 is an ssa-name defined by a stmt in the loop body.
254 Left to check that oprnd0 is defined by a (widen_)mult_expr */
255
256 prod_type = half_type;
257 stmt = SSA_NAME_DEF_STMT (oprnd0);
258 /* FORNOW. Can continue analyzing the def-use chain when this stmt is a phi
259 inside the loop (in case we are analyzing an outer-loop). */
260 if (!is_gimple_assign (stmt))
261 return NULL;
262 stmt_vinfo = vinfo_for_stmt (stmt);
263 gcc_assert (stmt_vinfo);
264 if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_loop_def)
265 return NULL;
266 if (gimple_assign_rhs_code (stmt) != MULT_EXPR)
267 return NULL;
268 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
269 {
270 /* Has been detected as a widening multiplication? */
271
272 stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
273 if (gimple_assign_rhs_code (stmt) != WIDEN_MULT_EXPR)
274 return NULL;
275 stmt_vinfo = vinfo_for_stmt (stmt);
276 gcc_assert (stmt_vinfo);
277 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_loop_def);
278 oprnd00 = gimple_assign_rhs1 (stmt);
279 oprnd01 = gimple_assign_rhs2 (stmt);
280 }
281 else
282 {
283 tree half_type0, half_type1;
284 gimple def_stmt;
285 tree oprnd0, oprnd1;
286
287 oprnd0 = gimple_assign_rhs1 (stmt);
288 oprnd1 = gimple_assign_rhs2 (stmt);
289 if (TYPE_MAIN_VARIANT (TREE_TYPE (oprnd0))
290 != TYPE_MAIN_VARIANT (prod_type)
291 || TYPE_MAIN_VARIANT (TREE_TYPE (oprnd1))
292 != TYPE_MAIN_VARIANT (prod_type))
293 return NULL;
294 if (!widened_name_p (oprnd0, stmt, &half_type0, &def_stmt))
295 return NULL;
296 oprnd00 = gimple_assign_rhs1 (def_stmt);
297 if (!widened_name_p (oprnd1, stmt, &half_type1, &def_stmt))
298 return NULL;
299 oprnd01 = gimple_assign_rhs1 (def_stmt);
300 if (TYPE_MAIN_VARIANT (half_type0) != TYPE_MAIN_VARIANT (half_type1))
301 return NULL;
302 if (TYPE_PRECISION (prod_type) != TYPE_PRECISION (half_type0) * 2)
303 return NULL;
304 }
305
306 half_type = TREE_TYPE (oprnd00);
307 *type_in = half_type;
308 *type_out = type;
309
310 /* Pattern detected. Create a stmt to be used to replace the pattern: */
311 var = vect_recog_temp_ssa_var (type, NULL);
312 rhs = build3 (DOT_PROD_EXPR, type, oprnd00, oprnd01, oprnd1);
313 pattern_stmt = gimple_build_assign (var, rhs);
314
315 if (vect_print_dump_info (REPORT_DETAILS))
316 {
317 fprintf (vect_dump, "vect_recog_dot_prod_pattern: detected: ");
318 print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
319 }
320
321 /* We don't allow changing the order of the computation in the inner-loop
322 when doing outer-loop vectorization. */
323 if (nested_in_vect_loop_p (loop, last_stmt))
324 {
325 if (vect_print_dump_info (REPORT_DETAILS))
326 fprintf (vect_dump, "vect_recog_dot_prod_pattern: not allowed.");
327 return NULL;
328 }
329
330 return pattern_stmt;
331 }
332
333 /* Function vect_recog_widen_mult_pattern
334
335 Try to find the following pattern:
336
337 type a_t, b_t;
338 TYPE a_T, b_T, prod_T;
339
340 S1 a_t = ;
341 S2 b_t = ;
342 S3 a_T = (TYPE) a_t;
343 S4 b_T = (TYPE) b_t;
344 S5 prod_T = a_T * b_T;
345
346 where type 'TYPE' is at least double the size of type 'type'.
347
348 Input:
349
350 * LAST_STMT: A stmt from which the pattern search begins. In the example,
351 when this function is called with S5, the pattern {S3,S4,S5} will be detected.
352
353 Output:
354
355 * TYPE_IN: The type of the input arguments to the pattern.
356
357 * TYPE_OUT: The type of the output of this pattern.
358
359 * Return value: A new stmt that will be used to replace the sequence of
360 stmts that constitute the pattern. In this case it will be:
361 WIDEN_MULT <a_t, b_t>
362 */
363
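/* Illustrative source-level example (an addition, not in the original file):

     short a[N], b[N];
     int c[N], i;
     for (i = 0; i < N; i++)
       c[i] = a[i] * b[i];

   Here 'type' is short and TYPE is int, and S3,S4,S5 collapse into
   WIDEN_MULT_EXPR <a[i], b[i]>.  */
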
364 static gimple
365 vect_recog_widen_mult_pattern (gimple last_stmt,
366 tree *type_in,
367 tree *type_out)
368 {
369 gimple def_stmt0, def_stmt1;
370 tree oprnd0, oprnd1;
371 tree type, half_type0, half_type1;
372 gimple pattern_stmt;
373 tree vectype;
374 tree dummy;
375 tree var;
376 enum tree_code dummy_code;
377 int dummy_int;
378 VEC (tree, heap) *dummy_vec;
379
380 if (!is_gimple_assign (last_stmt))
381 return NULL;
382
383 type = gimple_expr_type (last_stmt);
384
385 /* Starting from LAST_STMT, follow the defs of its uses in search
386 of the above pattern. */
387
388 if (gimple_assign_rhs_code (last_stmt) != MULT_EXPR)
389 return NULL;
390
391 oprnd0 = gimple_assign_rhs1 (last_stmt);
392 oprnd1 = gimple_assign_rhs2 (last_stmt);
393 if (TYPE_MAIN_VARIANT (TREE_TYPE (oprnd0)) != TYPE_MAIN_VARIANT (type)
394 || TYPE_MAIN_VARIANT (TREE_TYPE (oprnd1)) != TYPE_MAIN_VARIANT (type))
395 return NULL;
396
397 /* Check argument 0 */
398 if (!widened_name_p (oprnd0, last_stmt, &half_type0, &def_stmt0))
399 return NULL;
400 oprnd0 = gimple_assign_rhs1 (def_stmt0);
401
402 /* Check argument 1 */
403 if (!widened_name_p (oprnd1, last_stmt, &half_type1, &def_stmt1))
404 return NULL;
405 oprnd1 = gimple_assign_rhs1 (def_stmt1);
406
407 if (TYPE_MAIN_VARIANT (half_type0) != TYPE_MAIN_VARIANT (half_type1))
408 return NULL;
409
410 /* Pattern detected. */
411 if (vect_print_dump_info (REPORT_DETAILS))
412 fprintf (vect_dump, "vect_recog_widen_mult_pattern: detected: ");
413
414 /* Check target support */
415 vectype = get_vectype_for_scalar_type (half_type0);
416 if (!vectype
417 || !supportable_widening_operation (WIDEN_MULT_EXPR, last_stmt, vectype,
418 &dummy, &dummy, &dummy_code,
419 &dummy_code, &dummy_int, &dummy_vec))
420 return NULL;
421
422 *type_in = vectype;
423 *type_out = NULL_TREE;
424
425 /* Pattern supported. Create a stmt to be used to replace the pattern: */
426 var = vect_recog_temp_ssa_var (type, NULL);
427 pattern_stmt = gimple_build_assign_with_ops (WIDEN_MULT_EXPR, var, oprnd0,
428 oprnd1);
429 SSA_NAME_DEF_STMT (var) = pattern_stmt;
430
431 if (vect_print_dump_info (REPORT_DETAILS))
432 print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
433
434 return pattern_stmt;
435 }
436
437
438 /* Function vect_recog_pow_pattern
439
440 Try to find the following pattern:
441
442 x = POW (y, N);
443
444 with POW being one of pow, powf, powi or powif, and N being
445 either 2 or 0.5.
446
447 Input:
448
449 * LAST_STMT: A stmt from which the pattern search begins.
450
451 Output:
452
453 * TYPE_IN: The type of the input arguments to the pattern.
454
455 * TYPE_OUT: The type of the output of this pattern.
456
457 * Return value: A new stmt that will be used to replace the sequence of
458 stmts that constitute the pattern. In this case it will be:
459 x = x * x
460 or
461 x = sqrt (x)
462 */
463
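/* Illustrative examples (additions, not in the original file):

     y[i] = pow (x[i], 2.0);       is rewritten as  y[i] = x[i] * x[i]
     y[i] = pow (x[i], 0.5);       is rewritten as  y[i] = sqrt (x[i]),
                                   provided sqrt is vectorizable on the target

   Any other exponent, or a non-constant one, makes the recognizer return
   NULL.  */
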
464 static gimple
465 vect_recog_pow_pattern (gimple last_stmt, tree *type_in, tree *type_out)
466 {
467 tree type;
468 tree fn, base, exp = NULL;
469 gimple stmt;
470 tree var;
471
472 if (!is_gimple_call (last_stmt) || gimple_call_lhs (last_stmt) == NULL)
473 return NULL;
474
475 type = gimple_expr_type (last_stmt);
476
477 fn = gimple_call_fndecl (last_stmt);
478 switch (DECL_FUNCTION_CODE (fn))
479 {
480 case BUILT_IN_POWIF:
481 case BUILT_IN_POWI:
482 case BUILT_IN_POWF:
483 case BUILT_IN_POW:
484 base = gimple_call_arg (last_stmt, 0);
485 exp = gimple_call_arg (last_stmt, 1);
486 if (TREE_CODE (exp) != REAL_CST
487 && TREE_CODE (exp) != INTEGER_CST)
488 return NULL;
489 break;
490
491 default:
492 return NULL;
493 }
494
495 /* We now have a pow or powi builtin function call with a constant
496 exponent. */
497
498 *type_out = NULL_TREE;
499
500 /* Catch squaring. */
501 if ((host_integerp (exp, 0)
502 && tree_low_cst (exp, 0) == 2)
503 || (TREE_CODE (exp) == REAL_CST
504 && REAL_VALUES_EQUAL (TREE_REAL_CST (exp), dconst2)))
505 {
506 *type_in = TREE_TYPE (base);
507
508 var = vect_recog_temp_ssa_var (TREE_TYPE (base), NULL);
509 stmt = gimple_build_assign_with_ops (MULT_EXPR, var, base, base);
510 SSA_NAME_DEF_STMT (var) = stmt;
511 return stmt;
512 }
513
514 /* Catch square root. */
515 if (TREE_CODE (exp) == REAL_CST
516 && REAL_VALUES_EQUAL (TREE_REAL_CST (exp), dconsthalf))
517 {
518 tree newfn = mathfn_built_in (TREE_TYPE (base), BUILT_IN_SQRT);
519 *type_in = get_vectype_for_scalar_type (TREE_TYPE (base));
520 if (*type_in)
521 {
522 gimple stmt = gimple_build_call (newfn, 1, base);
523 if (vectorizable_function (stmt, *type_in, *type_in)
524 != NULL_TREE)
525 {
526 var = vect_recog_temp_ssa_var (TREE_TYPE (base), stmt);
527 gimple_call_set_lhs (stmt, var);
528 return stmt;
529 }
530 }
531 }
532
533 return NULL;
534 }
535
536
537 /* Function vect_recog_widen_sum_pattern
538
539 Try to find the following pattern:
540
541 type x_t;
542 TYPE x_T, sum = init;
543 loop:
544 sum_0 = phi <init, sum_1>
545 S1 x_t = *p;
546 S2 x_T = (TYPE) x_t;
547 S3 sum_1 = x_T + sum_0;
548
549 where type 'TYPE' is at least double the size of type 'type', i.e., we're
550 summing elements of type 'type' into an accumulator of type 'TYPE'. This is
551 a special case of a reduction computation.
552
553 Input:
554
555 * LAST_STMT: A stmt from which the pattern search begins. In the example,
556 when this function is called with S3, the pattern {S2,S3} will be detected.
557
558 Output:
559
560 * TYPE_IN: The type of the input arguments to the pattern.
561
562 * TYPE_OUT: The type of the output of this pattern.
563
564 * Return value: A new stmt that will be used to replace the sequence of
565 stmts that constitute the pattern. In this case it will be:
566 WIDEN_SUM <x_t, sum_0>
567
568 Note: The widening-sum idiom is a widening reduction pattern that is
569 vectorized without preserving all the intermediate results. It
570 produces only N/2 (widened) results (by summing up pairs of
571 intermediate results) rather than all N results. Therefore, we
572 cannot allow this pattern when we want to get all the results and in
573 the correct order (as is the case when this computation is in an
574 inner-loop nested in an outer-loop that is being vectorized). */
575
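/* Illustrative source-level example (an addition, not in the original file):
   roughly the kind of loop this recognizer targets:

     unsigned short a[N];
     unsigned int i, sum = 0;
     for (i = 0; i < N; i++)
       sum += a[i];

   Here 'type' is unsigned short and TYPE is unsigned int, and S2,S3 collapse
   into WIDEN_SUM_EXPR <a[i], sum_0>.  */
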
576 static gimple
577 vect_recog_widen_sum_pattern (gimple last_stmt, tree *type_in, tree *type_out)
578 {
579 gimple stmt;
580 tree oprnd0, oprnd1;
581 stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
582 tree type, half_type;
583 gimple pattern_stmt;
584 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
585 struct loop *loop = LOOP_VINFO_LOOP (loop_info);
586 tree var;
587
588 if (!is_gimple_assign (last_stmt))
589 return NULL;
590
591 type = gimple_expr_type (last_stmt);
592
593 /* Look for the following pattern
594 DX = (TYPE) X;
595 sum_1 = DX + sum_0;
596 In which DX is at least double the size of X, and sum_1 has been
597 recognized as a reduction variable.
598 */
599
600 /* Starting from LAST_STMT, follow the defs of its uses in search
601 of the above pattern. */
602
603 if (gimple_assign_rhs_code (last_stmt) != PLUS_EXPR)
604 return NULL;
605
606 if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def)
607 return NULL;
608
609 oprnd0 = gimple_assign_rhs1 (last_stmt);
610 oprnd1 = gimple_assign_rhs2 (last_stmt);
611 if (TYPE_MAIN_VARIANT (TREE_TYPE (oprnd0)) != TYPE_MAIN_VARIANT (type)
612 || TYPE_MAIN_VARIANT (TREE_TYPE (oprnd1)) != TYPE_MAIN_VARIANT (type))
613 return NULL;
614
615 /* So far so good. Since last_stmt was detected as a (summation) reduction,
616 we know that oprnd1 is the reduction variable (defined by a loop-header
617 phi), and oprnd0 is an ssa-name defined by a stmt in the loop body.
618 Left to check that oprnd0 is defined by a cast from type 'type' to type
619 'TYPE'. */
620
621 if (!widened_name_p (oprnd0, last_stmt, &half_type, &stmt))
622 return NULL;
623
624 oprnd0 = gimple_assign_rhs1 (stmt);
625 *type_in = half_type;
626 *type_out = type;
627
628 /* Pattern detected. Create a stmt to be used to replace the pattern: */
629 var = vect_recog_temp_ssa_var (type, NULL);
630 pattern_stmt = gimple_build_assign_with_ops (WIDEN_SUM_EXPR, var,
631 oprnd0, oprnd1);
632 SSA_NAME_DEF_STMT (var) = pattern_stmt;
633
634 if (vect_print_dump_info (REPORT_DETAILS))
635 {
636 fprintf (vect_dump, "vect_recog_widen_sum_pattern: detected: ");
637 print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
638 }
639
640 /* We don't allow changing the order of the computation in the inner-loop
641 when doing outer-loop vectorization. */
642 if (nested_in_vect_loop_p (loop, last_stmt))
643 {
644 if (vect_print_dump_info (REPORT_DETAILS))
645 fprintf (vect_dump, "vect_recog_widen_sum_pattern: not allowed.");
646 return NULL;
647 }
648
649 return pattern_stmt;
650 }
651
652
653 /* Function vect_pattern_recog_1
654
655 Input:
656 PATTERN_RECOG_FUNC: A pointer to a function that detects a certain
657 computation pattern.
658 STMT: A stmt from which the pattern search should start.
659
660 If PATTERN_RECOG_FUNC successfully detected the pattern, it creates an
661 expression that computes the same functionality and can be used to
662 replace the sequence of stmts that are involved in the pattern.
663
664 Output:
665 This function checks if the expression returned by PATTERN_RECOG_FUNC is
666 supported in vector form by the target. We use 'TYPE_IN' to obtain the
667 relevant vector type. If 'TYPE_IN' is already a vector type, then this
668 indicates that target support had already been checked by PATTERN_RECOG_FUNC.
669 If 'TYPE_OUT' is also returned by PATTERN_RECOG_FUNC, we check that it fits
670 the available target pattern.
671
672 This function also does some bookkeeping, as explained in the documentation
673 for vect_pattern_recog. */
674
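/* Illustrative note (an addition, not in the original file): when, say,
   vect_recog_widen_sum_pattern succeeds it returns a WIDEN_SUM_EXPR stmt
   with a scalar TYPE_IN, so the else-branch below is taken: the vector type
   is derived from TYPE_IN, optab_for_tree_code maps WIDEN_SUM_EXPR to the
   target's widening-sum optab, and the pattern is dropped unless an insn
   exists for that vector mode whose result mode also matches the vector
   type of TYPE_OUT.  */
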
675 static void
676 vect_pattern_recog_1 (
677 gimple (* vect_recog_func) (gimple, tree *, tree *),
678 gimple_stmt_iterator si)
679 {
680 gimple stmt = gsi_stmt (si), pattern_stmt;
681 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
682 stmt_vec_info pattern_stmt_info;
683 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
684 tree pattern_vectype;
685 tree type_in, type_out;
686 enum tree_code code;
687
688 pattern_stmt = (* vect_recog_func) (stmt, &type_in, &type_out);
689 if (!pattern_stmt)
690 return;
691
692 if (VECTOR_MODE_P (TYPE_MODE (type_in)))
693 {
694 /* No need to check target support (already checked by the pattern
695 recognition function). */
696 pattern_vectype = type_in;
697 }
698 else
699 {
700 enum machine_mode vec_mode;
701 enum insn_code icode;
702 optab optab;
703
704 /* Check target support */
705 pattern_vectype = get_vectype_for_scalar_type (type_in);
706 if (!pattern_vectype)
707 return;
708
709 if (is_gimple_assign (pattern_stmt))
710 code = gimple_assign_rhs_code (pattern_stmt);
711 else
712 {
713 gcc_assert (is_gimple_call (pattern_stmt));
714 code = CALL_EXPR;
715 }
716
717 optab = optab_for_tree_code (code, pattern_vectype, optab_default);
718 vec_mode = TYPE_MODE (pattern_vectype);
719 if (!optab
720 || (icode = optab_handler (optab, vec_mode)->insn_code) ==
721 CODE_FOR_nothing
722 || (type_out
723 && (!get_vectype_for_scalar_type (type_out)
724 || (insn_data[icode].operand[0].mode !=
725 TYPE_MODE (get_vectype_for_scalar_type (type_out))))))
726 return;
727 }
728
729 /* Found a vectorizable pattern. */
730 if (vect_print_dump_info (REPORT_DETAILS))
731 {
732 fprintf (vect_dump, "pattern recognized: ");
733 print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
734 }
735
736 /* Mark the stmts that are involved in the pattern. */
737 gsi_insert_before (&si, pattern_stmt, GSI_SAME_STMT);
738 set_vinfo_for_stmt (pattern_stmt,
739 new_stmt_vec_info (pattern_stmt, loop_vinfo));
740 pattern_stmt_info = vinfo_for_stmt (pattern_stmt);
741
742 STMT_VINFO_RELATED_STMT (pattern_stmt_info) = stmt;
743 STMT_VINFO_DEF_TYPE (pattern_stmt_info) = STMT_VINFO_DEF_TYPE (stmt_info);
744 STMT_VINFO_VECTYPE (pattern_stmt_info) = pattern_vectype;
745 STMT_VINFO_IN_PATTERN_P (stmt_info) = true;
746 STMT_VINFO_RELATED_STMT (stmt_info) = pattern_stmt;
747
748 return;
749 }
750
751
752 /* Function vect_pattern_recog
753
754 Input:
755 LOOP_VINFO - the loop_vec_info of a loop in which we want to look for
756 computation idioms.
757
758 Output - for each computation idiom that is detected we insert a new stmt
759 that provides the same functionality and that can be vectorized. We
760 also record some information in the stmt_vec_info of the relevant
761 stmts, as explained below:
762
763 At the entry to this function we have the following stmts, with the
764 following initial value in the STMT_VINFO fields:
765
766 stmt in_pattern_p related_stmt vec_stmt
767 S1: a_i = .... - - -
768 S2: a_2 = ..use(a_i).. - - -
769 S3: a_1 = ..use(a_2).. - - -
770 S4: a_0 = ..use(a_1).. - - -
771 S5: ... = ..use(a_0).. - - -
772
773 Say the sequence {S1,S2,S3,S4} was detected as a pattern that can be
774 represented by a single stmt. We then:
775 - create a new stmt S6 that will replace the pattern.
776 - insert the new stmt S6 before the last stmt in the pattern
777 - fill in the STMT_VINFO fields as follows:
778
779 in_pattern_p related_stmt vec_stmt
780 S1: a_i = .... - - -
781 S2: a_2 = ..use(a_i).. - - -
782 S3: a_1 = ..use(a_2).. - - -
783 > S6: a_new = .... - S4 -
784 S4: a_0 = ..use(a_1).. true S6 -
785 S5: ... = ..use(a_0).. - - -
786
787 (the last stmt in the pattern (S4) and the new pattern stmt (S6) point
788 to each other through the RELATED_STMT field).
789
790 S6 will be marked as relevant in vect_mark_stmts_to_be_vectorized instead
791 of S4 because it will replace all its uses. Stmts {S1,S2,S3} will
792 remain irrelevant unless used by stmts other than S4.
793
794 If vectorization succeeds, vect_transform_stmt will skip over {S1,S2,S3}
795 (because they are marked as irrelevant). It will vectorize S6, and record
796 a pointer to the new vector stmt VS6 both from S6 (as usual), and also
797 from S4. We do that so that when we get to vectorizing stmts that use the
798 def of S4 (like S5 that uses a_0), we'll know where to take the relevant
799 vector-def from. S4 will be skipped, and S5 will be vectorized as usual:
800
801 in_pattern_p related_stmt vec_stmt
802 S1: a_i = .... - - -
803 S2: a_2 = ..use(a_i).. - - -
804 S3: a_1 = ..use(a_2).. - - -
805 > VS6: va_new = .... - - -
806 S6: a_new = .... - S4 VS6
807 S4: a_0 = ..use(a_1).. true S6 VS6
808 > VS5: ... = ..vuse(va_new).. - - -
809 S5: ... = ..use(a_0).. - - -
810
811 DCE could then get rid of {S1,S2,S3,S4,S5,S6} (if their defs are not used
812 elsewhere), and we'll end up with:
813
814 VS6: va_new = ....
815 VS5: ... = ..vuse(va_new)..
816
817 If vectorization does not succeed, DCE will clean S6 away (its def is
818 not used), and we'll end up with the original sequence.
819 */
820
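/* Illustrative walk-through (an addition, not in the original file), using the
   widening-summation idiom documented above: the stmt 'sum_1 = x_T + sum_0'
   plays the role of S4, the cast 'x_T = (TYPE) x_t' the role of S3, and the
   load of x_t the role of S1/S2.  vect_pattern_recog_1 then inserts

     S6: patt_var = WIDEN_SUM_EXPR <x_t, sum_0>;

   right before S4, marks S4 with in_pattern_p == true and related_stmt == S6,
   points S6 back at S4, and only S6 is subsequently considered for
   vectorization.  */
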
821 void
822 vect_pattern_recog (loop_vec_info loop_vinfo)
823 {
824 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
825 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
826 unsigned int nbbs = loop->num_nodes;
827 gimple_stmt_iterator si;
828 gimple stmt;
829 unsigned int i, j;
830 gimple (* vect_recog_func_ptr) (gimple, tree *, tree *);
831
832 if (vect_print_dump_info (REPORT_DETAILS))
833 fprintf (vect_dump, "=== vect_pattern_recog ===");
834
835 /* Scan through the loop stmts, applying the pattern recognition
836 functions starting at each stmt visited: */
837 for (i = 0; i < nbbs; i++)
838 {
839 basic_block bb = bbs[i];
840 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
841 {
842 stmt = gsi_stmt (si);
843
844 /* Scan over all generic vect_recog_xxx_pattern functions. */
845 for (j = 0; j < NUM_PATTERNS; j++)
846 {
847 vect_recog_func_ptr = vect_vect_recog_func_ptrs[j];
848 vect_pattern_recog_1 (vect_recog_func_ptr, si);
849 }
850 }
851 }
852 }