/* Classes for saving, deduplicating, and emitting analyzer diagnostics.
   Copyright (C) 2019-2020 Free Software Foundation, Inc.
   Contributed by David Malcolm <dmalcolm@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
|
|
20
|
|
21 #include "config.h"
|
|
22 #include "system.h"
|
|
23 #include "coretypes.h"
|
|
24 #include "tree.h"
|
|
25 #include "pretty-print.h"
|
|
26 #include "gcc-rich-location.h"
|
|
27 #include "gimple-pretty-print.h"
|
|
28 #include "function.h"
|
|
29 #include "diagnostic-core.h"
|
|
30 #include "diagnostic-event-id.h"
|
|
31 #include "diagnostic-path.h"
|
|
32 #include "alloc-pool.h"
|
|
33 #include "fibonacci_heap.h"
|
|
34 #include "shortest-paths.h"
|
|
35 #include "sbitmap.h"
|
|
36 #include "tristate.h"
|
|
37 #include "selftest.h"
|
|
38 #include "ordered-hash-map.h"
|
|
39 #include "analyzer/analyzer.h"
|
|
40 #include "analyzer/analyzer-logging.h"
|
|
41 #include "analyzer/sm.h"
|
|
42 #include "analyzer/pending-diagnostic.h"
|
|
43 #include "analyzer/diagnostic-manager.h"
|
|
44 #include "analyzer/region-model.h"
|
|
45 #include "analyzer/constraint-manager.h"
|
|
46 #include "cfg.h"
|
|
47 #include "basic-block.h"
|
|
48 #include "gimple.h"
|
|
49 #include "gimple-iterator.h"
|
|
50 #include "cgraph.h"
|
|
51 #include "digraph.h"
|
|
52 #include "analyzer/supergraph.h"
|
|
53 #include "analyzer/call-string.h"
|
|
54 #include "analyzer/program-point.h"
|
|
55 #include "analyzer/program-state.h"
|
|
56 #include "analyzer/exploded-graph.h"
|
|
57 #include "analyzer/checker-path.h"
|
|
58
|
|
59 #if ENABLE_ANALYZER
|
|
60
|
|
61 namespace ana {
|
|
62
|
|
63 /* class saved_diagnostic. */
|
|
64
|
|
/* saved_diagnostic's ctor.
   Take ownership of D and STMT_FINDER.
   STMT may be NULL provided that STMT_FINDER is non-NULL (asserted below),
   for diagnostics where the pertinent stmt is only known once an
   emission path has been built.  */

saved_diagnostic::saved_diagnostic (const state_machine *sm,
				    const exploded_node *enode,
				    const supernode *snode, const gimple *stmt,
				    stmt_finder *stmt_finder,
				    tree var, state_machine::state_t state,
				    pending_diagnostic *d)
: m_sm (sm), m_enode (enode), m_snode (snode), m_stmt (stmt),
  /* stmt_finder could be on-stack; we want our own copy that can
     outlive that.  */
  m_stmt_finder (stmt_finder ? stmt_finder->clone () : NULL),
  m_var (var), m_state (state),
  m_d (d), m_trailing_eedge (NULL)
{
  /* We need some way to locate the stmt: either directly, or via a
     finder applied to the eventual emission path.  */
  gcc_assert (m_stmt || m_stmt_finder);

  /* We must have an enode in order to be able to look for paths
     through the exploded_graph to this diagnostic.  */
  gcc_assert (m_enode);
}
|
|
87
|
|
/* saved_diagnostic's dtor.
   Frees the cloned stmt_finder and the pending_diagnostic, both of
   which this saved_diagnostic owns (see the ctor).  */

saved_diagnostic::~saved_diagnostic ()
{
  delete m_stmt_finder;
  delete m_d;
}
|
|
95
|
|
96 bool
|
|
97 saved_diagnostic::operator== (const saved_diagnostic &other) const
|
|
98 {
|
|
99 return (m_sm == other.m_sm
|
|
100 /* We don't compare m_enode. */
|
|
101 && m_snode == other.m_snode
|
|
102 && m_stmt == other.m_stmt
|
|
103 /* We don't compare m_stmt_finder. */
|
|
104 && pending_diagnostic::same_tree_p (m_var, other.m_var)
|
|
105 && m_state == other.m_state
|
|
106 && m_d->equal_p (*other.m_d)
|
|
107 && m_trailing_eedge == other.m_trailing_eedge);
|
|
108 }
|
|
109
|
|
110 /* class diagnostic_manager. */
|
|
111
|
|
/* diagnostic_manager's ctor.
   LOGGER can be NULL.  VERBOSITY is stashed for later use when
   pruning emission paths (see prune_path).  */

diagnostic_manager::diagnostic_manager (logger *logger, int verbosity)
: log_user (logger), m_verbosity (verbosity)
{
}
|
|
118
|
|
/* Queue pending_diagnostic D at ENODE for later emission.
   Takes ownership of D (it will be deleted by the saved_diagnostic's
   dtor).  SM/VAR/STATE identify the state-machine state that triggered
   the diagnostic; FINDER, if non-NULL, defers the choice of stmt until
   an emission path has been built.  */

void
diagnostic_manager::add_diagnostic (const state_machine *sm,
				    const exploded_node *enode,
				    const supernode *snode, const gimple *stmt,
				    stmt_finder *finder,
				    tree var, state_machine::state_t state,
				    pending_diagnostic *d)
{
  LOG_FUNC (get_logger ());

  /* We must have an enode in order to be able to look for paths
     through the exploded_graph to the diagnostic.  */
  gcc_assert (enode);

  saved_diagnostic *sd
    = new saved_diagnostic (sm, enode, snode, stmt, finder, var, state, d);
  m_saved_diagnostics.safe_push (sd);
  if (get_logger ())
    log ("adding saved diagnostic %i at SN %i: %qs",
	 m_saved_diagnostics.length () - 1,
	 snode->m_index, d->get_kind ());
}
|
|
143
|
|
/* Queue pending_diagnostic D at ENODE for later emission.
   Variant for diagnostics that aren't associated with a state machine:
   delegates to the overload above with no sm, no var, and state 0.
   Takes ownership of D.  */

void
diagnostic_manager::add_diagnostic (const exploded_node *enode,
				    const supernode *snode, const gimple *stmt,
				    stmt_finder *finder,
				    pending_diagnostic *d)
{
  gcc_assert (enode);
  add_diagnostic (NULL, enode, snode, stmt, finder, NULL_TREE, 0, d);
}
|
|
155
|
|
/* A class for identifying sets of duplicated pending_diagnostic.

   We want to find the simplest dedupe_candidate amongst those that share a
   dedupe_key.  */

class dedupe_key
{
public:
  /* Associate SD with the stmt at which it should be emitted, using
     SD's stmt_finder on EPATH if no stmt was recorded up-front.  */
  dedupe_key (const saved_diagnostic &sd,
	      const exploded_path &epath)
  : m_sd (sd), m_stmt (sd.m_stmt)
  {
    /* Support deferring the choice of stmt until after an emission path has
       been built, using an optional stmt_finder.  */
    if (m_stmt == NULL)
      {
	gcc_assert (sd.m_stmt_finder);
	m_stmt = sd.m_stmt_finder->find_stmt (epath);
      }
    gcc_assert (m_stmt);
  }

  /* Hash only the stmt; equal keys (which also compare m_sd) thus
     always hash equal, at the cost of some collisions.  */
  hashval_t hash () const
  {
    inchash::hash hstate;
    hstate.add_ptr (m_stmt);
    // TODO: m_sd
    return hstate.end ();
  }
  bool operator== (const dedupe_key &other) const
  {
    return (m_sd == other.m_sd
	    && m_stmt == other.m_stmt);
  }

  location_t get_location () const
  {
    return m_stmt->location;
  }

  /* A qsort comparator for use by dedupe_winners::emit_best
     to sort them into location_t order.  */

  static int
  comparator (const void *p1, const void *p2)
  {
    const dedupe_key *pk1 = *(const dedupe_key * const *)p1;
    const dedupe_key *pk2 = *(const dedupe_key * const *)p2;

    location_t loc1 = pk1->get_location ();
    location_t loc2 = pk2->get_location ();

    return linemap_compare_locations (line_table, loc2, loc1);
  }

  /* Non-owning reference to the saved_diagnostic; the key must not
     outlive the diagnostic_manager's vec of diagnostics.  */
  const saved_diagnostic &m_sd;
  const gimple *m_stmt;
};
|
|
214
|
|
/* The value of a slot for a dedupe_key within dedupe_winners:
   the exploded_path for the best candidate for that key, and the
   number of duplicates seen so far.  */

class dedupe_candidate
{
public:
  // has the exploded_path
  dedupe_candidate (const shortest_exploded_paths &sp,
		    const saved_diagnostic &sd)
  : m_epath (sp.get_shortest_path (sd.m_enode)),
    m_num_dupes (0)
  {
  }

  unsigned length () const { return m_epath.length (); }
  const exploded_path &get_path () const { return m_epath; }

  void add_duplicate () { m_num_dupes++; }
  int get_num_dupes () const { return m_num_dupes; }

private:
  exploded_path m_epath;
public:
  /* Public so that dedupe_winners::add can transfer the dupe count
     when a shorter path takes over a slot.  */
  int m_num_dupes;
};
|
|
241
|
|
/* Traits for use by dedupe_winners.
   Keys are hashed/compared by value (via dedupe_key::hash and
   operator==); pointer value NULL marks an empty slot and pointer
   value 1 marks a deleted slot.  */

class dedupe_hash_map_traits
{
public:
  typedef const dedupe_key *key_type;
  typedef dedupe_candidate *value_type;
  typedef dedupe_candidate *compare_type;

  static inline hashval_t hash (const key_type &v)
  {
    return v->hash ();
  }
  static inline bool equal_keys (const key_type &k1, const key_type &k2)
  {
    return *k1 == *k2;
  }
  template <typename T>
  static inline void remove (T &)
  {
    // TODO
  }
  /* Sentinel for a deleted slot: the (invalid) pointer value 1.  */
  template <typename T>
  static inline void mark_deleted (T &entry)
  {
    entry.m_key = reinterpret_cast<key_type> (1);
  }
  /* Sentinel for an empty slot: a NULL key.  */
  template <typename T>
  static inline void mark_empty (T &entry)
  {
    entry.m_key = NULL;
  }
  template <typename T>
  static inline bool is_deleted (const T &entry)
  {
    return entry.m_key == reinterpret_cast<key_type> (1);
  }
  template <typename T>
  static inline bool is_empty (const T &entry)
  {
    return entry.m_key == NULL;
  }
  /* All-zero-bits is a valid empty entry (NULL key).  */
  static const bool empty_zero_p = true;
};
|
|
286
|
|
/* A class for deduplicating diagnostics and finding (and emitting) the
   best diagnostic within each partition.
   Owns both the dedupe_key and dedupe_candidate instances stored in
   its map; they are freed in the dtor.  */

class dedupe_winners
{
public:
  ~dedupe_winners ()
  {
    /* Delete all keys and candidates.  */
    for (map_t::iterator iter = m_map.begin ();
	 iter != m_map.end ();
	 ++iter)
      {
	delete (*iter).first;
	delete (*iter).second;
      }
  }

  /* Determine an exploded_path for SD using SP and, if it's feasible,
     determine if it's the best seen so far for its dedupe_key.
     Retain the winner for each dedupe_key, and discard the rest.  */

  void add (logger *logger,
	    const shortest_exploded_paths &sp,
	    const saved_diagnostic &sd)
  {
    /* Build a dedupe_candidate for SD.
       This uses SP to build an exploded_path.  */
    dedupe_candidate *dc = new dedupe_candidate (sp, sd);

    /* Verify that the epath is feasible.
       State-merging means that not every path in the epath corresponds
       to a feasible one w.r.t. states.
       Here we simply check each duplicate saved_diagnostic's
       shortest_path, and reject any that aren't feasible.
       This could introduce false negatives, as there could be longer
       feasible paths within the egraph.  */
    if (logger)
      logger->log ("considering %qs at SN: %i",
		   sd.m_d->get_kind (), sd.m_snode->m_index);
    if (!dc->get_path ().feasible_p (logger))
      {
	if (logger)
	  logger->log ("rejecting %qs at SN: %i"
		       " due to infeasible path",
		       sd.m_d->get_kind (), sd.m_snode->m_index);
	delete dc;
	return;
      }
    else
      if (logger)
	logger->log ("accepting %qs at SN: %i with feasible path",
		     sd.m_d->get_kind (), sd.m_snode->m_index);

    dedupe_key *key = new dedupe_key (sd, dc->get_path ());
    if (dedupe_candidate **slot = m_map.get (key))
      {
	if (logger)
	  logger->log ("already have this dedupe_key");

	(*slot)->add_duplicate ();

	if (dc->length () < (*slot)->length ())
	  {
	    /* We've got a shorter path for the key; replace
	       the current candidate.  */
	    if (logger)
	      logger->log ("length %i is better than existing length %i;"
			   " taking over this dedupe_key",
			   dc->length (), (*slot)->length ());
	    /* Carry over the dupe count accumulated by the candidate
	       we're about to free.  */
	    dc->m_num_dupes = (*slot)->get_num_dupes ();
	    delete *slot;
	    *slot = dc;
	  }
	else
	  /* We haven't beaten the current best candidate;
	     drop the new candidate.  */
	  {
	    if (logger)
	      logger->log ("length %i isn't better than existing length %i;"
			   " dropping this candidate",
			   dc->length (), (*slot)->length ());
	    delete dc;
	  }
	/* The map already owns an equal key; this one is redundant.  */
	delete key;
      }
    else
      {
	/* This is the first candidate for this key.
	   The map takes ownership of both KEY and DC.  */
	m_map.put (key, dc);
	if (logger)
	  logger->log ("first candidate for this dedupe_key");
      }
  }

  /* Emit the simplest diagnostic within each set.  */

  void emit_best (diagnostic_manager *dm,
		  const exploded_graph &eg)
  {
    LOG_SCOPE (dm->get_logger ());

    /* Get keys into a vec for sorting.  */
    auto_vec<const dedupe_key *> keys (m_map.elements ());
    for (map_t::iterator iter = m_map.begin ();
	 iter != m_map.end ();
	 ++iter)
      keys.quick_push ((*iter).first);

    dm->log ("# keys after de-duplication: %i", keys.length ());

    /* Sort into a good emission order.  */
    keys.qsort (dedupe_key::comparator);

    /* Emit the best candidate for each key.  */
    int i;
    const dedupe_key *key;
    FOR_EACH_VEC_ELT (keys, i, key)
      {
	dedupe_candidate **slot = m_map.get (key);
	gcc_assert (*slot);
	const dedupe_candidate &dc = **slot;

	dm->emit_saved_diagnostic (eg, key->m_sd,
				   dc.get_path (), key->m_stmt,
				   dc.get_num_dupes ());
      }
  }

private:

  /* This maps from each dedupe_key to a current best dedupe_candidate.  */

  typedef hash_map<const dedupe_key *, dedupe_candidate *,
		   dedupe_hash_map_traits> map_t;
  map_t m_map;
};
|
|
424
|
|
/* Emit all saved diagnostics.
   Partitions them by dedupe_key, picks the best (shortest feasible
   path) candidate per partition, and emits one diagnostic per
   partition via emit_saved_diagnostic.  */

void
diagnostic_manager::emit_saved_diagnostics (const exploded_graph &eg)
{
  LOG_SCOPE (get_logger ());
  auto_timevar tv (TV_ANALYZER_DIAGNOSTICS);
  log ("# saved diagnostics: %i", m_saved_diagnostics.length ());

  if (m_saved_diagnostics.length () == 0)
    return;

  /* Compute the shortest_paths once, sharing it between all diagnostics.  */
  shortest_exploded_paths sp (eg, eg.get_origin ());

  /* Iterate through all saved diagnostics, adding them to a dedupe_winners
     instance.  This partitions the saved diagnostics by dedupe_key,
     generating exploded_paths for them, and retaining the best one in each
     partition.  */
  dedupe_winners best_candidates;

  int i;
  saved_diagnostic *sd;
  FOR_EACH_VEC_ELT (m_saved_diagnostics, i, sd)
    best_candidates.add (get_logger (), sp, *sd);

  /* For each dedupe-key, call emit_saved_diagnostic on the "best"
     saved_diagnostic.  */
  best_candidates.emit_best (this, eg);
}
|
|
455
|
|
456 /* Given a saved_diagnostic SD at STMT with feasible path EPATH through EG,
|
|
457 create an checker_path of suitable events and use it to call
|
|
458 SD's underlying pending_diagnostic "emit" vfunc to emit a diagnostic. */
|
|
459
|
|
460 void
|
|
461 diagnostic_manager::emit_saved_diagnostic (const exploded_graph &eg,
|
|
462 const saved_diagnostic &sd,
|
|
463 const exploded_path &epath,
|
|
464 const gimple *stmt,
|
|
465 int num_dupes)
|
|
466 {
|
|
467 LOG_SCOPE (get_logger ());
|
|
468 log ("sd: %qs at SN: %i", sd.m_d->get_kind (), sd.m_snode->m_index);
|
|
469 log ("num dupes: %i", num_dupes);
|
|
470
|
|
471 pretty_printer *pp = global_dc->printer->clone ();
|
|
472
|
|
473 checker_path emission_path;
|
|
474
|
|
475 /* Populate emission_path with a full description of EPATH. */
|
|
476 build_emission_path (eg, epath, &emission_path);
|
|
477
|
|
478 /* Now prune it to just cover the most pertinent events. */
|
|
479 prune_path (&emission_path, sd.m_sm, sd.m_var, sd.m_state);
|
|
480
|
|
481 /* Add a final event to the path, covering the diagnostic itself.
|
|
482 We use the final enode from the epath, which might be different from
|
|
483 the sd.m_enode, as the dedupe code doesn't care about enodes, just
|
|
484 snodes. */
|
|
485 emission_path.add_final_event (sd.m_sm, epath.get_final_enode (), stmt,
|
|
486 sd.m_var, sd.m_state);
|
|
487
|
|
488 /* The "final" event might not be final; if the saved_diagnostic has a
|
|
489 trailing eedge stashed, add any events for it. This is for use
|
|
490 in handling longjmp, to show where a longjmp is rewinding to. */
|
|
491 if (sd.m_trailing_eedge)
|
|
492 add_events_for_eedge (*sd.m_trailing_eedge, eg.get_ext_state (),
|
|
493 &emission_path);
|
|
494
|
|
495 emission_path.prepare_for_emission (sd.m_d);
|
|
496
|
|
497 gcc_rich_location rich_loc (stmt->location);
|
|
498 rich_loc.set_path (&emission_path);
|
|
499
|
|
500 auto_diagnostic_group d;
|
|
501 auto_cfun sentinel (sd.m_snode->m_fun);
|
|
502 if (sd.m_d->emit (&rich_loc))
|
|
503 {
|
|
504 if (num_dupes > 0)
|
|
505 inform_n (stmt->location, num_dupes,
|
|
506 "%i duplicate", "%i duplicates",
|
|
507 num_dupes);
|
|
508 }
|
|
509 delete pp;
|
|
510 }
|
|
511
|
|
/* Given a state change to DST_REP, determine a tree that gives the origin
   of that state at STMT, using DST_STATE's region model, so that state
   changes based on assignments can be tracked back to their origins.

   For example, if we have

     (S1) _1 = malloc (64);
     (S2) EXPR = _1;

   then at stmt S2 we can get the origin of EXPR's state as being _1,
   and thus track the allocation back to S1.

   Return NULL_TREE if no origin can be determined.  */

static tree
get_any_origin (const gimple *stmt,
		tree dst_rep,
		const program_state &dst_state)
{
  if (!stmt)
    return NULL_TREE;

  gcc_assert (dst_rep);

  if (const gassign *assign = dyn_cast <const gassign *> (stmt))
    {
      tree lhs = gimple_assign_lhs (assign);
      /* Use region IDs to compare lhs with DST_REP.  */
      if (dst_state.m_region_model->get_lvalue (lhs, NULL)
	  == dst_state.m_region_model->get_lvalue (dst_rep, NULL))
	{
	  tree rhs1 = gimple_assign_rhs1 (assign);
	  enum tree_code op = gimple_assign_rhs_code (assign);
	  switch (op)
	    {
	    default:
	      //gcc_unreachable (); // TODO
	      break;
	    /* Only plain copies are tracked back; other rhs codes fall
	       through to the NULL_TREE return below.  */
	    case COMPONENT_REF:
	    case SSA_NAME:
	      return rhs1;
	    }
	}
    }
  return NULL_TREE;
}
|
|
556
|
|
557 /* Emit a "path" of events to EMISSION_PATH describing the exploded path
|
|
558 EPATH within EG. */
|
|
559
|
|
560 void
|
|
561 diagnostic_manager::build_emission_path (const exploded_graph &eg,
|
|
562 const exploded_path &epath,
|
|
563 checker_path *emission_path) const
|
|
564 {
|
|
565 LOG_SCOPE (get_logger ());
|
|
566 const extrinsic_state &ext_state = eg.get_ext_state ();
|
|
567 for (unsigned i = 0; i < epath.m_edges.length (); i++)
|
|
568 {
|
|
569 const exploded_edge *eedge = epath.m_edges[i];
|
|
570 add_events_for_eedge (*eedge, ext_state, emission_path);
|
|
571 }
|
|
572 }
|
|
573
|
|
/* Subclass of state_change_visitor that creates state_change_event
   instances, appending them to an emission path.
   Both vfuncs return false so that iteration over state changes
   continues (see for_each_state_change).  */

class state_change_event_creator : public state_change_visitor
{
public:
  state_change_event_creator (const exploded_edge &eedge,
			      checker_path *emission_path)
  : m_eedge (eedge),
    m_emission_path (emission_path)
  {}

  /* Add a state_change_event for a change of a global (non-tree)
     sm-state along m_eedge.  */
  bool on_global_state_change (const state_machine &sm,
			       state_machine::state_t src_sm_val,
			       state_machine::state_t dst_sm_val)
    FINAL OVERRIDE
  {
    const exploded_node *src_node = m_eedge.m_src;
    const program_point &src_point = src_node->get_point ();
    const int src_stack_depth = src_point.get_stack_depth ();
    const exploded_node *dst_node = m_eedge.m_dest;
    const gimple *stmt = src_point.get_stmt ();
    const supernode *supernode = src_point.get_supernode ();
    const program_state &dst_state = dst_node->get_state ();

    int stack_depth = src_stack_depth;

    /* NULL_TREEs: a global state change has no var and no origin.  */
    m_emission_path->add_event (new state_change_event (supernode,
							stmt,
							stack_depth,
							sm,
							NULL_TREE,
							src_sm_val,
							dst_sm_val,
							NULL_TREE,
							dst_state));
    return false;
  }

  /* Add a state_change_event for a change of DST_REP's sm-state along
     m_eedge, attempting to supply an origin tree for the new state.  */
  bool on_state_change (const state_machine &sm,
			state_machine::state_t src_sm_val,
			state_machine::state_t dst_sm_val,
			tree dst_rep,
			svalue_id dst_origin_sid) FINAL OVERRIDE
  {
    const exploded_node *src_node = m_eedge.m_src;
    const program_point &src_point = src_node->get_point ();
    const int src_stack_depth = src_point.get_stack_depth ();
    const exploded_node *dst_node = m_eedge.m_dest;
    const gimple *stmt = src_point.get_stmt ();
    const supernode *supernode = src_point.get_supernode ();
    const program_state &dst_state = dst_node->get_state ();

    int stack_depth = src_stack_depth;

    /* For CFG edges, anchor the event at the src supernode's last stmt
       (e.g. the condition stmt of a branch).  */
    if (m_eedge.m_sedge
	&& m_eedge.m_sedge->m_kind == SUPEREDGE_CFG_EDGE)
      {
	supernode = src_point.get_supernode ();
	stmt = supernode->get_last_stmt ();
	stack_depth = src_stack_depth;
      }

    /* Bulletproofing for state changes at calls/returns;
       TODO: is there a better way? */
    if (!stmt)
      return false;

    /* Prefer the origin recorded in the state; fall back to scanning
       STMT for an assignment that copied the state (get_any_origin).  */
    tree origin_rep
      = dst_state.get_representative_tree (dst_origin_sid);

    if (origin_rep == NULL_TREE)
      origin_rep = get_any_origin (stmt, dst_rep, dst_state);
    m_emission_path->add_event (new state_change_event (supernode,
							stmt,
							stack_depth,
							sm,
							dst_rep,
							src_sm_val,
							dst_sm_val,
							origin_rep,
							dst_state));
    return false;
  }

  const exploded_edge &m_eedge;
  checker_path *m_emission_path;
};
|
|
662
|
|
/* Compare SRC_STATE and DST_STATE (which use EXT_STATE), and call
   VISITOR's on_state_change for every sm-state change that occurs
   to a tree, and on_global_state_change for every global state change
   that occurs.

   This determines the state changes that ought to be reported to
   the user: a combination of the effects of changes to sm_state_map
   (which maps svalues to sm-states), and of region_model changes
   (which map trees to svalues).

   Bail out early and return true if any call to on_global_state_change
   or on_state_change returns true, otherwise return false.

   This is split out to make it easier to experiment with changes to
   exploded_node granularity (so that we can observe what state changes
   lead to state_change_events being emitted).  */

bool
for_each_state_change (const program_state &src_state,
		       const program_state &dst_state,
		       const extrinsic_state &ext_state,
		       state_change_visitor *visitor)
{
  /* Both states must have one sm_state_map per checker.  */
  gcc_assert (src_state.m_checker_states.length ()
	      == ext_state.get_num_checkers ());
  gcc_assert (dst_state.m_checker_states.length ()
	      == ext_state.get_num_checkers ());
  for (unsigned i = 0; i < ext_state.get_num_checkers (); i++)
    {
      const state_machine &sm = ext_state.get_sm (i);
      const sm_state_map &src_smap = *src_state.m_checker_states[i];
      const sm_state_map &dst_smap = *dst_state.m_checker_states[i];

      /* Add events for any global state changes.  */
      if (src_smap.get_global_state () != dst_smap.get_global_state ())
	if (visitor->on_global_state_change (sm,
					     src_smap.get_global_state (),
					     dst_smap.get_global_state ()))
	  return true;

      /* Add events for per-svalue state changes.  */
      for (sm_state_map::iterator_t iter = dst_smap.begin ();
	   iter != dst_smap.end ();
	   ++iter)
	{
	  /* Ideally we'd directly compare the SM state between src state
	     and dst state, but there's no guarantee that the IDs can
	     be meaningfully compared.  */
	  svalue_id dst_sid = (*iter).first;
	  state_machine::state_t dst_sm_val = (*iter).second.m_state;

	  /* Instead, find the trees bound to the svalue in the dst
	     state, and look those trees up in the src state.  */
	  auto_vec<path_var> dst_pvs;
	  dst_state.m_region_model->get_path_vars_for_svalue (dst_sid,
							      &dst_pvs);

	  unsigned j;
	  path_var *dst_pv;
	  FOR_EACH_VEC_ELT (dst_pvs, j, dst_pv)
	    {
	      tree dst_rep = dst_pv->m_tree;
	      gcc_assert (dst_rep);
	      /* Skip path_vars in frames deeper than the src state's
		 stack (presumably frames pushed by this transition).  */
	      if (dst_pv->m_stack_depth
		  >= src_state.m_region_model->get_stack_depth ())
		continue;
	      svalue_id src_sid
		= src_state.m_region_model->get_rvalue (*dst_pv, NULL);
	      if (src_sid.null_p ())
		continue;
	      state_machine::state_t src_sm_val = src_smap.get_state (src_sid);
	      if (dst_sm_val != src_sm_val)
		{
		  svalue_id dst_origin_sid = (*iter).second.m_origin;
		  if (visitor->on_state_change (sm, src_sm_val, dst_sm_val,
						dst_rep, dst_origin_sid))
		    return true;
		}
	    }
	}
    }
  return false;
}
|
|
744
|
|
/* Subroutine of diagnostic_manager::build_emission_path.
   Add any events for EEDGE to EMISSION_PATH: state-change events first,
   then events from any custom edge info, then events for superedges,
   function entries, and statements (keyed off the dst point's kind).  */

void
diagnostic_manager::add_events_for_eedge (const exploded_edge &eedge,
					  const extrinsic_state &ext_state,
					  checker_path *emission_path) const
{
  const exploded_node *src_node = eedge.m_src;
  const program_point &src_point = src_node->get_point ();
  const exploded_node *dst_node = eedge.m_dest;
  const program_point &dst_point = dst_node->get_point ();
  const int dst_stack_depth = dst_point.get_stack_depth ();
  if (get_logger ())
    {
      get_logger ()->start_log_line ();
      pretty_printer *pp = get_logger ()->get_printer ();
      pp_printf (pp, "EN %i -> EN %i: ",
		 eedge.m_src->m_index,
		 eedge.m_dest->m_index);
      src_point.print (pp, format (false));
      pp_string (pp, "-> ");
      dst_point.print (pp, format (false));
      get_logger ()->end_log_line ();
    }
  const program_state &src_state = src_node->get_state ();
  const program_state &dst_state = dst_node->get_state ();

  /* Add state change events for the states that have changed.
     We add these before events for superedges, so that if we have a
     state_change_event due to following an edge, we'll get this sequence
     of events:

      | if (!ptr)
      |    ~
      |    |
      |    (1) assuming 'ptr' is non-NULL  (state_change_event)
      |    (2) following 'false' branch... (start_cfg_edge_event)
     ...
      | do_something (ptr);
      | ~~~~~~~~~~~~~^~~~~
      |              |
      |              (3) ...to here        (end_cfg_edge_event).  */
  state_change_event_creator visitor (eedge, emission_path);
  for_each_state_change (src_state, dst_state, ext_state,
			 &visitor);

  /* Allow non-standard edges to add events, e.g. when rewinding from
     longjmp to a setjmp.  */
  if (eedge.m_custom_info)
    eedge.m_custom_info->add_events_to_path (emission_path, eedge);

  /* Add events for superedges, function entries, and for statements.  */
  switch (dst_point.get_kind ())
    {
    default:
      break;
    case PK_BEFORE_SUPERNODE:
      if (src_point.get_kind () == PK_AFTER_SUPERNODE)
	{
	  if (eedge.m_sedge)
	    add_events_for_superedge (eedge, emission_path);
	}
      /* Add function entry events.  */
      if (dst_point.get_supernode ()->entry_p ())
	{
	  emission_path->add_event
	    (new function_entry_event
	     (dst_point.get_supernode ()->get_start_location (),
	      dst_point.get_fndecl (),
	      dst_stack_depth));
	}
      break;
    case PK_BEFORE_STMT:
      {
	const gimple *stmt = dst_point.get_stmt ();
	const gcall *call = dyn_cast <const gcall *> (stmt);
	/* setjmp calls get a dedicated event kind so that a later
	   longjmp rewind can refer back to them.  */
	if (call && is_setjmp_call_p (call))
	  emission_path->add_event
	    (new setjmp_event (stmt->location,
			       dst_node,
			       dst_point.get_fndecl (),
			       dst_stack_depth,
			       call));
	else
	  emission_path->add_event
	    (new statement_event (stmt,
				  dst_point.get_fndecl (),
				  dst_stack_depth, dst_state));
      }
      break;
    }
}
|
|
838
|
|
/* Subroutine of diagnostic_manager::add_events_for_eedge
   where EEDGE has an underlying superedge i.e. a CFG edge,
   or an interprocedural call/return.
   Add any events for the superedge to EMISSION_PATH, dispatching on
   the superedge kind.  */

void
diagnostic_manager::add_events_for_superedge (const exploded_edge &eedge,
					      checker_path *emission_path)
  const
{
  gcc_assert (eedge.m_sedge);

  const exploded_node *src_node = eedge.m_src;
  const program_point &src_point = src_node->get_point ();
  const exploded_node *dst_node = eedge.m_dest;
  const program_point &dst_point = dst_node->get_point ();
  const int src_stack_depth = src_point.get_stack_depth ();
  const int dst_stack_depth = dst_point.get_stack_depth ();
  /* May be NULL (e.g. an empty supernode); events below fall back to
     UNKNOWN_LOCATION in that case.  */
  const gimple *last_stmt = src_point.get_supernode ()->get_last_stmt ();

  switch (eedge.m_sedge->m_kind)
    {
    /* A CFG edge gets a paired start/end event ("following 'false'
       branch..." / "...to here").  */
    case SUPEREDGE_CFG_EDGE:
      {
	emission_path->add_event
	  (new start_cfg_edge_event (eedge,
				     (last_stmt
				      ? last_stmt->location
				      : UNKNOWN_LOCATION),
				     src_point.get_fndecl (),
				     src_stack_depth));
	emission_path->add_event
	  (new end_cfg_edge_event (eedge,
				   dst_point.get_supernode ()->get_start_location (),
				   dst_point.get_fndecl (),
				   dst_stack_depth));
      }
      break;

    case SUPEREDGE_CALL:
      {
	emission_path->add_event
	  (new call_event (eedge,
			   (last_stmt
			    ? last_stmt->location
			    : UNKNOWN_LOCATION),
			   src_point.get_fndecl (),
			   src_stack_depth));
      }
      break;

    case SUPEREDGE_INTRAPROCEDURAL_CALL:
      {
	/* TODO: add a subclass for this, or generate events for the
	   summary.  */
	emission_path->add_event
	  (new debug_event ((last_stmt
			     ? last_stmt->location
			     : UNKNOWN_LOCATION),
			    src_point.get_fndecl (),
			    src_stack_depth,
			    "call summary"));
      }
      break;

    case SUPEREDGE_RETURN:
      {
	const return_superedge *return_edge
	  = as_a <const return_superedge *> (eedge.m_sedge);

	/* Anchor the return event at the call site in the caller.  */
	const gcall *call_stmt = return_edge->get_call_stmt ();
	emission_path->add_event
	  (new return_event (eedge,
			     (call_stmt
			      ? call_stmt->location
			      : UNKNOWN_LOCATION),
			     dst_point.get_fndecl (),
			     dst_stack_depth));
      }
      break;
    }
}
|
|
921
|
|
/* Prune PATH, based on the verbosity level, to the most pertinent
   events for a diagnostic that involves VAR ending in state STATE
   (for state machine SM).

   PATH is updated in place, and the redundant checker_events are deleted.

   As well as deleting events, call record_critical_state on events in
   which state critical to the pending_diagnostic is being handled; see
   the comment for diagnostic_manager::prune_for_sm_diagnostic.

   Runs three passes: sm-based pruning, interprocedural-event pruning,
   then a final cleanup pass; logs the path before and after.  */

void
diagnostic_manager::prune_path (checker_path *path,
				const state_machine *sm,
				tree var,
				state_machine::state_t state) const
{
  LOG_FUNC (get_logger ());
  path->maybe_log (get_logger (), "path");
  prune_for_sm_diagnostic (path, sm, var, state);
  prune_interproc_events (path);
  finish_pruning (path);
  path->maybe_log (get_logger (), "pruned");
}
|
|
945
|
|
/* First pass of diagnostic_manager::prune_path: apply verbosity level,
   pruning unrelated state change events.

   Iterate backwards through PATH, skipping state change events that aren't
   VAR but update the pertinent VAR when state-copying occurs.

   As well as deleting events, call record_critical_state on events in
   which state critical to the pending_diagnostic is being handled, so
   that the event's get_desc vfunc can potentially supply a more precise
   description of the event to the user.
   e.g. improving
     "calling 'foo' from 'bar'"
   to
     "passing possibly-NULL pointer 'ptr' to 'foo' from 'bar' as param 1"
   when the diagnostic relates to later dereferencing 'ptr'.  */

void
diagnostic_manager::prune_for_sm_diagnostic (checker_path *path,
					     const state_machine *sm,
					     tree var,
					     state_machine::state_t state) const
{
  /* If we have a constant (such as NULL), assume its state is also
     constant, so as not to attempt to get its lvalue whilst tracking the
     origin of the state.  */
  if (var && CONSTANT_CLASS_P (var))
    var = NULL_TREE;

  /* Walk backwards from the final (warning) event towards the start of
     the path, so that VAR/STATE can be traced back through events that
     copied or transformed them.  Deleting the current event shifts later
     events down, hence the extra upper-bound check on IDX.  */
  int idx = path->num_events () - 1;
  while (idx >= 0 && idx < (signed)path->num_events ())
    {
      checker_event *base_event = path->get_checker_event (idx);
      /* Log what we're currently tracking, for debuggability of the
	 pruning logic itself.  */
      if (get_logger ())
	{
	  if (sm)
	    {
	      if (var)
		log ("considering event %i, with var: %qE, state: %qs",
		     idx, var, sm->get_state_name (state));
	      else
		log ("considering event %i, with global state: %qs",
		     idx, sm->get_state_name (state));
	    }
	  else
	    log ("considering event %i", idx);
	}
      switch (base_event->m_kind)
	{
	default:
	  gcc_unreachable ();

	case EK_DEBUG:
	  /* Debug events are only of interest at maximum verbosity.  */
	  if (m_verbosity < 3)
	    {
	      log ("filtering event %i: debug event", idx);
	      path->delete_event (idx);
	    }
	  break;

	case EK_CUSTOM:
	  /* Don't filter custom events.  */
	  break;

	case EK_STMT:
	  {
	    /* If this stmt is the origin of "var", update var.  */
	    if (var)
	      {
		statement_event *stmt_event = (statement_event *)base_event;
		tree new_var = get_any_origin (stmt_event->m_stmt, var,
					       stmt_event->m_dst_state);
		if (new_var)
		  {
		    log ("event %i: switching var of interest from %qE to %qE",
			 idx, var, new_var);
		    var = new_var;
		  }
	      }
	    /* Even when the stmt updated VAR, the event itself is only
	       shown at maximum verbosity.  */
	    if (m_verbosity < 3)
	      {
		log ("filtering event %i: statement event", idx);
		path->delete_event (idx);
	      }
	  }
	  break;

	case EK_FUNCTION_ENTRY:
	  if (m_verbosity < 1)
	    {
	      log ("filtering event %i: function entry", idx);
	      path->delete_event (idx);
	    }
	  break;

	case EK_STATE_CHANGE:
	  {
	    /* Is this state change about the variable we're tracking?
	       Compare via lvalues so that different trees referring to
	       the same storage match.
	       NOTE(review): VAR can be NULL_TREE here (global state);
	       presumably get_lvalue tolerates that — confirm.  */
	    state_change_event *state_change = (state_change_event *)base_event;
	    if (state_change->get_lvalue (state_change->m_var)
		== state_change->get_lvalue (var))
	      {
		/* If the state was copied from another var, switch to
		   tracking the origin var from here backwards.  */
		if (state_change->m_origin)
		  {
		    log ("event %i: switching var of interest from %qE to %qE",
			 idx, var, state_change->m_origin);
		    var = state_change->m_origin;
		  }
		/* Going backwards in time, the state of interest before
		   this event is the "from" state.  */
		log ("event %i: switching state of interest from %qs to %qs",
		     idx, sm->get_state_name (state_change->m_to),
		     sm->get_state_name (state_change->m_from));
		state = state_change->m_from;
	      }
	    else if (m_verbosity < 3)
	      {
		if (var)
		  log ("filtering event %i:"
		       " state change to %qE unrelated to %qE",
		       idx, state_change->m_var, var);
		else
		  log ("filtering event %i: state change to %qE",
		       idx, state_change->m_var);
		path->delete_event (idx);
	      }
	  }
	  break;

	case EK_START_CFG_EDGE:
	  {
	    cfg_edge_event *event = (cfg_edge_event *)base_event;
	    const cfg_superedge& cfg_superedge
	      = event->get_cfg_superedge ();
	    const supernode *dest = event->m_sedge->m_dest;
	    /* Do we have an SSA_NAME defined via a phi node in
	       the dest CFG node?  */
	    if (var && TREE_CODE (var) == SSA_NAME)
	      if (SSA_NAME_DEF_STMT (var)->bb == dest->m_bb)
		{
		  if (gphi *phi
		      = dyn_cast <gphi *> (SSA_NAME_DEF_STMT (var)))
		    {
		      /* Update var based on its phi node: pick the phi
			 argument corresponding to this incoming edge.  */
		      tree old_var = var;
		      var = cfg_superedge.get_phi_arg (phi);
		      log ("updating from %qE to %qE based on phi node",
			   old_var, var);
		      if (get_logger ())
			{
			  pretty_printer pp;
			  pp_gimple_stmt_1 (&pp, phi, 0, (dump_flags_t)0);
			  log (" phi: %s", pp_formatted_text (&pp));
			}
		      /* If we've chosen a bad exploded_path, then the
			 phi arg might be a constant.  Fail gracefully for
			 this case.  */
		      if (CONSTANT_CLASS_P (var))
			{
			  log ("new var is a constant (bad path?);"
			       " setting var to NULL");
			  var = NULL;
			}
		    }
		}

	    /* TODO: is this edge significant to var?
	       See if var can be in other states in the dest, but not
	       in other states in the src?
	       Must have multiple sibling edges.  */

	    if (event->should_filter_p (m_verbosity))
	      {
		log ("filtering event %i: CFG edge", idx);
		path->delete_event (idx);
		/* Also delete the corresponding EK_END_CFG_EDGE, which
		   slid down into slot IDX when we deleted the start
		   event.  */
		gcc_assert (path->get_checker_event (idx)->m_kind
			    == EK_END_CFG_EDGE);
		path->delete_event (idx);
	      }
	  }
	  break;

	case EK_END_CFG_EDGE:
	  /* These come in pairs with EK_START_CFG_EDGE events and are
	     filtered when their start event is filtered.  */
	  break;

	case EK_CALL_EDGE:
	  {
	    /* Crossing backwards from callee into caller: remap VAR
	       into the caller's frame where possible.
	       NOTE(review): unlike the EK_RETURN_EDGE case below, VAR
	       is not checked for NULL before being passed to
	       map_expr_from_callee_to_caller — confirm that the mapping
	       function tolerates a NULL tree.  */
	    call_event *event = (call_event *)base_event;
	    const callgraph_superedge& cg_superedge
	      = event->get_callgraph_superedge ();
	    callsite_expr expr;
	    tree caller_var
	      = cg_superedge.map_expr_from_callee_to_caller (var, &expr);
	    if (caller_var)
	      {
		log ("event %i:"
		     " switching var of interest"
		     " from %qE in callee to %qE in caller",
		     idx, var, caller_var);
		var = caller_var;
		/* If the critical state was passed in as a parameter,
		   let the call event describe that precisely.  */
		if (expr.param_p ())
		  event->record_critical_state (var, state);
	      }
	  }
	  break;

	case EK_RETURN_EDGE:
	  // TODO: potentially update var/state based on return value,
	  // args etc
	  {
	    /* Crossing backwards from caller into callee: remap VAR
	       into the callee's frame where possible.  */
	    if (var)
	      {
		return_event *event = (return_event *)base_event;
		const callgraph_superedge& cg_superedge
		  = event->get_callgraph_superedge ();
		callsite_expr expr;
		tree callee_var
		  = cg_superedge.map_expr_from_caller_to_callee (var, &expr);
		if (callee_var)
		  {
		    log ("event %i:"
			 " switching var of interest"
			 " from %qE in caller to %qE in callee",
			 idx, var, callee_var);
		    var = callee_var;
		    /* If the critical state came back via the return
		       value, let the return event describe that.  */
		    if (expr.return_value_p ())
		      event->record_critical_state (var, state);
		  }
	      }
	  }
	  break;

	case EK_SETJMP:
	  /* TODO: only show setjmp_events that matter i.e. those for which
	     there is a later rewind event using them.  */
	case EK_REWIND_FROM_LONGJMP:
	case EK_REWIND_TO_SETJMP:
	  break;

	case EK_WARNING:
	  /* Always show the final "warning" event in the path.  */
	  break;
	}
      idx--;
    }
}
|
|
1191
|
|
1192 /* Second pass of diagnostic_manager::prune_path: remove redundant
|
|
1193 interprocedural information.
|
|
1194
|
|
1195 For example, given:
|
|
1196 (1)- calling "f2" from "f1"
|
|
1197 (2)--- entry to "f2"
|
|
1198 (3)--- calling "f3" from "f2"
|
|
1199 (4)----- entry to "f3"
|
|
1200 (5)--- returning to "f2" to "f3"
|
|
1201 (6)- returning to "f1" to "f2"
|
|
1202 with no other intervening events, then none of these events are
|
|
1203 likely to be interesting to the user.
|
|
1204
|
|
1205 Prune [..., call, function-entry, return, ...] triples repeatedly
|
|
1206 until nothing has changed. For the example above, this would
|
|
1207 remove events (3, 4, 5), and then remove events (1, 2, 6). */
|
|
1208
|
|
1209 void
|
|
1210 diagnostic_manager::prune_interproc_events (checker_path *path) const
|
|
1211 {
|
|
1212 bool changed = false;
|
|
1213 do
|
|
1214 {
|
|
1215 changed = false;
|
|
1216 int idx = path->num_events () - 1;
|
|
1217 while (idx >= 0)
|
|
1218 {
|
|
1219 /* Prune [..., call, function-entry, return, ...] triples. */
|
|
1220 if (idx + 2 < (signed)path->num_events ()
|
|
1221 && path->get_checker_event (idx)->is_call_p ()
|
|
1222 && path->get_checker_event (idx + 1)->is_function_entry_p ()
|
|
1223 && path->get_checker_event (idx + 2)->is_return_p ())
|
|
1224 {
|
|
1225 if (get_logger ())
|
|
1226 {
|
|
1227 label_text desc
|
|
1228 (path->get_checker_event (idx)->get_desc (false));
|
|
1229 log ("filtering events %i-%i:"
|
|
1230 " irrelevant call/entry/return: %s",
|
|
1231 idx, idx + 2, desc.m_buffer);
|
|
1232 desc.maybe_free ();
|
|
1233 }
|
|
1234 path->delete_event (idx + 2);
|
|
1235 path->delete_event (idx + 1);
|
|
1236 path->delete_event (idx);
|
|
1237 changed = true;
|
|
1238 idx--;
|
|
1239 continue;
|
|
1240 }
|
|
1241
|
|
1242 /* Prune [..., call, return, ...] pairs
|
|
1243 (for -fanalyzer-verbosity=0). */
|
|
1244 if (idx + 1 < (signed)path->num_events ()
|
|
1245 && path->get_checker_event (idx)->is_call_p ()
|
|
1246 && path->get_checker_event (idx + 1)->is_return_p ())
|
|
1247 {
|
|
1248 if (get_logger ())
|
|
1249 {
|
|
1250 label_text desc
|
|
1251 (path->get_checker_event (idx)->get_desc (false));
|
|
1252 log ("filtering events %i-%i:"
|
|
1253 " irrelevant call/return: %s",
|
|
1254 idx, idx + 1, desc.m_buffer);
|
|
1255 desc.maybe_free ();
|
|
1256 }
|
|
1257 path->delete_event (idx + 1);
|
|
1258 path->delete_event (idx);
|
|
1259 changed = true;
|
|
1260 idx--;
|
|
1261 continue;
|
|
1262 }
|
|
1263
|
|
1264 idx--;
|
|
1265 }
|
|
1266
|
|
1267 }
|
|
1268 while (changed);
|
|
1269 }
|
|
1270
|
|
1271 /* Final pass of diagnostic_manager::prune_path.
|
|
1272
|
|
1273 If all we're left with is in one function, then filter function entry
|
|
1274 events. */
|
|
1275
|
|
1276 void
|
|
1277 diagnostic_manager::finish_pruning (checker_path *path) const
|
|
1278 {
|
|
1279 if (!path->interprocedural_p ())
|
|
1280 {
|
|
1281 int idx = path->num_events () - 1;
|
|
1282 while (idx >= 0 && idx < (signed)path->num_events ())
|
|
1283 {
|
|
1284 checker_event *base_event = path->get_checker_event (idx);
|
|
1285 if (base_event->m_kind == EK_FUNCTION_ENTRY)
|
|
1286 {
|
|
1287 log ("filtering event %i:"
|
|
1288 " function entry for purely intraprocedural path", idx);
|
|
1289 path->delete_event (idx);
|
|
1290 }
|
|
1291 idx--;
|
|
1292 }
|
|
1293 }
|
|
1294 }
|
|
1295
|
|
1296 } // namespace ana
|
|
1297
|
|
1298 #endif /* #if ENABLE_ANALYZER */
|