Mercurial comparison view of contrib/testsuite-management/validate_failures.py
at revision 111:04ced10e8804 (branch: gcc 7)
author: kono
date: Fri, 27 Oct 2017 22:46:09 +0900
parents: (none)   children: (none)
diff against predecessor revision 68:561a7518be6b
1 #!/usr/bin/python
2
3 # Script to compare testsuite failures against a list of known-to-fail
4 # tests.
5 #
6 # NOTE: This script is used in installations that are running Python 2.4.
7 # Please stick to syntax features available in 2.4 and earlier
8 # versions.
9
10 # Contributed by Diego Novillo <dnovillo@google.com>
11 #
12 # Copyright (C) 2011-2013 Free Software Foundation, Inc.
13 #
14 # This file is part of GCC.
15 #
16 # GCC is free software; you can redistribute it and/or modify
17 # it under the terms of the GNU General Public License as published by
18 # the Free Software Foundation; either version 3, or (at your option)
19 # any later version.
20 #
21 # GCC is distributed in the hope that it will be useful,
22 # but WITHOUT ANY WARRANTY; without even the implied warranty of
23 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 # GNU General Public License for more details.
25 #
26 # You should have received a copy of the GNU General Public License
27 # along with GCC; see the file COPYING. If not, write to
28 # the Free Software Foundation, 51 Franklin Street, Fifth Floor,
29 # Boston, MA 02110-1301, USA.
30
31 """This script provides a coarser XFAILing mechanism that requires no
32 detailed DejaGNU markings. This is useful in a variety of scenarios:
33
34 - Development branches with many known failures waiting to be fixed.
35 - Release branches with known failures that are not considered
36 important for the particular release criteria used in that branch.
37
38 The script must be executed from the toplevel build directory. When
39 executed it will:
40
41 1- Determine the target built: TARGET
42 2- Determine the source directory: SRCDIR
43 3- Look for a failure manifest file in
44 <SRCDIR>/<MANIFEST_SUBDIR>/<MANIFEST_NAME>.xfail
45 4- Collect all the <tool>.sum files from the build tree.
46 5- Produce a report stating:
47 a- Failures expected in the manifest but not present in the build.
48 b- Failures in the build not expected in the manifest.
49 6- If all the build failures are expected in the manifest, it exits
50 with exit code 0. Otherwise, it exits with error code 1.
51
52 Manifest files contain expected DejaGNU results that are otherwise
53 treated as failures.
54 They may also contain additional text:
55
56 # This is a comment. - self explanatory
57 @include file - the file is a path relative to the includer
58 @remove result text - result text is removed from the expected set
59 """
60
61 import datetime
62 import optparse
63 import os
64 import re
65 import sys
66
67 # Handled test results.
68 _VALID_TEST_RESULTS = [ 'FAIL', 'UNRESOLVED', 'XPASS', 'ERROR' ]
69 _VALID_TEST_RESULTS_REX = re.compile("%s" % "|".join(_VALID_TEST_RESULTS))
70
71 # Subdirectory of srcdir in which to find the manifest file.
72 _MANIFEST_SUBDIR = 'contrib/testsuite-management'
73
74 # Pattern for naming manifest files.
75 # The first argument should be the toplevel GCC(/GNU tool) source directory.
76 # The second argument is the manifest subdir.
77 # The third argument is the manifest target, which defaults to the target
78 # triplet used during the build.
79 _MANIFEST_PATH_PATTERN = '%s/%s/%s.xfail'
80
81 # The options passed to the program.
82 _OPTIONS = None
83
def Error(msg):
  """Report a fatal error on stderr and terminate with exit code 1."""
  sys.stderr.write('error: %s\n' % msg)
  sys.exit(1)
87
88
class TestResult(object):
  """Describes a single DejaGNU test result as emitted in .sum files.

  Only unsuccessful tests are of interest, so just a subset of all the
  tests are ever loaded.

  The summary line used to build the test result should have this format:

  attrlist | XPASS: gcc.dg/unroll_1.c (test for excess errors)
  ^^^^^^^^   ^^^^^  ^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^
  optional   state  name              description
  attributes

  Attributes:
    attrs: A comma separated list of attributes.  Valid values:
           flaky            The test may not always fail; it is reported
                            but does not affect the results.
           expire=YYYYMMDD  After this date the entry produces an error
                            whether it is in the manifest or not.
    state: One of UNRESOLVED, XPASS or FAIL.
    name: File name for the test.
    description: String describing the test (flags used, dejagnu message, etc)
    ordinal: Monotonically increasing integer, used to keep results for
             one .exp file sorted by the order the tests were run.
  """

  def __init__(self, summary_line, ordinal=-1):
    try:
      (self.attrs, summary_line) = SplitAttributesFromSummaryLine(summary_line)
      try:
        # A summary line is "<STATE>: <name> <description>".
        m = re.match(r'([A-Z]+):\s*(\S+)\s*(.*)', summary_line)
        (self.state, self.name, self.description) = m.groups()
      except:
        # Report the offending line before propagating the failure.
        sys.stdout.write('Failed to parse summary line: "%s"\n' % summary_line)
        raise
      self.ordinal = ordinal
    except ValueError:
      Error('Cannot parse summary line "%s"' % summary_line)

    if self.state not in _VALID_TEST_RESULTS:
      Error('Invalid test result %s in "%s" (parsed as "%s")' % (
          self.state, summary_line, self))

  def __lt__(self, other):
    # Sort primarily by test name, then by run order within a name.
    if self.name != other.name:
      return self.name < other.name
    return self.name == other.name and self.ordinal < other.ordinal

  def __hash__(self):
    # Must agree with __eq__: attrs and ordinal are excluded.
    return hash(self.state) ^ hash(self.name) ^ hash(self.description)

  def __eq__(self, other):
    # Two results match when state, name and description all agree;
    # attrs and ordinal are deliberately ignored.
    return ((self.state, self.name, self.description) ==
            (other.state, other.name, other.description))

  def __ne__(self, other):
    return not self.__eq__(other)

  def __str__(self):
    if self.attrs:
      prefix = '%s | ' % self.attrs
    else:
      prefix = ''
    return '%s%s: %s %s' % (prefix, self.state, self.name, self.description)

  def ExpirationDate(self):
    """Return the expiration as a datetime.date, or None if not set."""
    if re.search(r'expire=', self.attrs):
      stamp = re.search(r'expire=(\d\d\d\d)(\d\d)(\d\d)', self.attrs)
      if not stamp:
        Error('Invalid expire= format in "%s". Must be of the form '
              '"expire=YYYYMMDD"' % self)
      return datetime.date(int(stamp.group(1)),
                           int(stamp.group(2)),
                           int(stamp.group(3)))
    return None

  def HasExpired(self):
    """Return True if the expiration date of this result has passed."""
    limit = self.ExpirationDate()
    if limit:
      return datetime.date.today() > limit
179
180
def GetMakefileValue(makefile_name, value_name):
  """Return the value assigned to value_name in the given Makefile.

  value_name must include everything up to and excluding the '='
  separator as it appears in the Makefile (e.g. 'srcdir =').
  Returns None when the Makefile does not exist or the value is not
  found.
  """
  if not os.path.exists(makefile_name):
    return None
  makefile = open(makefile_name)
  # try/finally guarantees the handle is closed even if reading or
  # parsing raises (the original leaked it on that path).
  try:
    for line in makefile:
      if line.startswith(value_name):
        (_, value) = line.split('=', 1)
        return value.strip()
  finally:
    makefile.close()
  return None
192
193
def ValidBuildDirectory(builddir):
  """Return True if builddir looks like a build tree (it has a Makefile)."""
  return (os.path.exists(builddir) and
          os.path.exists('%s/Makefile' % builddir))
199
200
def IsComment(line):
  """Return True if line is a manifest comment (leading '#')."""
  return line[0:1] == '#'
204
205
def SplitAttributesFromSummaryLine(line):
  """Split 'attrs | summary' into (attrs, summary).

  Lines that begin directly with a handled result state never carry
  attributes, even if they contain a '|'; attrs is then ''.
  """
  attrs = ''
  if '|' in line and not _VALID_TEST_RESULTS_REX.match(line):
    (attrs, line) = line.split('|', 1)
    attrs = attrs.strip()
  return (attrs, line.strip())
215
216
def IsInterestingResult(line):
  """Return True if line is one of the summary lines we care about."""
  rest = SplitAttributesFromSummaryLine(line)[1]
  return _VALID_TEST_RESULTS_REX.match(rest) is not None
221
222
def IsInclude(line):
  """Return True if line is an '@include <file>' directive."""
  return line[:len('@include ')] == '@include '
226
227
def GetIncludeFile(line, includer):
  """Resolve the file named by an '@include' line relative to includer."""
  base_dir = os.path.dirname(includer)
  target = line[len('@include '):]
  return os.path.join(base_dir, target.strip())
233
234
def IsNegativeResult(line):
  """Return True if line is an '@remove <result>' directive."""
  return line[:len('@remove ')] == '@remove '
238
239
def GetNegativeResult(line):
  """Return the result text that follows an '@remove ' directive."""
  return line[len('@remove '):].strip()
244
245
def ParseManifestWorker(result_set, manifest_path):
  """Read manifest_path, adding the contents to result_set.

  Recurses into '@include' files and removes '@remove' entries.
  """
  if _OPTIONS.verbosity >= 1:
    sys.stdout.write('Parsing manifest file %s.\n' % manifest_path)
  manifest_file = open(manifest_path)
  for raw_line in manifest_file:
    entry = raw_line.strip()
    # Blank lines and comments carry no results.
    if entry == '' or IsComment(entry):
      continue
    if IsNegativeResult(entry):
      result_set.remove(TestResult(GetNegativeResult(entry)))
    elif IsInclude(entry):
      ParseManifestWorker(result_set, GetIncludeFile(entry, manifest_path))
    elif IsInterestingResult(entry):
      result_set.add(TestResult(entry))
    else:
      Error('Unrecognized line in manifest file: %s' % entry)
  manifest_file.close()
266
267
def ParseManifest(manifest_path):
  """Create a set of TestResult instances from the given manifest file."""
  results = set()
  ParseManifestWorker(results, manifest_path)
  return results
273
274
def ParseSummary(sum_fname):
  """Create a set of TestResult instances from the given summary file.

  Entries whose expire= date has passed are reported and excluded, so
  that if they still occur in the actual results they show up as
  unexpected failures.
  """
  result_set = set()
  # ordinal is used when sorting the results so that tests within each
  # .exp file are kept sorted.
  ordinal = 0
  sum_file = open(sum_fname)
  # try/finally guarantees the handle is closed even if parsing raises
  # (the original leaked it on that path).
  try:
    for line in sum_file:
      if IsInterestingResult(line):
        result = TestResult(line, ordinal)
        ordinal += 1
        if result.HasExpired():
          # Tests that have expired are not added to the set of expected
          # results. If they are still present in the set of actual results,
          # they will cause an error to be reported.
          sys.stdout.write('WARNING: Expected failure "%s" has expired.\n'
                           % line.strip())
          continue
        result_set.add(result)
  finally:
    sum_file.close()
  return result_set
295
296
def GetManifest(manifest_path):
  """Build a set of expected failures from the manifest file.

  Each entry in the manifest file should have the format understood
  by the TestResult constructor.  Returns an empty set when no
  manifest file exists for this target.
  """
  if not os.path.exists(manifest_path):
    return set()
  return ParseManifest(manifest_path)
309
310
def CollectSumFiles(builddir):
  """Return the paths of all .sum files under builddir.

  Version-control metadata directories are not descended into.
  """
  sum_files = []
  for root, dirs, files in os.walk(builddir):
    # Pruning dirs in place stops os.walk from entering them.
    for vcs_dir in ('.svn', '.git'):
      if vcs_dir in dirs:
        dirs.remove(vcs_dir)
    for fname in files:
      if fname.endswith('.sum'):
        sum_files.append(os.path.join(root, fname))
  return sum_files
321
322
def GetResults(sum_files):
  """Collect all the test results from the given .sum files."""
  merged = set()
  for sum_fname in sum_files:
    sys.stdout.write('\t%s\n' % sum_fname)
    merged.update(ParseSummary(sum_fname))
  return merged
330
331
def CompareResults(manifest, actual):
  """Compare sets of results and return two sets:
  - Results present in ACTUAL but missing from MANIFEST; these will be
    reported as errors.
  - Results present in MANIFEST but missing from ACTUAL (ignoring tests
    marked flaky); these will be reported as warnings, since they are
    expected failures that are not failing anymore.
  """
  # Everything the build produced that the manifest does not expect.
  unexpected = actual - manifest

  # Expected failures that did not materialize; flaky tests may or may
  # not fail, so their absence is not noteworthy.
  missing = set(entry for entry in manifest
                if 'flaky' not in entry.attrs and entry not in actual)

  return unexpected, missing
357
358
def GetManifestPath(srcdir, target, user_provided_must_exist):
  """Return the full path to the manifest file.

  A user-supplied --manifest always wins; otherwise the path is built
  from srcdir and the target triplet.
  """
  user_path = _OPTIONS.manifest
  if user_path:
    if user_provided_must_exist and not os.path.exists(user_path):
      Error('Manifest does not exist: %s' % user_path)
    return user_path
  if not srcdir:
    Error('Could not determine the location of GCC\'s source tree. '
          'The Makefile does not contain a definition for "srcdir".')
  if not target:
    Error('Could not determine the target triplet for this build. '
          'The Makefile does not contain a definition for "target_alias".')
  return _MANIFEST_PATH_PATTERN % (srcdir, _MANIFEST_SUBDIR, target)
374
375
def GetBuildData():
  """Return (srcdir, target) read from the build Makefile.

  Returns (None, None) when not inside a build directory but the user
  supplied both --manifest and --results; exits with an error otherwise.
  """
  if not ValidBuildDirectory(_OPTIONS.build_dir):
    # If we have been given a set of results to use, we may not be
    # inside a valid GCC build directory.  In that case, the user must
    # provide both a manifest file and a set of results to check
    # against it.
    if not _OPTIONS.results or not _OPTIONS.manifest:
      Error('%s is not a valid GCC top level build directory. '
            'You must use --manifest and --results to do the validation.' %
            _OPTIONS.build_dir)
    return None, None
  makefile = '%s/Makefile' % _OPTIONS.build_dir
  srcdir = GetMakefileValue(makefile, 'srcdir =')
  target = GetMakefileValue(makefile, 'target_alias=')
  sys.stdout.write('Source directory: %s\n' % srcdir)
  sys.stdout.write('Build target: %s\n' % target)
  return srcdir, target
393
394
def PrintSummary(msg, summary):
  """Print msg followed by every result in summary, sorted."""
  sys.stdout.write('\n\n%s\n' % msg)
  for result in sorted(summary):
    sys.stdout.write('%s\n' % result)
399
400
def GetSumFiles(results, build_dir):
  """Return the list of .sum files to analyze.

  A user-supplied --results list wins over scanning build_dir.
  """
  if results:
    sys.stdout.write('Getting actual results from user-provided results\n')
    return results.split()
  sys.stdout.write('Getting actual results from build directory %s\n'
                   % build_dir)
  return CollectSumFiles(build_dir)
409
410
def PerformComparison(expected, actual, ignore_missing_failures):
  """Report the differences between expected and actual results.

  Returns True when the build contains no unexpected failures.
  """
  actual_vs_expected, expected_vs_actual = CompareResults(expected, actual)

  tests_ok = not actual_vs_expected
  if actual_vs_expected:
    PrintSummary('Unexpected results in this build (new failures)',
                 actual_vs_expected)

  if expected_vs_actual and not ignore_missing_failures:
    PrintSummary('Expected results not present in this build (fixed tests)'
                 '\n\nNOTE: This is not a failure. It just means that these '
                 'tests were expected\nto fail, but either they worked in '
                 'this configuration or they were not\npresent at all.\n',
                 expected_vs_actual)

  if tests_ok:
    sys.stdout.write('\nSUCCESS: No unexpected failures.\n')

  return tests_ok
431
432
def CheckExpectedResults():
  """Validate the build's results against the expected-failure manifest.

  Returns True when every failure in the build is listed in the manifest.
  """
  srcdir, target = GetBuildData()
  manifest_path = GetManifestPath(srcdir, target, True)
  sys.stdout.write('Manifest: %s\n' % manifest_path)
  expected = GetManifest(manifest_path)
  sum_files = GetSumFiles(_OPTIONS.results, _OPTIONS.build_dir)
  actual = GetResults(sum_files)

  if _OPTIONS.verbosity >= 1:
    PrintSummary('Tests expected to fail', expected)
    PrintSummary('\nActual test results', actual)

  return PerformComparison(expected, actual, _OPTIONS.ignore_missing_failures)
446
447
def ProduceManifest():
  """Write a manifest file listing the current build's failures.

  Refuses to overwrite an existing manifest unless --force was given.
  Returns True on success.
  """
  (srcdir, target) = GetBuildData()
  manifest_path = GetManifestPath(srcdir, target, False)
  sys.stdout.write('Manifest: %s\n' % manifest_path)
  if os.path.exists(manifest_path) and not _OPTIONS.force:
    Error('Manifest file %s already exists.\nUse --force to overwrite.' %
          manifest_path)

  sum_files = GetSumFiles(_OPTIONS.results, _OPTIONS.build_dir)
  actual = GetResults(sum_files)
  manifest_file = open(manifest_path, 'w')
  # try/finally guarantees the manifest is closed even if a write fails
  # (the original leaked the handle on that path).
  try:
    for result in sorted(actual):
      sys.stdout.write('%s\n' % result)
      manifest_file.write('%s\n' % result)
  finally:
    manifest_file.close()

  return True
465
466
def CompareBuilds():
  """Compare this build's results against those of a clean build.

  The clean build's results act as the expected set.  Returns True
  when the two match (modulo flaky tests).
  """
  (srcdir, target) = GetBuildData()

  sum_files = GetSumFiles(_OPTIONS.results, _OPTIONS.build_dir)
  patched = GetResults(sum_files)

  clean_sum_files = GetSumFiles(_OPTIONS.results, _OPTIONS.clean_build)
  clean = GetResults(clean_sum_files)

  return PerformComparison(clean, patched, _OPTIONS.ignore_missing_failures)
477
478
def Main(argv):
  """Parse command-line options and run the requested action.

  Returns 0 when validation succeeds, 1 otherwise.

  Fix: the --manifest help text said 'contrib/testsuite-managment',
  which disagrees with the actual default path (_MANIFEST_SUBDIR is
  'contrib/testsuite-management').
  """
  parser = optparse.OptionParser(usage=__doc__)

  # Keep the following list sorted by option name.
  parser.add_option('--build_dir', action='store', type='string',
                    dest='build_dir', default='.',
                    help='Build directory to check (default = .)')
  parser.add_option('--clean_build', action='store', type='string',
                    dest='clean_build', default=None,
                    help='Compare test results from this build against '
                    'those of another (clean) build. Use this option '
                    'when comparing the test results of your patch versus '
                    'the test results of a clean build without your patch. '
                    'You must provide the path to the top directory of your '
                    'clean build.')
  parser.add_option('--force', action='store_true', dest='force',
                    default=False, help='When used with --produce_manifest, '
                    'it will overwrite an existing manifest file '
                    '(default = False)')
  parser.add_option('--ignore_missing_failures', action='store_true',
                    dest='ignore_missing_failures', default=False,
                    help='When a failure is expected in the manifest but '
                    'it is not found in the actual results, the script '
                    'produces a note alerting to this fact. This means '
                    'that the expected failure has been fixed, or '
                    'it did not run, or it may simply be flaky '
                    '(default = False)')
  parser.add_option('--manifest', action='store', type='string',
                    dest='manifest', default=None,
                    help='Name of the manifest file to use (default = '
                    'taken from '
                    'contrib/testsuite-management/<target_alias>.xfail)')
  parser.add_option('--produce_manifest', action='store_true',
                    dest='produce_manifest', default=False,
                    help='Produce the manifest for the current '
                    'build (default = False)')
  parser.add_option('--results', action='store', type='string',
                    dest='results', default=None, help='Space-separated list '
                    'of .sum files with the testing results to check. The '
                    'only content needed from these files are the lines '
                    'starting with FAIL, XPASS or UNRESOLVED (default = '
                    '.sum files collected from the build directory).')
  parser.add_option('--verbosity', action='store', dest='verbosity',
                    type='int', default=0, help='Verbosity level (default = 0)')
  global _OPTIONS
  (_OPTIONS, _) = parser.parse_args(argv[1:])

  # Dispatch to the requested mode of operation.
  if _OPTIONS.produce_manifest:
    retval = ProduceManifest()
  elif _OPTIONS.clean_build:
    retval = CompareBuilds()
  else:
    retval = CheckExpectedResults()

  if retval:
    return 0
  else:
    return 1
537
538
if __name__ == '__main__':
  # Propagate Main's status (0 = all failures expected) as the exit code.
  sys.exit(Main(sys.argv))