Stop accepting results without a recorded trigger

When we don't have an up-to-date results.cache, the fallback logic for
handling test results attributes old test results to new requests, as we have
no reliable way to map one to the other. This is detrimental for ad hoc
britney instances, such as those testing PPAs, and also when we need to
rebuild our cache.

Ignore test results that lack a recorded package trigger, and drop the
fallback code that handled them.

The main risk is that, if we need to rebuild the cache from scratch, we might
miss historic "PASS" results from tests that haven't run since we switched to
recording triggers two months ago. But in those two months most of the
interesting packages have run, in particular for the development series and
for stable kernels, and non-kernel SRUs are not auto-promoted anyway.
bzr-import-20160707
Martin Pitt
parent 501aa206d8
commit 2ebfc0bb8f
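
For context on the mechanism this commit relies on: when britney requests a
test, the trigger travels with the request, autopkgtest records it in the
result's testinfo.json under custom_environment as ADT_TEST_TRIGGERS, and
britney parses it back when fetching results from swift. A minimal standalone
sketch of that round trip, using the same parsing expression as the diff
below (the sample values are illustrative):

    # Illustrative round trip of a recorded trigger; the testinfo layout
    # mirrors what the parsing code in the diff below expects.
    testinfo = {'custom_environment': ['ADT_TEST_TRIGGERS=green/2 lightgreen/1']}

    for e in testinfo.get('custom_environment', []):
        if e.startswith('ADT_TEST_TRIGGERS='):
            # each trigger is "trigsrc/trigver"
            result_triggers = [tuple(i.split('/', 1))
                               for i in e.split('=', 1)[1].split() if '/' in i]
            break
    else:
        result_triggers = None  # untriggered result: with this commit, ignored

    assert result_triggers == [('green', '2'), ('lightgreen', '1')]

A result without any recorded trigger (e.g. one produced before triggers were
recorded, or an ad hoc manual run) hits the else branch and is now dropped
instead of being guessed at.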

@@ -351,7 +351,7 @@ class AutoPackageTest(object):
         self.requested_tests.setdefault(src, {}).setdefault(
             ver, {}).setdefault(arch, set()).add((trigsrc, trigver))
 
-    def fetch_swift_results(self, swift_url, src, arch, trigger=None):
+    def fetch_swift_results(self, swift_url, src, arch):
         '''Download new results for source package/arch from swift'''
 
         # prepare query: get all runs with a timestamp later than latest_stamp
@@ -387,14 +387,12 @@ class AutoPackageTest(object):
         for p in result_paths:
             self.fetch_one_result(
-                os.path.join(swift_url, 'autopkgtest-' + self.series, p, 'result.tar'),
-                src, arch, trigger)
+                os.path.join(swift_url, 'autopkgtest-' + self.series, p, 'result.tar'), src, arch)
 
-    def fetch_one_result(self, url, src, arch, trigger=None):
+    def fetch_one_result(self, url, src, arch):
         '''Download one result URL for source/arch
 
-        Remove matching pending_tests entries. If trigger is given (src, ver)
-        it is added to the triggers of that result.
+        Remove matching pending_tests entries.
         '''
         try:
             f = urlopen(url)
@@ -413,11 +411,7 @@ class AutoPackageTest(object):
             exitcode = int(tar.extractfile('exitcode').read().strip())
             srcver = tar.extractfile('testpkg-version').read().decode().strip()
             (ressrc, ver) = srcver.split()
-            try:
-                testinfo = json.loads(tar.extractfile('testinfo.json').read().decode())
-            except KeyError:
-                self.log_error('warning: %s does not have a testinfo.json' % url)
-                testinfo = {}
+            testinfo = json.loads(tar.extractfile('testinfo.json').read().decode())
         except (KeyError, ValueError, tarfile.TarError) as e:
             self.log_error('%s is damaged, ignoring: %s' % (url, str(e)))
             # ignore this; this will leave an orphaned request in pending.txt
@@ -431,13 +425,13 @@ class AutoPackageTest(object):
             return
 
         # parse recorded triggers in test result
-        if 'custom_environment' in testinfo:
-            for e in testinfo['custom_environment']:
-                if e.startswith('ADT_TEST_TRIGGERS='):
-                    result_triggers = [tuple(i.split('/', 1)) for i in e.split('=', 1)[1].split() if '/' in i]
-                    break
-            else:
-                result_triggers = None
+        for e in testinfo.get('custom_environment', []):
+            if e.startswith('ADT_TEST_TRIGGERS='):
+                result_triggers = [tuple(i.split('/', 1)) for i in e.split('=', 1)[1].split() if '/' in i]
+                break
+        else:
+            self.log_error('%s result has no ADT_TEST_TRIGGERS, ignoring' % url)
+            return
 
         stamp = os.path.basename(os.path.dirname(url))
         # allow some skipped tests, but nothing else
@@ -446,20 +440,14 @@ class AutoPackageTest(object):
         self.log_verbose('Fetched test result for %s/%s/%s %s (triggers: %s): %s' % (
             src, ver, arch, stamp, result_triggers, passed and 'pass' or 'fail'))
 
-        # remove matching test requests, remember triggers
-        satisfied_triggers = set()
+        # remove matching test requests
         for request_map in [self.requested_tests, self.pending_tests]:
             for pending_ver, pending_archinfo in request_map.get(src, {}).copy().items():
                 # don't consider newer requested versions
                 if apt_pkg.version_compare(pending_ver, ver) > 0:
                     continue
 
-                if result_triggers:
-                    # explicitly recording/retrieving test triggers is the
-                    # preferred (and robust) way of matching results to pending
-                    # requests
-                    for result_trigger in result_triggers:
-                        satisfied_triggers.add(result_trigger)
-                        try:
-                            request_map[src][pending_ver][arch].remove(result_trigger)
-                            self.log_verbose('-> matches pending request %s/%s/%s for trigger %s' %
+                for result_trigger in result_triggers:
+                    try:
+                        request_map[src][pending_ver][arch].remove(result_trigger)
+                        self.log_verbose('-> matches pending request %s/%s/%s for trigger %s' %
@@ -467,47 +455,34 @@ class AutoPackageTest(object):
-                        except (KeyError, ValueError):
-                            self.log_verbose('-> does not match any pending request for %s/%s/%s' %
-                                             (src, pending_ver, arch))
-                else:
-                    # ... but we still need to support results without
-                    # testinfo.json and recorded triggers until we stop caring about
-                    # existing wily and trusty results; match the latest result to all
-                    # triggers for src that have at least the requested version
-                    try:
-                        t = pending_archinfo[arch]
-                        self.log_verbose('-> matches pending request %s/%s for triggers %s' %
-                                         (src, pending_ver, str(t)))
-                        satisfied_triggers.update(t)
-                        del request_map[src][pending_ver][arch]
-                    except KeyError:
-                        self.log_verbose('-> does not match any pending request for %s/%s' %
-                                         (src, pending_ver))
-
-        # FIXME: this is a hack that mostly applies to re-running tests
-        # manually without giving a trigger. Tests which don't get
-        # triggered by a particular kernel version are fine with that, so
-        # add some heuristic once we drop the above code.
-        if trigger:
-            satisfied_triggers.add(trigger)
+                    except (KeyError, ValueError):
+                        self.log_verbose('-> does not match any pending request for %s/%s/%s' %
+                                         (src, pending_ver, arch))
 
         # add this result
         src_arch_results = self.test_results.setdefault(src, {}).setdefault(arch, [stamp, {}, False])
-        if passed:
-            # update ever_passed field, unless we got triggered from
-            # linux-meta*: we trigger separate per-kernel tests for reverse
-            # test dependencies, and we don't want to track per-trigger
-            # ever_passed. This would be wrong for everything except the
-            # kernel, and the kernel team tracks per-kernel regressions already
-            if not result_triggers or not result_triggers[0][0].startswith('linux-meta'):
-                src_arch_results[2] = True
-
-        if satisfied_triggers:
-            for trig in satisfied_triggers:
-                src_arch_results[1].setdefault(ver, {})[trig[0] + '/' + trig[1]] = passed
-        else:
-            # this result did not match any triggers? then we are in backwards
-            # compat mode for results without recorded triggers; update all
-            # results
-            for trig in src_arch_results[1].setdefault(ver, {}):
-                src_arch_results[1][ver][trig] = passed
+        trigmap = src_arch_results[1].setdefault(ver, {})
+        for trig in result_triggers:
+            trig_idx = trig[0] + '/' + trig[1]
+            # If a test runs because of its own package (newer version), ensure
+            # that we got a new enough version; FIXME: this should be done more
+            # generically by matching against testpkg-versions
+            if trig[0] == src and apt_pkg.version_compare(ver, trig[1]) < 0:
+                self.log_error('test trigger %s, but run for older version %s, ignoring' %
+                               (trig_idx, ver))
+                continue
+            # passed results are always good, but don't clobber existing passed
+            # results with failures from re-runs
+            if passed or trig_idx not in trigmap:
+                trigmap[trig_idx] = passed
+        # update ever_passed field, unless we got triggered from
+        # linux-meta*: we trigger separate per-kernel tests for reverse
+        # test dependencies, and we don't want to track per-trigger
+        # ever_passed. This would be wrong for everything except the
+        # kernel, and the kernel team tracks per-kernel regressions already
+        if passed and not result_triggers[0][0].startswith('linux-meta'):
+            src_arch_results[2] = True
 
         # update latest_stamp
         if stamp > src_arch_results[0]:
             src_arch_results[0] = stamp
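
For readers without the surrounding code: the three indices used above
correspond to [latest_stamp, per-version trigger map, ever_passed], which
also matches the cache contents asserted in the test changes below. A small
illustrative sketch of the "don't clobber a pass" update rule, with made-up
sample data:

    # Sketch of one test_results entry; sample values are illustrative.
    # test_results[src][arch] = [latest_stamp,
    #                            {ver: {'trigsrc/trigver': passed}},
    #                            ever_passed]
    src_arch_results = ['20150101_100200@',
                        {'1': {'passedbefore/1': True},
                         '2': {'green/2': True}},
                        True]

    # Update rule from the hunk above: a failing re-run must not overwrite
    # an earlier pass recorded for the same trigger.
    trigmap = src_arch_results[1].setdefault('2', {})
    trig_idx, passed = 'green/2', False      # a failing re-run arrives
    if passed or trig_idx not in trigmap:
        trigmap[trig_idx] = passed
    assert trigmap['green/2'] is True        # the earlier pass survives
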
@@ -640,7 +615,7 @@ class AutoPackageTest(object):
                     if arch not in self.pending_tests.get(trigpkg, {}).get(trigver, {}):
                         self.log_verbose('Checking for new results for failed %s on %s for trigger %s/%s' %
                                          (pkg, arch, trigpkg, trigver))
-                        self.fetch_swift_results(self.britney.options.adt_swift_url, pkg, arch, (trigpkg, trigver))
+                        self.fetch_swift_results(self.britney.options.adt_swift_url, pkg, arch)
 
         # update the results cache
         with open(self.results_cache_file + '.new', 'w') as f:

@@ -303,7 +303,7 @@ lightgreen 1 i386 green 2
             res = json.load(f)
         self.assertEqual(res['green']['i386'],
                          ['20150101_100200@',
-                          {'1': {}, '2': {'green/2': True}},
+                          {'1': {'passedbefore/1': True}, '2': {'green/2': True}},
                           True])
         self.assertEqual(res['lightgreen']['amd64'],
                          ['20150101_100101@',
@@ -369,47 +369,31 @@ lightgreen 1 i386 green 2
         self.assertIn('darkgreen 1 amd64 green 2', self.pending_requests)
         self.assertIn('lightgreen 1 i386 green 2', self.pending_requests)
 
-    def test_multi_rdepends_with_tests_mixed_no_recorded_triggers(self):
-        '''Multiple reverse dependencies with tests (mixed results), no recorded triggers'''
-
-        # green has passed before on i386 only, therefore ALWAYSFAILED on amd64
-        self.swift.set_results({'autopkgtest-series': {
-            'series/i386/g/green/20150101_100000@': (0, 'green 1', tr('passedbefore/1')),
-        }})
-
-        # first run requests tests and marks them as pending
-        self.do_test(
-            [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')],
-            {'green': (False, {'green 2': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING'},
-                               'lightgreen 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'},
-                               'darkgreen 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'},
-                              })
-            },
-            {'green': [('old-version', '1'), ('new-version', '2')]})
-
-        # second run collects the results
+    def test_results_without_triggers(self):
+        '''Old results without recorded triggers'''
+
         self.swift.set_results({'autopkgtest-series': {
             'series/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1'),
             'series/amd64/l/lightgreen/20150101_100100@': (0, 'lightgreen 1'),
             'series/amd64/l/lightgreen/20150101_100101@': (4, 'lightgreen 1'),
+            'series/i386/g/green/20150101_100100@': (0, 'green 1', tr('passedbefore/1')),
             'series/i386/g/green/20150101_100200@': (0, 'green 2'),
             'series/amd64/g/green/20150101_100201@': (4, 'green 2'),
         }})
 
-        out = self.do_test(
-            [],
-            {'green': (False, {'green 2': {'amd64': 'ALWAYSFAIL', 'i386': 'PASS'},
-                               'lightgreen 1': {'amd64': 'REGRESSION', 'i386': 'RUNNING-ALWAYSFAILED'},
-                               'darkgreen 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'PASS'},
+        # none of the above results should be accepted
+        self.do_test(
+            [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')],
+            {'green': (False, {'green 2': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING'},
+                               'lightgreen 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'},
+                               'darkgreen 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'RUNNING-ALWAYSFAILED'},
                               })
             })
 
-        # not expecting any failures to retrieve from swift
-        self.assertNotIn('Failure', out, out)
-
         # there should be some pending ones
         self.assertIn('darkgreen 1 amd64 green 2', self.pending_requests)
         self.assertIn('lightgreen 1 i386 green 2', self.pending_requests)
+        self.assertIn('green 2 i386 green 2', self.pending_requests)
 
     def test_multi_rdepends_with_tests_regression(self):
         '''Multiple reverse dependencies with tests (regression)'''
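
A note on the pending_requests lines asserted above and in the hunk below:
judging by how they are built and matched throughout this diff, each line
encodes "src ver arch trigsrc trigver". An illustrative parse (the field
order is inferred, not documented in this commit):

    # Illustrative: one pending_requests line per (test, trigger) pair,
    # in the field order "src ver arch trigsrc trigver" (inferred from
    # the assertions in this diff).
    line = 'darkgreen 1 amd64 green 2'
    src, ver, arch, trigsrc, trigver = line.split()
    assert (src, ver, arch) == ('darkgreen', '1', 'amd64')
    assert (trigsrc, trigver) == ('green', '2')
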
@@ -1452,12 +1436,13 @@ fancy 1 i386 linux-meta-lts-grumpy 1
                 ('linux-image-64only', {'Source': 'linux-meta-64only', 'Architecture': 'amd64'}, None),
             ],
             {'linux-meta': (True, {'fancy 1': {'amd64': 'PASS', 'i386': 'PASS'}}),
-             # we don't have an explicit result for amd64, so the old one counts
-             'linux-meta-lts-grumpy': (True, {'fancy 1': {'amd64': 'ALWAYSFAIL', 'i386': 'ALWAYSFAIL'}}),
+             # we don't have an explicit result for amd64
+             'linux-meta-lts-grumpy': (True, {'fancy 1': {'amd64': 'RUNNING-ALWAYSFAILED', 'i386': 'ALWAYSFAIL'}}),
              'linux-meta-64only': (True, {'fancy 1': {'amd64': 'PASS'}}),
             })
 
-        self.assertEqual(self.pending_requests, '')
+        self.assertEqual(self.pending_requests,
+                         'fancy 1 amd64 linux-meta-lts-grumpy 1\n')
 
     def test_kernel_triggered_tests(self):
         '''linux, lxc, glibc tests get triggered by linux-meta* uploads'''
