Autopkgtest: Show force-badtest results more prominently

Add new state "IGNORE-FAIL" for regressions which have a 'force' or
'force-badtest' hint. In the HTML, show them as yellow "Ignored failure"
(without a retry link) instead of "Regression", and drop the separate
"Should wait for ..." reason, as that is hard to read for packages with a long
list of tests.

This also makes retry-autopkgtest-regressions more useful, as it will now only
run the "real" regressions.
bzr-import-20160707
Martin Pitt 9 years ago
parent ca4ed75019
commit cacd7d7667
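
For reference, the hints consulted here are ordinary britney hints-file entries. A hypothetical pair of lines matching the test case below (package names and versions are illustrative) would look roughly like:

    force-badtest lightgreen/1
    force green/2

With this change, a hint of either kind that matches the failing test's version yields IGNORE-FAIL instead of REGRESSION, and the HTML excuse shows "Ignored failure" without a retry link.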

@@ -29,6 +29,8 @@ from urllib.request import urlopen
 import apt_pkg
 import amqplib.client_0_8 as amqp
 
+from britney_util import same_source
+
 from consts import (AUTOPKGTEST, BINARIES, DEPENDS, RDEPENDS, SOURCE, VERSION)
@@ -560,7 +562,7 @@ class AutoPackageTest(object):
         '''Return test results for triggering package
 
         Return (passed, src, ver, arch ->
-                 (ALWAYSFAIL|PASS|REGRESSION|RUNNING|RUNNING-ALWAYSFAIL, log_url))
+                 (ALWAYSFAIL|PASS|REGRESSION|IGNORE-FAIL|RUNNING|RUNNING-ALWAYSFAIL, log_url))
         iterable for all package tests that got triggered by trigsrc/trigver.
         '''
         # (src, ver) -> arch -> ALWAYSFAIL|PASS|REGRESSION|RUNNING|RUNNING-ALWAYSFAIL
@@ -589,7 +591,19 @@ class AutoPackageTest(object):
                 if trigsrc.startswith('linux-meta') or trigsrc == 'linux':
                     ever_passed = False
 
-                result = ever_passed and 'REGRESSION' or 'ALWAYSFAIL'
+                if ever_passed:
+                    # do we have a force{,-badtest} hint?
+                    hints = self.britney.hints.search('force-badtest', package=testsrc)
+                    hints.extend(self.britney.hints.search('force', package=testsrc))
+                    for h in hints:
+                        if same_source(h.version, testver):
+                            result = 'IGNORE-FAIL'
+                            break
+                    else:
+                        result = 'REGRESSION'
+                else:
+                    result = 'ALWAYSFAIL'
+
                 url = os.path.join(self.britney.options.adt_swift_url,
                                    self.swift_container,
                                    self.series,
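
Taken on its own, the new branch above is a three-way decision for a failing test. A minimal standalone sketch of that logic, assuming simplified stand-ins for britney's hint objects and same_source() (which in the real code does a proper Debian version comparison rather than string equality):

    from collections import namedtuple

    # Hypothetical stand-in for a britney hint; the real object carries more fields.
    Hint = namedtuple('Hint', ['version', 'user'])

    def same_source(a, b):
        # simplified stand-in for britney_util.same_source()
        return a == b

    def classify_failure(ever_passed, hints, testver):
        '''Map a failed test to ALWAYSFAIL, IGNORE-FAIL or REGRESSION.'''
        if not ever_passed:
            # the test never passed before, so the failure is not a regression
            return 'ALWAYSFAIL'
        for h in hints:
            if same_source(h.version, testver):
                # a force/force-badtest hint for this version silences the failure
                return 'IGNORE-FAIL'
        return 'REGRESSION'

    assert classify_failure(False, [], '1') == 'ALWAYSFAIL'
    assert classify_failure(True, [Hint('1', 'pitti')], '1') == 'IGNORE-FAIL'
    assert classify_failure(True, [Hint('2', 'pitti')], '1') == 'REGRESSION'

The for/else in the hunk itself has the same effect: the else branch assigning 'REGRESSION' only runs when no hint matched and the loop was not broken out of.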

@@ -1968,24 +1968,6 @@ class Britney(object):
                                           [('ppa', p) for p in self.options.adt_ppas])
                         e.addtest('autopkgtest', '%s %s' % (adtsrc, adtver),
                                   arch, status, log_url, **kwargs)
-                        # hints can override failures
-                        if not passed:
-                            hints = self.hints.search(
-                                'force-badtest', package=adtsrc)
-                            hints.extend(
-                                self.hints.search('force', package=adtsrc))
-                            forces = [
-                                x for x in hints
-                                if same_source(adtver, x.version) ]
-                            if forces:
-                                e.force()
-                                e.addreason('badtest %s %s' % (adtsrc, adtver))
-                                e.addhtml(
-                                    "Should wait for %s %s test, but forced by "
-                                    "%s" % (adtsrc, adtver, forces[0].user))
-                                passed = True
                         if not passed:
                             adtpass = False

@@ -21,6 +21,7 @@ EXCUSES_LABELS = {
     "FAIL": '<span style="background:#ff6666">Failed</span>',
     "ALWAYSFAIL": '<span style="background:#e5c545">Always failed</span>',
     "REGRESSION": '<span style="background:#ff6666">Regression</span>',
+    "IGNORE-FAIL": '<span style="background:#e5c545">Ignored failure</span>',
     "RUNNING": '<span style="background:#99ddff">Test in progress</span>',
     "RUNNING-ALWAYSFAIL": '<span style="background:#99ddff">Test in progress (always failed)</span>',
 }

@@ -1255,13 +1255,11 @@ class T(TestBase):
         self.do_test(
             [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')],
             {'green': (True, {'green 2': {'amd64': 'PASS', 'i386': 'PASS'},
-                              'lightgreen 1': {'amd64': 'REGRESSION', 'i386': 'REGRESSION'},
+                              'lightgreen 1': {'amd64': 'IGNORE-FAIL', 'i386': 'IGNORE-FAIL'},
                               'darkgreen 1': {'amd64': 'PASS', 'i386': 'PASS'},
                              }),
             },
-            {'green': [('old-version', '1'), ('new-version', '2'),
-                       ('forced-reason', 'badtest lightgreen 1'),
-                       ('excuses', 'Should wait for lightgreen 1 test, but forced by pitti')]
+            {'green': [('old-version', '1'), ('new-version', '2')]
             })

     def test_hint_force_badtest_different_version(self):
