merge lp:~jibel/britney/fix_missing_results

Colin Watson 2014-05-12 15:40:45 +01:00
commit a6fbf59cbc
3 changed files with 240 additions and 69 deletions


@@ -19,18 +19,24 @@ from __future__ import print_function
from collections import defaultdict
from contextlib import closing
import logging
import os
import subprocess
import tempfile
from textwrap import dedent
import time
import apt_pkg
adt_britney = os.path.expanduser("~/auto-package-testing/jenkins/adt-britney")
ADT_PASS = ["PASS", "ALWAYSFAIL"]
ADT_EXCUSES_LABELS = {
"PASS": '<span style="background:#87d96c">Pass</span>',
"ALWAYSFAIL": '<span style="background:#e5c545">Always failed</span>',
"REGRESSION": '<span style="background:#ff6666">Regression</span>',
"RUNNING": '<span style="background:#99ddff">Test in progress</span>',
}
class AutoPackageTest(object):
"""autopkgtest integration
@@ -62,7 +68,7 @@ class AutoPackageTest(object):
components: main restricted universe multiverse
rsync_host: rsync://tachash.ubuntu-ci/adt/
datadir: ~/proposed-migration/autopkgtest/data""" %
(self.series, self.series, home)), file=rc_file)
(self.series, self.series, home)), file=rc_file)
@property
def _request_path(self):
@@ -85,38 +91,39 @@ class AutoPackageTest(object):
continue
linebits = line.split()
if len(linebits) < 2:
logging.warning(
"Invalid line format: '%s', skipped" % line)
print("W: Invalid line format: '%s', skipped" % line)
continue
yield linebits
def read(self):
'''Loads a list of results
This function loads the list of results returned by __parse() and builds
two lists:
- a list of source packages/versions with all the causes that
triggered a test and the result of the test for each trigger.
- a list of packages/versions that triggered a test, with the source
package/version and the result triggered by each package.
These lists are used by results(), called from britney.py, to generate
excuses and to know which uploads passed, caused a regression or have
always been failing.
'''
self.pkglist = defaultdict(dict)
self.pkgcauses = defaultdict(lambda: defaultdict(list))
for linebits in self._parse(self._result_path):
src = linebits.pop(0)
ver = linebits.pop(0)
self.pkglist[src][ver] = {
"status": "NEW",
"causes": {},
(src, ver, status) = linebits[:3]
if not (src in self.pkglist and ver in self.pkglist[src]):
self.pkglist[src][ver] = {
"status": status,
"causes": {}
}
try:
status = linebits.pop(0).upper()
self.pkglist[src][ver]["status"] = status
while True:
trigsrc = linebits.pop(0)
trigver = linebits.pop(0)
self.pkglist[src][ver]["causes"][trigsrc] = trigver
except IndexError:
# End of the list
pass
for src in self.pkglist:
all_vers = sorted(self.pkglist[src], cmp=apt_pkg.version_compare)
for ver in self.pkglist[src]:
status = self.pkglist[src][ver]["status"]
for trigsrc, trigver in \
self.pkglist[src][ver]["causes"].items():
self.pkgcauses[trigsrc][trigver].append((status, src, ver))
i = iter(linebits[3:])
for trigsrc, trigver in zip(i, i):
self.pkglist[src][ver]['causes'].setdefault(
trigsrc, []).append((trigver, status))
self.pkgcauses[trigsrc][trigver].append((status, src, ver))
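For reference, each adt-britney result line now carries the source, version and status followed by (trigger source, trigger version) pairs; below is a minimal standalone sketch of the pairwise parsing used above, with a hypothetical sample line (not the committed code).

from collections import defaultdict

# Hypothetical result line: "src ver status trigsrc trigver [trigsrc trigver ...]"
line = "darkgreen 1 FAIL green 2 lightgreen 1"
linebits = line.split()

pkglist = defaultdict(dict)
src, ver, status = linebits[:3]
pkglist[src].setdefault(ver, {"status": status, "causes": {}})

# zip(i, i) consumes the remaining fields two at a time as (trigsrc, trigver) pairs.
i = iter(linebits[3:])
for trigsrc, trigver in zip(i, i):
    pkglist[src][ver]["causes"].setdefault(trigsrc, []).append((trigver, status))

# pkglist["darkgreen"]["1"] ==
#   {"status": "FAIL",
#    "causes": {"green": [("2", "FAIL")], "lightgreen": [("1", "FAIL")]}}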
def _adt_britney(self, *args):
command = [
@@ -197,12 +204,29 @@ class AutoPackageTest(object):
self.read()
if self.britney.options.verbose:
for src in sorted(self.pkglist):
for ver in self.pkglist[src]:
print("I: [%s] - Collected autopkgtest status for %s_%s: "
"%s" %
(time.asctime(), src, ver,
self.pkglist[src][ver]["status"]))
for ver in sorted(self.pkglist[src],
cmp=apt_pkg.version_compare):
for trigsrc in sorted(self.pkglist[src][ver]['causes']):
for trigver, status \
in self.pkglist[src][ver]['causes'][trigsrc]:
print("I: [%s] - Collected autopkgtest status "
"for %s_%s/%s_%s: " "%s" % (
time.asctime(), src, ver, trigsrc,
trigver, status))
def results(self, trigsrc, trigver):
for status, src, ver in self.pkgcauses[trigsrc][trigver]:
# Check for regression
if status == 'FAIL':
passed_once = False
for ver in self.pkglist[src]:
for trigsrc in self.pkglist[src][ver]['causes']:
for trigver, status \
in self.pkglist[src][ver]['causes'][trigsrc]:
if status == 'PASS':
passed_once = True
if not passed_once:
status = 'ALWAYSFAIL'
else:
status = 'REGRESSION'
yield status, src, ver
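The FAIL branch above only reports a regression when some recorded run of the same package ever passed; otherwise the failure is downgraded to ALWAYSFAIL. A standalone sketch of that decision, assuming a pkglist shaped like the one read() builds (the helper name is hypothetical):

def classify_failure(pkglist, src):
    # FAIL becomes REGRESSION only if any recorded run of src ever passed;
    # a package that never passed is reported as ALWAYSFAIL instead.
    passed_once = any(
        status == "PASS"
        for ver in pkglist.get(src, {})
        for runs in pkglist[src][ver]["causes"].values()
        for _trigver, status in runs)
    return "REGRESSION" if passed_once else "ALWAYSFAIL"

print(classify_failure(
    {"darkgreen": {"1": {"status": "FAIL",
                         "causes": {"green": [("1", "PASS"), ("2", "FAIL")]}}}},
    "darkgreen"))                                   # REGRESSION
print(classify_failure(
    {"darkgreen": {"1": {"status": "FAIL",
                         "causes": {"green": [("2", "FAIL")]}}}},
    "darkgreen"))                                   # ALWAYSFAIL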


@@ -222,7 +222,7 @@ from britney_util import (old_libraries_format, same_source, undo_changes,
from consts import (VERSION, SECTION, BINARIES, MAINTAINER, FAKESRC,
SOURCE, SOURCEVER, ARCHITECTURE, DEPENDS, CONFLICTS,
PROVIDES, RDEPENDS, RCONFLICTS, MULTIARCH)
from autopkgtest import AutoPackageTest
from autopkgtest import AutoPackageTest, ADT_PASS, ADT_EXCUSES_LABELS
__author__ = 'Fabio Tranchitella and the Debian Release Team'
__version__ = '2.0'
@@ -1756,18 +1756,19 @@ class Britney(object):
adtpass = True
for status, adtsrc, adtver in autopkgtest.results(
e.name, e.ver[1]):
public_url = "%s/%s-adt-%s/" % (
public_url = "%s/%s-adt-%s/lastBuild" % (
jenkins_public, self.options.adt_series,
adtsrc.replace("+", "-"))
private_url = "%s/%s-adt-%s/" % (
private_url = "%s/%s-adt-%s/lastBuild" % (
jenkins_private, self.options.adt_series,
adtsrc.replace("+", "-"))
adt_label = ADT_EXCUSES_LABELS.get(status, status)
e.addhtml(
"autopkgtest for %s %s: %s (Jenkins: "
"<a href=\"%s\">public</a>, "
"<a href=\"%s\">private</a>)" %
(adtsrc, adtver, status, public_url, private_url))
if status != "PASS":
(adtsrc, adtver, adt_label, public_url, private_url))
if status not in ADT_PASS:
hints = self.hints.search(
'force-badtest', package=adtsrc)
hints.extend(

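In the excuse HTML the status is now rendered through ADT_EXCUSES_LABELS, and only the statuses listed in ADT_PASS let the package through without looking for a force-badtest hint. A minimal sketch of that mapping with hypothetical inputs (the real code also adds the Jenkins public/private links):

ADT_PASS = ["PASS", "ALWAYSFAIL"]
ADT_EXCUSES_LABELS = {
    "PASS": '<span style="background:#87d96c">Pass</span>',
    "ALWAYSFAIL": '<span style="background:#e5c545">Always failed</span>',
    "REGRESSION": '<span style="background:#ff6666">Regression</span>',
    "RUNNING": '<span style="background:#99ddff">Test in progress</span>',
}

def excuse_line(adtsrc, adtver, status):
    # Unknown statuses fall back to the raw string.
    adt_label = ADT_EXCUSES_LABELS.get(status, status)
    needs_hint = status not in ADT_PASS   # REGRESSION/RUNNING need a force-badtest hint
    return "autopkgtest for %s %s: %s" % (adtsrc, adtver, adt_label), needs_hint

# excuse_line("darkgreen", "1", "ALWAYSFAIL")
#   -> ('autopkgtest for darkgreen 1: <span ...>Always failed</span>', False)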
tests/autopkgtest.py → tests/test_autopkgtest.py (Executable file → Normal file)

@@ -12,7 +12,10 @@ import os
import sys
import subprocess
import unittest
import apt_pkg
import operator
apt_pkg.init()
architectures = ['amd64', 'arm64', 'armhf', 'i386', 'powerpc', 'ppc64el']
my_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@@ -20,6 +23,9 @@ my_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
NOT_CONSIDERED = False
VALID_CANDIDATE = True
sys.path.insert(0, my_dir)
from autopkgtest import ADT_EXCUSES_LABELS
class TestData:
def __init__(self):
@@ -157,7 +163,29 @@ echo "$@" >> /%s/adt-britney.log ''' % self.data.path)
def tearDown(self):
del self.data
def make_adt_britney(self, request):
def __merge_records(self, results, history=""):
'''Merges a list of results with records in history.
This function merges results from a collect run with records already in
history and sorts the records by version/name of the causes and by
version/name of the source packages under test. This should be done in
the fake adt-britney, but it is more convenient to just pass a static
list of records and have adt-britney return that list.
'''
if history is None:
history = ""
records = [x.split() for x in (results.strip() + '\n' +
history.strip()).split('\n') if x]
records.sort(cmp=apt_pkg.version_compare, key=operator.itemgetter(4))
records.sort(key=operator.itemgetter(3))
records.sort(cmp=apt_pkg.version_compare, key=operator.itemgetter(1))
records.sort()
return "\n".join([' '.join(x) for x in records])
def make_adt_britney(self, request, history=""):
with open(self.adt_britney, 'w') as f:
f.write('''#!%(py)s
import argparse, shutil,sys
@@ -175,7 +203,7 @@ def submit():
def collect():
with open(args.output, 'w') as f:
f.write("""%(rq)s""")
f.write("""%(res)s""")
p = argparse.ArgumentParser()
p.add_argument('-c', '--config')
@@ -202,7 +230,9 @@ pcollect.set_defaults(func=collect)
args = p.parse_args()
args.func()
''' % {'py': sys.executable, 'path': self.data.path, 'rq': request})
''' % {'py': sys.executable, 'path': self.data.path,
'rq': request,
'res': self.__merge_records(request, history)})
def run_britney(self, args=[]):
'''Run britney.
@@ -245,9 +275,19 @@ args.func()
'green 1.1~beta RUNNING green 1.1~beta\n',
NOT_CONSIDERED,
[r'\bgreen\b.*>1</a> to .*>1.1~beta<',
'<li>autopkgtest for green 1.1~beta: RUNNING'])
'<li>autopkgtest for green 1.1~beta: %s' % ADT_EXCUSES_LABELS['RUNNING']])
def test_request_for_installable_fail(self):
def test_request_for_installable_first_fail(self):
'''Requests a test for an installable package. No history, and the first result is a failure.'''
self.do_test(
[('green', {'Version': '1.1~beta', 'Depends': 'libc6 (>= 0.9), libgreen1'})],
'green 1.1~beta FAIL green 1.1~beta\n',
VALID_CANDIDATE,
[r'\bgreen\b.*>1</a> to .*>1.1~beta<',
'<li>autopkgtest for green 1.1~beta: %s' % ADT_EXCUSES_LABELS['ALWAYSFAIL']])
def test_request_for_installable_fail_regression(self):
'''Requests a test for an installable package; the test fails and is a regression'''
self.do_test(
@@ -255,7 +295,8 @@ args.func()
'green 1.1~beta FAIL green 1.1~beta\n',
NOT_CONSIDERED,
[r'\bgreen\b.*>1</a> to .*>1.1~beta<',
'<li>autopkgtest for green 1.1~beta: FAIL'])
'<li>autopkgtest for green 1.1~beta: %s' % ADT_EXCUSES_LABELS['REGRESSION']],
history='green 1.0~beta PASS green 1.0~beta\n')
def test_request_for_installable_pass(self):
'''Requests a test for an installable package; the test passes'''
@@ -265,7 +306,7 @@ args.func()
'green 1.1~beta PASS green 1.1~beta\n',
VALID_CANDIDATE,
[r'\bgreen\b.*>1</a> to .*>1.1~beta<',
'<li>autopkgtest for green 1.1~beta: PASS'])
'<li>autopkgtest for green 1.1~beta: %s' % ADT_EXCUSES_LABELS['PASS']])
def test_multi_rdepends_with_tests_running(self):
'''Multiple reverse dependencies with tests (still running)'''
@@ -276,10 +317,22 @@ args.func()
'darkgreen 1 RUNNING green 2\n',
NOT_CONSIDERED,
[r'\bgreen\b.*>1</a> to .*>2<',
'<li>autopkgtest for lightgreen 1: PASS',
'<li>autopkgtest for darkgreen 1: RUNNING'])
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['PASS'],
'<li>autopkgtest for darkgreen 1: %s' % ADT_EXCUSES_LABELS['RUNNING']])
def test_multi_rdepends_with_tests_fail(self):
def test_multi_rdepends_with_tests_fail_always(self):
'''Multiple reverse dependencies with tests (fail, always failed)'''
self.do_test(
[('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'})],
'lightgreen 1 PASS green 2\n'
'darkgreen 1 FAIL green 2\n',
VALID_CANDIDATE,
[r'\bgreen\b.*>1</a> to .*>2<',
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['PASS'],
'<li>autopkgtest for darkgreen 1: %s' % ADT_EXCUSES_LABELS['ALWAYSFAIL']])
def test_multi_rdepends_with_tests_fail_regression(self):
'''Multiple reverse dependencies with tests (fail, regression)'''
self.do_test(
@@ -288,8 +341,9 @@ args.func()
'darkgreen 1 FAIL green 2\n',
NOT_CONSIDERED,
[r'\bgreen\b.*>1</a> to .*>2<',
'<li>autopkgtest for lightgreen 1: PASS',
'<li>autopkgtest for darkgreen 1: FAIL'])
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['PASS'],
'<li>autopkgtest for darkgreen 1: %s' % ADT_EXCUSES_LABELS['REGRESSION']],
history='darkgreen 1 PASS green 1\n')
def test_multi_rdepends_with_tests_pass(self):
'''Multiple reverse dependencies with tests (pass)'''
@@ -300,8 +354,8 @@ args.func()
'darkgreen 1 PASS green 2\n',
VALID_CANDIDATE,
[r'\bgreen\b.*>1</a> to .*>2<',
'<li>autopkgtest for lightgreen 1: PASS',
'<li>autopkgtest for darkgreen 1: PASS'])
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['PASS'],
'<li>autopkgtest for darkgreen 1: %s' % ADT_EXCUSES_LABELS['PASS']])
def test_multi_rdepends_with_some_tests_running(self):
'''Multiple reverse dependencies with some tests (running)'''
@@ -315,10 +369,25 @@ args.func()
'darkgreen 1 RUNNING green 2\n',
NOT_CONSIDERED,
[r'\bgreen\b.*>1</a> to .*>2<',
'<li>autopkgtest for lightgreen 1: RUNNING',
'<li>autopkgtest for darkgreen 1: RUNNING'])
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['RUNNING'],
'<li>autopkgtest for darkgreen 1: %s' % ADT_EXCUSES_LABELS['RUNNING']])
def test_multi_rdepends_with_some_tests_fail(self):
def test_multi_rdepends_with_some_tests_fail_always(self):
'''Multiple reverse dependencies with some tests (fail, always failed)'''
# add a third reverse dependency to libgreen1 which does not have a test
self.data.add('mint', False, {'Depends': 'libgreen1'})
self.do_test(
[('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'})],
'lightgreen 1 PASS green 2\n'
'darkgreen 1 FAIL green 2\n',
VALID_CANDIDATE,
[r'\bgreen\b.*>1</a> to .*>2<',
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['PASS'],
'<li>autopkgtest for darkgreen 1: %s' % ADT_EXCUSES_LABELS['ALWAYSFAIL']])
def test_multi_rdepends_with_some_tests_fail_regression(self):
'''Multiple reverse dependencies with some tests (fail, regression)'''
# add a third reverse dependency to libgreen1 which does not have a test
@@ -330,8 +399,9 @@ args.func()
'darkgreen 1 FAIL green 2\n',
NOT_CONSIDERED,
[r'\bgreen\b.*>1</a> to .*>2<',
'<li>autopkgtest for lightgreen 1: PASS',
'<li>autopkgtest for darkgreen 1: FAIL'])
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['PASS'],
'<li>autopkgtest for darkgreen 1: %s' % ADT_EXCUSES_LABELS['REGRESSION']],
history='darkgreen 1 PASS green 1\n')
def test_multi_rdepends_with_some_tests_pass(self):
'''Multiple reverse dependencies with some tests (pass)'''
@@ -345,8 +415,8 @@ args.func()
'darkgreen 1 PASS green 2\n',
VALID_CANDIDATE,
[r'\bgreen\b.*>1</a> to .*>2<',
'<li>autopkgtest for lightgreen 1: PASS',
'<li>autopkgtest for darkgreen 1: PASS'])
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['PASS'],
'<li>autopkgtest for darkgreen 1: %s' % ADT_EXCUSES_LABELS['PASS']])
def test_binary_from_new_source_package_running(self):
'''building an existing binary for a new source package (running)'''
@@ -357,10 +427,22 @@ args.func()
'darkgreen 1 RUNNING newgreen 2\n',
NOT_CONSIDERED,
[r'\bnewgreen\b.*\(- to .*>2<',
'<li>autopkgtest for lightgreen 1: PASS',
'<li>autopkgtest for darkgreen 1: RUNNING'])
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['PASS'],
'<li>autopkgtest for darkgreen 1: %s' % ADT_EXCUSES_LABELS['RUNNING']])
def test_binary_from_new_source_package_fail(self):
def test_binary_from_new_source_package_fail_always(self):
'''building an existing binary for a new source package (fail, always failed)'''
self.do_test(
[('libgreen1', {'Version': '2', 'Source': 'newgreen', 'Depends': 'libc6'})],
'lightgreen 1 PASS newgreen 2\n'
'darkgreen 1 FAIL newgreen 2\n',
VALID_CANDIDATE,
[r'\bnewgreen\b.*\(- to .*>2<',
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['PASS'],
'<li>autopkgtest for darkgreen 1: %s' % ADT_EXCUSES_LABELS['ALWAYSFAIL']])
def test_binary_from_new_source_package_fail_regression(self):
'''building an existing binary for a new source package (fail, regression)'''
self.do_test(
@@ -369,8 +451,9 @@ args.func()
'darkgreen 1 FAIL newgreen 2\n',
NOT_CONSIDERED,
[r'\bnewgreen\b.*\(- to .*>2<',
'<li>autopkgtest for lightgreen 1: PASS',
'<li>autopkgtest for darkgreen 1: FAIL'])
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['PASS'],
'<li>autopkgtest for darkgreen 1: %s' % ADT_EXCUSES_LABELS['REGRESSION']],
history='darkgreen 1 PASS green 1\n')
def test_binary_from_new_source_package_pass(self):
'''building an existing binary for a new source package (pass)'''
@@ -381,8 +464,8 @@ args.func()
'darkgreen 1 PASS newgreen 2\n',
VALID_CANDIDATE,
[r'\bnewgreen\b.*\(- to .*>2<',
'<li>autopkgtest for lightgreen 1: PASS',
'<li>autopkgtest for darkgreen 1: PASS'])
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['PASS'],
'<li>autopkgtest for darkgreen 1: %s' % ADT_EXCUSES_LABELS['PASS']])
def test_binary_from_new_source_package_uninst(self):
'''building an existing binary for a new source package (uninstallable)'''
@@ -406,14 +489,76 @@ args.func()
NOT_CONSIDERED,
[r'\bgreen\b.*>1</a> to .*>1.1~beta<',
# it's not entirely clear what precisely it should say here
'<li>autopkgtest for green 1.1~beta: RUNNING'])
'<li>autopkgtest for green 1.1~beta: %s' % ADT_EXCUSES_LABELS['RUNNING']])
def test_request_for_installable_fail_regression_promoted(self):
'''Requests a test for an installable package; the test fails and is a regression.
This test verifies the fix for a bug in britney where a package was
promoted if the latest result appeared before a previous result in
history: only the last result in alphabetical order was taken into
account. For example:
A 1 FAIL B 1
A 1 PASS A 1
In this case the results for A 1 did not appear in the list of results
triggered by the upload of B 1, and B 1 was promoted.
'''
self.do_test(
[('green', {'Version': '1.1~beta', 'Depends': 'libc6 (>= 0.9), libgreen1'})],
'lightgreen 1 FAIL green 1.1~beta\n',
NOT_CONSIDERED,
[r'\bgreen\b.*>1</a> to .*>1.1~beta<',
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['REGRESSION']],
history="lightgreen 1 PASS lightgreen 1"
)
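The scenario described above comes down to the merged collect output containing both an old PASS and a new FAIL for the same package; because results are now keyed by their trigger, the FAIL caused by the new upload is no longer shadowed by an alphabetically later PASS. A hypothetical illustration using the data from this test:

# Merged collect output (history plus the new result), as in this test:
collect = ("lightgreen 1 FAIL green 1.1~beta\n"
           "lightgreen 1 PASS lightgreen 1\n")

pkgcauses = {}
for rec in collect.splitlines():
    src, ver, status, trigsrc, trigver = rec.split()
    pkgcauses.setdefault(trigsrc, {}).setdefault(trigver, []).append((status, src, ver))

# The regression is still visible for the trigger that caused it:
print(pkgcauses["green"]["1.1~beta"])     # [('FAIL', 'lightgreen', '1')]
print(pkgcauses["lightgreen"]["1"])       # [('PASS', 'lightgreen', '1')]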
def test_history_always_passed(self):
'''All the results in history are PASS and the test passed.
'''
self.do_test(
[('green', {'Version': '1.1~beta', 'Depends': 'libc6 (>= 0.9), libgreen1'})],
'lightgreen 1 PASS green 1.1~beta\n',
VALID_CANDIDATE,
[r'\bgreen\b.*>1</a> to .*>1.1~beta<',
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['PASS']],
history="lightgreen 1 PASS lightgreen 1"
)
def test_history_always_failed(self):
'''All the results in history are FAIL and the test fails: not a regression.
'''
self.do_test(
[('green', {'Version': '1.1~beta', 'Depends': 'libc6 (>= 0.9), libgreen1'})],
'lightgreen 1 FAIL green 1.1~beta\n',
VALID_CANDIDATE,
[r'\bgreen\b.*>1</a> to .*>1.1~beta<',
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['ALWAYSFAIL']],
history="lightgreen 1 FAIL lightgreen 1"
)
def test_history_regression(self):
'''All the results in history are PASS and the test fails: a regression, so blocked.
'''
self.do_test(
[('green', {'Version': '1.1~beta', 'Depends': 'libc6 (>= 0.9), libgreen1'})],
'lightgreen 1 FAIL green 1.1~beta\n',
NOT_CONSIDERED,
[r'\bgreen\b.*>1</a> to .*>1.1~beta<',
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['REGRESSION']],
history="lightgreen 1 PASS lightgreen 1"
)
def do_test(self, unstable_add, adt_request, considered, expect=None,
no_expect=None):
no_expect=None, history=""):
for (pkg, fields) in unstable_add:
self.data.add(pkg, True, fields)
self.make_adt_britney(adt_request)
self.make_adt_britney(adt_request, history)
(excuses, out) = self.run_britney()
#print('-------\nexcuses: %s\n-----' % excuses)
@@ -437,7 +582,8 @@ args.func()
self.data.add('yellow', True, {'Version': '1.1~beta',
'Depends': 'libc6 (>= 0.9), nosuchpkg'})
self.make_adt_britney('yellow 1.1~beta RUNNING yellow 1.1~beta\n')
self.make_adt_britney('yellow 1.1~beta RUNNING yellow 1.1~beta\n',
'purple 2 FAIL pink 3.0.~britney\n')
print('run:\n%s -c %s\n' % (self.britney, self.britney_conf))
subprocess.call(['bash', '-i'], cwd=self.data.path)