# Mirror of
# https://git.launchpad.net/~ubuntu-release/britney/+git/britney2-ubuntu
# (synced 2025-08-02 03:04:06 +00:00; 640 lines, 26 KiB, Python)
#
# Upstream commit note: "This is necessary so that we can properly match
# requested to received results when the latter arrive in different runs
# for different architectures. This also opens up the possibility of
# per-arch blacklisting later."
#!/usr/bin/python
# (C) 2014 Canonical Ltd.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import apt_pkg
|
|
import operator
|
|
import os
|
|
import sys
|
|
import subprocess
|
|
import fileinput
|
|
import unittest
|
|
|
|
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
|
sys.path.insert(0, PROJECT_DIR)
|
|
|
|
from autopkgtest import ADT_EXCUSES_LABELS
|
|
from tests import TestBase
|
|
|
|
NOT_CONSIDERED = False
|
|
VALID_CANDIDATE = True
|
|
|
|
|
|
apt_pkg.init()
|
|
|
|
|
|
class TestAutoPkgTest(TestBase):
|
|
'''AMQP/cloud interface'''
|
|
|
|
def setUp(self):
|
|
super(TestAutoPkgTest, self).setUp()
|
|
self.fake_amqp = os.path.join(self.data.path, 'amqp')
|
|
|
|
# Disable boottests and set fake AMQP server
|
|
for line in fileinput.input(self.britney_conf, inplace=True):
|
|
if line.startswith('BOOTTEST_ENABLE'):
|
|
print('BOOTTEST_ENABLE = no')
|
|
elif line.startswith('ADT_AMQP'):
|
|
print('ADT_AMQP = file://%s' % self.fake_amqp)
|
|
else:
|
|
sys.stdout.write(line)
|
|
|
|
# fake adt-britney script; necessary until we drop that code
|
|
self.adt_britney = os.path.join(
|
|
self.data.home, 'auto-package-testing', 'jenkins', 'adt-britney')
|
|
os.makedirs(os.path.dirname(self.adt_britney))
|
|
with open(self.adt_britney, 'w') as f:
|
|
f.write('''#!/bin/sh -e
|
|
touch $HOME/proposed-migration/autopkgtest/work/adt.request.series
|
|
echo "$@" >> /%s/adt-britney.log ''' % self.data.path)
|
|
os.chmod(self.adt_britney, 0o755)
|
|
|
|
# add a bunch of packages to testing to avoid repetition
|
|
self.data.add('libc6', False)
|
|
self.data.add('libgreen1', False, {'Source': 'green',
|
|
'Depends': 'libc6 (>= 0.9)'})
|
|
self.data.add('green', False, {'Depends': 'libc6 (>= 0.9), libgreen1',
|
|
'Conflicts': 'blue'},
|
|
testsuite='autopkgtest')
|
|
self.data.add('lightgreen', False, {'Depends': 'libgreen1'},
|
|
testsuite='autopkgtest')
|
|
# autodep8 or similar test
|
|
self.data.add('darkgreen', False, {'Depends': 'libgreen1'},
|
|
testsuite='autopkgtest-pkg-foo')
|
|
self.data.add('blue', False, {'Depends': 'libc6 (>= 0.9)',
|
|
'Conflicts': 'green'},
|
|
testsuite='specialtest')
|
|
self.data.add('justdata', False, {'Architecture': 'all'})
|
|
|
|
def do_test(self, unstable_add, considered, excuses_expect=None, excuses_no_expect=None):
|
|
for (pkg, fields, testsuite) in unstable_add:
|
|
self.data.add(pkg, True, fields, True, testsuite)
|
|
|
|
(excuses, out) = self.run_britney()
|
|
#print('-------\nexcuses: %s\n-----' % excuses)
|
|
#print('-------\nout: %s\n-----' % out)
|
|
#print('run:\n%s -c %s\n' % (self.britney, self.britney_conf))
|
|
#subprocess.call(['bash', '-i'], cwd=self.data.path)
|
|
if considered:
|
|
self.assertIn('Valid candidate', excuses)
|
|
else:
|
|
self.assertIn('Not considered', excuses)
|
|
|
|
if excuses_expect:
|
|
for re in excuses_expect:
|
|
self.assertRegexpMatches(excuses, re)
|
|
if excuses_no_expect:
|
|
for re in excuses_no_expect:
|
|
self.assertNotRegexpMatches(excuses, re)
|
|
|
|
self.amqp_requests = set()
|
|
try:
|
|
with open(self.fake_amqp) as f:
|
|
for line in f:
|
|
self.amqp_requests.add(line.strip())
|
|
except IOError:
|
|
pass
|
|
|
|
try:
|
|
with open(os.path.join(self.data.path, 'data/series-proposed/autopkgtest/pending.txt')) as f:
|
|
self.pending_requests = f.read()
|
|
except IOError:
|
|
self.pending_requests = None
|
|
|
|
def test_multi_rdepends_with_tests_all_running(self):
|
|
'''Multiple reverse dependencies with tests (all running)'''
|
|
|
|
self.do_test(
|
|
[('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')],
|
|
# FIXME: while we only submit requests through AMQP, but don't consider
|
|
# their results, we don't expect this to hold back stuff.
|
|
VALID_CANDIDATE,
|
|
[r'\bgreen\b.*>1</a> to .*>2<'])
|
|
|
|
# we expect the package's and its reverse dependencies' tests to get
|
|
# triggered
|
|
self.assertEqual(
|
|
self.amqp_requests,
|
|
set(['debci-series-i386:green', 'debci-series-amd64:green',
|
|
'debci-series-i386:lightgreen', 'debci-series-amd64:lightgreen',
|
|
'debci-series-i386:darkgreen', 'debci-series-amd64:darkgreen',
|
|
]))
|
|
os.unlink(self.fake_amqp)
|
|
|
|
# ... and that they get recorded as pending
|
|
expected_pending = '''darkgreen 1 amd64 green 2
|
|
darkgreen 1 i386 green 2
|
|
green 2 amd64 green 2
|
|
green 2 i386 green 2
|
|
lightgreen 1 amd64 green 2
|
|
lightgreen 1 i386 green 2
|
|
'''
|
|
self.assertEqual(self.pending_requests, expected_pending)
|
|
|
|
# if we run britney again this should *not* trigger any new tests
|
|
self.do_test([], VALID_CANDIDATE, [r'\bgreen\b.*>1</a> to .*>2<'])
|
|
self.assertEqual(self.amqp_requests, set())
|
|
# but the set of pending tests doesn't change
|
|
self.assertEqual(self.pending_requests, expected_pending)
|
|
|
|
def test_package_pair_running(self):
|
|
'''Two packages in unstable that need to go in together (running)'''
|
|
|
|
self.do_test(
|
|
[('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest'),
|
|
('lightgreen', {'Version': '2', 'Depends': 'libgreen1 (>= 2)'}, 'autopkgtest')],
|
|
# FIXME: while we only submit requests through AMQP, but don't consider
|
|
# their results, we don't expect this to hold back stuff.
|
|
VALID_CANDIDATE,
|
|
[r'\bgreen\b.*>1</a> to .*>2<',
|
|
r'\blightgreen\b.*>1</a> to .*>2<'])
|
|
|
|
# we expect the package's and its reverse dependencies' tests to get
|
|
# triggered; lightgreen should be triggered only once
|
|
self.assertEqual(
|
|
self.amqp_requests,
|
|
set(['debci-series-i386:green', 'debci-series-amd64:green',
|
|
'debci-series-i386:lightgreen', 'debci-series-amd64:lightgreen',
|
|
'debci-series-i386:darkgreen', 'debci-series-amd64:darkgreen',
|
|
]))
|
|
os.unlink(self.fake_amqp)
|
|
|
|
# ... and that they get recorded as pending
|
|
expected_pending = '''darkgreen 1 amd64 green 2
|
|
darkgreen 1 i386 green 2
|
|
green 2 amd64 green 2
|
|
green 2 i386 green 2
|
|
lightgreen 2 amd64 green 2
|
|
lightgreen 2 amd64 lightgreen 2
|
|
lightgreen 2 i386 green 2
|
|
lightgreen 2 i386 lightgreen 2
|
|
'''
|
|
self.assertEqual(self.pending_requests, expected_pending)
|
|
|
|
def test_no_amqp_config(self):
|
|
'''Run without autopkgtest requests'''
|
|
|
|
# Disable AMQP server config
|
|
for line in fileinput.input(self.britney_conf, inplace=True):
|
|
if not line.startswith('ADT_AMQP'):
|
|
sys.stdout.write(line)
|
|
|
|
self.do_test(
|
|
[('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')],
|
|
VALID_CANDIDATE,
|
|
[r'\bgreen\b.*>1</a> to .*>2<'], ['autopkgtest'])
|
|
|
|
self.assertEqual(self.amqp_requests, set())
|
|
self.assertEqual(self.pending_requests, None)
|
|
|
|
|
|
class TestAdtBritney(TestBase):
|
|
'''Legacy adt-britney/lp:auto-package-testing interface'''
|
|
|
|
def setUp(self):
|
|
super(TestAdtBritney, self).setUp()
|
|
|
|
# Mofify configuration according to the test context.
|
|
with open(self.britney_conf, 'r') as fp:
|
|
original_config = fp.read()
|
|
# Disable boottests.
|
|
new_config = original_config.replace(
|
|
'BOOTTEST_ENABLE = yes', 'BOOTTEST_ENABLE = no')
|
|
with open(self.britney_conf, 'w') as fp:
|
|
fp.write(new_config)
|
|
|
|
# fake adt-britney script
|
|
self.adt_britney = os.path.join(
|
|
self.data.home, 'auto-package-testing', 'jenkins', 'adt-britney')
|
|
os.makedirs(os.path.dirname(self.adt_britney))
|
|
|
|
with open(self.adt_britney, 'w') as f:
|
|
f.write('''#!/bin/sh -e
|
|
echo "$@" >> /%s/adt-britney.log ''' % self.data.path)
|
|
os.chmod(self.adt_britney, 0o755)
|
|
|
|
# add a bunch of packages to testing to avoid repetition
|
|
self.data.add('libc6', False)
|
|
self.data.add('libgreen1', False, {'Source': 'green',
|
|
'Depends': 'libc6 (>= 0.9)'})
|
|
self.data.add('green', False, {'Depends': 'libc6 (>= 0.9), libgreen1',
|
|
'Conflicts': 'blue'})
|
|
self.data.add('lightgreen', False, {'Depends': 'libgreen1'})
|
|
self.data.add('darkgreen', False, {'Depends': 'libgreen1'})
|
|
self.data.add('blue', False, {'Depends': 'libc6 (>= 0.9)',
|
|
'Conflicts': 'green'})
|
|
self.data.add('justdata', False, {'Architecture': 'all'})
|
|
|
|
def __merge_records(self, results, history=""):
|
|
'''Merges a list of results with records in history.
|
|
|
|
This function merges results from a collect with records already in
|
|
history and sort records by version/name of causes and version/name of
|
|
source packages with tests. This should be done in the fake
|
|
adt-britney but it is more convenient to just pass a static list of
|
|
records and make adt-britney just return this list.
|
|
'''
|
|
|
|
if history is None:
|
|
history = ""
|
|
records = [x.split() for x in (results.strip() + '\n' +
|
|
history.strip()).split('\n') if x]
|
|
|
|
records.sort(cmp=apt_pkg.version_compare, key=operator.itemgetter(4))
|
|
records.sort(key=operator.itemgetter(3))
|
|
records.sort(cmp=apt_pkg.version_compare, key=operator.itemgetter(1))
|
|
records.sort()
|
|
|
|
return "\n".join([' '.join(x) for x in records])
|
|
|
|
def make_adt_britney(self, request, history=""):
|
|
with open(self.adt_britney, 'w') as f:
|
|
f.write('''#!%(py)s
|
|
import argparse, shutil,sys
|
|
|
|
def request():
|
|
if args.req:
|
|
shutil.copy(args.req, '%(path)s/adt-britney.requestarg')
|
|
with open(args.output, 'w') as f:
|
|
f.write("""%(rq)s""".replace('PASS', 'NEW').replace('FAIL', 'NEW').replace('RUNNING', 'NEW'))
|
|
|
|
def submit():
|
|
with open(args.req, 'w') as f:
|
|
f.write("""%(rq)s""".replace('PASS', 'RUNNING').
|
|
replace('FAIL', 'RUNNING'))
|
|
|
|
def collect():
|
|
with open(args.output, 'w') as f:
|
|
f.write("""%(res)s""")
|
|
|
|
p = argparse.ArgumentParser()
|
|
p.add_argument('-c', '--config')
|
|
p.add_argument('-a', '--arch')
|
|
p.add_argument('-r', '--release')
|
|
p.add_argument('-P', '--use-proposed', action='store_true')
|
|
p.add_argument('-d', '--debug', action='store_true')
|
|
p.add_argument('-U', '--no-update', action='store_true')
|
|
sp = p.add_subparsers()
|
|
|
|
prequest = sp.add_parser('request')
|
|
prequest.add_argument('-O', '--output')
|
|
prequest.add_argument('req', nargs='?')
|
|
prequest.set_defaults(func=request)
|
|
|
|
psubmit = sp.add_parser('submit')
|
|
psubmit.add_argument('req')
|
|
psubmit.set_defaults(func=submit)
|
|
|
|
pcollect = sp.add_parser('collect')
|
|
pcollect.add_argument('-O', '--output')
|
|
pcollect.add_argument('-n', '--new-only', action='store_true', default=False)
|
|
pcollect.set_defaults(func=collect)
|
|
|
|
args = p.parse_args()
|
|
args.func()
|
|
''' % {'py': sys.executable, 'path': self.data.path,
|
|
'rq': request,
|
|
'res': self.__merge_records(request, history)})
|
|
|
|
def do_test(self, unstable_add, adt_request, considered, expect=None,
|
|
no_expect=None, history=""):
|
|
for (pkg, fields) in unstable_add:
|
|
self.data.add(pkg, True, fields)
|
|
|
|
self.make_adt_britney(adt_request, history)
|
|
|
|
(excuses, out) = self.run_britney()
|
|
#print('-------\nexcuses: %s\n-----' % excuses)
|
|
#print('-------\nout: %s\n-----' % out)
|
|
#print('run:\n%s -c %s\n' % (self.britney, self.britney_conf))
|
|
#subprocess.call(['bash', '-i'], cwd=self.data.path)
|
|
if considered:
|
|
self.assertIn('Valid candidate', excuses)
|
|
else:
|
|
self.assertIn('Not considered', excuses)
|
|
|
|
if expect:
|
|
for re in expect:
|
|
self.assertRegexpMatches(excuses, re)
|
|
if no_expect:
|
|
for re in no_expect:
|
|
self.assertNotRegexpMatches(excuses, re)
|
|
|
|
def test_no_request_for_uninstallable(self):
|
|
'''Does not request a test for an uninstallable package'''
|
|
|
|
self.do_test(
|
|
# uninstallable unstable version
|
|
[('green', {'Version': '1.1~beta', 'Depends': 'libc6 (>= 0.9), libgreen1 (>= 2)'})],
|
|
'green 1.1~beta RUNNING green 1.1~beta\n',
|
|
NOT_CONSIDERED,
|
|
[r'\bgreen\b.*>1</a> to .*>1.1~beta<',
|
|
'green/amd64 unsatisfiable Depends: libgreen1 \(>= 2\)'],
|
|
# autopkgtest should not be triggered for uninstallable pkg
|
|
['autopkgtest'])
|
|
|
|
def test_request_for_installable_running(self):
|
|
'''Requests a test for an installable package, test still running'''
|
|
|
|
self.do_test(
|
|
[('green', {'Version': '1.1~beta', 'Depends': 'libc6 (>= 0.9), libgreen1'})],
|
|
'green 1.1~beta RUNNING green 1.1~beta\n',
|
|
NOT_CONSIDERED,
|
|
[r'\bgreen\b.*>1</a> to .*>1.1~beta<',
|
|
'<li>autopkgtest for green 1.1~beta: %s' % ADT_EXCUSES_LABELS['RUNNING']])
|
|
|
|
def test_request_for_installable_first_fail(self):
|
|
'''Requests a test for an installable package. No history and first result is a failure'''
|
|
|
|
self.do_test(
|
|
[('green', {'Version': '1.1~beta', 'Depends': 'libc6 (>= 0.9), libgreen1'})],
|
|
'green 1.1~beta FAIL green 1.1~beta\n',
|
|
VALID_CANDIDATE,
|
|
[r'\bgreen\b.*>1</a> to .*>1.1~beta<',
|
|
'<li>autopkgtest for green 1.1~beta: %s' % ADT_EXCUSES_LABELS['ALWAYSFAIL']])
|
|
|
|
def test_request_for_installable_fail_regression(self):
|
|
'''Requests a test for an installable package, test fail'''
|
|
|
|
self.do_test(
|
|
[('green', {'Version': '1.1~beta', 'Depends': 'libc6 (>= 0.9), libgreen1'})],
|
|
'green 1.1~beta FAIL green 1.1~beta\n',
|
|
NOT_CONSIDERED,
|
|
[r'\bgreen\b.*>1</a> to .*>1.1~beta<',
|
|
'<li>autopkgtest for green 1.1~beta: %s' % ADT_EXCUSES_LABELS['REGRESSION']],
|
|
history='green 1.0~beta PASS green 1.0~beta\n')
|
|
|
|
def test_request_for_installable_pass(self):
|
|
'''Requests a test for an installable package, test pass'''
|
|
|
|
self.do_test(
|
|
[('green', {'Version': '1.1~beta', 'Depends': 'libc6 (>= 0.9), libgreen1'})],
|
|
'green 1.1~beta PASS green 1.1~beta\n',
|
|
VALID_CANDIDATE,
|
|
[r'\bgreen\b.*>1</a> to .*>1.1~beta<',
|
|
'<li>autopkgtest for green 1.1~beta: %s' % ADT_EXCUSES_LABELS['PASS']])
|
|
|
|
def test_multi_rdepends_with_tests_running(self):
|
|
'''Multiple reverse dependencies with tests (still running)'''
|
|
|
|
self.do_test(
|
|
[('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'})],
|
|
'lightgreen 1 PASS green 2\n'
|
|
'darkgreen 1 RUNNING green 2\n',
|
|
NOT_CONSIDERED,
|
|
[r'\bgreen\b.*>1</a> to .*>2<',
|
|
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['PASS'],
|
|
'<li>autopkgtest for darkgreen 1: %s' % ADT_EXCUSES_LABELS['RUNNING']])
|
|
|
|
def test_multi_rdepends_with_tests_fail_always(self):
|
|
'''Multiple reverse dependencies with tests (fail)'''
|
|
|
|
self.do_test(
|
|
[('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'})],
|
|
'lightgreen 1 PASS green 2\n'
|
|
'darkgreen 1 FAIL green 2\n',
|
|
VALID_CANDIDATE,
|
|
[r'\bgreen\b.*>1</a> to .*>2<',
|
|
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['PASS'],
|
|
'<li>autopkgtest for darkgreen 1: %s' % ADT_EXCUSES_LABELS['ALWAYSFAIL']])
|
|
|
|
def test_multi_rdepends_with_tests_fail_regression(self):
|
|
'''Multiple reverse dependencies with tests (fail)'''
|
|
|
|
self.do_test(
|
|
[('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'})],
|
|
'lightgreen 1 PASS green 2\n'
|
|
'darkgreen 1 FAIL green 2\n',
|
|
NOT_CONSIDERED,
|
|
[r'\bgreen\b.*>1</a> to .*>2<',
|
|
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['PASS'],
|
|
'<li>autopkgtest for darkgreen 1: %s' % ADT_EXCUSES_LABELS['REGRESSION']],
|
|
history='darkgreen 1 PASS green 1\n')
|
|
|
|
def test_multi_rdepends_with_tests_pass(self):
|
|
'''Multiple reverse dependencies with tests (pass)'''
|
|
|
|
self.do_test(
|
|
[('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'})],
|
|
'lightgreen 1 PASS green 2\n'
|
|
'darkgreen 1 PASS green 2\n',
|
|
VALID_CANDIDATE,
|
|
[r'\bgreen\b.*>1</a> to .*>2<',
|
|
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['PASS'],
|
|
'<li>autopkgtest for darkgreen 1: %s' % ADT_EXCUSES_LABELS['PASS']])
|
|
|
|
def test_multi_rdepends_with_some_tests_running(self):
|
|
'''Multiple reverse dependencies with some tests (running)'''
|
|
|
|
# add a third reverse dependency to libgreen1 which does not have a test
|
|
self.data.add('mint', False, {'Depends': 'libgreen1'})
|
|
|
|
self.do_test(
|
|
[('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'})],
|
|
'lightgreen 1 RUNNING green 2\n'
|
|
'darkgreen 1 RUNNING green 2\n',
|
|
NOT_CONSIDERED,
|
|
[r'\bgreen\b.*>1</a> to .*>2<',
|
|
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['RUNNING'],
|
|
'<li>autopkgtest for darkgreen 1: %s' % ADT_EXCUSES_LABELS['RUNNING']])
|
|
|
|
def test_multi_rdepends_with_some_tests_fail_always(self):
|
|
'''Multiple reverse dependencies with some tests (fail)'''
|
|
|
|
# add a third reverse dependency to libgreen1 which does not have a test
|
|
self.data.add('mint', False, {'Depends': 'libgreen1'})
|
|
|
|
self.do_test(
|
|
[('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'})],
|
|
'lightgreen 1 PASS green 2\n'
|
|
'darkgreen 1 FAIL green 2\n',
|
|
VALID_CANDIDATE,
|
|
[r'\bgreen\b.*>1</a> to .*>2<',
|
|
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['PASS'],
|
|
'<li>autopkgtest for darkgreen 1: %s' % ADT_EXCUSES_LABELS['ALWAYSFAIL']])
|
|
|
|
def test_multi_rdepends_with_some_tests_fail_regression(self):
|
|
'''Multiple reverse dependencies with some tests (fail)'''
|
|
|
|
# add a third reverse dependency to libgreen1 which does not have a test
|
|
self.data.add('mint', False, {'Depends': 'libgreen1'})
|
|
|
|
self.do_test(
|
|
[('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'})],
|
|
'lightgreen 1 PASS green 2\n'
|
|
'darkgreen 1 FAIL green 2\n',
|
|
NOT_CONSIDERED,
|
|
[r'\bgreen\b.*>1</a> to .*>2<',
|
|
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['PASS'],
|
|
'<li>autopkgtest for darkgreen 1: %s' % ADT_EXCUSES_LABELS['REGRESSION']],
|
|
history='darkgreen 1 PASS green 1\n')
|
|
|
|
def test_multi_rdepends_with_some_tests_pass(self):
|
|
'''Multiple reverse dependencies with some tests (pass)'''
|
|
|
|
# add a third reverse dependency to libgreen1 which does not have a test
|
|
self.data.add('mint', False, {'Depends': 'libgreen1'})
|
|
|
|
self.do_test(
|
|
[('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'})],
|
|
'lightgreen 1 PASS green 2\n'
|
|
'darkgreen 1 PASS green 2\n',
|
|
VALID_CANDIDATE,
|
|
[r'\bgreen\b.*>1</a> to .*>2<',
|
|
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['PASS'],
|
|
'<li>autopkgtest for darkgreen 1: %s' % ADT_EXCUSES_LABELS['PASS']])
|
|
|
|
def test_binary_from_new_source_package_running(self):
|
|
'''building an existing binary for a new source package (running)'''
|
|
|
|
self.do_test(
|
|
[('libgreen1', {'Version': '2', 'Source': 'newgreen', 'Depends': 'libc6'})],
|
|
'lightgreen 1 PASS newgreen 2\n'
|
|
'darkgreen 1 RUNNING newgreen 2\n',
|
|
NOT_CONSIDERED,
|
|
[r'\bnewgreen\b.*\(- to .*>2<',
|
|
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['PASS'],
|
|
'<li>autopkgtest for darkgreen 1: %s' % ADT_EXCUSES_LABELS['RUNNING']])
|
|
|
|
def test_binary_from_new_source_package_fail_always(self):
|
|
'''building an existing binary for a new source package (fail)'''
|
|
|
|
self.do_test(
|
|
[('libgreen1', {'Version': '2', 'Source': 'newgreen', 'Depends': 'libc6'})],
|
|
'lightgreen 1 PASS newgreen 2\n'
|
|
'darkgreen 1 FAIL newgreen 2\n',
|
|
VALID_CANDIDATE,
|
|
[r'\bnewgreen\b.*\(- to .*>2<',
|
|
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['PASS'],
|
|
'<li>autopkgtest for darkgreen 1: %s' % ADT_EXCUSES_LABELS['ALWAYSFAIL']])
|
|
|
|
def test_binary_from_new_source_package_fail_regression(self):
|
|
'''building an existing binary for a new source package (fail)'''
|
|
|
|
self.do_test(
|
|
[('libgreen1', {'Version': '2', 'Source': 'newgreen', 'Depends': 'libc6'})],
|
|
'lightgreen 1 PASS newgreen 2\n'
|
|
'darkgreen 1 FAIL newgreen 2\n',
|
|
NOT_CONSIDERED,
|
|
[r'\bnewgreen\b.*\(- to .*>2<',
|
|
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['PASS'],
|
|
'<li>autopkgtest for darkgreen 1: %s' % ADT_EXCUSES_LABELS['REGRESSION']],
|
|
history='darkgreen 1 PASS green 1\n')
|
|
|
|
def test_binary_from_new_source_package_pass(self):
|
|
'''building an existing binary for a new source package (pass)'''
|
|
|
|
self.do_test(
|
|
[('libgreen1', {'Version': '2', 'Source': 'newgreen', 'Depends': 'libc6'})],
|
|
'lightgreen 1 PASS newgreen 2\n'
|
|
'darkgreen 1 PASS newgreen 2\n',
|
|
VALID_CANDIDATE,
|
|
[r'\bnewgreen\b.*\(- to .*>2<',
|
|
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['PASS'],
|
|
'<li>autopkgtest for darkgreen 1: %s' % ADT_EXCUSES_LABELS['PASS']])
|
|
|
|
def test_binary_from_new_source_package_uninst(self):
|
|
'''building an existing binary for a new source package (uninstallable)'''
|
|
|
|
self.do_test(
|
|
[('libgreen1', {'Version': '2', 'Source': 'newgreen', 'Depends': 'libc6, nosuchpkg'})],
|
|
'darkgreen 1 FAIL newgreen 2\n',
|
|
NOT_CONSIDERED,
|
|
[r'\bnewgreen\b.*\(- to .*>2<',
|
|
'libgreen1/amd64 unsatisfiable Depends: nosuchpkg'],
|
|
# autopkgtest should not be triggered for uninstallable pkg
|
|
['autopkgtest'])
|
|
|
|
@unittest.expectedFailure
|
|
def test_result_from_older_version(self):
|
|
'''test result from older version than the uploaded one'''
|
|
|
|
self.do_test(
|
|
[('green', {'Version': '1.1~beta', 'Depends': 'libc6 (>= 0.9), libgreen1'})],
|
|
'green 1.1~alpha PASS green 1.1~beta\n',
|
|
NOT_CONSIDERED,
|
|
[r'\bgreen\b.*>1</a> to .*>1.1~beta<',
|
|
# it's not entirely clear what precisely it should say here
|
|
'<li>autopkgtest for green 1.1~beta: %s' % ADT_EXCUSES_LABELS['RUNNING']])
|
|
|
|
def test_request_for_installable_fail_regression_promoted(self):
|
|
'''Requests a test for an installable package, test fail, is a regression.
|
|
|
|
This test verifies a bug in britney where a package was promoted if latest test
|
|
appeared before previous result in history, only the last result in
|
|
alphabetic order was taken into account. For example:
|
|
A 1 FAIL B 1
|
|
A 1 PASS A 1
|
|
In this case results for A 1 didn't appear in the list of results
|
|
triggered by the upload of B 1 and B 1 was promoted
|
|
'''
|
|
|
|
self.do_test(
|
|
[('green', {'Version': '1.1~beta', 'Depends': 'libc6 (>= 0.9), libgreen1'})],
|
|
'lightgreen 1 FAIL green 1.1~beta\n',
|
|
NOT_CONSIDERED,
|
|
[r'\bgreen\b.*>1</a> to .*>1.1~beta<',
|
|
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['REGRESSION']],
|
|
history="lightgreen 1 PASS lightgreen 1"
|
|
)
|
|
|
|
def test_history_always_passed(self):
|
|
'''All the results in history are PASS, and test passed
|
|
|
|
'''
|
|
|
|
self.do_test(
|
|
[('green', {'Version': '1.1~beta', 'Depends': 'libc6 (>= 0.9), libgreen1'})],
|
|
'lightgreen 1 PASS green 1.1~beta\n',
|
|
VALID_CANDIDATE,
|
|
[r'\bgreen\b.*>1</a> to .*>1.1~beta<',
|
|
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['PASS']],
|
|
history="lightgreen 1 PASS lightgreen 1"
|
|
)
|
|
|
|
def test_history_always_failed(self):
|
|
'''All the results in history are FAIL, test fails. not a regression.
|
|
|
|
'''
|
|
|
|
self.do_test(
|
|
[('green', {'Version': '1.1~beta', 'Depends': 'libc6 (>= 0.9), libgreen1'})],
|
|
'lightgreen 1 FAIL green 1.1~beta\n',
|
|
VALID_CANDIDATE,
|
|
[r'\bgreen\b.*>1</a> to .*>1.1~beta<',
|
|
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['ALWAYSFAIL']],
|
|
history="lightgreen 1 FAIL lightgreen 1"
|
|
)
|
|
|
|
def test_history_regression(self):
|
|
'''All the results in history are PASS, test fails. Blocked.
|
|
|
|
'''
|
|
self.do_test(
|
|
[('green', {'Version': '1.1~beta', 'Depends': 'libc6 (>= 0.9), libgreen1'})],
|
|
'lightgreen 1 FAIL green 1.1~beta\n',
|
|
NOT_CONSIDERED,
|
|
[r'\bgreen\b.*>1</a> to .*>1.1~beta<',
|
|
'<li>autopkgtest for lightgreen 1: %s' % ADT_EXCUSES_LABELS['REGRESSION']],
|
|
history="lightgreen 1 PASS lightgreen 1"
|
|
)
|
|
|
|
def shell(self):
|
|
# uninstallable unstable version
|
|
self.data.add('yellow', True, {'Version': '1.1~beta',
|
|
'Depends': 'libc6 (>= 0.9), nosuchpkg'})
|
|
|
|
self.make_adt_britney('yellow 1.1~beta RUNNING yellow 1.1~beta\n',
|
|
'purple 2 FAIL pink 3.0.~britney\n')
|
|
|
|
print('run:\n%s -c %s\n' % (self.britney, self.britney_conf))
|
|
subprocess.call(['bash', '-i'], cwd=self.data.path)
|
|
|
|
|
|
if __name__ == '__main__':
|
|
unittest.main()
|