Mirror of https://git.launchpad.net/~ubuntu-release/britney/+git/britney2-ubuntu (synced 2025-05-28 02:41:35 +00:00)
Autopkgtest: Reorganize pending test maps
- Invert the map to go from triggers to tested packages, instead of the other
  way around. This is the lookup and update mode that we usually want, which
  simplifies the code and speeds up lookups. The one exception is fetching
  results (as that is per tested source package, not per trigger), but there
  is a FIXME to get rid of the "triggers" argument completely.

- Stop tracking tested package versions. We don't actually care about them
  anywhere, as the important piece of data is the trigger.

- Drop our home-grown pending.txt format and write pending.json instead.

ATTENTION: This changes the on-disk cache format for pending tests, so
pending.txt needs to be cleaned up manually, and any tests still pending at
the time of upgrading to this revision will be re-run.
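For illustration only (not part of the commit), here is a minimal sketch of the reorganized pending map and the pending.json it produces; the package and version names are the fixtures used by the test suite (green, darkgreen, ...) rather than real archive sources:

```python
import json

# New layout: trigger ("srcpkg/version") -> tested source package -> [architectures]
pending_tests = {
    'green/2': {
        'darkgreen': ['amd64', 'i386'],
        'lightgreen': ['amd64', 'i386'],
    },
}

# update_pending_tests() now simply serializes this map:
with open('pending.json', 'w') as f:
    json.dump(pending_tests, f, indent=2)

# The dropped pending.txt format carried one
# "srcpkg srcver arch triggering-srcpkg triggering-srcver" line per request, e.g.:
#   darkgreen 1 amd64 green 2
```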
parent b426244840
commit ba77e95c04

autopkgtest.py | 231 lines changed
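To illustrate the "lookup mode that we usually want" claim, here is a small self-contained sketch (not taken from the repository) contrasting a per-trigger lookup in the new map against the old per-source nesting; the sample data mirrors the structures shown in the diff below:

```python
# Old layout: src -> ver -> arch -> {(trigsrc, trigver), ...}
old_map = {'darkgreen': {'1': {'amd64': {('green', '2')},
                               'i386': {('green', '2')}}}}

# New layout: trigger -> src -> [arch]
new_map = {'green/2': {'darkgreen': ['amd64', 'i386']}}

# Question answered on every britney run: which (src, arch) tests belong to
# trigger green/2?

# Old layout: walk every source/version/arch entry and filter on the trigger.
old_answer = set()
for src, vers in old_map.items():
    for ver, archs in vers.items():
        for arch, triggers in archs.items():
            if ('green', '2') in triggers:
                old_answer.add((src, arch))

# New layout: a single dictionary lookup.
new_answer = {(src, arch)
              for src, archs in new_map.get('green/2', {}).items()
              for arch in archs}

assert old_answer == new_answer == {('darkgreen', 'amd64'), ('darkgreen', 'i386')}
```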
@@ -21,7 +21,6 @@ import time
 import json
 import tarfile
 import io
-import copy
 import re
 import urllib.parse
 from urllib.request import urlopen
@@ -59,11 +58,11 @@ class AutoPackageTest(object):
         self.test_state_dir = os.path.join(britney.options.unstable,
                                            'autopkgtest')
         # map of requested tests from request()
-        # src -> ver -> arch -> {(triggering-src1, ver1), ...}
+        # trigger -> src -> [arch]
         self.requested_tests = {}
         # same map for tests requested in previous runs
         self.pending_tests = None
-        self.pending_tests_file = os.path.join(self.test_state_dir, 'pending.txt')
+        self.pending_tests_file = os.path.join(self.test_state_dir, 'pending.json')
 
         if not os.path.isdir(self.test_state_dir):
             os.mkdir(self.test_state_dir)
@@ -239,59 +238,41 @@ class AutoPackageTest(object):
     def read_pending_tests(self):
         '''Read pending test requests from previous britney runs
 
-        Read UNSTABLE/autopkgtest/requested.txt with the format:
-            srcpkg srcver triggering-srcpkg triggering-srcver
-
         Initialize self.pending_tests with that data.
         '''
         assert self.pending_tests is None, 'already initialized'
-        self.pending_tests = {}
         if not os.path.exists(self.pending_tests_file):
             self.log_verbose('No %s, starting with no pending tests' %
                              self.pending_tests_file)
+            self.pending_tests = {}
             return
         with open(self.pending_tests_file) as f:
-            for l in f:
-                l = l.strip()
-                if not l:
-                    continue
-                try:
-                    (src, ver, arch, trigsrc, trigver) = l.split()
-                except ValueError:
-                    self.log_error('ignoring malformed line in %s: %s' %
-                                   (self.pending_tests_file, l))
-                    continue
-                self.pending_tests.setdefault(src, {}).setdefault(
-                    ver, {}).setdefault(arch, set()).add((trigsrc, trigver))
+            self.pending_tests = json.load(f)
         self.log_verbose('Read pending requested tests from %s: %s' %
                          (self.pending_tests_file, self.pending_tests))
 
     def update_pending_tests(self):
-        '''Update pending tests after submitting requested tests
+        '''Update pending tests after submitting requested tests'''
 
-        Update UNSTABLE/autopkgtest/requested.txt, see read_pending_tests() for
-        the format.
-        '''
         # merge requested_tests into pending_tests
-        for src, verinfo in self.requested_tests.items():
-            for ver, archinfo in verinfo.items():
-                for arch, triggers in archinfo.items():
-                    self.pending_tests.setdefault(src, {}).setdefault(
-                        ver, {}).setdefault(arch, set()).update(triggers)
-        self.requested_tests = {}
+        for trigger, srcmap in self.requested_tests.items():
+            for src, archlist in srcmap.items():
+                try:
+                    arches = set(self.pending_tests[trigger][src])
+                except KeyError:
+                    arches = set()
+                arches.update(archlist)
+                self.pending_tests.setdefault(trigger, {})[src] = sorted(arches)
+        self.requested_tests.clear()
 
         # write it
         with open(self.pending_tests_file + '.new', 'w') as f:
-            for src in sorted(self.pending_tests):
-                for ver in sorted(self.pending_tests[src]):
-                    for arch in sorted(self.pending_tests[src][ver]):
-                        for (trigsrc, trigver) in sorted(self.pending_tests[src][ver][arch]):
-                            f.write('%s %s %s %s %s\n' % (src, ver, arch, trigsrc, trigver))
+            json.dump(self.pending_tests, f, indent=2)
         os.rename(self.pending_tests_file + '.new', self.pending_tests_file)
         self.log_verbose('Updated pending requested tests in %s' %
                          self.pending_tests_file)
 
-    def add_test_request(self, src, ver, arch, trigsrc, trigver):
+    def add_test_request(self, src, arch, trigger):
         '''Add one test request to the local self.requested_tests queue
 
         trigger is "pkgname/version" of the package that triggers the testing
@@ -303,26 +284,26 @@ class AutoPackageTest(object):
         '''
         # Don't re-request if we already have a result
         try:
-            self.test_results[trigsrc + '/' + trigver][src][arch]
-            self.log_verbose('There already is a result for %s/%s triggered by %s/%s' %
-                             (src, arch, trigsrc, trigver))
+            self.test_results[trigger][src][arch]
+            self.log_verbose('There already is a result for %s/%s triggered by %s' %
+                             (src, arch, trigger))
             return
         except KeyError:
             pass
 
         # Don't re-request if it's already pending
-        if (trigsrc, trigver) in self.pending_tests.get(src, {}).get(
-                ver, {}).get(arch, set()):
-            self.log_verbose('test %s/%s/%s for %s/%s is already pending, not queueing' %
-                             (src, ver, arch, trigsrc, trigver))
-            return
-        self.requested_tests.setdefault(src, {}).setdefault(
-            ver, {}).setdefault(arch, set()).add((trigsrc, trigver))
+        if arch in self.pending_tests.get(trigger, {}).get(src, []):
+            self.log_verbose('test %s/%s for %s is already pending, not queueing' %
+                             (src, arch, trigger))
+        else:
+            arch_list = self.requested_tests.setdefault(trigger, {}).setdefault(src, [])
+            assert arch not in arch_list
+            arch_list.append(arch)
 
     def fetch_swift_results(self, swift_url, src, arch, triggers):
         '''Download new results for source package/arch from swift
 
-        triggers is an iterable of (trigsrc, trigver) for which to check
+        triggers is an iterable of triggers for which to check
         results.
         '''
         # prepare query: get all runs with a timestamp later than the latest
@@ -336,9 +317,9 @@ class AutoPackageTest(object):
         # FIXME: consider dropping "triggers" arg again and iterate over all
         # results, if that's not too slow; cache the results?
         latest_run_id = ''
-        for trigsrc, trigver in triggers:
+        for trigger in triggers:
             try:
-                run_id = self.test_results[trigsrc + '/' + trigver][src][arch][2]
+                run_id = self.test_results[trigger][src][arch][2]
             except KeyError:
                 continue
             if run_id > latest_run_id:
@@ -394,7 +375,7 @@ class AutoPackageTest(object):
             testinfo = json.loads(tar.extractfile('testinfo.json').read().decode())
         except (KeyError, ValueError, tarfile.TarError) as e:
             self.log_error('%s is damaged, ignoring: %s' % (url, str(e)))
-            # ignore this; this will leave an orphaned request in pending.txt
+            # ignore this; this will leave an orphaned request in pending.json
             # and thus require manual retries after fixing the tmpfail, but we
             # can't just blindly attribute it to some pending test.
             return
@@ -407,7 +388,7 @@ class AutoPackageTest(object):
         # parse recorded triggers in test result
         for e in testinfo.get('custom_environment', []):
             if e.startswith('ADT_TEST_TRIGGERS='):
-                result_triggers = [tuple(i.split('/', 1)) for i in e.split('=', 1)[1].split() if '/' in i]
+                result_triggers = [i for i in e.split('=', 1)[1].split() if '/' in i]
                 break
         else:
             self.log_error('%s result has no ADT_TEST_TRIGGERS, ignoring')
@@ -421,32 +402,30 @@ class AutoPackageTest(object):
             src, ver, arch, stamp, result_triggers, passed and 'pass' or 'fail'))
 
         # remove matching test requests
-        for request_map in [self.requested_tests, self.pending_tests]:
-            for pending_ver, pending_archinfo in request_map.get(src, {}).copy().items():
-                # don't consider newer requested versions
-                if apt_pkg.version_compare(pending_ver, ver) > 0:
-                    continue
-
-                for result_trigger in result_triggers:
-                    try:
-                        request_map[src][pending_ver][arch].remove(result_trigger)
-                        self.log_verbose('-> matches pending request %s/%s/%s for trigger %s' %
-                                         (src, pending_ver, arch, str(result_trigger)))
-                    except (KeyError, ValueError):
-                        self.log_verbose('-> does not match any pending request for %s/%s/%s' %
-                                         (src, pending_ver, arch))
+        for trigger in result_triggers:
+            for request_map in [self.requested_tests, self.pending_tests]:
+                try:
+                    arch_list = request_map[trigger][src]
+                    arch_list.remove(arch)
+                    if not arch_list:
+                        del request_map[trigger][src]
+                    if not request_map[trigger]:
+                        del request_map[trigger]
+                    self.log_verbose('-> matches pending request %s/%s for trigger %s' % (src, arch, trigger))
+                except (KeyError, ValueError):
+                    self.log_verbose('-> does not match any pending request for %s/%s' % (src, arch))
 
         # add this result
-        for (trigsrc, trigver) in result_triggers:
+        for trigger in result_triggers:
             # If a test runs because of its own package (newer version), ensure
             # that we got a new enough version; FIXME: this should be done more
             # generically by matching against testpkg-versions
+            (trigsrc, trigver) = trigger.split('/', 1)
             if trigsrc == src and apt_pkg.version_compare(ver, trigver) < 0:
-                self.log_error('test trigger %s/%s, but run for older version %s, ignoring' %
-                               (trigsrc, trigver, ver))
+                self.log_error('test trigger %s, but run for older version %s, ignoring' % (trigger, ver))
                 continue
 
-            result = self.test_results.setdefault(trigsrc + '/' + trigver, {}).setdefault(
+            result = self.test_results.setdefault(trigger, {}).setdefault(
                 src, {}).setdefault(arch, [False, None, ''])
 
             # don't clobber existing passed results with failures from re-runs
@@ -455,11 +434,11 @@ class AutoPackageTest(object):
             if stamp > result[2]:
                 result[2] = stamp
 
-    def failed_tests_for_trigger(self, trigsrc, trigver):
-        '''Return (src, arch) set for failed tests for given trigger pkg'''
+    def failed_tests_for_trigger(self, trigger):
+        '''Return (src, arch) set for failed tests for given trigger'''
 
         failed = set()
-        for src, srcinfo in self.test_results.get(trigsrc + '/' + trigver, {}).items():
+        for src, srcinfo in self.test_results.get(trigger, {}).items():
             for arch, result in srcinfo.items():
                 if not result[0]:
                     failed.add((src, arch))
@@ -490,56 +469,27 @@ class AutoPackageTest(object):
         for src, ver in packages:
             for arch in self.britney.options.adt_arches:
                 for (testsrc, testver) in self.tests_for_source(src, ver, arch):
-                    self.add_test_request(testsrc, testver, arch, src, ver)
+                    self.add_test_request(testsrc, arch, src + '/' + ver)
 
         if self.britney.options.verbose:
-            for src, verinfo in self.requested_tests.items():
-                for ver, archinfo in verinfo.items():
-                    for arch, triggers in archinfo.items():
-                        self.log_verbose('Requesting %s/%s/%s autopkgtest to verify %s' %
-                                         (src, ver, arch, ', '.join(['%s/%s' % i for i in triggers])))
+            for trigger, srcmap in self.requested_tests.items():
+                for src, archlist in srcmap.items():
+                    self.log_verbose('Requesting %s autopkgtest on %s to verify %s' %
+                                     (src, ' '.join(archlist), trigger))
 
     def submit(self):
-
-        def _arches(verinfo):
-            res = set()
-            for archinfo in verinfo.values():
-                res.update(archinfo.keys())
-            return res
-
-        def _trigsources(verinfo, arch):
-            '''Calculate the triggers for a given verinfo map
-
-            verinfo is ver -> arch -> {(triggering-src1, ver1), ...}, i. e. an
-            entry of self.requested_tests[arch]
-
-            Return {trigger1, ...}) set.
-            '''
-            triggers = set()
-            for archinfo in verinfo.values():
-                for (t, v) in archinfo.get(arch, []):
-                    triggers.add(t + '/' + v)
-            return triggers
-
         # build per-queue request strings for new test requests
         # TODO: Once we support version constraints in AMQP requests, add them
-        # arch → (queue_name, [(pkg, params), ...])
-        arch_queues = {}
-        for arch in self.britney.options.adt_arches:
-            requests = []
-            for pkg, verinfo in self.requested_tests.items():
-                if arch in _arches(verinfo):
-                    # if a package gets triggered by several sources, we can
-                    # run just one test for all triggers; but for proposed
-                    # kernels we want to run a separate test for each, so that
-                    # the test runs under that particular kernel
-                    triggers = _trigsources(verinfo, arch)
-                    for t in sorted(triggers):
-                        params = {'triggers': [t]}
-                        if self.britney.options.adt_ppas:
-                            params['ppas'] = self.britney.options.adt_ppas
-                        requests.append((pkg, json.dumps(params)))
-            arch_queues[arch] = ('debci-%s-%s' % (self.series, arch), requests)
+        # queue_name -> [(pkg, params), ...])
+        queues = {}
+        for trigger, srcmap in self.requested_tests.items():
+            params = {'triggers': [trigger]}
+            if self.britney.options.adt_ppas:
+                params['ppas'] = self.britney.options.adt_ppas
+            for src, archlist in srcmap.items():
+                for arch in archlist:
+                    qname = 'debci-%s-%s' % (self.series, arch)
+                    queues.setdefault(qname, []).append((src, json.dumps(params)))
 
         amqp_url = self.britney.options.adt_amqp
 
@@ -549,14 +499,14 @@ class AutoPackageTest(object):
             with amqp.Connection(creds.hostname, userid=creds.username,
                                  password=creds.password) as amqp_con:
                 with amqp_con.channel() as ch:
-                    for arch, (queue, requests) in arch_queues.items():
+                    for queue, requests in queues.items():
                         for (pkg, params) in requests:
                             ch.basic_publish(amqp.Message(pkg + '\n' + params),
                                              routing_key=queue)
         elif amqp_url.startswith('file://'):
             # in testing mode, adt_amqp will be a file:// URL
             with open(amqp_url[7:], 'a') as f:
-                for arch, (queue, requests) in arch_queues.items():
+                for queue, requests in queues.items():
                     for (pkg, params) in requests:
                         f.write('%s:%s %s\n' % (queue, pkg, params))
         else:
@@ -574,28 +524,42 @@ class AutoPackageTest(object):
         happens when you have to blow away results.cache and let it rebuild
         from scratch.
         '''
-        for pkg, verinfo in copy.deepcopy(self.requested_tests).items():
-            for archinfo in verinfo.values():
-                for arch, triggers in archinfo.items():
-                    self.fetch_swift_results(self.britney.options.adt_swift_url, pkg, arch, triggers)
+        # build src -> arch -> triggers inverted map
+        requests_by_src = {}
+        for trigger, srcmap in self.requested_tests.items():
+            for src, archlist in srcmap.items():
+                for arch in archlist:
+                    requests_by_src.setdefault(src, {}).setdefault(arch, set()).add(trigger)
+
+        for src, archmap in requests_by_src.items():
+            for arch, triggers in archmap.items():
+                self.fetch_swift_results(self.britney.options.adt_swift_url, src, arch, triggers)
 
     def collect(self, packages):
         '''Update results from swift for all pending packages
 
        Remove pending tests for which we have results.
        '''
-        for pkg, verinfo in copy.deepcopy(self.pending_tests).items():
-            for archinfo in verinfo.values():
-                for arch, triggers in archinfo.items():
-                    self.fetch_swift_results(self.britney.options.adt_swift_url, pkg, arch, triggers)
+        # build src -> arch -> triggers inverted map
+        requests_by_src = {}
+        for trigger, srcmap in self.pending_tests.items():
+            for src, archlist in srcmap.items():
+                for arch in archlist:
+                    requests_by_src.setdefault(src, {}).setdefault(arch, set()).add(trigger)
+
+        for src, archmap in requests_by_src.items():
+            for arch, triggers in archmap.items():
+                self.fetch_swift_results(self.britney.options.adt_swift_url, src, arch, triggers)
 
         # also update results for excuses whose tests failed, in case a
         # manual retry worked
         for (trigpkg, trigver) in packages:
-            for (pkg, arch) in self.failed_tests_for_trigger(trigpkg, trigver):
-                if arch not in self.pending_tests.get(trigpkg, {}).get(trigver, {}):
-                    self.log_verbose('Checking for new results for failed %s on %s for trigger %s/%s' %
-                                     (pkg, arch, trigpkg, trigver))
-                    self.fetch_swift_results(self.britney.options.adt_swift_url, pkg, arch, [(trigpkg, trigver)])
+            trigger = trigpkg + '/' + trigver
+            for (src, arch) in self.failed_tests_for_trigger(trigger):
+                if arch not in self.pending_tests.get(trigger, {}).get(src, []):
+                    self.log_verbose('Checking for new results for failed %s on %s for trigger %s' %
+                                     (src, arch, trigger))
+                    self.fetch_swift_results(self.britney.options.adt_swift_url, src, arch, [trigger])
 
         # update the results cache
         with open(self.results_cache_file + '.new', 'w') as f:
@@ -6,9 +6,6 @@
 # the Free Software Foundation; either version 2 of the License, or
 # (at your option) any later version.
 
-from textwrap import dedent
-
-import apt_pkg
 import os
 import sys
 import fileinput
@@ -16,6 +13,7 @@ import unittest
 import json
 import pprint
 
+import apt_pkg
 import yaml
 
 PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@@ -140,8 +138,8 @@ class TestAutoPkgTest(TestBase):
             pass
 
         try:
-            with open(os.path.join(self.data.path, 'data/series-proposed/autopkgtest/pending.txt')) as f:
-                self.pending_requests = f.read()
+            with open(os.path.join(self.data.path, 'data/series-proposed/autopkgtest/pending.json')) as f:
+                self.pending_requests = json.load(f)
         except IOError:
             self.pending_requests = None
 
@@ -168,7 +166,7 @@ class TestAutoPkgTest(TestBase):
         # autopkgtest should not be triggered for uninstallable pkg
         self.assertEqual(exc['lightgreen']['tests'], {})
 
-        self.assertEqual(self.pending_requests, '')
+        self.assertEqual(self.pending_requests, {})
         self.assertEqual(self.amqp_requests, set())
 
     def test_no_wait_for_always_failed_test(self):
@@ -184,7 +182,7 @@ class TestAutoPkgTest(TestBase):
             [('darkgreen', {'Version': '2'}, 'autopkgtest')],
             {'darkgreen': (True, {'darkgreen 2': {'i386': 'RUNNING-ALWAYSFAIL',
                                                   'amd64': 'RUNNING-ALWAYSFAIL'}})}
         )[1]
 
         # the test should still be triggered though
         self.assertEqual(exc['darkgreen']['tests'], {'autopkgtest':
@@ -194,17 +192,13 @@ class TestAutoPkgTest(TestBase):
                              'i386': ['RUNNING-ALWAYSFAIL',
                                       'http://autopkgtest.ubuntu.com/packages/d/darkgreen/series/i386']}}})
 
-        self.assertEqual(
-            self.pending_requests, dedent('''\
-                darkgreen 2 amd64 darkgreen 2
-                darkgreen 2 i386 darkgreen 2
-                '''))
+        self.assertEqual(self.pending_requests,
+                         {'darkgreen/2': {'darkgreen': ['amd64', 'i386']}})
 
         self.assertEqual(
             self.amqp_requests,
             set(['debci-series-amd64:darkgreen {"triggers": ["darkgreen/2"]}',
                  'debci-series-i386:darkgreen {"triggers": ["darkgreen/2"]}']))
 
-
     def test_multi_rdepends_with_tests_all_running(self):
         '''Multiple reverse dependencies with tests (all running)'''
@@ -235,13 +229,9 @@ class TestAutoPkgTest(TestBase):
                  'debci-series-amd64:darkgreen {"triggers": ["green/2"]}']))
 
         # ... and that they get recorded as pending
-        expected_pending = '''darkgreen 1 amd64 green 2
-darkgreen 1 i386 green 2
-green 2 amd64 green 2
-green 2 i386 green 2
-lightgreen 1 amd64 green 2
-lightgreen 1 i386 green 2
-'''
+        expected_pending = {'green/2': {'darkgreen': ['amd64', 'i386'],
+                                        'green': ['amd64', 'i386'],
+                                        'lightgreen': ['amd64', 'i386']}}
         self.assertEqual(self.pending_requests, expected_pending)
 
         # if we run britney again this should *not* trigger any new tests
@@ -293,7 +283,7 @@ lightgreen 1 i386 green 2
         )[0]
 
         # all tests ran, there should be no more pending ones
-        self.assertEqual(self.pending_requests, '')
+        self.assertEqual(self.pending_requests, {})
 
         # not expecting any failures to retrieve from swift
         self.assertNotIn('Failure', out, out)
@@ -318,7 +308,7 @@ lightgreen 1 i386 green 2
             })
         })[0]
         self.assertEqual(self.amqp_requests, set())
-        self.assertEqual(self.pending_requests, '')
+        self.assertEqual(self.pending_requests, {})
         self.assertNotIn('Failure', out, out)
 
     def test_multi_rdepends_with_tests_mixed(self):
@@ -363,8 +353,8 @@ lightgreen 1 i386 green 2
         self.assertNotIn('Failure', out, out)
 
         # there should be some pending ones
-        self.assertIn('darkgreen 1 amd64 green 2', self.pending_requests)
-        self.assertIn('lightgreen 1 i386 green 2', self.pending_requests)
+        self.assertEqual(self.pending_requests,
+                         {'green/2': {'darkgreen': ['amd64'], 'lightgreen': ['i386']}})
 
     def test_results_without_triggers(self):
         '''Old results without recorded triggers'''
@@ -388,9 +378,10 @@ lightgreen 1 i386 green 2
         })
 
         # there should be some pending ones
-        self.assertIn('darkgreen 1 amd64 green 2', self.pending_requests)
-        self.assertIn('lightgreen 1 i386 green 2', self.pending_requests)
-        self.assertIn('green 2 i386 green 2', self.pending_requests)
+        self.assertEqual(self.pending_requests,
+                         {'green/2': {'lightgreen': ['amd64', 'i386'],
+                                      'green': ['amd64', 'i386'],
+                                      'darkgreen': ['amd64', 'i386']}})
 
     def test_multi_rdepends_with_tests_regression(self):
         '''Multiple reverse dependencies with tests (regression)'''
@@ -420,7 +411,7 @@ lightgreen 1 i386 green 2
         # we already had all results before the run, so this should not trigger
         # any new requests
         self.assertEqual(self.amqp_requests, set())
-        self.assertEqual(self.pending_requests, '')
+        self.assertEqual(self.pending_requests, {})
 
         # not expecting any failures to retrieve from swift
         self.assertNotIn('Failure', out, out)
@@ -451,7 +442,7 @@ lightgreen 1 i386 green 2
             {'green': [('old-version', '1'), ('new-version', '2')]}
         )[0]
 
-        self.assertEqual(self.pending_requests, '')
+        self.assertEqual(self.pending_requests, {})
         # not expecting any failures to retrieve from swift
         self.assertNotIn('Failure', out, out)
 
@@ -480,7 +471,7 @@ lightgreen 1 i386 green 2
             {'green': [('old-version', '1'), ('new-version', '2')]}
         )[0]
 
-        self.assertEqual(self.pending_requests, '')
+        self.assertEqual(self.pending_requests, {})
         # not expecting any failures to retrieve from swift
         self.assertNotIn('Failure', out, out)
 
@@ -516,8 +507,11 @@ lightgreen 1 i386 green 2
              'debci-series-amd64:darkgreen {"triggers": ["green/2"]}',
              'debci-series-amd64:green64 {"triggers": ["green/2"]}']))
 
-        self.assertIn('green64 1 amd64', self.pending_requests)
-        self.assertNotIn('green64 1 i386', self.pending_requests)
+        self.assertEqual(self.pending_requests,
+                         {'green/2': {'lightgreen': ['amd64', 'i386'],
+                                      'darkgreen': ['amd64', 'i386'],
+                                      'green64': ['amd64'],
+                                      'green': ['amd64', 'i386']}})
 
         # second run collects the results
         self.swift.set_results({'autopkgtest-series': {
@@ -548,7 +542,7 @@ lightgreen 1 i386 green 2
 
         # all tests ran, there should be no more pending ones
         self.assertEqual(self.amqp_requests, set())
-        self.assertEqual(self.pending_requests, '')
+        self.assertEqual(self.pending_requests, {})
 
         # not expecting any failures to retrieve from swift
         self.assertNotIn('Failure', out, out)
@@ -603,12 +597,12 @@ lightgreen 1 i386 green 2
         )
 
         self.assertEqual(self.amqp_requests, set())
-        self.assertEqual(self.pending_requests, '')
+        self.assertEqual(self.pending_requests, {})
 
         # next run should not trigger any new requests
         self.do_test([], {'green': (False, {}), 'lightgreen': (False, {})})
         self.assertEqual(self.amqp_requests, set())
-        self.assertEqual(self.pending_requests, '')
+        self.assertEqual(self.pending_requests, {})
 
         # now lightgreen 2 gets built, should trigger a new test run
         self.data.remove_all(True)
@@ -639,7 +633,7 @@ lightgreen 1 i386 green 2
             }
         )
         self.assertEqual(self.amqp_requests, set())
-        self.assertEqual(self.pending_requests, '')
+        self.assertEqual(self.pending_requests, {})
 
     def test_rdepends_unbuilt_unstable_only(self):
         '''Unbuilt reverse dependency which is not in testing'''
@@ -713,7 +707,7 @@ lightgreen 1 i386 green 2
             }
         )
         self.assertEqual(self.amqp_requests, set())
-        self.assertEqual(self.pending_requests, '')
+        self.assertEqual(self.pending_requests, {})
 
         # lightgreen 2 stays unbuilt in britney, but we get a test result for it
         self.swift.set_results({'autopkgtest-series': {
@@ -734,12 +728,12 @@ lightgreen 1 i386 green 2
             }
         )
         self.assertEqual(self.amqp_requests, set())
-        self.assertEqual(self.pending_requests, '')
+        self.assertEqual(self.pending_requests, {})
 
         # next run should not trigger any new requests
         self.do_test([], {'green': (True, {}), 'lightgreen': (False, {})})
         self.assertEqual(self.amqp_requests, set())
-        self.assertEqual(self.pending_requests, '')
+        self.assertEqual(self.pending_requests, {})
 
     def test_rdepends_unbuilt_new_version_fail(self):
         '''Unbuilt reverse dependency gets failure for newer version'''
@@ -790,11 +784,11 @@ lightgreen 1 i386 green 2
             }
         )
         self.assertEqual(self.amqp_requests, set())
-        self.assertEqual(self.pending_requests, '')
+        self.assertEqual(self.pending_requests, {})
 
         # next run should not trigger any new requests
         self.do_test([], {'green': (False, {}), 'lightgreen': (False, {})})
-        self.assertEqual(self.pending_requests, '')
+        self.assertEqual(self.pending_requests, {})
         self.assertEqual(self.amqp_requests, set())
 
     def test_package_pair_running(self):
@@ -832,16 +826,11 @@ lightgreen 1 i386 green 2
              'debci-series-amd64:darkgreen {"triggers": ["green/2"]}']))
 
         # ... and that they get recorded as pending
-        expected_pending = '''darkgreen 1 amd64 green 2
-darkgreen 1 i386 green 2
-green 2 amd64 green 2
-green 2 i386 green 2
-lightgreen 2 amd64 green 2
-lightgreen 2 amd64 lightgreen 2
-lightgreen 2 i386 green 2
-lightgreen 2 i386 lightgreen 2
-'''
-        self.assertEqual(self.pending_requests, expected_pending)
+        self.assertEqual(self.pending_requests,
+                         {'lightgreen/2': {'lightgreen': ['amd64', 'i386']},
+                          'green/2': {'darkgreen': ['amd64', 'i386'],
+                                      'green': ['amd64', 'i386'],
+                                      'lightgreen': ['amd64', 'i386']}})
 
     def test_binary_from_new_source_package_running(self):
         '''building an existing binary for a new source package (running)'''
@@ -856,16 +845,11 @@ lightgreen 2 i386 lightgreen 2
             {'newgreen': [('old-version', '-'), ('new-version', '2')]})
 
         self.assertEqual(len(self.amqp_requests), 8)
-        expected_pending = '''darkgreen 1 amd64 newgreen 2
-darkgreen 1 i386 newgreen 2
-green 1 amd64 newgreen 2
-green 1 i386 newgreen 2
-lightgreen 1 amd64 newgreen 2
-lightgreen 1 i386 newgreen 2
-newgreen 2 amd64 newgreen 2
-newgreen 2 i386 newgreen 2
-'''
-        self.assertEqual(self.pending_requests, expected_pending)
+        self.assertEqual(self.pending_requests,
+                         {'newgreen/2': {'darkgreen': ['amd64', 'i386'],
+                                         'green': ['amd64', 'i386'],
+                                         'lightgreen': ['amd64', 'i386'],
+                                         'newgreen': ['amd64', 'i386']}})
 
     def test_binary_from_new_source_package_pass(self):
         '''building an existing binary for a new source package (pass)'''
@@ -892,7 +876,7 @@ newgreen 2 i386 newgreen 2
             {'newgreen': [('old-version', '-'), ('new-version', '2')]})
 
         self.assertEqual(self.amqp_requests, set())
-        self.assertEqual(self.pending_requests, '')
+        self.assertEqual(self.pending_requests, {})
 
     def test_result_from_older_version(self):
         '''test result from older version than the uploaded one'''
@@ -911,7 +895,7 @@ newgreen 2 i386 newgreen 2
             set(['debci-series-i386:darkgreen {"triggers": ["darkgreen/2"]}',
                  'debci-series-amd64:darkgreen {"triggers": ["darkgreen/2"]}']))
         self.assertEqual(self.pending_requests,
-                         'darkgreen 2 amd64 darkgreen 2\ndarkgreen 2 i386 darkgreen 2\n')
+                         {'darkgreen/2': {'darkgreen': ['amd64', 'i386']}})
 
         # second run gets the results for darkgreen 2
         self.swift.set_results({'autopkgtest-series': {
@@ -922,7 +906,7 @@ newgreen 2 i386 newgreen 2
             [],
             {'darkgreen': (True, {'darkgreen 2': {'amd64': 'PASS', 'i386': 'PASS'}})})
         self.assertEqual(self.amqp_requests, set())
-        self.assertEqual(self.pending_requests, '')
+        self.assertEqual(self.pending_requests, {})
 
         # next run sees a newer darkgreen, should re-run tests
         self.data.remove_all(True)
@@ -934,7 +918,7 @@ newgreen 2 i386 newgreen 2
             set(['debci-series-i386:darkgreen {"triggers": ["darkgreen/3"]}',
                  'debci-series-amd64:darkgreen {"triggers": ["darkgreen/3"]}']))
         self.assertEqual(self.pending_requests,
-                         'darkgreen 3 amd64 darkgreen 3\ndarkgreen 3 i386 darkgreen 3\n')
+                         {'darkgreen/3': {'darkgreen': ['amd64', 'i386']}})
 
     def test_old_result_from_rdep_version(self):
         '''re-runs reverse dependency test on new versions'''
@@ -959,7 +943,7 @@ newgreen 2 i386 newgreen 2
         })
 
         self.assertEqual(self.amqp_requests, set())
-        self.assertEqual(self.pending_requests, '')
+        self.assertEqual(self.pending_requests, {})
         self.data.remove_all(True)
 
         # second run: new version re-triggers all tests
@@ -972,15 +956,10 @@ newgreen 2 i386 newgreen 2
         })
 
         self.assertEqual(len(self.amqp_requests), 6)
-
-        expected_pending = '''darkgreen 1 amd64 green 3
-darkgreen 1 i386 green 3
-green 3 amd64 green 3
-green 3 i386 green 3
-lightgreen 1 amd64 green 3
-lightgreen 1 i386 green 3
-'''
-        self.assertEqual(self.pending_requests, expected_pending)
+        self.assertEqual(self.pending_requests,
+                         {'green/3': {'darkgreen': ['amd64', 'i386'],
+                                      'green': ['amd64', 'i386'],
+                                      'lightgreen': ['amd64', 'i386']}})
 
         # third run gets the results for green and lightgreen, darkgreen is
         # still running
@@ -999,7 +978,7 @@ lightgreen 1 i386 green 3
         })
         self.assertEqual(self.amqp_requests, set())
         self.assertEqual(self.pending_requests,
-                         'darkgreen 1 amd64 green 3\ndarkgreen 1 i386 green 3\n')
+                         {'green/3': {'darkgreen': ['amd64', 'i386']}})
 
         # fourth run finally gets the new darkgreen result
         self.swift.set_results({'autopkgtest-series': {
@@ -1014,7 +993,7 @@ lightgreen 1 i386 green 3
             }),
         })
         self.assertEqual(self.amqp_requests, set())
-        self.assertEqual(self.pending_requests, '')
+        self.assertEqual(self.pending_requests, {})
 
     def test_tmpfail(self):
         '''tmpfail results'''
@@ -1030,7 +1009,8 @@ lightgreen 1 i386 green 3
         self.do_test(
             [('lightgreen', {'Version': '2', 'Depends': 'libgreen1 (>= 1)'}, 'autopkgtest')],
             {'lightgreen': (False, {'lightgreen 2': {'amd64': 'REGRESSION', 'i386': 'RUNNING'}})})
-        self.assertEqual(self.pending_requests, 'lightgreen 2 i386 lightgreen 2\n')
+        self.assertEqual(self.pending_requests,
+                         {'lightgreen/2': {'lightgreen': ['i386']}})
 
         # one more tmpfail result, should not confuse britney with None version
         self.swift.set_results({'autopkgtest-series': {
@@ -1068,7 +1048,7 @@ lightgreen 1 i386 green 3
                 'darkgreen 1': {'amd64': 'PASS', 'i386': 'PASS'},
             }),
         })
-        self.assertEqual(self.pending_requests, '')
+        self.assertEqual(self.pending_requests, {})
 
         # re-running test manually succeeded (note: darkgreen result should be
         # cached already)
@@ -1085,7 +1065,7 @@ lightgreen 1 i386 green 3
                 'darkgreen 1': {'amd64': 'PASS', 'i386': 'PASS'},
             }),
         })
-        self.assertEqual(self.pending_requests, '')
+        self.assertEqual(self.pending_requests, {})
 
     def test_new_runs_dont_clobber_pass(self):
         '''passing once is sufficient
@@ -1104,7 +1084,7 @@ lightgreen 1 i386 green 3
         self.do_test(
             [('libc6', {'Version': '2'}, None)],
             {'libc6': (True, {'green 1': {'amd64': 'PASS', 'i386': 'PASS'}})})
-        self.assertEqual(self.pending_requests, '')
+        self.assertEqual(self.pending_requests, {})
 
         # new green fails; that's not libc6's fault though, so it should stay
         # valid
@@ -1148,7 +1128,7 @@ lightgreen 1 i386 green 3
                 'lightgreen 2': {'amd64': 'REGRESSION', 'i386': 'REGRESSION'},
             }),
         })
-        self.assertEqual(self.pending_requests, '')
+        self.assertEqual(self.pending_requests, {})
         self.assertEqual(self.amqp_requests, set())
 
         # remove new lightgreen by resetting archive indexes, and re-adding
@@ -1171,7 +1151,7 @@ lightgreen 1 i386 green 3
         self.assertNotIn('lightgreen 2', exc['green']['tests']['autopkgtest'])
 
         # should not trigger new requests
-        self.assertEqual(self.pending_requests, '')
+        self.assertEqual(self.pending_requests, {})
         self.assertEqual(self.amqp_requests, set())
 
         # but the next run should not trigger anything new
@@ -1181,7 +1161,7 @@ lightgreen 1 i386 green 3
                 'lightgreen 1': {'amd64': 'PASS', 'i386': 'PASS'},
             }),
         })
-        self.assertEqual(self.pending_requests, '')
+        self.assertEqual(self.pending_requests, {})
         self.assertEqual(self.amqp_requests, set())
 
     def test_multiarch_dep(self):
@@ -1350,13 +1330,10 @@ lightgreen 1 i386 green 3
              'debci-series-amd64:fancy {"triggers": ["linux-meta-64only/1"]}']))
 
         # ... and that they get recorded as pending
-        expected_pending = '''fancy 1 amd64 linux-meta 1
-fancy 1 amd64 linux-meta-64only 1
-fancy 1 amd64 linux-meta-lts-grumpy 1
-fancy 1 i386 linux-meta 1
-fancy 1 i386 linux-meta-lts-grumpy 1
-'''
-        self.assertEqual(self.pending_requests, expected_pending)
+        self.assertEqual(self.pending_requests,
+                         {'linux-meta-lts-grumpy/1': {'fancy': ['amd64', 'i386']},
+                          'linux-meta/1': {'fancy': ['amd64', 'i386']},
+                          'linux-meta-64only/1': {'fancy': ['amd64']}})
 
     def test_dkms_results_per_kernel(self):
         '''DKMS results get mapped to the triggering kernel version'''
@@ -1384,7 +1361,8 @@ fancy 1 i386 linux-meta-lts-grumpy 1
             'linux-meta-64only': (True, {'fancy 1': {'amd64': 'PASS'}}),
         })
 
-        self.assertEqual(self.pending_requests, 'fancy 1 amd64 linux-meta-lts-grumpy 1\n')
+        self.assertEqual(self.pending_requests,
+                         {'linux-meta-lts-grumpy/1': {'fancy': ['amd64']}})
 
     def test_dkms_results_per_kernel_old_results(self):
         '''DKMS results get mapped to the triggering kernel version, old results'''
@@ -1417,7 +1395,7 @@ fancy 1 i386 linux-meta-lts-grumpy 1
         })
 
         self.assertEqual(self.pending_requests,
-                         'fancy 1 amd64 linux-meta-lts-grumpy 1\n')
+                         {'linux-meta-lts-grumpy/1': {'fancy': ['amd64']}})
 
     def test_kernel_triggered_tests(self):
         '''linux, lxc, glibc tests get triggered by linux-meta* uploads'''
@@ -1506,7 +1484,6 @@ fancy 1 i386 linux-meta-lts-grumpy 1
             }
         )
 
-
 ################################################################
 # Tests for special-cased packages
 ################################################################