Add autopkgtest integration

Add new module autopkgtest.py with the logic for determining the tests for a
source package, requesting tests via AMQP, fetching results from swift, and
keeping track of pending tests between runs. This also caches the downloaded
results from swift, as re-downloading them all is very expensive.

Integrate this into britney.py:

 * In should_upgrade_src(), check whether a package is built everywhere and
   installable (run_autopkgtest), and good enough to run autopkgtests for it
   and its reverse dependencies.

 * In write_excuses(), generate test requests for all excuses and create blocks
   for those that cause test regressions.

This introduces two new hints:

 * force-badtest pkg/ver: Failing results for that package will be ignored.
   This is useful to deal with broken tests that get imported from Debian,
   come from under-maintained packages, or broke due to some infrastructure
   change. These hints are usually long-lived.

 * force-skiptest pkg/ver: Test results *triggered by* that package (i.e. by
   its reverse dependencies) will be ignored. This is mostly useful for
   landing packages that trigger a huge number of tests (glibc, perl) where
   some tests are just too flaky to get them all passing, and one just wants
   to land the package after the remaining failures have been checked. This
   should be used rarely, and the hint should be removed again immediately.
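
For example, a hints file could contain entries like these (package names and
versions are purely illustrative):

    force-badtest libpng/1.2.51-0ubuntu3
    force-skiptest glibc/2.21-0ubuntu4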

Add integration tests that call britney in various scenarios on constructed
fake archives, with mocked AMQP and Swift results.
pre-rebase-2016-10-25
Martin Pitt
parent d99e769988
commit 943621deb2

autopkgtest.py
@@ -0,0 +1,669 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2013 - 2015 Canonical Ltd.
# Authors:
#   Colin Watson <cjwatson@ubuntu.com>
#   Jean-Baptiste Lallement <jean-baptiste.lallement@canonical.com>
#   Martin Pitt <martin.pitt@ubuntu.com>

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

import os
import time
import json
import tarfile
import io
import re
import sys
import urllib.parse
from urllib.request import urlopen

import apt_pkg
import amqplib.client_0_8 as amqp

from consts import VERSION


def srchash(src):
    '''archive hash prefix for source package'''
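    # e.g. srchash('glibc') == 'g', srchash('libpng') == 'libp'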
    if src.startswith('lib'):
        return src[:4]
    else:
        return src[0]


class AutoPackageTest(object):
    """autopkgtest integration

    Look for autopkgtest jobs to run for each update that is otherwise a
    valid candidate, and collect the results.  If an update causes any
    autopkgtest jobs to be run, then they must all pass before the update is
    accepted.
    """

    def __init__(self, britney, distribution, series, debug=False):
        self.britney = britney
        self.distribution = distribution
        self.series = series
        self.debug = debug
        self.excludes = set()
        self.test_state_dir = os.path.join(britney.options.unstable,
                                           'autopkgtest')
        # tests requested in this and previous runs
        # trigger -> src -> [arch]
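        # An illustrative (made-up) pending.json entry:
        #   {"glibc/2.21-0ubuntu4": {"libreoffice": ["amd64", "i386"]}}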
        self.pending_tests = None
        self.pending_tests_file = os.path.join(self.test_state_dir, 'pending.json')

        if not os.path.isdir(self.test_state_dir):
            os.mkdir(self.test_state_dir)
        self.read_pending_tests()

        # results map: trigger -> src -> arch -> [passed, version, run_id]
        # - trigger is "source/version" of an unstable package that triggered
        #   this test run.
        # - "passed" is a bool
        # - "version" is the package version of "src" of that test
        # - "run_id" is an opaque ID that identifies a particular test run for
        #   a given src/arch.  It's usually a time stamp like "20150120_125959".
        #   This is also used for tracking the latest seen time stamp for
        #   requesting only newer results.
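        # An illustrative (made-up) entry, as serialized in results.cache:
        #   {"glibc/2.21-0ubuntu4":
        #       {"libreoffice": {"amd64": [true, "5.0-1", "20150120_125959"]}}}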
        self.test_results = {}
        if self.britney.options.adt_shared_results_cache:
            self.results_cache_file = self.britney.options.adt_shared_results_cache
        else:
            self.results_cache_file = os.path.join(self.test_state_dir, 'results.cache')

        self.swift_container = 'autopkgtest-' + self.series
        if self.britney.options.adt_ppas:
            self.swift_container += '-' + self.britney.options.adt_ppas[-1].replace('/', '-')

        # read the cached results that we collected so far
        if os.path.exists(self.results_cache_file):
            with open(self.results_cache_file) as f:
                self.test_results = json.load(f)
            self.log_verbose('Read previous results from %s' % self.results_cache_file)
        else:
            self.log_verbose('%s does not exist, re-downloading all results '
                             'from swift' % self.results_cache_file)

        self.setup_amqp()
    def setup_amqp(self):
        '''Initialize AMQP connection'''

        self.amqp_channel = None
        self.amqp_file = None
        if self.britney.options.dry_run:
            # in dry-run mode, don't issue any requests
            return

        amqp_url = self.britney.options.adt_amqp

        if amqp_url.startswith('amqp://'):
            # in production mode, connect to AMQP server
            creds = urllib.parse.urlsplit(amqp_url, allow_fragments=False)
            self.amqp_con = amqp.Connection(creds.hostname, userid=creds.username,
                                            password=creds.password)
            self.amqp_channel = self.amqp_con.channel()
            self.log_verbose('Connected to AMQP server')
        elif amqp_url.startswith('file://'):
            # in testing mode, adt_amqp will be a file:// URL
            self.amqp_file = amqp_url[7:]
        else:
            raise RuntimeError('Unknown ADT_AMQP schema %s' % amqp_url.split(':', 1)[0])

    def log_verbose(self, msg):
        if self.britney.options.verbose:
            print('I: [%s] - %s' % (time.asctime(), msg))

    def log_error(self, msg):
        print('E: [%s] - %s' % (time.asctime(), msg))
    @classmethod
    def has_autodep8(kls, srcinfo, binaries):
        '''Check if package is covered by autodep8

        srcinfo is an item from self.britney.sources
        binaries is self.britney.binaries['unstable'][arch][0]
        '''
        # autodep8?
        for t in srcinfo.testsuite:
            if t.startswith('autopkgtest-pkg'):
                return True

        # DKMS: some binary depends on "dkms"
        for pkg_id in srcinfo.binaries:
            try:
                bininfo = binaries[pkg_id.package_name]
            except KeyError:
                continue
            if 'dkms' in (bininfo.depends or ''):
                return True
        return False
    def tests_for_source(self, src, ver, arch):
        '''Iterate over all tests that should be run for given source and arch'''

        sources_info = self.britney.sources['testing']
        binaries_info = self.britney.binaries['testing'][arch][0]

        reported_pkgs = set()

        tests = []

        # hack for vivid's gccgo-5 and xenial's gccgo-6; these build libgcc1
        # too, so test some Go and some libgcc1 consumers
        if src in ['gccgo-5', 'gccgo-6']:
            for test in ['juju-mongodb', 'mongodb', 'libreoffice']:
                try:
                    tests.append((test, self.britney.sources['testing'][test][VERSION]))
                except KeyError:
                    # no package in that series? *shrug*, then not (mostly for testing)
                    pass
            return tests

        # gcc-N triggers tons of tests via libgcc1, but this is mostly in vain:
        # gcc already tests itself during build, and it is being used from
        # -proposed, so holding it back on a dozen unrelated test failures
        # serves no purpose.  Just check some key packages which actually use
        # gcc during the test, and libreoffice as an example for a libgcc user.
        if src.startswith('gcc-'):
            if re.match(r'gcc-\d$', src):
                for test in ['binutils', 'fglrx-installer', 'libreoffice', 'linux']:
                    try:
                        tests.append((test, self.britney.sources['testing'][test][VERSION]))
                    except KeyError:
                        # no package in that series? *shrug*, then not (mostly for testing)
                        pass
                return tests
            else:
                # for other compilers such as gcc-snapshot etc. we don't need
                # to trigger anything
                return []

        # for linux themselves we don't want to trigger tests -- these should
        # all come from linux-meta*.  A new kernel ABI without a corresponding
        # -meta won't be installed and thus we can't sensibly run tests against
        # it.
        if src.startswith('linux') and src.replace('linux', 'linux-meta') in self.britney.sources['testing']:
            return []

        # we want to test the package itself, if it still has a test in unstable
        srcinfo = self.britney.sources['unstable'][src]
        if 'autopkgtest' in srcinfo.testsuite or self.has_autodep8(srcinfo, binaries_info):
            reported_pkgs.add(src)
            tests.append((src, ver))

        extra_bins = []

        # Hack: For new kernels trigger all DKMS packages by pretending that
        # linux-meta* builds a "dkms" binary as well.  With that we ensure that we
        # don't regress DKMS drivers with new kernel versions.
        if src.startswith('linux-meta'):
            # does this have any image on this arch?
            for pkg_id in srcinfo.binaries:
                if pkg_id.architecture == arch and '-image' in pkg_id.package_name:
                    try:
                        extra_bins.append(binaries_info['dkms'].pkg_id)
                    except KeyError:
                        pass

        # plus all direct reverse dependencies and test triggers of its
        # binaries which have an autopkgtest
        for binary in srcinfo.binaries + extra_bins:
            rdeps = self.britney._inst_tester.reverse_dependencies_of(binary)
            for rdep in rdeps:
                try:
                    rdep_src = binaries_info[rdep.package_name].source
                except KeyError:
                    self.log_verbose('%s on %s has no source (NBS?)' % (rdep.package_name, arch))
                    continue

                # if rdep_src/unstable is known to be not built yet or
                # uninstallable, try to run tests against testing; if that
                # works, then the unstable src does not break the testing
                # rdep_src and is fine
                if rdep_src in self.excludes:
                    try:
                        rdep_src_info = self.britney.sources['testing'][rdep_src]
                        self.log_verbose('Reverse dependency %s of %s/%s is unbuilt or uninstallable, running test against testing version %s' %
                                         (rdep_src, src, ver, rdep_src_info[VERSION]))
                    except KeyError:
                        self.log_verbose('Reverse dependency %s of %s/%s is unbuilt or uninstallable and not present in testing, ignoring' %
                                         (rdep_src, src, ver))
                        continue
                else:
                    rdep_src_info = sources_info[rdep_src]

                if 'autopkgtest' in rdep_src_info.testsuite or self.has_autodep8(rdep_src_info, binaries_info):
                    if rdep_src not in reported_pkgs:
                        tests.append((rdep_src, rdep_src_info[VERSION]))
                        reported_pkgs.add(rdep_src)

            for tdep_src in self.britney.testsuite_triggers.get(binary.package_name, set()):
                if tdep_src not in reported_pkgs:
                    try:
                        tdep_src_info = sources_info[tdep_src]
                    except KeyError:
                        continue
                    if 'autopkgtest' in tdep_src_info.testsuite or self.has_autodep8(tdep_src_info, binaries_info):
                        tests.append((tdep_src, tdep_src_info[VERSION]))
                        reported_pkgs.add(tdep_src)

        # Hardcode linux-meta → linux, lxc, glibc, systemd triggers until we get a more flexible
        # implementation: https://bugs.debian.org/779559
        if src.startswith('linux-meta'):
            for pkg in ['lxc', 'lxd', 'glibc', src.replace('linux-meta', 'linux'), 'systemd']:
                if pkg not in reported_pkgs:
                    # does this have any image on this arch?
                    for pkg_id in srcinfo.binaries:
                        if pkg_id.architecture == arch and '-image' in pkg_id.package_name:
                            try:
                                tests.append((pkg, self.britney.sources['unstable'][pkg][VERSION]))
                            except KeyError:
                                try:
                                    tests.append((pkg, self.britney.sources['testing'][pkg][VERSION]))
                                except KeyError:
                                    # package not in that series? *shrug*, then not
                                    pass
                            break

        tests.sort(key=lambda s_v: s_v[0])
        return tests
    #
    # AMQP/cloud interface helpers
    #

    def read_pending_tests(self):
        '''Read pending test requests from previous britney runs

        Initialize self.pending_tests with that data.
        '''
        assert self.pending_tests is None, 'already initialized'
        if not os.path.exists(self.pending_tests_file):
            self.log_verbose('No %s, starting with no pending tests' %
                             self.pending_tests_file)
            self.pending_tests = {}
            return
        with open(self.pending_tests_file) as f:
            self.pending_tests = json.load(f)
        self.log_verbose('Read pending requested tests from %s: %s' %
                         (self.pending_tests_file, self.pending_tests))
    def latest_run_for_package(self, src, arch):
        '''Return latest run ID for src on arch'''

        # this requires iterating over all triggers and thus is expensive;
        # cache the results
        try:
            return self.latest_run_for_package._cache[src][arch]
        except KeyError:
            pass

        latest_run_id = ''
        for srcmap in self.test_results.values():
            try:
                run_id = srcmap[src][arch][2]
            except KeyError:
                continue
            if run_id > latest_run_id:
                latest_run_id = run_id
        self.latest_run_for_package._cache.setdefault(src, {})[arch] = latest_run_id
        return latest_run_id

    latest_run_for_package._cache = {}
    def fetch_swift_results(self, swift_url, src, arch):
        '''Download new results for source package/arch from swift'''

        # Download results for one particular src/arch at most once in every
        # run, as this is expensive
        done_entry = src + '/' + arch
        if done_entry in self.fetch_swift_results._done:
            return
        self.fetch_swift_results._done.add(done_entry)

        # prepare query: get all runs with a timestamp later than the latest
        # run_id for this package/arch; '@' is at the end of each run id, to
        # mark the end of a test run directory path
        # example: <autopkgtest-wily>wily/amd64/libp/libpng/20150630_054517@/result.tar
        query = {'delimiter': '@',
                 'prefix': '%s/%s/%s/%s/' % (self.series, arch, srchash(src), src)}

        # determine latest run_id from results
        if not self.britney.options.adt_shared_results_cache:
            latest_run_id = self.latest_run_for_package(src, arch)
            if latest_run_id:
                query['marker'] = query['prefix'] + latest_run_id

        # request new results from swift
        url = os.path.join(swift_url, self.swift_container)
        url += '?' + urllib.parse.urlencode(query)
        try:
            f = urlopen(url, timeout=30)
            if f.getcode() == 200:
                result_paths = f.read().decode().strip().splitlines()
            elif f.getcode() == 204:  # No content
                result_paths = []
            else:
                # we should not ever end up here as we expect an HTTPError in
                # other cases; e. g. 3XX is something that tells us to adjust
                # our URLs, so fail hard on those
                raise NotImplementedError('fetch_swift_results(%s): cannot handle HTTP code %i' %
                                          (url, f.getcode()))
            f.close()
        except IOError as e:
            # 401 "Unauthorized" is swift's way of saying "container does not exist"
            if hasattr(e, 'code') and e.code == 401:
                self.log_verbose('fetch_swift_results: %s does not exist yet or is inaccessible' % url)
                return

            # Other status codes are usually a transient
            # network/infrastructure failure.  Ignoring this can lead to
            # re-requesting tests which we already have results for, so
            # fail hard on this and let the next run retry.
            self.log_error('FATAL: Failure to fetch swift results from %s: %s' % (url, str(e)))
            sys.exit(1)

        for p in result_paths:
            self.fetch_one_result(
                os.path.join(swift_url, self.swift_container, p, 'result.tar'), src, arch)

    fetch_swift_results._done = set()
    def fetch_one_result(self, url, src, arch):
        '''Download one result URL for source/arch

        Remove matching pending_tests entries.
        '''
        try:
            f = urlopen(url, timeout=30)
            if f.getcode() == 200:
                tar_bytes = io.BytesIO(f.read())
                f.close()
            else:
                raise NotImplementedError('fetch_one_result(%s): cannot handle HTTP code %i' %
                                          (url, f.getcode()))
        except IOError as e:
            self.log_error('Failure to fetch %s: %s' % (url, str(e)))
            # we tolerate "not found" (something went wrong on uploading the
            # result), but other things indicate infrastructure problems
            if hasattr(e, 'code') and e.code == 404:
                return
            sys.exit(1)

        try:
            with tarfile.open(None, 'r', tar_bytes) as tar:
                exitcode = int(tar.extractfile('exitcode').read().strip())
                srcver = tar.extractfile('testpkg-version').read().decode().strip()
                (ressrc, ver) = srcver.split()
                testinfo = json.loads(tar.extractfile('testinfo.json').read().decode())
        except (KeyError, ValueError, tarfile.TarError) as e:
            self.log_error('%s is damaged, ignoring: %s' % (url, str(e)))
            # ignore this; this will leave an orphaned request in pending.json
            # and thus require manual retries after fixing the tmpfail, but we
            # can't just blindly attribute it to some pending test.
            return

        if src != ressrc:
            self.log_error('%s is a result for package %s, but expected package %s' %
                           (url, ressrc, src))
            return
        # parse recorded triggers in test result
        for e in testinfo.get('custom_environment', []):
            if e.startswith('ADT_TEST_TRIGGERS='):
                result_triggers = [i for i in e.split('=', 1)[1].split() if '/' in i]
                break
        else:
            self.log_error('%s result has no ADT_TEST_TRIGGERS, ignoring' % url)
            return

        stamp = os.path.basename(os.path.dirname(url))
        # allow some skipped tests, but nothing else
        passed = exitcode in [0, 2]

        self.log_verbose('Fetched test result for %s/%s/%s %s (triggers: %s): %s' % (
            src, ver, arch, stamp, result_triggers, passed and 'pass' or 'fail'))

        # remove matching test requests
        for trigger in result_triggers:
            try:
                arch_list = self.pending_tests[trigger][src]
                arch_list.remove(arch)
                if not arch_list:
                    del self.pending_tests[trigger][src]
                if not self.pending_tests[trigger]:
                    del self.pending_tests[trigger]
                self.log_verbose('-> matches pending request %s/%s for trigger %s' % (src, arch, trigger))
            except (KeyError, ValueError):
                self.log_verbose('-> does not match any pending request for %s/%s' % (src, arch))

        # add this result
        for trigger in result_triggers:
            # If a test runs because of its own package (newer version), ensure
            # that we got a new enough version; FIXME: this should be done more
            # generically by matching against testpkg-versions
            (trigsrc, trigver) = trigger.split('/', 1)
            if trigsrc == src and apt_pkg.version_compare(ver, trigver) < 0:
                self.log_error('test trigger %s, but run for older version %s, ignoring' % (trigger, ver))
                continue

            result = self.test_results.setdefault(trigger, {}).setdefault(
                src, {}).setdefault(arch, [False, None, ''])

            # don't clobber existing passed results with failures from re-runs
            if passed or not result[0]:
                result[0] = passed
                result[1] = ver
                result[2] = stamp
    def send_test_request(self, src, arch, trigger):
        '''Send out AMQP request for testing src/arch for trigger'''

        if self.britney.options.dry_run:
            return

        params = {'triggers': [trigger]}
        if self.britney.options.adt_ppas:
            params['ppas'] = self.britney.options.adt_ppas
            qname = 'debci-ppa-%s-%s' % (self.series, arch)
        else:
            qname = 'debci-%s-%s' % (self.series, arch)
        params = json.dumps(params)

        if self.amqp_channel:
            self.amqp_channel.basic_publish(amqp.Message(src + '\n' + params), routing_key=qname)
        else:
            assert self.amqp_file
            with open(self.amqp_file, 'a') as f:
                f.write('%s:%s %s\n' % (qname, src, params))
    def pkg_test_request(self, src, arch, trigger):
        '''Request one package test for one particular trigger

        trigger is "pkgname/version" of the package that triggers the testing
        of src.

        This will only be done if that test wasn't already requested in a
        previous run (i. e. not already in self.pending_tests) or there already
        is a result for it.  This ensures that we download current results for
        this package before requesting any test.
        '''
        # Don't re-request if we already have a result
        try:
            passed = self.test_results[trigger][src][arch][0]
            if passed:
                self.log_verbose('%s/%s triggered by %s already passed' % (src, arch, trigger))
                return
            self.log_verbose('Checking for new results for failed %s/%s for trigger %s' %
                             (src, arch, trigger))
            raise KeyError  # fall through
        except KeyError:
            self.fetch_swift_results(self.britney.options.adt_swift_url, src, arch)
            # do we have one now?
            try:
                self.test_results[trigger][src][arch]
                return
            except KeyError:
                pass

        # Don't re-request if it's already pending
        arch_list = self.pending_tests.setdefault(trigger, {}).setdefault(src, [])
        if arch in arch_list:
            self.log_verbose('Test %s/%s for %s is already pending, not queueing' %
                             (src, arch, trigger))
        else:
            self.log_verbose('Requesting %s autopkgtest on %s to verify %s' %
                             (src, arch, trigger))
            arch_list.append(arch)
            arch_list.sort()
            self.send_test_request(src, arch, trigger)
    def check_ever_passed(self, src, arch):
        '''Check if tests for src ever passed on arch'''

        # FIXME: add caching
        for srcmap in self.test_results.values():
            try:
                if srcmap[src][arch][0]:
                    return True
            except KeyError:
                pass
        return False

    def has_force_badtest(self, src, ver, arch):
        '''Check if src/ver/arch has a force-badtest hint'''

        hints = self.britney.hints.search('force-badtest', package=src)
        hints.extend(self.britney.hints.search('force', package=src))
        if hints:
            self.log_verbose('Checking hints for %s/%s/%s: %s' % (src, ver, arch, [str(h) for h in hints]))
            for hint in hints:
                if [mi for mi in hint.packages if mi.architecture in ['source', arch] and
                        (mi.version == 'all' or apt_pkg.version_compare(ver, mi.version) <= 0)]:
                    return True
        return False
    #
    # Public API
    #

    def request(self, packages, excludes=None):
        '''Request test runs for verifying packages

        "packages" is a list of (trigsrc, trigver) pairs with the packages in
        unstable (the "triggers") that need to be tested against their reverse
        dependencies (and also their own tests).

        "excludes" is an iterable of packages that britney determined to be
        uninstallable.
        '''
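        # Illustrative call (made-up names/versions), matching how britney's
        # write_excuses() invokes this:
        #   autopkgtest.request([('glibc', '2.21-0ubuntu4')], ['brokenpkg'])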
        if excludes:
            self.excludes.update(excludes)

        self.log_verbose('Requesting autopkgtests for %s, exclusions: %s' %
                         (['%s/%s' % i for i in packages], str(self.excludes)))
        for src, ver in packages:
            for arch in self.britney.options.adt_arches:
                for (testsrc, _) in self.tests_for_source(src, ver, arch):
                    self.pkg_test_request(testsrc, arch, src + '/' + ver)

        # update the results on-disk cache, unless we are using a r/o shared one
        if not self.britney.options.adt_shared_results_cache:
            self.log_verbose('Updating results cache')
            with open(self.results_cache_file + '.new', 'w') as f:
                json.dump(self.test_results, f, indent=2)
            os.rename(self.results_cache_file + '.new', self.results_cache_file)

        # update the pending tests on-disk cache
        self.log_verbose('Updated pending requested tests in %s' % self.pending_tests_file)
        with open(self.pending_tests_file + '.new', 'w') as f:
            json.dump(self.pending_tests, f, indent=2)
        os.rename(self.pending_tests_file + '.new', self.pending_tests_file)
    def results(self, trigsrc, trigver):
        '''Return test results for triggering package

        Return (passed, src, ver, arch ->
                   (ALWAYSFAIL|PASS|REGRESSION|IGNORE-FAIL|RUNNING|RUNNING-ALWAYSFAIL, log_url))
        iterable for all package tests that got triggered by trigsrc/trigver.

        ver is None if tests are still running on all architectures, otherwise
        the actually tested version of src.
        '''
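        # An illustrative yielded tuple (made-up data):
        #   (False, 'libreoffice', '5.0-1',
        #    {'amd64': ('REGRESSION', 'https://.../log.gz')})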
        # (src, ver) -> arch -> ALWAYSFAIL|PASS|REGRESSION|RUNNING|RUNNING-ALWAYSFAIL
        pkg_arch_result = {}
        trigger = trigsrc + '/' + trigver

        for arch in self.britney.options.adt_arches:
            for testsrc, testver in self.tests_for_source(trigsrc, trigver, arch):
                ever_passed = self.check_ever_passed(testsrc, arch)
                url = None

                # Do we have a result already? (possibly for an older or newer
                # version, that's okay)
                try:
                    r = self.test_results[trigger][testsrc][arch]
                    testver = r[1]
                    run_id = r[2]
                    if r[0]:
                        result = 'PASS'
                    else:
                        # Special-case triggers from linux-meta*: we cannot compare
                        # results against different kernels, as e. g. a DKMS module
                        # might work against the default kernel but fail against a
                        # different flavor; so for those, ignore the "ever
                        # passed" check; FIXME: check against trigsrc only
                        if trigsrc.startswith('linux-meta') or trigsrc == 'linux':
                            ever_passed = False

                        if ever_passed:
                            if self.has_force_badtest(testsrc, testver, arch):
                                result = 'IGNORE-FAIL'
                            else:
                                result = 'REGRESSION'
                        else:
                            result = 'ALWAYSFAIL'

                    url = os.path.join(self.britney.options.adt_swift_url,
                                       self.swift_container,
                                       self.series,
                                       arch,
                                       srchash(testsrc),
                                       testsrc,
                                       run_id,
                                       'log.gz')
                except KeyError:
                    # no result for testsrc/arch; still running?
                    if arch in self.pending_tests.get(trigger, {}).get(testsrc, []):
                        if ever_passed and not self.has_force_badtest(testsrc, testver, arch):
                            result = 'RUNNING'
                        else:
                            result = 'RUNNING-ALWAYSFAIL'
                        url = 'http://autopkgtest.ubuntu.com/running'
                    else:
                        # ignore if adt or swift results are disabled,
                        # otherwise this is unexpected
                        if not hasattr(self.britney.options, 'adt_swift_url'):
                            continue
                        raise RuntimeError('Result for %s/%s/%s (triggered by %s) is neither known nor pending!' %
                                           (testsrc, testver, arch, trigger))

                pkg_arch_result.setdefault((testsrc, testver), {})[arch] = (result, url)

        for ((testsrc, testver), arch_results) in pkg_arch_result.items():
            r = set([v[0] for v in arch_results.values()])
            passed = 'REGRESSION' not in r and 'RUNNING' not in r
            # skip version if still running on all arches
            if not r - {'RUNNING', 'RUNNING-ALWAYSFAIL'}:
                testver = None
            yield (passed, testsrc, testver, arch_results)

@@ -81,3 +81,16 @@ SMOOTH_UPDATES = badgers
 IGNORE_CRUFT = 0
 REMOVE_OBSOLETE = no
+
+ADT_ENABLE = yes
+ADT_DEBUG = no
+ADT_ARCHES = amd64 i386 armhf ppc64el
+ADT_AMQP = amqp://test_request:password@162.213.33.228
+# Swift base URL with the results (must be publicly readable and browsable)
+ADT_SWIFT_URL = https://objectstorage.prodstack4-5.canonical.com/v1/AUTH_77e2ada1e7a84929a74ba3b87153c0ac
+# space separated list of PPAs to add for test requests and for polling results;
+# the *last* one determines the swift container name
+ADT_PPAS =
+# set this to the path of a (r/o) results.cache for running many parallel
+# britney instances for PPAs without updating the cache
+ADT_SHARED_RESULTS_CACHE =

britney.py
@@ -196,7 +196,7 @@ from functools import reduce
 from itertools import product
 from operator import attrgetter

-from urllib.parse import quote
+from urllib.parse import quote, urlencode

 from installability.builder import InstallabilityTesterBuilder
 from excuse import Excuse
@@ -217,6 +217,7 @@ from policies.policy import AgePolicy, RCBugPolicy, LPBlockBugPolicy, PolicyVerd
 # Check the "check_field_name" reflection before removing an import here.
 from consts import (SOURCE, SOURCEVER, ARCHITECTURE, CONFLICTS, DEPENDS,
                     PROVIDES, MULTIARCH, MULTIVERSE)
+from autopkgtest import AutoPackageTest, srchash

 __author__ = 'Fabio Tranchitella and the Debian Release Team'
 __version__ = '2.0'
@@ -238,16 +239,17 @@ check_fields = sorted(check_field_name)
 class SourcePackage(object):

     __slots__ = ['version', 'section', 'binaries', 'maintainer', 'is_fakesrc',
-                 'testsuite']
+                 'testsuite', 'testsuite_triggers']

     def __init__(self, version, section, binaries, maintainer, is_fakesrc,
-                 testsuite):
+                 testsuite, testsuite_triggers):
         self.version = version
         self.section = section
         self.binaries = binaries
         self.maintainer = maintainer
         self.is_fakesrc = is_fakesrc
         self.testsuite = testsuite
+        self.testsuite_triggers = testsuite_triggers

     def __getitem__(self, item):
         return getattr(self, self.__slots__[item])
@@ -287,7 +289,7 @@ class Britney(object):
     HINTS_HELPERS = ("easy", "hint", "remove", "block", "block-udeb", "unblock", "unblock-udeb", "approve", "remark")
     HINTS_STANDARD = ("urgent", "age-days") + HINTS_HELPERS
     # ALL = {"force", "force-hint", "block-all"} | HINTS_STANDARD | registered policy hints (not covered above)
-    HINTS_ALL = ('ALL')
+    HINTS_ALL = ('ALL', "force-badtest", "force-skiptest")

     def __init__(self):
         """Class constructor
@@ -338,6 +340,14 @@ class Britney(object):
         self.binaries['tpu'] = {}
         self.binaries['pu'] = {}

+        # compute inverse Testsuite-Triggers: map, unifying all series
+        self.log('Building inverse testsuite_triggers map')
+        self.testsuite_triggers = {}
+        for suitemap in self.sources.values():
+            for src, data in suitemap.items():
+                for trigger in data.testsuite_triggers:
+                    self.testsuite_triggers.setdefault(trigger, set()).add(src)
+
         self.binaries['unstable'] = self.read_binaries(self.options.unstable, "unstable", self.options.architectures)
         for suite in ('tpu', 'pu'):
             if hasattr(self.options, suite):
@@ -529,6 +539,19 @@ class Britney(object):
            self.options.ignore_cruft == "0":
             self.options.ignore_cruft = False

+        # restrict adt_arches to architectures we actually run for
+        adt_arches = []
+        for arch in self.options.adt_arches.split():
+            if arch in self.options.architectures:
+                adt_arches.append(arch)
+            else:
+                self.log("Ignoring ADT_ARCHES %s as it is not in architectures list" % arch)
+        self.options.adt_arches = adt_arches
+
+        try:
+            self.options.adt_ppas = self.options.adt_ppas.strip().split()
+        except AttributeError:
+            self.options.adt_ppas = []
+
         self.policies.append(AgePolicy(self.options, MINDAYS))
         self.policies.append(RCBugPolicy(self.options))
         self.policies.append(LPBlockBugPolicy(self.options))
@@ -590,6 +613,7 @@ class Britney(object):
                                      None,
                                      True,
                                      [],
+                                     [],
                                      )

             self.sources['testing'][pkg_name] = src_data
@@ -665,6 +689,7 @@ class Britney(object):
                                      None,
                                      True,
                                      [],
+                                     [],
                                      )
             self.sources['testing'][pkg_name] = src_data
             self.sources['unstable'][pkg_name] = src_data
@@ -847,6 +872,7 @@ class Britney(object):
                                      maint,
                                      False,
                                      get_field('Testsuite', '').split(),
+                                     get_field('Testsuite-Triggers', '').replace(',', '').split(),
                                      )
         return sources
@@ -996,7 +1022,7 @@ class Britney(object):
                 srcdist[source].binaries.append(pkg_id)
             # if the source package doesn't exist, create a fake one
             else:
-                srcdist[source] = SourcePackage(source_version, 'faux', [pkg_id], None, True, [])
+                srcdist[source] = SourcePackage(source_version, 'faux', [pkg_id], None, True, [], [])

             # add the resulting dictionary to the package list
             packages[pkg] = dpkg
@@ -1173,7 +1199,7 @@ class Britney(object):
         hints = self._hint_parser.hints

-        for x in ["block", "block-all", "block-udeb", "unblock", "unblock-udeb", "force", "urgent", "remove", "age-days"]:
+        for x in ["block", "block-all", "block-udeb", "unblock", "unblock-udeb", "force", "force-badtest", "force-skiptest", "urgent", "remove", "age-days"]:
             z = {}
             for hint in hints[x]:
                 package = hint.package
@@ -1191,7 +1217,7 @@ class Britney(object):
                     self.log("Ignoring %s[%s] = ('%s', '%s'), ('%s', '%s') is higher or equal" %
                              (x, package, hint.version, hint.user, hint2.version, hint2.user), type="W")
                     hint.set_active(False)
-                else:
+                elif x not in ["force-badtest", "force-skiptest"]:
                     self.log("Overriding %s[%s] = ('%s', '%s', '%s') with ('%s', '%s', '%s')" %
                              (x, package, hint2.version, hint2.user, hint2.days,
                               hint.version, hint.user, hint.days), type="W")
@@ -1572,7 +1598,7 @@ class Britney(object):
         source_u.section and excuse.set_section(source_u.section)
         excuse.set_distribution(self.options.distribution)

-        # the starting point is that we will update the candidate
+        # the starting point is that we will update the candidate and run autopkgtests
         update_candidate = True

         # if the version in unstable is older, then stop here with a warning in the excuse and return False
@@ -1981,6 +2007,76 @@ class Britney(object):
         # extract the not considered packages, which are in the excuses but not in upgrade_me
         unconsidered = [ename for ename in excuses if ename not in upgrade_me]

+        if getattr(self.options, "adt_enable", "no") == "yes" and \
+           self.options.series:
+            # trigger autopkgtests for valid candidates
+            adt_debug = getattr(self.options, "adt_debug", "no") == "yes"
+            autopkgtest = AutoPackageTest(
+                self, self.options.distribution, self.options.series,
+                debug=adt_debug)
+            autopkgtest_packages = []
+            autopkgtest_excuses = []
+            autopkgtest_excludes = []
+            for e in self.excuses.values():
+                # we still want to run tests if the only invalid reason is that
+                # it's blocked
+                if not e.is_valid and (list(e.reason) != ['block'] or e.missing_builds):
+                    autopkgtest_excludes.append(e.name)
+                    continue
+                # skip removals, binary-only candidates, and proposed-updates
+                if e.name.startswith("-") or "/" in e.name or "_" in e.name:
+                    continue
+                if e.ver[1] == "-":
+                    continue
+                autopkgtest_excuses.append(e)
+                autopkgtest_packages.append((e.name, e.ver[1]))
+
+            autopkgtest.request(autopkgtest_packages, autopkgtest_excludes)
+
+            cloud_url = "http://autopkgtest.ubuntu.com/packages/%(h)s/%(s)s/%(r)s/%(a)s"
+            for e in autopkgtest_excuses:
+                adtpass = True
+                for passed, adtsrc, adtver, arch_status in autopkgtest.results(
+                        e.name, e.ver[1]):
+                    for arch, (status, log_url) in arch_status.items():
+                        kwargs = {}
+                        if self.options.adt_ppas:
+                            if log_url.endswith('log.gz'):
+                                kwargs['artifact_url'] = log_url.replace('log.gz', 'artifacts.tar.gz')
+                        else:
+                            kwargs['history_url'] = cloud_url % {
+                                'h': srchash(adtsrc), 's': adtsrc,
+                                'r': self.options.series, 'a': arch}
+                        if status == 'REGRESSION':
+                            kwargs['retry_url'] = 'https://autopkgtest.ubuntu.com/request.cgi?' + \
+                                urlencode([('release', self.options.series),
+                                           ('arch', arch),
+                                           ('package', adtsrc),
+                                           ('trigger', '%s/%s' % (e.name, e.ver[1]))] +
+                                          [('ppa', p) for p in self.options.adt_ppas])
+                        if adtver:
+                            testname = '%s/%s' % (adtsrc, adtver)
+                        else:
+                            testname = adtsrc
+                        e.addtest('autopkgtest', testname,
+                                  arch, status, log_url, **kwargs)
+                    if not passed:
+                        adtpass = False
+
+                if not adtpass and e.is_valid:
+                    hints = self.hints.search('force-skiptest', package=e.name)
+                    hints.extend(self.hints.search('force', package=e.name))
+                    forces = [x for x in hints if e.ver[1] == x.version]
+                    if forces:
+                        e.force()
+                        e.addreason('skiptest')
+                        e.addhtml("Should wait for tests relating to %s %s, but forced by %s" %
+                                  (e.name, e.ver[1], forces[0].user))
+                    else:
+                        upgrade_me.remove(e.name)
+                        unconsidered.append(e.name)
+                        e.addhtml("Not considered")
+                        e.addreason("autopkgtest")
+                        e.is_valid = False
+
         # invalidate impossible excuses
         for e in excuses.values():
             # parts[0] == package name

@@ -79,3 +79,16 @@ HINTS_UBUNTU-TOUCH/OGRA = block unblock
 SMOOTH_UPDATES = badgers
 REMOVE_OBSOLETE = no
+
+ADT_ENABLE = yes
+ADT_DEBUG = no
+ADT_ARCHES = amd64 i386 armhf ppc64el
+ADT_AMQP = amqp://test_request:password@162.213.33.228
+# Swift base URL with the results (must be publicly readable and browsable)
+ADT_SWIFT_URL = https://objectstorage.prodstack4-5.canonical.com/v1/AUTH_77e2ada1e7a84929a74ba3b87153c0ac
+# space separated list of PPAs to add for test requests and for polling results;
+# the *last* one determines the swift container name
+ADT_PPAS =
+# set this to the path of a (r/o) results.cache for running many parallel
+# britney instances for PPAs without updating the cache
+ADT_SHARED_RESULTS_CACHE =

excuse.py
@@ -17,6 +17,16 @@
 from collections import defaultdict
 import re

+EXCUSES_LABELS = {
+    "PASS": '<span style="background:#87d96c">Pass</span>',
+    "FAIL": '<span style="background:#ff6666">Failed</span>',
+    "ALWAYSFAIL": '<span style="background:#e5c545">Always failed</span>',
+    "REGRESSION": '<span style="background:#ff6666">Regression</span>',
+    "IGNORE-FAIL": '<span style="background:#e5c545">Ignored failure</span>',
+    "RUNNING": '<span style="background:#99ddff">Test in progress</span>',
+    "RUNNING-ALWAYSFAIL": '<span style="background:#99ddff">Test in progress (always failed)</span>',
+}

 class Excuse(object):
     """Excuse class
@@ -68,6 +78,9 @@ class Excuse(object):
         self.missing_builds_ood_arch = set()
         self.old_binaries = defaultdict(set)
         self.policy_info = {}
+        # type (e. g. "autopkgtest") -> package (e. g. "foo/2-1") -> arch ->
+        # [state ('PASS'|'ALWAYSFAIL'|'REGRESSION'|'RUNNING'|'RUNNING-ALWAYSFAIL'), log_url, history_url, artifact_url, retry_url]
+        self.tests = {}

     def sortkey(self):
         if self.daysold == None:
@@ -186,6 +199,24 @@ class Excuse(object):
         else:
             res = res + ("<li>%d days old (needed %d days)\n" %
                          (self.daysold, self.mindays))
+        for testtype in sorted(self.tests):
+            for pkg in sorted(self.tests[testtype]):
+                archmsg = []
+                for arch in sorted(self.tests[testtype][pkg]):
+                    status, log_url, history_url, artifact_url, retry_url = self.tests[testtype][pkg][arch]
+                    label = EXCUSES_LABELS[status]
+                    if history_url:
+                        message = '<a href="%s">%s</a>' % (history_url, arch)
+                    else:
+                        message = arch
+                    message += ': <a href="%s">%s</a>' % (log_url, label)
+                    if retry_url:
+                        message += ' <a href="%s" style="text-decoration: none;">♻ </a> ' % retry_url
+                    if artifact_url:
+                        message += ' <a href="%s">[artifacts]</a>' % artifact_url
+                    archmsg.append(message)
+                res = res + ("<li>%s for %s: %s</li>\n" % (testtype, pkg, ', '.join(archmsg)))
         for x in self.htmlline:
             res = res + "<li>" + x + "\n"
         lastdep = ""
@@ -216,6 +247,11 @@ class Excuse(object):
         """adding reason"""
         self.reason[reason] = 1

+    def addtest(self, type_, package, arch, state, log_url, history_url=None,
+                artifact_url=None, retry_url=None):
+        """Add test result"""
+        self.tests.setdefault(type_, {}).setdefault(package, {})[arch] = [state, log_url, history_url, artifact_url, retry_url]
+
     # TODO: remove
     def _text(self):
         """Render the excuse in text"""
@@ -281,5 +317,6 @@ class Excuse(object):
         else:
             excusedata["reason"] = sorted(list(self.reason.keys()))
         excusedata["is-candidate"] = self.is_valid
+        excusedata["tests"] = self.tests
         return excusedata

hints.py
@@ -147,6 +147,10 @@ class HintParser(object):
             'unblock': (1, split_into_one_hint_per_package),
             'unblock-udeb': (1, split_into_one_hint_per_package),

+            # test related hints
+            'force-badtest': (1, split_into_one_hint_per_package),
+            'force-skiptest': (1, split_into_one_hint_per_package),
+
             # Other
             'remove': (1, split_into_one_hint_per_package),
             'force': (1, split_into_one_hint_per_package),

@@ -0,0 +1,188 @@
# (C) 2015 Canonical Ltd.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

import os
import shutil
import subprocess
import tempfile
import unittest

PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

architectures = ['amd64', 'arm64', 'armhf', 'i386', 'powerpc', 'ppc64el']


class TestData:

    def __init__(self):
        '''Construct local test package indexes.

        The archive is initially empty.  You can create new packages with
        create_deb().  self.path contains the path of the archive, and
        self.apt_source provides an apt source "deb" line.

        It is kept in a temporary directory which gets removed when the Archive
        object gets deleted.
        '''
        self.path = tempfile.mkdtemp(prefix='testarchive.')
        self.apt_source = 'deb file://%s /' % self.path
        self.series = 'series'
        self.dirs = {False: os.path.join(self.path, 'data', self.series),
                     True: os.path.join(
                         self.path, 'data', '%s-proposed' % self.series)}
        os.makedirs(self.dirs[False])
        os.mkdir(self.dirs[True])
        self.added_sources = {False: set(), True: set()}
        self.added_binaries = {False: set(), True: set()}

        # pre-create all files for all architectures
        for arch in architectures:
            for dir in self.dirs.values():
                with open(os.path.join(dir, 'Packages_' + arch), 'w'):
                    pass
        for dir in self.dirs.values():
            for fname in ['Dates', 'Blocks']:
                with open(os.path.join(dir, fname), 'w'):
                    pass
            for dname in ['Hints']:
                os.mkdir(os.path.join(dir, dname))

        os.mkdir(os.path.join(self.path, 'output'))

        # create temporary home dir for proposed-migration autopkgtest status
        self.home = os.path.join(self.path, 'home')
        os.environ['HOME'] = self.home
        os.makedirs(os.path.join(self.home, 'proposed-migration',
                                 'autopkgtest', 'work'))

    def __del__(self):
        shutil.rmtree(self.path)
    def add(self, name, unstable, fields={}, add_src=True, testsuite=None, srcfields=None):
        '''Add a binary package to the index file.

        You need to specify at least the package name and in which list to put
        it (unstable==True for unstable/proposed, or False for
        testing/release).  fields specifies all additional entries, e. g.
        {'Depends': 'foo, bar', 'Conflicts': 'baz'}.  There are defaults for
        most fields.

        Unless add_src is set to False, this will also automatically create a
        source record, based on fields['Source'] and name.  In that case, the
        "Testsuite:" field is set to the testsuite argument.
        '''
        assert (name not in self.added_binaries[unstable])
        self.added_binaries[unstable].add(name)

        fields.setdefault('Architecture', 'all')
        fields.setdefault('Version', '1')
        fields.setdefault('Priority', 'optional')
        fields.setdefault('Section', 'devel')
        fields.setdefault('Description', 'test pkg')
        if fields['Architecture'] == 'all':
            for a in architectures:
                self._append(name, unstable, 'Packages_' + a, fields)
        else:
            self._append(name, unstable, 'Packages_' + fields['Architecture'],
                         fields)

        if add_src:
            src = fields.get('Source', name)
            if src not in self.added_sources[unstable]:
                if srcfields is None:
                    srcfields = {}
                srcfields['Version'] = fields['Version']
                srcfields['Section'] = fields['Section']
                if testsuite:
                    srcfields['Testsuite'] = testsuite
                self.add_src(src, unstable, srcfields)

    def add_src(self, name, unstable, fields={}):
        '''Add a source package to the index file.

        You need to specify at least the package name and in which list to put
        it (unstable==True for unstable/proposed, or False for
        testing/release).  fields specifies all additional entries, which can be
        Version (default: 1), Section (default: devel), Testsuite (default:
        none), and Extra-Source-Only.
        '''
        assert (name not in self.added_sources[unstable])
        self.added_sources[unstable].add(name)
        fields.setdefault('Version', '1')
        fields.setdefault('Section', 'devel')

        self._append(name, unstable, 'Sources', fields)

    def _append(self, name, unstable, file_name, fields):
        with open(os.path.join(self.dirs[unstable], file_name), 'a') as f:
            f.write('''Package: %s
Maintainer: Joe <joe@example.com>
''' % name)

            for k, v in fields.items():
                f.write('%s: %s\n' % (k, v))
            f.write('\n')

    def remove_all(self, unstable):
        '''Remove all added packages'''

        self.added_binaries[unstable] = set()
        self.added_sources[unstable] = set()
        for a in architectures:
            open(os.path.join(self.dirs[unstable], 'Packages_' + a), 'w').close()
        open(os.path.join(self.dirs[unstable], 'Sources'), 'w').close()

class TestBase(unittest.TestCase):

    def setUp(self):
        super(TestBase, self).setUp()
        self.maxDiff = None
        self.data = TestData()
        self.britney = os.path.join(PROJECT_DIR, 'britney.py')
        # create temporary config so that tests can hack it
        self.britney_conf = os.path.join(self.data.path, 'britney.conf')
        shutil.copy(os.path.join(PROJECT_DIR, 'britney.conf'), self.britney_conf)
        assert os.path.exists(self.britney)

    def tearDown(self):
        del self.data

    def run_britney(self, args=[]):
        '''Run britney.

        Assert that it succeeds and does not produce anything on stderr.
        Return (excuses.yaml, excuses.html, britney_out).
        '''
        britney = subprocess.Popen([self.britney, '-v', '-c', self.britney_conf,
                                    '--distribution=ubuntu',
                                    '--series=%s' % self.data.series],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   cwd=self.data.path,
                                   universal_newlines=True)
        (out, err) = britney.communicate()
        self.assertEqual(britney.returncode, 0, out + err)
        self.assertEqual(err, '')

        with open(os.path.join(self.data.path, 'output', self.data.series,
                               'excuses.yaml')) as f:
            yaml = f.read()
        with open(os.path.join(self.data.path, 'output', self.data.series,
                               'excuses.html')) as f:
            html = f.read()

        return (yaml, html, out)

    def create_hint(self, username, content):
        '''Create a hint file for the given username and content'''

        hints_path = os.path.join(
            self.data.path, 'data', self.data.series + '-proposed', 'Hints', username)
        with open(hints_path, 'a') as fd:
            fd.write(content)
            fd.write('\n')

@@ -0,0 +1,170 @@
# Mock a Swift server with autopkgtest results
# Author: Martin Pitt <martin.pitt@ubuntu.com>

import os
import tarfile
import io
import sys
import socket
import time
import tempfile
import json

try:
    from http.server import HTTPServer, BaseHTTPRequestHandler
    from urllib.parse import urlparse, parse_qs
except ImportError:
    # Python 2
    from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
    from urlparse import urlparse, parse_qs


class SwiftHTTPRequestHandler(BaseHTTPRequestHandler):
    '''Mock swift container with autopkgtest results

    This accepts retrieving a particular result.tar (e. g.
    /container/path/result.tar) or listing the container contents
    (/container/?prefix=foo&delimiter=@&marker=foo/bar).
    '''
    # map container -> result.tar path -> (exitcode, testpkg-version[, testinfo])
    results = {}

    def do_GET(self):
        p = urlparse(self.path)
        path_comp = p.path.split('/')
        container = path_comp[1]
        path = '/'.join(path_comp[2:])
        if path:
            self.serve_file(container, path)
        else:
            self.list_container(container, parse_qs(p.query))

    def serve_file(self, container, path):
        if os.path.basename(path) != 'result.tar':
            self.send_error(404, 'File not found (only result.tar supported)')
            return
        try:
            fields = self.results[container][os.path.dirname(path)]
            try:
                (exitcode, pkgver, testinfo) = fields
            except ValueError:
                (exitcode, pkgver) = fields
                testinfo = None
        except KeyError:
            self.send_error(404, 'File not found')
            return

        self.send_response(200)
        self.send_header('Content-type', 'application/octet-stream')
        self.end_headers()

        tar = io.BytesIO()
        with tarfile.open('result.tar', 'w', tar) as results:
            # add exitcode
            contents = ('%i' % exitcode).encode()
            ti = tarfile.TarInfo('exitcode')
            ti.size = len(contents)
            results.addfile(ti, io.BytesIO(contents))
            # add testpkg-version
            if pkgver is not None:
                contents = pkgver.encode()
                ti = tarfile.TarInfo('testpkg-version')
                ti.size = len(contents)
                results.addfile(ti, io.BytesIO(contents))
            # add testinfo.json
            if testinfo:
                contents = json.dumps(testinfo).encode()
                ti = tarfile.TarInfo('testinfo.json')
                ti.size = len(contents)
                results.addfile(ti, io.BytesIO(contents))
        self.wfile.write(tar.getvalue())

    def list_container(self, container, query):
        try:
            objs = set(['%s/result.tar' % r for r in self.results[container]])
        except KeyError:
            self.send_error(401, 'Container does not exist')
            return
        if 'prefix' in query:
            p = query['prefix'][-1]
            objs = set([o for o in objs if o.startswith(p)])
        if 'delimiter' in query:
            d = query['delimiter'][-1]
            # if find() returns a value, we want to include the delimiter, thus
            # bump its result; for "not found" return None
            find_adapter = lambda i: (i >= 0) and (i + 1) or None
            objs = set([o[:find_adapter(o.find(d))] for o in objs])
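            # e.g. with delimiter '@',
            # 'series/i386/l/lightgreen/20150101_100000@/result.tar' collapses
            # to 'series/i386/l/lightgreen/20150101_100000@'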
        if 'marker' in query:
            m = query['marker'][-1]
            objs = set([o for o in objs if o > m])

        self.send_response(objs and 200 or 204)  # 204: "No Content"
        self.send_header('Content-type', 'text/plain')
        self.end_headers()
        self.wfile.write(('\n'.join(sorted(objs)) + '\n').encode('UTF-8'))


class AutoPkgTestSwiftServer:
    def __init__(self, port=8080):
        self.port = port
        self.server_pid = None
        self.log = None

    def __del__(self):
        if self.server_pid:
            self.stop()

    @classmethod
    def set_results(klass, results):
        '''Set served results.

        results is a map: container -> result.tar path ->
            (exitcode, testpkg-version, testinfo)
        '''
        SwiftHTTPRequestHandler.results = results

    def start(self):
        assert self.server_pid is None, 'already started'
        if self.log:
            self.log.close()
        self.log = tempfile.TemporaryFile()
        p = os.fork()
        if p:
            # parent: wait until server starts
            self.server_pid = p
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            while True:
                if s.connect_ex(('127.0.0.1', self.port)) == 0:
                    break
                time.sleep(0.1)
            s.close()
            return

        # child; quiesce logging on stderr
        os.dup2(self.log.fileno(), sys.stderr.fileno())
        srv = HTTPServer(('', self.port), SwiftHTTPRequestHandler)
        srv.serve_forever()
        sys.exit(0)

    def stop(self):
        assert self.server_pid, 'not running'
        os.kill(self.server_pid, 15)
        os.waitpid(self.server_pid, 0)
        self.server_pid = None
        self.log.close()


if __name__ == '__main__':
    srv = AutoPkgTestSwiftServer()
    srv.set_results({'autopkgtest-series': {
        'series/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1'),
        'series/i386/g/green/20150101_100000@': (0, 'green 1', {'custom_environment': ['ADT_TEST_TRIGGERS=green']}),
        'series/i386/l/lightgreen/20150101_100000@': (0, 'lightgreen 1'),
        'series/i386/l/lightgreen/20150101_100101@': (4, 'lightgreen 2'),
        'series/i386/l/lightgreen/20150101_100102@': (0, 'lightgreen 3'),
    }})
    srv.start()
    print('Running on http://localhost:8080/autopkgtest-series')
    print('Press Enter to quit.')
    sys.stdin.readline()
    srv.stop()

File diff suppressed because it is too large