Merge remote-tracking branch 'github/autopkgtest'

ubuntu/rebased
Niels Thykier 7 years ago
commit 92817767b6

@@ -3,7 +3,7 @@ sudo: required
dist: trusty
before_install:
- git clone https://salsa.debian.org/debian/britney2-tests.git britney2-tests
- git clone -b autopkgtest https://salsa.debian.org/debian/britney2-tests.git britney2-tests
- git clone https://salsa.debian.org/debian/britney-tests-live-data.git britney2-tests/live-data
- rm -f .coverage
@@ -24,7 +24,11 @@ install:
script:
# https://docs.codecov.io/docs/testing-with-docker
- ci_env=$(bash <(curl -s https://codecov.io/env)) ; docker run $ci_env britney /bin/sh -c "export CI=true ; ci/run-everything-and-upload-to-codecov.io.sh"
- mkdir shared
- docker run -v "$PWD/shared:/shared" britney /bin/sh -c "export CI=true ; ci/run-everything-and-upload-to-codecov.io.sh"
after-success:
- bash <(curl -s https://codecov.io/bash)
#notifications:
# email: false

@@ -8,3 +8,4 @@ Requirements:
* Python APT/DPKG bindings (aptitude install python3-apt)
* Python YAML library (aptitude install python3-yaml)
* Python nose tests, for testing (aptitude install python3-nose)
* Python AMQP library (aptitude install python3-amqplib)

@@ -51,6 +51,8 @@ MINDAYS_HIGH = 2
MINDAYS_CRITICAL = 0
MINDAYS_EMERGENCY = 0
DEFAULT_URGENCY = medium
NO_PENALTIES = high critical emergency
BOUNTY_MIN_AGE = 2
HINTSDIR = /srv/release.debian.org/britney/hints
@@ -79,3 +81,24 @@ HINTS_AUTO-REMOVALS = remove
SMOOTH_UPDATES = libs oldlibs
IGNORE_CRUFT = 1
ADT_ENABLE = no
ADT_ARCHES = amd64
ADT_AMQP = file:///srv/release.debian.org/britney/var/data-b2/output/debci.input
# space separated list of PPAs to add for test requests and for polling results;
# the *last* one determines the swift container name
ADT_PPAS =
# set this to the path of a (r/o) autopkgtest-results.cache for running many parallel
# britney instances for PPAs without updating the cache
ADT_SHARED_RESULTS_CACHE =
# Swift base URL with the results (must be publicly readable and browsable)
# or file location if results are pre-fetched
ADT_SWIFT_URL = file:///srv/release.debian.org/britney/state/debci.json
# Base URL for autopkgtest site, used for links in the excuses
ADT_CI_URL = https://ci.debian.net/
# Autopkgtest results can be used to influence the aging
ADT_REGRESSION_PENALTY = 10
ADT_SUCCESS_BOUNTY = 3
ADT_BASELINE = reference
ADT_RETRY_URL_MECH = run_id
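ADT_AMQP above is dispatched on its URL scheme; a minimal sketch of that dispatch, mirroring the AutopkgtestPolicy.initialise change further down in this commit (the path is illustrative):
amqp_url = 'file:///srv/release.debian.org/britney/var/data-b2/output/debci.input'
if amqp_url.startswith('amqp://'):
    pass  # production: connect to the broker via amqplib and publish requests
elif amqp_url.startswith('file://'):
    amqp_file = amqp_url[7:]  # Debian/test setups: requests are appended here
else:
    raise RuntimeError('Unknown ADT_AMQP scheme')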

@@ -57,6 +57,14 @@ MINDAYS_EMERGENCY = 0
# The urgency to assume if none is provided or it is not defined with
# a MINDAYS_$NAME config above
DEFAULT_URGENCY = medium
# Don't apply penalties (e.g. from autopkgtest in bounty/penalty mode) for the
# following urgencies
NO_PENALTIES = high critical emergency
# Lower limit of the age, so accumulated bounties don't let a package migrate
# too quickly (urgency still takes precedence, of course)
# Can be given an urgency name
#BOUNTY_MIN_AGE = high
BOUNTY_MIN_AGE = 2
# Directory where hints files are stored
HINTSDIR = /path/to/britney/hints-dir
@@ -99,3 +107,31 @@ SMOOTH_UPDATES = libs oldlibs
# Whether old binaries in the source distribution should be
# considered as a blocker for migration.
IGNORE_CRUFT = 1
# Enable the autopkgtest policy
ADT_ENABLE = no
# Define on which architectures tests should be executed and taken into account
ADT_ARCHES = amd64
# AMQP url or request file for the testing framework
#ADT_AMQP = amqp://test_request:password@127.0.0.1
ADT_AMQP = file:///path/to/britney/debci.input
# space separated list of PPAs to add for test requests and for polling results;
# the *last* one determines the swift container name
ADT_PPAS =
# set this to the path of a (r/o) autopkgtest-results.cache for running many parallel
# britney instances for PPAs without updating the cache
ADT_SHARED_RESULTS_CACHE =
# Swift base URL with the results (must be publicly readable and browsable)
# or file location if results are pre-fetched
#ADT_SWIFT_URL = https://example.com/some/url
ADT_SWIFT_URL = file:///path/to/britney/state/debci.json
# Base URL for autopkgtest site, used for links in the excuses
ADT_CI_URL = https://example.com/
# Enable the huge queue for packages that trigger vast numbers of tests, so
# that they do not starve the regular queue
#ADT_HUGE = 20
# Autopkgtest results can be used to influence the aging, leave
# ADT_REGRESSION_PENALTY empty to have regressions block migration
ADT_REGRESSION_PENALTY = 10
ADT_SUCCESS_BOUNTY = 3
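These two settings interact with the verdict as in this simplified sketch, based on AutopkgtestPolicy.apply_policy_impl later in the commit (plain strings stand in for the PolicyVerdict enum): when ADT_REGRESSION_PENALTY is set, a regression is converted into an age penalty and the verdict is forced to pass; when it is left empty, the rejection stands and blocks migration.
def settle_verdict(success_bounty, regression_penalty, verdict,
                   eligible_for_bounty, excuse):
    if success_bounty and verdict == 'PASS' and eligible_for_bounty:
        excuse.add_bounty('autopkgtest', int(success_bounty))
    if regression_penalty and verdict in ('REJECTED_PERMANENTLY',
                                          'REJECTED_TEMPORARILY'):
        excuse.add_penalty('autopkgtest', int(regression_penalty))
        verdict = 'PASS'  # penalties never block, they only add age
    return verdict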

@@ -200,6 +200,7 @@ from britney2.installability.builder import build_installability_tester
from britney2.migrationitem import MigrationItem
from britney2.policies import PolicyVerdict
from britney2.policies.policy import AgePolicy, RCBugPolicy, PiupartsPolicy, BuildDependsPolicy
from britney2.policies.autopkgtest import AutopkgtestPolicy
from britney2.utils import (old_libraries_format, undo_changes,
compute_reverse_tree, possibly_compressed,
read_nuninst, write_nuninst, write_heidi,
@@ -325,6 +326,14 @@ class Britney(object):
self.binaries['tpu'] = {}
self.binaries['pu'] = {}
# compute inverse Testsuite-Triggers: map, unifying all series
self.logger.info('Building inverse testsuite_triggers map')
self.testsuite_triggers = {}
for suitemap in self.sources.values():
for src, data in suitemap.items():
for trigger in data.testsuite_triggers:
self.testsuite_triggers.setdefault(trigger, set()).add(src)
self.binaries['unstable'] = self.read_binaries(self.suite_info['unstable'].path, "unstable", self.options.architectures)
for suite in ('tpu', 'pu'):
if suite in self.suite_info:
@@ -443,6 +452,8 @@ class Britney(object):
help="Compute which packages can migrate (the default)")
parser.add_option("", "--no-compute-migrations", action="store_false", dest="compute_migrations",
help="Do not compute which packages can migrate.")
parser.add_option("", "--series", action="store", dest="series", default='testing',
help="set distribution series name")
(self.options, self.args) = parser.parse_args()
if self.options.verbose:
@@ -548,9 +559,14 @@ class Britney(object):
self.options.ignore_cruft == "0":
self.options.ignore_cruft = False
self.policies.append(AgePolicy(self.options, self.suite_info, MINDAYS))
if not hasattr(self.options, 'adt_retry_url_mech'):
self.options.adt_retry_url_mech = ''
self.policies.append(RCBugPolicy(self.options, self.suite_info))
self.policies.append(PiupartsPolicy(self.options, self.suite_info))
if getattr(self.options, 'adt_enable') == 'yes':
self.policies.append(AutopkgtestPolicy(self.options, self.suite_info))
self.policies.append(AgePolicy(self.options, self.suite_info, MINDAYS))
self.policies.append(BuildDependsPolicy(self.options, self.suite_info))
for policy in self.policies:
@@ -597,7 +613,9 @@ class Britney(object):
[],
None,
True,
None
None,
[],
[],
)
self.sources['testing'][pkg_name] = src_data
@@ -673,6 +691,8 @@ class Britney(object):
None,
True,
None,
[],
[],
)
self.sources['testing'][pkg_name] = src_data
self.sources['unstable'][pkg_name] = src_data
@@ -874,7 +894,7 @@ class Britney(object):
srcdist[source].binaries.append(pkg_id)
# if the source package doesn't exist, create a fake one
else:
srcdist[source] = SourcePackage(source_version, 'faux', [pkg_id], None, True, None)
srcdist[source] = SourcePackage(source_version, 'faux', [pkg_id], None, True, None, [], [])
# add the resulting dictionary to the package list
packages[pkg] = dpkg
@@ -1073,6 +1093,7 @@ class Britney(object):
if not packages:
excuse.addhtml("%s/%s unsatisfiable Depends: %s" % (pkg, arch, block_txt.strip()))
excuse.addreason("depends")
excuse.add_unsatisfiable_on_arch(arch)
if arch not in self.options.break_arches:
is_all_ok = False
continue
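The inverse Testsuite-Triggers map built above can be illustrated on toy data (package names borrowed from the test suite later in this commit; the declared triggers are hypothetical):
# each source declares which packages' uploads should trigger its tests
declared_triggers = {
    'lightgreen': ['libgreen1'],
    'darkgreen': ['libgreen1'],
}
testsuite_triggers = {}
for src, triggers in declared_triggers.items():
    for trigger in triggers:
        testsuite_triggers.setdefault(trigger, set()).add(src)
# an upload of libgreen1 now triggers both test suites
assert testsuite_triggers == {'libgreen1': {'lightgreen', 'darkgreen'}}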

@@ -9,15 +9,17 @@ SuiteInfo = namedtuple('SuiteInfo', [
class SourcePackage(object):
__slots__ = ['version', 'section', 'binaries', 'maintainer', 'is_fakesrc', 'build_deps_arch']
__slots__ = ['version', 'section', 'binaries', 'maintainer', 'is_fakesrc', 'build_deps_arch', 'testsuite', 'testsuite_triggers']
def __init__(self, version, section, binaries, maintainer, is_fakesrc, build_deps_arch):
def __init__(self, version, section, binaries, maintainer, is_fakesrc, build_deps_arch, testsuite, testsuite_triggers):
self.version = version
self.section = section
self.binaries = binaries
self.maintainer = maintainer
self.is_fakesrc = is_fakesrc
self.build_deps_arch = build_deps_arch
self.testsuite = testsuite
self.testsuite_triggers = testsuite_triggers
def __getitem__(self, item):
return getattr(self, self.__slots__[item])
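The two new fields are filled in from the Sources stanza; a small sketch of that parsing, mirroring the read_sources_file() change further down (the stanza itself is made up):
stanza = {'Testsuite': 'autopkgtest', 'Testsuite-Triggers': 'libgreen1, libc6'}
testsuite = stanza.get('Testsuite', '').split()
# commas are dropped so the triggers become a plain list of source names
testsuite_triggers = stanza.get('Testsuite-Triggers', '').replace(',', '').split()
assert testsuite == ['autopkgtest']
assert testsuite_triggers == ['libgreen1', 'libc6']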

@@ -80,6 +80,7 @@ class Excuse(object):
self.arch_build_deps = {}
self.sane_deps = []
self.break_deps = []
self.unsatisfiable_on_archs = []
self.newbugs = set()
self.oldbugs = set()
self.reason = {}
@@ -89,6 +90,9 @@ class Excuse(object):
self.old_binaries = defaultdict(set)
self.policy_info = {}
self.bounty = {}
self.penalty = {}
def sortkey(self):
if self.daysold is None:
return (-1, self.name)
@@ -137,6 +141,11 @@ class Excuse(object):
if (name, arch) not in self.break_deps:
self.break_deps.append( (name, arch) )
def add_unsatisfiable_on_arch(self, arch):
"""Add an arch that has unsatisfiable dependencies"""
if arch not in self.unsatisfiable_on_archs:
self.unsatisfiable_on_archs.append(arch)
def add_arch_build_dep(self, name, arch):
if name not in self.arch_build_deps:
self.arch_build_deps[name] = []
@@ -316,3 +325,10 @@ class Excuse(object):
excusedata["is-candidate"] = self.is_valid
return excusedata
def add_bounty(self, policy, bounty):
""""adding bounty"""
self.bounty[policy] = bounty
def add_penalty(self, policy, penalty):
""""adding penalty"""
self.penalty[policy] = penalty
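A self-contained illustration of the new Excuse bookkeeping (a stand-in class; the real methods are above):
class MiniExcuse:
    def __init__(self):
        self.unsatisfiable_on_archs = []
        self.bounty = {}
    def add_unsatisfiable_on_arch(self, arch):
        if arch not in self.unsatisfiable_on_archs:  # keep the list unique
            self.unsatisfiable_on_archs.append(arch)
    def add_bounty(self, policy, bounty):
        self.bounty[policy] = bounty

e = MiniExcuse()
e.add_unsatisfiable_on_arch('armhf')
e.add_unsatisfiable_on_arch('armhf')  # duplicate is ignored
e.add_bounty('autopkgtest', 3)
assert e.unsatisfiable_on_archs == ['armhf'] and e.bounty == {'autopkgtest': 3}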

@@ -0,0 +1,840 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2013 - 2016 Canonical Ltd.
# Authors:
# Colin Watson <cjwatson@ubuntu.com>
# Jean-Baptiste Lallement <jean-baptiste.lallement@canonical.com>
# Martin Pitt <martin.pitt@ubuntu.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import collections
import os
import json
import tarfile
import io
import re
import sys
import urllib.parse
from urllib.request import urlopen
import apt_pkg
import britney2.hints
from britney2.policies.policy import BasePolicy, PolicyVerdict
EXCUSES_LABELS = {
"PASS": '<span style="background:#87d96c">Pass</span>',
"FAIL": '<span style="background:#ff6666">Failed</span>',
"ALWAYSFAIL": '<span style="background:#e5c545">Always failed</span>',
"REGRESSION": '<span style="background:#ff6666">Regression</span>',
"IGNORE-FAIL": '<span style="background:#e5c545">Ignored failure</span>',
"RUNNING": '<span style="background:#99ddff">Test in progress</span>',
"RUNNING-ALWAYSFAIL": '<span style="background:#99ddff">Test in progress (always failed)</span>',
}
REF_TRIG = 'migration-reference/0'
def srchash(src):
'''archive hash prefix for source package'''
if src.startswith('lib'):
return src[:4]
else:
return src[0]
class AutopkgtestPolicy(BasePolicy):
"""autopkgtest regression policy for source migrations
Run autopkgtests for the excuse and all of its reverse dependencies, and
reject the upload if any of those regress.
"""
def __init__(self, options, suite_info):
super().__init__('autopkgtest', options, suite_info, {'unstable'})
# tests requested in this and previous runs
# trigger -> src -> [arch]
self.pending_tests = None
self.pending_tests_file = os.path.join(self.options.state_dir, 'autopkgtest-pending.json')
# results map: trigger -> src -> arch -> [passed, version, run_id]
# - trigger is "source/version" of an unstable package that triggered
# this test run.
# - "passed" is a bool
# - "version" is the package version of "src" of that test
# - "run_id" is an opaque ID that identifies a particular test run for
# a given src/arch. It's usually a time stamp like "20150120_125959".
# This is also used for tracking the latest seen time stamp for
# requesting only newer results.
self.test_results = {}
if self.options.adt_shared_results_cache:
self.results_cache_file = self.options.adt_shared_results_cache
else:
self.results_cache_file = os.path.join(self.options.state_dir, 'autopkgtest-results.cache')
try:
self.options.adt_ppas = self.options.adt_ppas.strip().split()
except AttributeError:
self.options.adt_ppas = []
self.swift_container = 'autopkgtest-' + options.series
if self.options.adt_ppas:
self.swift_container += '-' + options.adt_ppas[-1].replace('/', '-')
# restrict adt_arches to architectures we actually run for
self.adt_arches = []
for arch in self.options.adt_arches.split():
if arch in self.options.architectures:
self.adt_arches.append(arch)
else:
self.logger.info("Ignoring ADT_ARCHES %s as it is not in architectures list", arch)
def register_hints(self, hint_parser):
hint_parser.register_hint_type('force-badtest', britney2.hints.split_into_one_hint_per_package)
hint_parser.register_hint_type('force-skiptest', britney2.hints.split_into_one_hint_per_package)
def initialise(self, britney):
super().initialise(britney)
os.makedirs(self.options.state_dir, exist_ok=True)
self.read_pending_tests()
# read the cached results that we collected so far
if os.path.exists(self.results_cache_file):
with open(self.results_cache_file) as f:
self.test_results = json.load(f)
self.logger.info('Read previous results from %s', self.results_cache_file)
else:
self.logger.info('%s does not exist, re-downloading all results from swift', self.results_cache_file)
# read in the new results
if self.options.adt_swift_url.startswith('file://'):
debci_file = self.options.adt_swift_url[7:]
if os.path.exists(debci_file):
with open(debci_file) as f:
test_results = json.load(f)
self.logger.info('Read new results from %s', debci_file)
# With debci, pending tests are determined from the debci file
self.pending_tests = {}
for res in test_results['results']:
# Blacklisted tests don't get a version
if res['version'] is None:
res['version'] = 'blacklisted'
(trigger, src, arch, ver, status, stamp) = (res['trigger'], res['package'], res['arch'], res['version'], res['status'], str(res['run_id']))
if trigger is None:
# not requested for this policy, so ignore
continue
if status is None:
# still running => pending
arch_list = self.pending_tests.setdefault(trigger, {}).setdefault(src, [])
if arch not in arch_list:
self.logger.info('Pending autopkgtest %s on %s to verify %s', src, arch, trigger)
arch_list.append(arch)
arch_list.sort()
elif status == 'tmpfail':
# let's see if we still need it
continue
else:
self.logger.info('Results %s %s %s added', src, trigger, status)
self.add_trigger_to_results(trigger, src, ver, arch, stamp, status == 'pass')
else:
self.logger.info('%s does not exist, no new data will be processed', debci_file)
# we need sources, binaries, and installability tester, so for now
# remember the whole britney object
self.britney = britney
# Initialize AMQP connection
self.amqp_channel = None
self.amqp_file = None
if self.options.dry_run:
return
amqp_url = self.options.adt_amqp
if amqp_url.startswith('amqp://'):
import amqplib.client_0_8 as amqp
# depending on the setup we connect to an AMQP server
creds = urllib.parse.urlsplit(amqp_url, allow_fragments=False)
self.amqp_con = amqp.Connection(creds.hostname, userid=creds.username,
password=creds.password)
self.amqp_channel = self.amqp_con.channel()
self.logger.info('Connected to AMQP server')
elif amqp_url.startswith('file://'):
# or in Debian and in testing mode, adt_amqp will be a file:// URL
self.amqp_file = amqp_url[7:]
else:
raise RuntimeError('Unknown ADT_AMQP scheme %s' % amqp_url.split(':', 1)[0])
def save_state(self, britney):
super().save_state(britney)
# update the results on-disk cache, unless we are using a r/o shared one
if not self.options.adt_shared_results_cache:
self.logger.info('Updating results cache')
with open(self.results_cache_file + '.new', 'w') as f:
json.dump(self.test_results, f, indent=2)
os.rename(self.results_cache_file + '.new', self.results_cache_file)
# update the pending tests on-disk cache
self.logger.info('Updating pending requested tests in %s', self.pending_tests_file)
with open(self.pending_tests_file + '.new', 'w') as f:
json.dump(self.pending_tests, f, indent=2)
os.rename(self.pending_tests_file + '.new', self.pending_tests_file)
def apply_policy_impl(self, tests_info, suite, source_name, source_data_tdist, source_data_srcdist, excuse):
# initialize
verdict = PolicyVerdict.PASS
eligible_for_bounty = False
# skip/delay autopkgtests until new package is built somewhere
binaries_info = self.britney.sources[suite][source_name]
if not binaries_info.binaries:
self.logger.info("%s hasn't been built anywhere, skipping autopkgtest policy", excuse.name)
verdict = PolicyVerdict.REJECTED_TEMPORARILY
if 'all' in excuse.missing_builds:
self.logger.info("%s hasn't been built for arch:all, skipping autopkgtest policy", source_name)
verdict = PolicyVerdict.REJECTED_TEMPORARILY
if verdict == PolicyVerdict.PASS:
self.logger.info('Checking autopkgtests for %s', source_name)
trigger = source_name + '/' + source_data_srcdist.version
# build a (testsrc, testver) → arch → (status, log_url) map; we trigger/check test
# results per architecture for technical/efficiency reasons, but we
# want to evaluate and present the results by tested source package
# first
pkg_arch_result = collections.defaultdict(dict)
for arch in self.adt_arches:
if arch in excuse.missing_builds:
verdict = PolicyVerdict.REJECTED_TEMPORARILY
self.logger.info("%s hasn't been built on arch %s, delay autopkgtest there", source_name, arch)
elif arch in excuse.unsatisfiable_on_archs:
verdict = PolicyVerdict.REJECTED_TEMPORARILY
self.logger.info('%s is uninstallable on arch %s, delay autopkgtest there', source_name, arch)
else:
# request tests (unless they were already requested earlier or have a result)
tests = self.tests_for_source(source_name, source_data_srcdist.version, arch)
is_huge = False
try:
is_huge = len(tests) > int(self.options.adt_huge)
except AttributeError:
pass
for (testsrc, testver) in tests:
self.pkg_test_request(testsrc, arch, trigger, huge=is_huge)
(result, real_ver, run_id, url) = self.pkg_test_result(testsrc, testver, arch, trigger)
pkg_arch_result[(testsrc, real_ver)][arch] = (result, run_id, url)
# add test result details to Excuse
cloud_url = self.options.adt_ci_url + "packages/%(h)s/%(s)s/%(r)s/%(a)s"
for (testsrc, testver) in sorted(pkg_arch_result):
arch_results = pkg_arch_result[(testsrc, testver)]
r = {v[0] for v in arch_results.values()}
if 'REGRESSION' in r:
verdict = PolicyVerdict.REJECTED_PERMANENTLY
elif 'RUNNING' in r and verdict == PolicyVerdict.PASS:
verdict = PolicyVerdict.REJECTED_TEMPORARILY
# skip version if still running on all arches
if not r - {'RUNNING', 'RUNNING-ALWAYSFAIL'}:
testver = None
# A source package is eligible for the bounty if it has tests
# of its own that pass on all tested architectures.
if testsrc == source_name and r == {'PASS'}:
eligible_for_bounty = True
if testver:
testname = '%s/%s' % (testsrc, testver)
else:
testname = testsrc
html_archmsg = []
for arch in sorted(arch_results):
(status, run_id, log_url) = arch_results[arch]
artifact_url = None
retry_url = None
history_url = None
if self.options.adt_ppas:
if log_url.endswith('log.gz'):
artifact_url = log_url.replace('log.gz', 'artifacts.tar.gz')
else:
history_url = cloud_url % {
'h': srchash(testsrc), 's': testsrc,
'r': self.options.series, 'a': arch}
if status == 'REGRESSION':
if self.options.adt_retry_url_mech == 'run_id':
retry_url = self.options.adt_ci_url + 'api/v1/retry/' + run_id
else:
retry_url = self.options.adt_ci_url + 'request.cgi?' + \
urllib.parse.urlencode([('release', self.options.series),
('arch', arch),
('package', testsrc),
('trigger', trigger)] +
[('ppa', p) for p in self.options.adt_ppas])
tests_info.setdefault(testname, {})[arch] = \
[status, log_url, history_url, artifact_url, retry_url]
# render HTML snippet for testsrc entry for current arch
if history_url:
message = '<a href="%s">%s</a>' % (history_url, arch)
else:
message = arch
message += ': <a href="%s">%s</a>' % (log_url, EXCUSES_LABELS[status])
if retry_url:
message += ' <a href="%s" style="text-decoration: none;">♻ </a> ' % retry_url
if artifact_url:
message += ' <a href="%s">[artifacts]</a>' % artifact_url
html_archmsg.append(message)
# render HTML line for testsrc entry
excuse.addhtml("autopkgtest for %s: %s" % (testname, ', '.join(html_archmsg)))
if verdict != PolicyVerdict.PASS:
# check for force-skiptest hint
hints = self.britney.hints.search('force-skiptest', package=source_name, version=source_data_srcdist.version)
if hints:
excuse.addreason('skiptest')
excuse.addhtml("Should wait for tests relating to %s %s, but forced by %s" %
(source_name, source_data_srcdist.version, hints[0].user))
verdict = PolicyVerdict.PASS_HINTED
else:
excuse.addreason('autopkgtest')
if self.options.adt_success_bounty and verdict == PolicyVerdict.PASS and eligible_for_bounty:
excuse.add_bounty('autopkgtest', int(self.options.adt_success_bounty))
if self.options.adt_regression_penalty and \
verdict in {PolicyVerdict.REJECTED_PERMANENTLY, PolicyVerdict.REJECTED_TEMPORARILY}:
excuse.add_penalty('autopkgtest', int(self.options.adt_regression_penalty))
# In case we give penalties instead of blocking, we must always pass
verdict = PolicyVerdict.PASS
return verdict
#
# helper functions
#
@classmethod
def has_autodep8(kls, srcinfo, binaries):
'''Check if package is covered by autodep8
srcinfo is an item from self.britney.sources
binaries is self.britney.binaries['unstable'][arch][0]
'''
# autodep8?
for t in srcinfo.testsuite:
if t.startswith('autopkgtest-pkg'):
return True
# DKMS: some binary depends on "dkms"
for pkg_id in srcinfo.binaries:
try:
bininfo = binaries[pkg_id.package_name]
except KeyError:
continue
if 'dkms' in (bininfo.depends or ''):
return True
return False
def tests_for_source(self, src, ver, arch):
'''Iterate over all tests that should be run for given source and arch'''
sources_info = self.britney.sources['testing']
binaries_info = self.britney.binaries['testing'][arch][0]
reported_pkgs = set()
tests = []
# gcc-N triggers tons of tests via libgcc1, but this is mostly in vain:
# gcc already tests itself during build, and it is being used from
# -proposed, so holding it back on a dozen unrelated test failures
# serves no purpose. Just check some key packages which actually use
# gcc during the test, and libreoffice as an example for a libgcc user.
if src.startswith('gcc-'):
if re.match(r'gcc-\d$', src):
for test in ['binutils', 'fglrx-installer', 'libreoffice', 'linux']:
try:
tests.append((test, sources_info[test].version))
except KeyError:
# no package in that series? *shrug*, then not (mostly for testing)
pass
return tests
else:
# for other compilers such as gcc-snapshot etc. we don't need
# to trigger anything
return []
# Debian doesn't have linux-meta, but Ubuntu does
# for linux themselves we don't want to trigger tests -- these should
# all come from linux-meta*. A new kernel ABI without a corresponding
# -meta won't be installed and thus we can't sensibly run tests against
# it.
if src.startswith('linux') and src.replace('linux', 'linux-meta') in sources_info:
return []
# we want to test the package itself, if it still has a test in unstable
srcinfo = self.britney.sources['unstable'][src]
if 'autopkgtest' in srcinfo.testsuite or self.has_autodep8(srcinfo, binaries_info):
reported_pkgs.add(src)
tests.append((src, ver))
extra_bins = []
# Debian doesn't have linux-meta, but Ubuntu does
# Hack: For new kernels trigger all DKMS packages by pretending that
# linux-meta* builds a "dkms" binary as well. With that we ensure that we
# don't regress DKMS drivers with new kernel versions.
if src.startswith('linux-meta'):
# does this have any image on this arch?
for pkg_id in srcinfo.binaries:
if pkg_id.architecture == arch and '-image' in pkg_id.package_name:
try:
extra_bins.append(binaries_info['dkms'].pkg_id)
except KeyError:
pass
# plus all direct reverse dependencies and test triggers of its
# binaries which have an autopkgtest
for binary in srcinfo.binaries + extra_bins:
rdeps = self.britney._inst_tester.reverse_dependencies_of(binary)
for rdep in rdeps:
try:
rdep_src = binaries_info[rdep.package_name].source
# Don't re-trigger the package itself here; this should
# have been done above if the package still continues to
# have an autopkgtest in unstable.
if rdep_src == src:
continue
except KeyError:
continue
rdep_src_info = sources_info[rdep_src]
if 'autopkgtest' in rdep_src_info.testsuite or self.has_autodep8(rdep_src_info, binaries_info):
if rdep_src not in reported_pkgs:
tests.append((rdep_src, rdep_src_info.version))
reported_pkgs.add(rdep_src)
for tdep_src in self.britney.testsuite_triggers.get(binary.package_name, set()):
if tdep_src not in reported_pkgs:
try:
tdep_src_info = sources_info[tdep_src]
except KeyError:
continue
if 'autopkgtest' in tdep_src_info.testsuite or self.has_autodep8(tdep_src_info, binaries_info):
for pkg_id in tdep_src_info.binaries:
if pkg_id.architecture == arch:
tests.append((tdep_src, tdep_src_info.version))
reported_pkgs.add(tdep_src)
break
tests.sort(key=lambda s_v: s_v[0])
return tests
def read_pending_tests(self):
'''Read pending test requests from previous britney runs
Initialize self.pending_tests with that data.
'''
assert self.pending_tests is None, 'already initialized'
if not os.path.exists(self.pending_tests_file):
self.logger.info('No %s, starting with no pending tests', self.pending_tests_file)
self.pending_tests = {}
return
with open(self.pending_tests_file) as f:
self.pending_tests = json.load(f)
self.logger.info('Read pending requested tests from %s: %s', self.pending_tests_file, self.pending_tests)
def latest_run_for_package(self, src, arch):
'''Return latest run ID for src on arch'''
# this requires iterating over all triggers and thus is expensive;
# cache the results
try:
return self.latest_run_for_package._cache[src][arch]
except KeyError:
pass
latest_run_id = ''
for srcmap in self.test_results.values():
try:
run_id = srcmap[src][arch][2]
except KeyError:
continue
if run_id > latest_run_id:
latest_run_id = run_id
self.latest_run_for_package._cache[src][arch] = latest_run_id
return latest_run_id
latest_run_for_package._cache = collections.defaultdict(dict)
def fetch_swift_results(self, swift_url, src, arch):
'''Download new results for source package/arch from swift'''
# Download results for one particular src/arch at most once in every
# run, as this is expensive
done_entry = src + '/' + arch
if done_entry in self.fetch_swift_results._done:
return
self.fetch_swift_results._done.add(done_entry)
# prepare query: get all runs with a timestamp later than the latest
# run_id for this package/arch; '@' is at the end of each run id, to
# mark the end of a test run directory path
# example: <autopkgtest-wily>wily/amd64/libp/libpng/20150630_054517@/result.tar
query = {'delimiter': '@',
'prefix': '%s/%s/%s/%s/' % (self.options.series, arch, srchash(src), src)}
# determine latest run_id from results
if not self.options.adt_shared_results_cache:
latest_run_id = self.latest_run_for_package(src, arch)
if latest_run_id:
query['marker'] = query['prefix'] + latest_run_id
# request new results from swift
url = os.path.join(swift_url, self.swift_container)
url += '?' + urllib.parse.urlencode(query)
f = None
try:
f = urlopen(url, timeout=30)
if f.getcode() == 200:
result_paths = f.read().decode().strip().splitlines()
elif f.getcode() == 204: # No content
result_paths = []
else:
# we should not ever end up here as we expect a HTTPError in
# other cases; e. g. 3XX is something that tells us to adjust
# our URLs, so fail hard on those
raise NotImplementedError('fetch_swift_results(%s): cannot handle HTTP code %i' %
(url, f.getcode()))
except IOError as e:
# 401 "Unauthorized" is swift's way of saying "container does not exist"
if hasattr(e, 'code') and e.code == 401:
self.logger.info('fetch_swift_results: %s does not exist yet or is inaccessible', url)
return
# Other status codes are usually a transient
# network/infrastructure failure. Ignoring this can lead to
# re-requesting tests which we already have results for, so
# fail hard on this and let the next run retry.
self.logger.error('Failure to fetch swift results from %s: %s', url, str(e))
sys.exit(1)
finally:
if f is not None:
f.close()
for p in result_paths:
self.fetch_one_result(
os.path.join(swift_url, self.swift_container, p, 'result.tar'), src, arch)
fetch_swift_results._done = set()
def fetch_one_result(self, url, src, arch):
'''Download one result URL for source/arch
Remove matching pending_tests entries.
'''
f = None
try:
f = urlopen(url, timeout=30)
if f.getcode() == 200:
tar_bytes = io.BytesIO(f.read())
else:
raise NotImplementedError('fetch_one_result(%s): cannot handle HTTP code %i' %
(url, f.getcode()))
except IOError as e:
self.logger.error('Failure to fetch %s: %s', url, str(e))
# we tolerate "not found" (something went wrong on uploading the
# result), but other things indicate infrastructure problems
if hasattr(e, 'code') and e.code == 404:
return
sys.exit(1)
finally:
if f is not None:
f.close()
try:
with tarfile.open(None, 'r', tar_bytes) as tar:
exitcode = int(tar.extractfile('exitcode').read().strip())
srcver = tar.extractfile('testpkg-version').read().decode().strip()
(ressrc, ver) = srcver.split()
testinfo = json.loads(tar.extractfile('testinfo.json').read().decode())
except (KeyError, ValueError, tarfile.TarError) as e:
self.logger.error('%s is damaged, ignoring: %s', url, str(e))
# ignore this; this will leave an orphaned request in autopkgtest-pending.json
# and thus require manual retries after fixing the tmpfail, but we
# can't just blindly attribute it to some pending test.
return
if src != ressrc:
self.logger.error('%s is a result for package %s, but expected package %s', url, ressrc, src)
return
# parse recorded triggers in test result
for e in testinfo.get('custom_environment', []):
if e.startswith('ADT_TEST_TRIGGERS='):
result_triggers = [i for i in e.split('=', 1)[1].split() if '/' in i]
break
else:
self.logger.error('%s result has no ADT_TEST_TRIGGERS, ignoring', url)
return
stamp = os.path.basename(os.path.dirname(url))
# allow some skipped tests, but nothing else
passed = exitcode in [0, 2]
self.logger.info('Fetched test result for %s/%s/%s %s (triggers: %s): %s',
src, ver, arch, stamp, result_triggers, passed and 'pass' or 'fail')
# remove matching test requests
for trigger in result_triggers:
self.remove_from_pending(trigger, src, arch)
# add this result
for trigger in result_triggers:
self.add_trigger_to_results(trigger, src, ver, arch, stamp, passed)
def remove_from_pending(self, trigger, src, arch):
try:
arch_list = self.pending_tests[trigger][src]
arch_list.remove(arch)
if not arch_list:
del self.pending_tests[trigger][src]
if not self.pending_tests[trigger]:
del self.pending_tests[trigger]
self.logger.info('-> matches pending request %s/%s for trigger %s', src, arch, trigger)
except (KeyError, ValueError):
self.logger.info('-> does not match any pending request for %s/%s', src, arch)
def add_trigger_to_results(self, trigger, src, ver, arch, stamp, passed):
# If a test runs because of its own package (newer version), ensure
# that we got a new enough version; FIXME: this should be done more
# generically by matching against testpkg-versions
(trigsrc, trigver) = trigger.split('/', 1)
if trigsrc == src and apt_pkg.version_compare(ver, trigver) < 0:
self.logger.error('test trigger %s, but run for older version %s, ignoring', trigger, ver)
return
result = self.test_results.setdefault(trigger, {}).setdefault(
src, {}).setdefault(arch, [False, None, ''])
# don't clobber existing passed results with failures from re-runs
# except for reference updates
if passed or not result[0] or (self.options.adt_baseline == 'reference' and trigger == REF_TRIG):
result[0] = passed
result[1] = ver
result[2] = stamp
if self.options.adt_baseline == 'reference' and trigsrc != src:
self.test_results.setdefault(REF_TRIG, {}).setdefault(
src, {}).setdefault(arch, [passed, ver, stamp])
def send_test_request(self, src, arch, trigger, huge=False):
'''Send out AMQP request for testing src/arch for trigger
If huge is true, then the request will be put into the -huge instead of
normal queue.
'''
if self.options.dry_run:
return
params = {'triggers': [trigger]}
if self.options.adt_ppas:
params['ppas'] = self.options.adt_ppas
qname = 'debci-ppa-%s-%s' % (self.options.series, arch)
elif huge:
qname = 'debci-huge-%s-%s' % (self.options.series, arch)
else:
qname = 'debci-%s-%s' % (self.options.series, arch)
params = json.dumps(params)
if self.amqp_channel:
self.amqp_channel.basic_publish(amqp.Message(src + '\n' + params), routing_key=qname)
else:
assert self.amqp_file
with open(self.amqp_file, 'a') as f:
f.write('%s:%s %s\n' % (qname, src, params))
def pkg_test_request(self, src, arch, trigger, huge=False):
'''Request one package test for one particular trigger
trigger is "pkgname/version" of the package that triggers the testing
of src. If huge is true, then the request will be put into the -huge
instead of normal queue.
This will only be done if that test wasn't already requested in a
previous run (i. e. it is not already in self.pending_tests) and there
is no result for it yet. This ensures that current results for this
package are downloaded before any test is requested.
'''
# Don't re-request if we already have a result
try:
passed = self.test_results[trigger][src][arch][0]
if self.options.adt_swift_url.startswith('file://'):
return
if passed:
self.logger.info('%s/%s triggered by %s already passed', src, arch, trigger)
return
self.logger.info('Checking for new results for failed %s/%s for trigger %s', src, arch, trigger)
raise KeyError # fall through
except KeyError:
# Without swift we don't expect new results
if not self.options.adt_swift_url.startswith('file://'):
self.fetch_swift_results(self.options.adt_swift_url, src, arch)
# do we have one now?
try:
self.test_results[trigger][src][arch]
return
except KeyError:
pass
# Don't re-request if it's already pending
arch_list = self.pending_tests.setdefault(trigger, {}).setdefault(src, [])
if arch in arch_list:
self.logger.info('Test %s/%s for %s is already pending, not queueing', src, arch, trigger)
else:
self.logger.info('Requesting %s autopkgtest on %s to verify %s', src, arch, trigger)
arch_list.append(arch)
arch_list.sort()
self.send_test_request(src, arch, trigger, huge=huge)
if self.options.adt_baseline == 'reference':
# Check if we already have a reference for this src on this
# arch (or pending).
try:
self.test_results[REF_TRIG][src][arch]
except KeyError:
try:
arch_list = self.pending_tests[REF_TRIG][src]
if arch not in arch_list:
raise KeyError # fall through
except KeyError:
self.logger.info('Requesting %s autopkgtest on %s to set a reference',
src, arch)
self.send_test_request(src, arch, REF_TRIG, huge=huge)
def passed_in_baseline(self, src, arch):
'''Check if tests for src passed on arch in the baseline
The baseline is either all known results or a reference set, depending on ADT_BASELINE.
'''
# this requires iterating over all cached results and thus is expensive;
# cache the results
try:
return self.passed_in_baseline._cache[src][arch]
except KeyError:
pass
passed_reference = False
if self.options.adt_baseline == 'reference':
try:
passed_reference = self.test_results[REF_TRIG][src][arch][0]
self.logger.info('Found result for src %s in reference: pass=%s', src, passed_reference)
except KeyError:
self.logger.info('Found NO result for src %s in reference: pass=%s', src, passed_reference)
self.passed_in_baseline._cache[src][arch] = passed_reference
return passed_reference
passed_ever = False
for srcmap in self.test_results.values():
try:
if srcmap[src][arch][0]:
passed_ever = True
break
except KeyError:
pass
self.passed_in_baseline._cache[src][arch] = passed_ever
self.logger.info('Result for src %s ever: pass=%s', src, passed_ever)
return passed_ever
passed_in_baseline._cache = collections.defaultdict(dict)
def pkg_test_result(self, src, ver, arch, trigger):
'''Get current test status of a particular package
Return (status, real_version, run_id, log_url) tuple; status is a key in
EXCUSES_LABELS. run_id is None if the test is still running.
'''
# determine current test result status
ever_passed = self.passed_in_baseline(src, arch)
url = None
run_id = None
try:
r = self.test_results[trigger][src][arch]
ver = r[1]
run_id = r[2]
if r[0]:
result = 'PASS'
else:
# Special-case triggers from linux-meta*: we cannot compare
# results against different kernels, as e. g. a DKMS module
# might work against the default kernel but fail against a
# different flavor; so for those, ignore the "ever
# passed" check; FIXME: check against trigsrc only
if trigger.startswith('linux-meta') or trigger.startswith('linux/'):
ever_passed = False
if ever_passed:
if self.has_force_badtest(src, ver, arch):
result = 'IGNORE-FAIL'
else:
result = 'REGRESSION'
else:
result = 'ALWAYSFAIL'
if self.options.adt_swift_url.startswith('file://'):
url = os.path.join(self.options.adt_ci_url,
'data',
'autopkgtest',
self.options.series,
arch,
srchash(src),
src,
run_id,
'log.gz')
else:
url = os.path.join(self.options.adt_swift_url,
self.swift_container,
self.options.series,
arch,
srchash(src),
src,
run_id,
'log.gz')
except KeyError:
# no result for src/arch; still running?
if arch in self.pending_tests.get(trigger, {}).get(src, []):
if ever_passed and not self.has_force_badtest(src, ver, arch):
result = 'RUNNING'
else:
result = 'RUNNING-ALWAYSFAIL'
url = self.options.adt_ci_url + 'status/pending'
else:
raise RuntimeError('Result for %s/%s/%s (triggered by %s) is neither known nor pending!' %
(src, ver, arch, trigger))
return (result, ver, run_id, url)
def has_force_badtest(self, src, ver, arch):
'''Check if src/ver/arch has a force-badtest hint'''
hints = self.britney.hints.search('force-badtest', package=src)
if hints:
self.logger.info('Checking hints for %s/%s/%s: %s', src, ver, arch, [str(h) for h in hints])
for hint in hints:
if [mi for mi in hint.packages if mi.architecture in ['source', arch] and
(mi.version == 'all' or apt_pkg.version_compare(ver, mi.version) <= 0)]:
return True
return False
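For reference, the nested results map documented in AutopkgtestPolicy.__init__ above, written out as a literal (all values invented for illustration):
test_results = {
    'green/2': {                                      # trigger: "source/version"
        'lightgreen': {                               # tested source package
            'amd64': [True, '1', '20150101_100101'],  # [passed, version, run_id]
        },
    },
}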

@@ -219,6 +219,36 @@ class AgePolicy(BasePolicy):
days_old = self._date_now - self._dates[source_name][1]
min_days = self._min_days[urgency]
for bounty in excuse.bounty:
self.logger.info('Applying bounty for %s granted by %s: %d days',
source_name, bounty, excuse.bounty[bounty])
excuse.addhtml('Required age reduced by %d days because of %s' %
(excuse.bounty[bounty], bounty))
min_days -= excuse.bounty[bounty]
if not hasattr(self.options, 'no_penalties') or \
urgency not in self.options.no_penalties:
for penalty in excuse.penalty:
self.logger.info('Applying penalty for %s given by %s: %d days',
source_name, penalty, excuse.penalty[penalty])
excuse.addhtml('Required age increased by %d days because of %s' %
(excuse.penalty[penalty], penalty))
min_days += excuse.penalty[penalty]
try:
bounty_min_age = int(self.options.bounty_min_age)
except ValueError:
if self.options.bounty_min_age in self._min_days:
bounty_min_age = self._min_days[self.options.bounty_min_age]
else:
raise ValueError('Please fix BOUNTY_MIN_AGE in the britney configuration')
except AttributeError:
# The option wasn't defined in the configuration
bounty_min_age = 0
# the age in BOUNTY_MIN_AGE can be higher than the one associated with
# the real urgency, so don't forget to take it into account
bounty_min_age = min(bounty_min_age, self._min_days[urgency])
if min_days < bounty_min_age:
min_days = bounty_min_age
excuse.addhtml('Required age is not allowed to drop below %d days' % min_days)
age_info['age-requirement'] = min_days
age_info['current-age'] = days_old
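A worked example of the arithmetic above, with hypothetical values MINDAYS_MEDIUM = 5, BOUNTY_MIN_AGE = 2 and an autopkgtest bounty of 3 days:
min_days = 5                   # required age for medium urgency
min_days -= 3                  # bounty granted by the autopkgtest policy
bounty_min_age = min(2, 5)     # BOUNTY_MIN_AGE, capped by the real urgency
if min_days < bounty_min_age:
    min_days = bounty_min_age  # bounties may not push the age below 2 days
assert min_days == 2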

@@ -738,6 +738,8 @@ def read_sources_file(filename, sources=None, intern=sys.intern):
maint,
False,
build_deps_arch,
get_field('Testsuite', '').split(),
get_field('Testsuite-Triggers', '').replace(',', '').split(),
)
return sources

@@ -9,17 +9,16 @@ echo
echo
britney2-tests/bin/runtests ./ci/britney-coverage.sh britney2-tests/t test-out || err=$?
echo
britney2-tests/bin/runtests ./britney.py britney2-tests/live-data test-out-live-data-1 live-2011-12-13 || err=$?
echo
britney2-tests/bin/runtests ./britney.py britney2-tests/live-data test-out-live-data-2 live-2011-12-20 || err=$?
echo
if [ -n "$CI" ] ; then
echo skipping live-2011-12-13 to prevent time out on Travis of the whole test suite
echo skipping live-2012-01-04 to prevent time out on Travis of the whole test suite
else
britney2-tests/bin/runtests ./britney.py britney2-tests/live-data test-out-live-data-1 live-2011-12-13 || err=$?
britney2-tests/bin/runtests ./britney.py britney2-tests/live-data test-out-live-data-3 live-2012-01-04 || err=$?
fi
echo
britney2-tests/bin/runtests ./britney.py britney2-tests/live-data test-out-live-data-2 live-2011-12-20 || err=$?
echo
britney2-tests/bin/runtests ./britney.py britney2-tests/live-data test-out-live-data-3 live-2012-01-04 || err=$?
echo
britney2-tests/bin/runtests ./britney.py britney2-tests/live-data test-out-live-data-4 live-2012-05-09 || err=$?
echo
britney2-tests/bin/runtests ./britney.py britney2-tests/live-data test-out-live-data-5 live-2016-04-11 || err=$?
@@ -32,7 +31,7 @@ if [ $err = 0 ] ; then
echo
python3-coverage xml -i || true
echo
bash <(curl -s https://codecov.io/bash) || true
mv .coverage shared
fi
exit $err

@@ -116,3 +116,66 @@ piuparts, the package needs to be fixed first to install and purge cleanly in
the non-interactive debconf state. A URL to the relevant piuparts results is
provided in the excuses.
Britney complains about "autopkgtest"
-------------------------------------
Maintainers can add autopkgtest test cases to their packages. Britney can be
configured to request a test runner instance (in the case of Debian, this is
debci) to run relevant tests. The idea is that a package that is a candidate
for migration is updated in the target suite to its candidate version and that
the autopkgtest case(s) of the package (if it has any) *and* those of all
reverse dependencies are run. Regression in the results with respect to the
current situation in the target suite can influence migration in the following
ways, depending on britney's configuration:
* migration is blocked
* regression adds to the required time a package needs to be in the source
suite before migration is considered (via the age policy). This time can
then be used to investigate the situation and potentially block migration
via other policies (e.g. the bug policy).
Regression in the autopkgtest of the candidate package just needs to be fixed
in the package itself. However, due to the addition of test cases from reverse
dependencies, regression in this policy may come from a test case that the
package does not control. If that is the case, the maintainers of the package
and the maintainers of the regressing test case typically need to discuss and
solve the issue together. The maintainers of the package have the knowledge of
what changed, while the maintainers of the reverse dependency with the failing
test case know what the test is actually testing and how. After all, a
regression in a reverse dependency can be due to one of the following
(non-exhaustive) reasons:
* new bug in the candidate package (fix the package)
* bug in the test case that only gets triggered due to the update (fix the
reverse dependency, but see below)
* out-of-date reference data in the test case that captures a former bug in
the candidate package (fix the reverse dependency, but see below)
* deprecation of functionality that is used in the reverse dependency and/or
its test case (discussion needed)
Unfortunately, a regression is sometimes only intermittent. Ideally this should
be fixed, but it may be acceptable to just have the autopkgtest retried (how
this is achieved depends on the setup in use).
There are cases where it is required to have multiple packages migrate together
to have the test cases pass, e.g. when there was a bug in a regressing test
case of a reverse dependency and that got fixed. In that case the test cases
need to be triggered with both packages from the source suite in the target
suite (again, how this is done depends on the setup).
If britney is configured to add time to the age policy in case of regression, a
test case that hasn't been run (but ran successfully in the past) will also
cause the penalty to be added. This is harmless, because once the results come
in, the penalty will no longer be effective. Similarly, a missing build will
also cause the (harmless) penalty.
A failing test that has never succeeded in britney's memory will be treated as
if the test case doesn't exist.
On top of the penalties for regressions, britney can be configured to reward
bounties for packages that have a successful test case.

@@ -1,3 +1,14 @@
# This file is merged from Debian's tests and Ubuntu's autopkgtest implementation
# For Ubuntu's part, Canonical is the original copyright holder.
#
# (C) 2015 Canonical Ltd.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
## Debian's part
from britney2 import BinaryPackageId
from britney2.installability.builder import InstallabilityTesterBuilder
@@ -5,6 +16,18 @@ TEST_HINTER = 'test-hinter'
HINTS_ALL = ('ALL')
DEFAULT_URGENCY = 'medium'
## autopkgtest part
import os
import shutil
import subprocess
import tempfile
import unittest
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
architectures = ['amd64', 'arm64', 'armhf', 'i386', 'powerpc', 'ppc64el']
##
def new_pkg_universe_builder():
return UniverseBuilder()
@@ -123,3 +146,299 @@ class UniverseBuilder(object):
if pkg_id not in self._packages:
raise ValueError("Package %s has not been added yet" % pkg_id)
return self._packages[pkg_id]
# autopkgtest classes
class TestData:
def __init__(self):
'''Construct local test package indexes.
The archive is initially empty. You can create new packages with
create_deb(). self.path contains the path of the archive, and
self.apt_source provides an apt source "deb" line.
It is kept in a temporary directory which gets removed when the TestData
object gets deleted.
'''
self.path = tempfile.mkdtemp(prefix='testarchive.')
self.apt_source = 'deb file://%s /' % self.path
self.suite_testing = 'testing'
self.suite_unstable = 'unstable'
self.compute_migrations = ''
self.dirs = {False: os.path.join(self.path, 'data', self.suite_testing),
True: os.path.join(self.path, 'data', self.suite_unstable)}
os.makedirs(self.dirs[False])
os.mkdir(self.dirs[True])
self.added_sources = {False: set(), True: set()}
self.added_binaries = {False: set(), True: set()}
# pre-create all files for all architectures
for arch in architectures:
for dir in self.dirs.values():
with open(os.path.join(dir, 'Packages_' + arch), 'w'):
pass
for dir in self.dirs.values():
for fname in ['Dates', 'Blocks', 'Urgency', 'BugsV']:
with open(os.path.join(dir, fname), 'w'):
pass
os.mkdir(os.path.join(self.path, 'data', 'hints'))
shutil.copytree(os.path.join(PROJECT_DIR, 'tests', 'policy-test-data', 'piuparts', 'basic'), os.path.join(self.dirs[False], 'state'))
os.mkdir(os.path.join(self.path, 'output'))
# create temporary home dir for proposed-migration autopkgtest status
self.home = os.path.join(self.path, 'home')
os.environ['HOME'] = self.home
os.makedirs(os.path.join(self.home, 'proposed-migration',
'autopkgtest', 'work'))
def __del__(self):
shutil.rmtree(self.path)
def add(self, name, unstable, fields={}, add_src=True, testsuite=None, srcfields=None):
'''Add a binary package to the index file.
You need to specify at least the package name and in which list to put
it (unstable==True for unstable/proposed, or False for
testing/release). fields specifies all additional entries, e. g.
{'Depends': 'foo, bar', 'Conflicts': 'baz'}. There are defaults for most
fields.
Unless add_src is set to False, this will also automatically create a
source record, based on fields['Source'] and name. In that case, the
"Testsuite:" field is set to the testsuite argument.
'''
assert (name not in self.added_binaries[unstable])
self.added_binaries[unstable].add(name)
fields.setdefault('Architecture', 'any')
fields.setdefault('Version', '1')
fields.setdefault('Priority', 'optional')
fields.setdefault('Section', 'devel')
fields.setdefault('Description', 'test pkg')
if fields['Architecture'] == 'any':
fields_local_copy = fields.copy()
for a in architectures:
fields_local_copy['Architecture'] = a
self._append(name, unstable, 'Packages_' + a, fields_local_copy)
elif fields['Architecture'] == 'all':
for a in architectures:
self._append(name, unstable, 'Packages_' + a, fields)
else:
self._append(name, unstable, 'Packages_' + fields['Architecture'],
fields)
if add_src:
src = fields.get('Source', name)
if src not in self.added_sources[unstable]:
if srcfields is None:
srcfields = {}
srcfields['Version'] = fields['Version']
srcfields['Section'] = fields['Section']
if testsuite:
srcfields['Testsuite'] = testsuite
self.add_src(src, unstable, srcfields)
def add_src(self, name, unstable, fields={}):
'''Add a source package to the index file.
You need to specify at least the package name and in which list to put
it (unstable==True for unstable/proposed, or False for
testing/release). fields specifies all additional entries, which can be
Version (default: 1), Section (default: devel), Testsuite (default:
none), and Extra-Source-Only.
'''
assert (name not in self.added_sources[unstable])
self.added_sources[unstable].add(name)
fields.setdefault('Version', '1')
fields.setdefault('Section', 'devel')
self._append(name, unstable, 'Sources', fields)
def _append(self, name, unstable, file_name, fields):
with open(os.path.join(self.dirs[unstable], file_name), 'a') as f:
f.write('''Package: %s
Maintainer: Joe <joe@example.com>
''' % name)
for k, v in fields.items():
f.write('%s: %s\n' % (k, v))
f.write('\n')
def remove_all(self, unstable):
'''Remove all added packages'''
self.added_binaries[unstable] = set()
self.added_sources[unstable] = set()
for a in architectures:
open(os.path.join(self.dirs[unstable], 'Packages_' + a), 'w').close()
open(os.path.join(self.dirs[unstable], 'Sources'), 'w').close()
def add_default_packages(self, libc6=True, green=True, lightgreen=True, darkgreen=True, blue=True, black=True, grey=True):
'''To avoid duplication, add packages we need all the time'''
# libc6 (always)
self.add('libc6', False)
if (libc6 is True):
self.add('libc6', True)
# src:green
self.add('libgreen1', False, {'Source': 'green',
'Depends': 'libc6 (>= 0.9)'},
testsuite='autopkgtest')
if (green is True):
self.add('libgreen1', True, {'Source': 'green',
'Depends': 'libc6 (>= 0.9)'},
testsuite='autopkgtest')
self.add('green', False, {'Depends': 'libc6 (>= 0.9), libgreen1',
'Conflicts': 'blue'},
testsuite='autopkgtest')
if (green is True):
self.add('green', True, {'Depends': 'libc6 (>= 0.9), libgreen1',
'Conflicts': 'blue'},
testsuite='autopkgtest')
# lightgreen
self.add('lightgreen', False, {'Depends': 'libgreen1'},
testsuite='autopkgtest')
if (lightgreen is True):
self.add('lightgreen', True, {'Depends': 'libgreen1'},
testsuite='autopkgtest')
## autodep8 or similar test
# darkgreen
self.add('darkgreen', False, {'Depends': 'libgreen1'},
testsuite='autopkgtest-pkg-foo')
if (darkgreen is True):
self.add('darkgreen', True, {'Depends': 'libgreen1'},
testsuite='autopkgtest-pkg-foo')
# blue
self.add('blue', False, {'Depends': 'libc6 (>= 0.9)',
'Conflicts': 'green'},
testsuite='specialtest')
if blue is True:
self.add('blue', True, {'Depends': 'libc6 (>= 0.9)',
'Conflicts': 'green'},
testsuite='specialtest')
# black
self.add('black', False, {},
testsuite='autopkgtest')
if black is True:
self.add('black', True, {},
testsuite='autopkgtest')
# grey
self.add('grey', False, {},
testsuite='autopkgtest')
if grey is True:
self.add('grey', True, {},
testsuite='autopkgtest')
class TestBase(unittest.TestCase):
def setUp(self):
super(TestBase, self).setUp()
self.maxDiff = None
self.data = TestData()
self.britney = os.path.join(PROJECT_DIR, 'britney.py')
# create temporary config so that tests can hack it
self.britney_conf = os.path.join(self.data.path, 'britney.conf')
with open(self.britney_conf, 'w') as f:
f.write('''
TESTING = data/testing
UNSTABLE = data/unstable
NONINST_STATUS = data/testing/non-installable-status
EXCUSES_OUTPUT = output/excuses.html
EXCUSES_YAML_OUTPUT = output/excuses.yaml
UPGRADE_OUTPUT = output/output.txt
HEIDI_OUTPUT = output/HeidiResult
STATIC_INPUT_DIR = data/testing/input
STATE_DIR = data/testing/state
ARCHITECTURES = amd64 arm64 armhf i386 powerpc ppc64el
NOBREAKALL_ARCHES = amd64 arm64 armhf i386 powerpc ppc64el
OUTOFSYNC_ARCHES =
BREAK_ARCHES =
NEW_ARCHES =
MINDAYS_LOW = 0
MINDAYS_MEDIUM = 0
MINDAYS_HIGH = 0
MINDAYS_CRITICAL = 0
MINDAYS_EMERGENCY = 0
DEFAULT_URGENCY = medium
NO_PENALTIES = high critical emergency
BOUNTY_MIN_AGE = 8
HINTSDIR = data/hints
HINTS_AUTOPKGTEST = ALL
HINTS_FREEZE = block block-all block-udeb
HINTS_FREEZE-EXCEPTION = unblock unblock-udeb
HINTS_SATBRITNEY = easy
HINTS_AUTO-REMOVALS = remove
SMOOTH_UPDATES = badgers
IGNORE_CRUFT = 0
REMOVE_OBSOLETE = no
ADT_ENABLE = yes
ADT_ARCHES = amd64 i386
ADT_AMQP = file://output/debci.input
ADT_PPAS =
ADT_SHARED_RESULTS_CACHE =
ADT_SWIFT_URL = http://localhost:18085
ADT_CI_URL = https://autopkgtest.ubuntu.com/
ADT_HUGE = 20
ADT_SUCCESS_BOUNTY =
ADT_REGRESSION_PENALTY =
ADT_BASELINE =
''')
assert os.path.exists(self.britney)
def tearDown(self):
del self.data
def run_britney(self, args=[]):
'''Run britney.
Assert that it succeeds and does not produce anything on stderr.
Return (excuses.yaml, excuses.html, britney_out).
'''
britney = subprocess.Popen([self.britney, '-v', '-c', self.britney_conf,
'%s' % self.data.compute_migrations],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.data.path,
universal_newlines=True)
(out, err) = britney.communicate()
self.assertEqual(britney.returncode, 0, out + err)
self.assertEqual(err, '')
with open(os.path.join(self.data.path, 'output',
'excuses.yaml'), encoding='utf-8') as f:
yaml = f.read()
with open(os.path.join(self.data.path, 'output',
'excuses.html'), encoding='utf-8') as f:
html = f.read()
return (yaml, html, out)
def create_hint(self, username, content):
'''Create a hint file for the given username and content'''
hints_path = os.path.join(
self.data.path, 'data', 'hints', username)
with open(hints_path, 'a') as fd:
fd.write(content)
fd.write('\n')

@@ -0,0 +1,170 @@
# Mock a Swift server with autopkgtest results
# Author: Martin Pitt <martin.pitt@ubuntu.com>
import os
import tarfile
import io
import sys
import socket
import time
import tempfile
import json
try:
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import urlparse, parse_qs
except ImportError:
# Python 2
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from urlparse import urlparse, parse_qs
class SwiftHTTPRequestHandler(BaseHTTPRequestHandler):
'''Mock swift container with autopkgtest results
This accepts retrieving a particular result.tar (e. g.
/container/path/result.tar) or listing the container contents
(/container/?prefix=foo&delimiter=@&marker=foo/bar).
'''
# map container -> result.tar path -> (exitcode, testpkg-version[, testinfo])
results = {}
def do_GET(self):
p = urlparse(self.path)
path_comp = p.path.split('/')
container = path_comp[1]
path = '/'.join(path_comp[2:])
if path:
self.serve_file(container, path)
else:
self.list_container(container, parse_qs(p.query))
def serve_file(self, container, path):
if os.path.basename(path) != 'result.tar':
self.send_error(404, 'File not found (only result.tar supported)')
return
try:
fields = self.results[container][os.path.dirname(path)]
try:
(exitcode, pkgver, testinfo) = fields
except ValueError:
(exitcode, pkgver) = fields
testinfo = None
except KeyError:
self.send_error(404, 'File not found')
return
self.send_response(200)
self.send_header('Content-type', 'application/octet-stream')
self.end_headers()
tar = io.BytesIO()
with tarfile.open('result.tar', 'w', tar) as results:
# add exitcode
contents = ('%i' % exitcode).encode()
ti = tarfile.TarInfo('exitcode')
ti.size = len(contents)
results.addfile(ti, io.BytesIO(contents))
# add testpkg-version
if pkgver is not None:
contents = pkgver.encode()
ti = tarfile.TarInfo('testpkg-version')
ti.size = len(contents)
results.addfile(ti, io.BytesIO(contents))
# add testinfo.json
if testinfo:
contents = json.dumps(testinfo).encode()
ti = tarfile.TarInfo('testinfo.json')
ti.size = len(contents)
results.addfile(ti, io.BytesIO(contents))
self.wfile.write(tar.getvalue())
def list_container(self, container, query):
try:
objs = set(['%s/result.tar' % r for r in self.results[container]])
except KeyError:
self.send_error(401, 'Container does not exist')
return
if 'prefix' in query:
p = query['prefix'][-1]
objs = set([o for o in objs if o.startswith(p)])
if 'delimiter' in query:
d = query['delimiter'][-1]
# if find() returns a value, we want to include the delimiter, thus
# bump its result; for "not found" return None
find_adapter = lambda i: i + 1 if i >= 0 else None
objs = set([o[:find_adapter(o.find(d))] for o in objs])
if 'marker' in query:
m = query['marker'][-1]
objs = set([o for o in objs if o > m])
self.send_response(objs and 200 or 204) # 204: "No Content"
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write(('\n'.join(sorted(objs)) + '\n').encode('UTF-8'))
class AutoPkgTestSwiftServer:
def __init__(self, port=8080):
self.port = port
self.server_pid = None
self.log = None
def __del__(self):
if self.server_pid:
self.stop()
@classmethod
def set_results(klass, results):
'''Set served results.
results is a map: container -> result.tar path ->
(exitcode, testpkg-version, testinfo)
'''
SwiftHTTPRequestHandler.results = results
def start(self):
assert self.server_pid is None, 'already started'
if self.log:
self.log.close()
self.log = tempfile.TemporaryFile()
p = os.fork()
if p:
# parent: wait until server starts
self.server_pid = p
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
while True:
if s.connect_ex(('127.0.0.1', self.port)) == 0:
break
time.sleep(0.1)
s.close()
return
# child; quiesce logging on stderr
os.dup2(self.log.fileno(), sys.stderr.fileno())
srv = HTTPServer(('', self.port), SwiftHTTPRequestHandler)
srv.serve_forever()
sys.exit(0)
def stop(self):
assert self.server_pid, 'not running'
os.kill(self.server_pid, 15)
os.waitpid(self.server_pid, 0)
self.server_pid = None
self.log.close()
if __name__ == '__main__':
srv = AutoPkgTestSwiftServer()
srv.set_results({'autopkgtest-testing': {
'testing/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1'),
'testing/i386/g/green/20150101_100000@': (0, 'green 1', {'custom_environment': ['ADT_TEST_TRIGGERS=green']}),
'testing/i386/l/lightgreen/20150101_100000@': (0, 'lightgreen 1'),
'testing/i386/l/lightgreen/20150101_100101@': (4, 'lightgreen 2'),
'testing/i386/l/lightgreen/20150101_100102@': (0, 'lightgreen 3'),
}})
srv.start()
print('Running on http://localhost:8080/autopkgtest-testing')
print('Press Enter to quit.')
sys.stdin.readline()
srv.stop()

File diff suppressed because it is too large

@@ -39,7 +39,7 @@ def create_excuse(name):
def create_source_package(version, section='devel', binaries=None):
if binaries is None:
binaries = []
return SourcePackage(version, section, binaries, 'Random tester', False, None)
return SourcePackage(version, section, binaries, 'Random tester', False, None, [], [])
def create_policy_objects(source_name, target_version, source_version):
