diff --git a/.travis.yml b/.travis.yml index 6a86b1a..8d6ac00 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,7 +3,7 @@ sudo: required dist: trusty before_install: - - git clone https://salsa.debian.org/debian/britney2-tests.git britney2-tests + - git clone -b autopkgtest https://salsa.debian.org/debian/britney2-tests.git britney2-tests - git clone https://salsa.debian.org/debian/britney-tests-live-data.git britney2-tests/live-data - rm -f .coverage @@ -24,7 +24,11 @@ install: script: # https://docs.codecov.io/docs/testing-with-docker - - ci_env=$(bash <(curl -s https://codecov.io/env)) ; docker run $ci_env britney /bin/sh -c "export CI=true ; ci/run-everything-and-upload-to-codecov.io.sh" + - mkdir shared + - docker run -v "$PWD/shared:/shared" britney /bin/sh -c "export CI=true ; ci/run-everything-and-upload-to-codecov.io.sh" + +after-success: + - bash <(curl -s https://codecov.io/bash) #notifications: # email: false diff --git a/INSTALL b/INSTALL index 4b1b199..16c74f7 100644 --- a/INSTALL +++ b/INSTALL @@ -8,3 +8,4 @@ Requirements: * Python APT/DPKG bindings aptitude install python3-apt * Python YAML library aptitude install python3-yaml * Python nose tests (testing) aptitude install python3-nose + * Python AMQP library aptitude install python3-amqplib diff --git a/britney.conf b/britney.conf index 461ee80..32fd8dd 100644 --- a/britney.conf +++ b/britney.conf @@ -51,6 +51,8 @@ MINDAYS_HIGH = 2 MINDAYS_CRITICAL = 0 MINDAYS_EMERGENCY = 0 DEFAULT_URGENCY = medium +NO_PENALTIES = high critical emergency +BOUNTY_MIN_AGE = 2 HINTSDIR = /srv/release.debian.org/britney/hints @@ -79,3 +81,24 @@ HINTS_AUTO-REMOVALS = remove SMOOTH_UPDATES = libs oldlibs IGNORE_CRUFT = 1 + +ADT_ENABLE = no +ADT_ARCHES = amd64 +ADT_AMQP = file:///srv/release.debian.org/britney/var/data-b2/output/debci.input +# space separate list of PPAs to add for test requests and for polling results; +# the *last* one determines the swift container name +ADT_PPAS = +# set this to the path of a (r/o) 
autopkgtest-results.cache for running many parallel +# britney instances for PPAs without updating the cache +ADT_SHARED_RESULTS_CACHE = +# Swift base URL with the results (must be publicly readable and browsable) +# or file location if results are pre-fetched +ADT_SWIFT_URL = file:///srv/release.debian.org/britney/state/debci.json +# Base URL for autopkgtest site, used for links in the excuses +ADT_CI_URL = https://ci.debian.net/ + +# Autopkgtest results can be used to influence the aging +ADT_REGRESSION_PENALTY = 10 +ADT_SUCCESS_BOUNTY = 3 +ADT_BASELINE = reference +ADT_RETRY_URL_MECH = run_id diff --git a/britney.conf.template b/britney.conf.template index eba5efa..2079374 100644 --- a/britney.conf.template +++ b/britney.conf.template @@ -57,6 +57,14 @@ MINDAYS_EMERGENCY = 0 # The urgency to assume if none is provided or it is not defined with # a MINDAYS_$NAME config above DEFAULT_URGENCY = medium +# Don't apply penalties (e.g. from autopkgtest in bounty/penalty mode) for the +# following urgencies +NO_PENALTIES = high critical emergency +# Lower limit of the age, so accumulated bounties don't let package migrate +# too quickly (urgency still has precedence of course) +# Can be given an urgency name +#BOUNTY_MIN_AGE = high +BOUNTY_MIN_AGE = 2 # Directory where hints files are stored HINTSDIR = /path/to/britney/hints-dir @@ -99,3 +107,31 @@ SMOOTH_UPDATES = libs oldlibs # Whether old binaries in the source distribution should be # considered as a blocker for migration. 
IGNORE_CRUFT = 1 + +# Enable the autopkgtest policy +ADT_ENABLE = no +# Define on which architectures tests should be executed and taken into account +ADT_ARCHES = amd64 +# AMQP url or request file for the testing framework +#ADT_AMQP = amqp://test_request:password@127.0.0.1 +ADT_AMQP = file:///path/to/britney/debci.input +# space separate list of PPAs to add for test requests and for polling results; +# the *last* one determines the swift container name +ADT_PPAS = +# set this to the path of a (r/o) autopkgtest-results.cache for running many parallel +# britney instances for PPAs without updating the cache +ADT_SHARED_RESULTS_CACHE = +# Swift base URL with the results (must be publicly readable and browsable) +# or file location if results are pre-fetched +#ADT_SWIFT_URL = https://example.com/some/url +ADT_SWIFT_URL = file:///path/to/britney/state/debci.json +# Base URL for autopkgtest site, used for links in the excuses +ADT_CI_URL = https://example.com/ +# Enable the huge queue for packages that trigger vast amounts of tests to not +# starve the regular queue +#ADT_HUGE = 20 + +# Autopkgtest results can be used to influence the aging, leave +# ADT_REGRESSION_PENALTY empty to have regressions block migration +ADT_REGRESSION_PENALTY = 10 +ADT_SUCCESS_BOUNTY = 3 diff --git a/britney.py b/britney.py index 4b86da6..bf78645 100755 --- a/britney.py +++ b/britney.py @@ -200,6 +200,7 @@ from britney2.installability.builder import build_installability_tester from britney2.migrationitem import MigrationItem from britney2.policies import PolicyVerdict from britney2.policies.policy import AgePolicy, RCBugPolicy, PiupartsPolicy, BuildDependsPolicy +from britney2.policies.autopkgtest import AutopkgtestPolicy from britney2.utils import (old_libraries_format, undo_changes, compute_reverse_tree, possibly_compressed, read_nuninst, write_nuninst, write_heidi, @@ -325,6 +326,14 @@ class Britney(object): self.binaries['tpu'] = {} self.binaries['pu'] = {} + # compute inverse 
Testsuite-Triggers: map, unifying all series + self.logger.info('Building inverse testsuite_triggers map') + self.testsuite_triggers = {} + for suitemap in self.sources.values(): + for src, data in suitemap.items(): + for trigger in data.testsuite_triggers: + self.testsuite_triggers.setdefault(trigger, set()).add(src) + self.binaries['unstable'] = self.read_binaries(self.suite_info['unstable'].path, "unstable", self.options.architectures) for suite in ('tpu', 'pu'): if suite in self.suite_info: @@ -443,6 +452,8 @@ class Britney(object): help="Compute which packages can migrate (the default)") parser.add_option("", "--no-compute-migrations", action="store_false", dest="compute_migrations", help="Do not compute which packages can migrate.") + parser.add_option("", "--series", action="store", dest="series", default='testing', + help="set distribution series name") (self.options, self.args) = parser.parse_args() if self.options.verbose: @@ -548,9 +559,14 @@ class Britney(object): self.options.ignore_cruft == "0": self.options.ignore_cruft = False - self.policies.append(AgePolicy(self.options, self.suite_info, MINDAYS)) + if not hasattr(self.options, 'adt_retry_url_mech'): + self.options.adt_retry_url_mech = '' + self.policies.append(RCBugPolicy(self.options, self.suite_info)) self.policies.append(PiupartsPolicy(self.options, self.suite_info)) + if getattr(self.options, 'adt_enable') == 'yes': + self.policies.append(AutopkgtestPolicy(self.options, self.suite_info)) + self.policies.append(AgePolicy(self.options, self.suite_info, MINDAYS)) self.policies.append(BuildDependsPolicy(self.options, self.suite_info)) for policy in self.policies: @@ -597,7 +613,9 @@ class Britney(object): [], None, True, - None + None, + [], + [], ) self.sources['testing'][pkg_name] = src_data @@ -673,6 +691,8 @@ class Britney(object): None, True, None, + [], + [], ) self.sources['testing'][pkg_name] = src_data self.sources['unstable'][pkg_name] = src_data @@ -874,7 +894,7 @@ class 
Britney(object): srcdist[source].binaries.append(pkg_id) # if the source package doesn't exist, create a fake one else: - srcdist[source] = SourcePackage(source_version, 'faux', [pkg_id], None, True, None) + srcdist[source] = SourcePackage(source_version, 'faux', [pkg_id], None, True, None, [], []) # add the resulting dictionary to the package list packages[pkg] = dpkg @@ -1073,6 +1093,7 @@ class Britney(object): if not packages: excuse.addhtml("%s/%s unsatisfiable Depends: %s" % (pkg, arch, block_txt.strip())) excuse.addreason("depends") + excuse.add_unsatisfiable_on_arch(arch) if arch not in self.options.break_arches: is_all_ok = False continue diff --git a/britney2/__init__.py b/britney2/__init__.py index c65f560..c462920 100644 --- a/britney2/__init__.py +++ b/britney2/__init__.py @@ -9,15 +9,17 @@ SuiteInfo = namedtuple('SuiteInfo', [ class SourcePackage(object): - __slots__ = ['version', 'section', 'binaries', 'maintainer', 'is_fakesrc', 'build_deps_arch'] + __slots__ = ['version', 'section', 'binaries', 'maintainer', 'is_fakesrc', 'build_deps_arch', 'testsuite', 'testsuite_triggers'] - def __init__(self, version, section, binaries, maintainer, is_fakesrc, build_deps_arch): + def __init__(self, version, section, binaries, maintainer, is_fakesrc, build_deps_arch, testsuite, testsuite_triggers): self.version = version self.section = section self.binaries = binaries self.maintainer = maintainer self.is_fakesrc = is_fakesrc self.build_deps_arch = build_deps_arch + self.testsuite = testsuite + self.testsuite_triggers = testsuite_triggers def __getitem__(self, item): return getattr(self, self.__slots__[item]) diff --git a/britney2/excuse.py b/britney2/excuse.py index 0baa41e..cd1e46d 100644 --- a/britney2/excuse.py +++ b/britney2/excuse.py @@ -80,6 +80,7 @@ class Excuse(object): self.arch_build_deps = {} self.sane_deps = [] self.break_deps = [] + self.unsatisfiable_on_archs = [] self.newbugs = set() self.oldbugs = set() self.reason = {} @@ -89,6 +90,9 @@ class 
Excuse(object): self.old_binaries = defaultdict(set) self.policy_info = {} + self.bounty = {} + self.penalty = {} + def sortkey(self): if self.daysold == None: return (-1, self.name) @@ -137,6 +141,11 @@ class Excuse(object): if (name, arch) not in self.break_deps: self.break_deps.append( (name, arch) ) + def add_unsatisfiable_on_arch(self, arch): + """Add an arch that has unsatisfiable dependencies""" + if arch not in self.unsatisfiable_on_archs: + self.unsatisfiable_on_archs.append(arch) + def add_arch_build_dep(self, name, arch): if name not in self.arch_build_deps: self.arch_build_deps[name] = [] @@ -316,3 +325,10 @@ class Excuse(object): excusedata["is-candidate"] = self.is_valid return excusedata + def add_bounty(self, policy, bounty): + """adding bounty""" + self.bounty[policy] = bounty + + def add_penalty(self, policy, penalty): + """adding penalty""" + self.penalty[policy] = penalty diff --git a/britney2/policies/autopkgtest.py b/britney2/policies/autopkgtest.py new file mode 100644 index 0000000..6a5c92a --- /dev/null +++ b/britney2/policies/autopkgtest.py @@ -0,0 +1,840 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2013 - 2016 Canonical Ltd. +# Authors: +# Colin Watson +# Jean-Baptiste Lallement +# Martin Pitt + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+ +import collections +import os +import json +import tarfile +import io +import re +import sys +import urllib.parse +from urllib.request import urlopen + +import apt_pkg + +import britney2.hints +from britney2.policies.policy import BasePolicy, PolicyVerdict + + +EXCUSES_LABELS = { + "PASS": 'Pass', + "FAIL": 'Failed', + "ALWAYSFAIL": 'Always failed', + "REGRESSION": 'Regression', + "IGNORE-FAIL": 'Ignored failure', + "RUNNING": 'Test in progress', + "RUNNING-ALWAYSFAIL": 'Test in progress (always failed)', +} + +REF_TRIG = 'migration-reference/0' + +def srchash(src): + '''archive hash prefix for source package''' + + if src.startswith('lib'): + return src[:4] + else: + return src[0] + + +class AutopkgtestPolicy(BasePolicy): + """autopkgtest regression policy for source migrations + + Run autopkgtests for the excuse and all of its reverse dependencies, and + reject the upload if any of those regress. + """ + + def __init__(self, options, suite_info): + super().__init__('autopkgtest', options, suite_info, {'unstable'}) + # tests requested in this and previous runs + # trigger -> src -> [arch] + self.pending_tests = None + self.pending_tests_file = os.path.join(self.options.state_dir, 'autopkgtest-pending.json') + + # results map: trigger -> src -> arch -> [passed, version, run_id] + # - trigger is "source/version" of an unstable package that triggered + # this test run. + # - "passed" is a bool + # - "version" is the package version of "src" of that test + # - "run_id" is an opaque ID that identifies a particular test run for + # a given src/arch. It's usually a time stamp like "20150120_125959". + # This is also used for tracking the latest seen time stamp for + # requesting only newer results. 
+ self.test_results = {} + if self.options.adt_shared_results_cache: + self.results_cache_file = self.options.adt_shared_results_cache + else: + self.results_cache_file = os.path.join(self.options.state_dir, 'autopkgtest-results.cache') + + try: + self.options.adt_ppas = self.options.adt_ppas.strip().split() + except AttributeError: + self.options.adt_ppas = [] + + self.swift_container = 'autopkgtest-' + options.series + if self.options.adt_ppas: + self.swift_container += '-' + options.adt_ppas[-1].replace('/', '-') + + # restrict adt_arches to architectures we actually run for + self.adt_arches = [] + for arch in self.options.adt_arches.split(): + if arch in self.options.architectures: + self.adt_arches.append(arch) + else: + self.logger.info("Ignoring ADT_ARCHES %s as it is not in architectures list", arch) + + def register_hints(self, hint_parser): + hint_parser.register_hint_type('force-badtest', britney2.hints.split_into_one_hint_per_package) + hint_parser.register_hint_type('force-skiptest', britney2.hints.split_into_one_hint_per_package) + + def initialise(self, britney): + super().initialise(britney) + os.makedirs(self.options.state_dir, exist_ok=True) + self.read_pending_tests() + + # read the cached results that we collected so far + if os.path.exists(self.results_cache_file): + with open(self.results_cache_file) as f: + self.test_results = json.load(f) + self.logger.info('Read previous results from %s', self.results_cache_file) + else: + self.logger.info('%s does not exist, re-downloading all results from swift', self.results_cache_file) + + # read in the new results + if self.options.adt_swift_url.startswith('file://'): + debci_file = self.options.adt_swift_url[7:] + if os.path.exists(debci_file): + with open(debci_file) as f: + test_results = json.load(f) + self.logger.info('Read new results from %s', debci_file) + # With debci, pending tests are determined from the debci file + self.pending_tests = {} + for res in test_results['results']: + # 
Blacklisted tests don't get a version + if res['version'] is None: + res['version'] = 'blacklisted' + (trigger, src, arch, ver, status, stamp) = ([res['trigger'], res['package'], res['arch'], res['version'], res['status'], str(res['run_id'])]) + if trigger is None: + # not requested for this policy, so ignore + continue + if status is None: + # still running => pending + arch_list = self.pending_tests.setdefault(trigger, {}).setdefault(src, []) + if arch not in arch_list: + self.logger.info('Pending autopkgtest %s on %s to verify %s',src, arch, trigger) + arch_list.append(arch) + arch_list.sort() + elif status == 'tmpfail': + # let's see if we still need it + continue + else: + self.logger.info('Results %s %s %s added', src, trigger, status) + self.add_trigger_to_results(trigger, src, ver, arch, stamp, status == 'pass') + else: + self.logger.info('%s does not exist, no new data will be processed', debci_file) + + # we need sources, binaries, and installability tester, so for now + # remember the whole britney object + self.britney = britney + + # Initialize AMQP connection + self.amqp_channel = None + self.amqp_file = None + if self.options.dry_run: + return + + amqp_url = self.options.adt_amqp + + if amqp_url.startswith('amqp://'): + import amqplib.client_0_8 as amqp + # depending on the setup we connect to a AMQP server + creds = urllib.parse.urlsplit(amqp_url, allow_fragments=False) + self.amqp_con = amqp.Connection(creds.hostname, userid=creds.username, + password=creds.password) + self.amqp_channel = self.amqp_con.channel() + self.logger.info('Connected to AMQP server') + elif amqp_url.startswith('file://'): + # or in Debian and in testing mode, adt_amqp will be a file:// URL + self.amqp_file = amqp_url[7:] + else: + raise RuntimeError('Unknown ADT_AMQP schema %s' % amqp_url.split(':', 1)[0]) + + def save_state(self, britney): + super().save_state(britney) + + # update the results on-disk cache, unless we are using a r/o shared one + if not 
self.options.adt_shared_results_cache: + self.logger.info('Updating results cache') + with open(self.results_cache_file + '.new', 'w') as f: + json.dump(self.test_results, f, indent=2) + os.rename(self.results_cache_file + '.new', self.results_cache_file) + + # update the pending tests on-disk cache + self.logger.info('Updating pending requested tests in %s', self.pending_tests_file) + with open(self.pending_tests_file + '.new', 'w') as f: + json.dump(self.pending_tests, f, indent=2) + os.rename(self.pending_tests_file + '.new', self.pending_tests_file) + + def apply_policy_impl(self, tests_info, suite, source_name, source_data_tdist, source_data_srcdist, excuse): + # initialize + verdict = PolicyVerdict.PASS + elegible_for_bounty = False + + # skip/delay autopkgtests until new package is built somewhere + binaries_info = self.britney.sources[suite][source_name] + if not binaries_info.binaries: + self.logger.info('%s hasn''t been built anywhere, skipping autopkgtest policy', excuse.name) + verdict = PolicyVerdict.REJECTED_TEMPORARILY + + if 'all' in excuse.missing_builds: + self.logger.info('%s hasn''t been built for arch:all, skipping autopkgtest policy', source_name) + verdict = PolicyVerdict.REJECTED_TEMPORARILY + + if verdict == PolicyVerdict.PASS: + self.logger.info('Checking autopkgtests for %s', source_name) + trigger = source_name + '/' + source_data_srcdist.version + + # build a (testsrc, testver) → arch → (status, log_url) map; we trigger/check test + # results per architecture for technical/efficiency reasons, but we + # want to evaluate and present the results by tested source package + # first + pkg_arch_result = collections.defaultdict(dict) + for arch in self.adt_arches: + if arch in excuse.missing_builds: + verdict = PolicyVerdict.REJECTED_TEMPORARILY + self.logger.info('%s hasn''t been built on arch %s, delay autopkgtest there', source_name, arch) + elif arch in excuse.unsatisfiable_on_archs: + verdict = PolicyVerdict.REJECTED_TEMPORARILY + 
self.logger.info('%s is uninstallable on arch %s, delay autopkgtest there', source_name, arch) + else: + # request tests (unless they were already requested earlier or have a result) + tests = self.tests_for_source(source_name, source_data_srcdist.version, arch) + is_huge = False + try: + is_huge = len(tests) > int(self.options.adt_huge) + except AttributeError: + pass + for (testsrc, testver) in tests: + self.pkg_test_request(testsrc, arch, trigger, huge=is_huge) + (result, real_ver, run_id, url) = self.pkg_test_result(testsrc, testver, arch, trigger) + pkg_arch_result[(testsrc, real_ver)][arch] = (result, run_id, url) + + # add test result details to Excuse + cloud_url = self.options.adt_ci_url + "packages/%(h)s/%(s)s/%(r)s/%(a)s" + for (testsrc, testver) in sorted(pkg_arch_result): + arch_results = pkg_arch_result[(testsrc, testver)] + r = {v[0] for v in arch_results.values()} + if 'REGRESSION' in r: + verdict = PolicyVerdict.REJECTED_PERMANENTLY + elif 'RUNNING' in r and verdict == PolicyVerdict.PASS: + verdict = PolicyVerdict.REJECTED_TEMPORARILY + # skip version if still running on all arches + if not r - {'RUNNING', 'RUNNING-ALWAYSFAIL'}: + testver = None + + # A source package is elegible for the bounty if it has tests + # of its own that pass on all tested architectures. 
+ if testsrc == source_name and r == {'PASS'}: + elegible_for_bounty = True + + if testver: + testname = '%s/%s' % (testsrc, testver) + else: + testname = testsrc + + html_archmsg = [] + for arch in sorted(arch_results): + (status, run_id, log_url) = arch_results[arch] + artifact_url = None + retry_url = None + history_url = None + if self.options.adt_ppas: + if log_url.endswith('log.gz'): + artifact_url = log_url.replace('log.gz', 'artifacts.tar.gz') + else: + history_url = cloud_url % { + 'h': srchash(testsrc), 's': testsrc, + 'r': self.options.series, 'a': arch} + if status == 'REGRESSION': + if self.options.adt_retry_url_mech == 'run_id': + retry_url = self.options.adt_ci_url + 'api/v1/retry/' + run_id + else: + retry_url = self.options.adt_ci_url + 'request.cgi?' + \ + urllib.parse.urlencode([('release', self.options.series), + ('arch', arch), + ('package', testsrc), + ('trigger', trigger)] + + [('ppa', p) for p in self.options.adt_ppas]) + + tests_info.setdefault(testname, {})[arch] = \ + [status, log_url, history_url, artifact_url, retry_url] + + # render HTML snippet for testsrc entry for current arch + if history_url: + message = '%s' % (history_url, arch) + else: + message = arch + message += ': %s' % (log_url, EXCUSES_LABELS[status]) + if retry_url: + message += ' ' % retry_url + if artifact_url: + message += ' [artifacts]' % artifact_url + html_archmsg.append(message) + + # render HTML line for testsrc entry + excuse.addhtml("autopkgtest for %s: %s" % (testname, ', '.join(html_archmsg))) + + if verdict != PolicyVerdict.PASS: + # check for force-skiptest hint + hints = self.britney.hints.search('force-skiptest', package=source_name, version=source_data_srcdist.version) + if hints: + excuse.addreason('skiptest') + excuse.addhtml("Should wait for tests relating to %s %s, but forced by %s" % + (source_name, source_data_srcdist.version, hints[0].user)) + verdict = PolicyVerdict.PASS_HINTED + else: + excuse.addreason('autopkgtest') + + if 
self.options.adt_success_bounty and verdict == PolicyVerdict.PASS and elegible_for_bounty: + excuse.add_bounty('autopkgtest', int(self.options.adt_success_bounty)) + if self.options.adt_regression_penalty and \ + verdict in {PolicyVerdict.REJECTED_PERMANENTLY, PolicyVerdict.REJECTED_TEMPORARILY}: + excuse.add_penalty('autopkgtest', int(self.options.adt_regression_penalty)) + # In case we give penalties instead of blocking, we must always pass + verdict = PolicyVerdict.PASS + + return verdict + + # + # helper functions + # + + @classmethod + def has_autodep8(kls, srcinfo, binaries): + '''Check if package is covered by autodep8 + + srcinfo is an item from self.britney.sources + binaries is self.britney.binaries['unstable'][arch][0] + ''' + # autodep8? + for t in srcinfo.testsuite: + if t.startswith('autopkgtest-pkg'): + return True + + # DKMS: some binary depends on "dkms" + for pkg_id in srcinfo.binaries: + try: + bininfo = binaries[pkg_id.package_name] + except KeyError: + continue + if 'dkms' in (bininfo.depends or ''): + return True + return False + + def tests_for_source(self, src, ver, arch): + '''Iterate over all tests that should be run for given source and arch''' + + sources_info = self.britney.sources['testing'] + binaries_info = self.britney.binaries['testing'][arch][0] + + reported_pkgs = set() + + tests = [] + + # gcc-N triggers tons of tests via libgcc1, but this is mostly in vain: + # gcc already tests itself during build, and it is being used from + # -proposed, so holding it back on a dozen unrelated test failures + # serves no purpose. Just check some key packages which actually use + # gcc during the test, and libreoffice as an example for a libgcc user. + if src.startswith('gcc-'): + if re.match('gcc-\d$', src): + for test in ['binutils', 'fglrx-installer', 'libreoffice', 'linux']: + try: + tests.append((test, sources_info[test].version)) + except KeyError: + # no package in that series? 
*shrug*, then not (mostly for testing) + pass + return tests + else: + # for other compilers such as gcc-snapshot etc. we don't need + # to trigger anything + return [] + + # Debian doesn't have linux-meta, but Ubuntu does + # for linux themselves we don't want to trigger tests -- these should + # all come from linux-meta*. A new kernel ABI without a corresponding + # -meta won't be installed and thus we can't sensibly run tests against + # it. + if src.startswith('linux') and src.replace('linux', 'linux-meta') in sources_info: + return [] + + # we want to test the package itself, if it still has a test in unstable + srcinfo = self.britney.sources['unstable'][src] + if 'autopkgtest' in srcinfo.testsuite or self.has_autodep8(srcinfo, binaries_info): + reported_pkgs.add(src) + tests.append((src, ver)) + + extra_bins = [] + # Debian doesn't have linux-meta, but Ubuntu does + # Hack: For new kernels trigger all DKMS packages by pretending that + # linux-meta* builds a "dkms" binary as well. With that we ensure that we + # don't regress DKMS drivers with new kernel versions. + if src.startswith('linux-meta'): + # does this have any image on this arch? + for pkg_id in srcinfo.binaries: + if pkg_id.architecture == arch and '-image' in pkg_id.package_name: + try: + extra_bins.append(binaries_info['dkms'].pkg_id) + except KeyError: + pass + + # plus all direct reverse dependencies and test triggers of its + # binaries which have an autopkgtest + for binary in srcinfo.binaries + extra_bins: + rdeps = self.britney._inst_tester.reverse_dependencies_of(binary) + for rdep in rdeps: + try: + rdep_src = binaries_info[rdep.package_name].source + # Don't re-trigger the package itself here; this should + # have been done above if the package still continues to + # have an autopkgtest in unstable. 
+ if rdep_src == src: + continue + except KeyError: + continue + + rdep_src_info = sources_info[rdep_src] + if 'autopkgtest' in rdep_src_info.testsuite or self.has_autodep8(rdep_src_info, binaries_info): + if rdep_src not in reported_pkgs: + tests.append((rdep_src, rdep_src_info.version)) + reported_pkgs.add(rdep_src) + + for tdep_src in self.britney.testsuite_triggers.get(binary.package_name, set()): + if tdep_src not in reported_pkgs: + try: + tdep_src_info = sources_info[tdep_src] + except KeyError: + continue + if 'autopkgtest' in tdep_src_info.testsuite or self.has_autodep8(tdep_src_info, binaries_info): + for pkg_id in tdep_src_info.binaries: + if pkg_id.architecture == arch: + tests.append((tdep_src, tdep_src_info.version)) + reported_pkgs.add(tdep_src) + break + + tests.sort(key=lambda s_v: s_v[0]) + return tests + + def read_pending_tests(self): + '''Read pending test requests from previous britney runs + + Initialize self.pending_tests with that data. + ''' + assert self.pending_tests is None, 'already initialized' + if not os.path.exists(self.pending_tests_file): + self.logger.info('No %s, starting with no pending tests', self.pending_tests_file) + self.pending_tests = {} + return + with open(self.pending_tests_file) as f: + self.pending_tests = json.load(f) + self.logger.info('Read pending requested tests from %s: %s', self.pending_tests_file, self.pending_tests) + + def latest_run_for_package(self, src, arch): + '''Return latest run ID for src on arch''' + + # this requires iterating over all triggers and thus is expensive; + # cache the results + try: + return self.latest_run_for_package._cache[src][arch] + except KeyError: + pass + + latest_run_id = '' + for srcmap in self.test_results.values(): + try: + run_id = srcmap[src][arch][2] + except KeyError: + continue + if run_id > latest_run_id: + latest_run_id = run_id + self.latest_run_for_package._cache[arch] = latest_run_id + return latest_run_id + + latest_run_for_package._cache = 
collections.defaultdict(dict) + + def fetch_swift_results(self, swift_url, src, arch): + '''Download new results for source package/arch from swift''' + + # Download results for one particular src/arch at most once in every + # run, as this is expensive + done_entry = src + '/' + arch + if done_entry in self.fetch_swift_results._done: + return + self.fetch_swift_results._done.add(done_entry) + + # prepare query: get all runs with a timestamp later than the latest + # run_id for this package/arch; '@' is at the end of each run id, to + # mark the end of a test run directory path + # example: wily/amd64/libp/libpng/20150630_054517@/result.tar + query = {'delimiter': '@', + 'prefix': '%s/%s/%s/%s/' % (self.options.series, arch, srchash(src), src)} + + # determine latest run_id from results + if not self.options.adt_shared_results_cache: + latest_run_id = self.latest_run_for_package(src, arch) + if latest_run_id: + query['marker'] = query['prefix'] + latest_run_id + + # request new results from swift + url = os.path.join(swift_url, self.swift_container) + url += '?' + urllib.parse.urlencode(query) + f = None + try: + f = urlopen(url, timeout=30) + if f.getcode() == 200: + result_paths = f.read().decode().strip().splitlines() + elif f.getcode() == 204: # No content + result_paths = [] + else: + # we should not ever end up here as we expect a HTTPError in + # other cases; e. g. 3XX is something that tells us to adjust + # our URLS, so fail hard on those + raise NotImplementedError('fetch_swift_results(%s): cannot handle HTTP code %i' % + (url, f.getcode())) + except IOError as e: + # 401 "Unauthorized" is swift's way of saying "container does not exist" + if hasattr(e, 'code') and e.code == 401: + self.logger.info('fetch_swift_results: %s does not exist yet or is inaccessible', url) + return + # Other status codes are usually a transient + # network/infrastructure failure. 
Ignoring this can lead to + # re-requesting tests which we already have results for, so + # fail hard on this and let the next run retry. + self.logger.error('Failure to fetch swift results from %s: %s', url, str(e)) + sys.exit(1) + finally: + if f is not None: + f.close() + + for p in result_paths: + self.fetch_one_result( + os.path.join(swift_url, self.swift_container, p, 'result.tar'), src, arch) + + fetch_swift_results._done = set() + + def fetch_one_result(self, url, src, arch): + '''Download one result URL for source/arch + + Remove matching pending_tests entries. + ''' + f = None + try: + f = urlopen(url, timeout=30) + if f.getcode() == 200: + tar_bytes = io.BytesIO(f.read()) + else: + raise NotImplementedError('fetch_one_result(%s): cannot handle HTTP code %i' % + (url, f.getcode())) + except IOError as e: + self.logger.error('Failure to fetch %s: %s', url, str(e)) + # we tolerate "not found" (something went wrong on uploading the + # result), but other things indicate infrastructure problems + if hasattr(e, 'code') and e.code == 404: + return + sys.exit(1) + finally: + if f is not None: + f.close() + try: + with tarfile.open(None, 'r', tar_bytes) as tar: + exitcode = int(tar.extractfile('exitcode').read().strip()) + srcver = tar.extractfile('testpkg-version').read().decode().strip() + (ressrc, ver) = srcver.split() + testinfo = json.loads(tar.extractfile('testinfo.json').read().decode()) + except (KeyError, ValueError, tarfile.TarError) as e: + self.logger.error('%s is damaged, ignoring: %s', url, str(e)) + # ignore this; this will leave an orphaned request in autopkgtest-pending.json + # and thus require manual retries after fixing the tmpfail, but we + # can't just blindly attribute it to some pending test. 
+ return + + if src != ressrc: + self.logger.error('%s is a result for package %s, but expected package %s', url, ressrc, src) + return + + # parse recorded triggers in test result + for e in testinfo.get('custom_environment', []): + if e.startswith('ADT_TEST_TRIGGERS='): + result_triggers = [i for i in e.split('=', 1)[1].split() if '/' in i] + break + else: + self.logger.error('%s result has no ADT_TEST_TRIGGERS, ignoring', url) + return + + stamp = os.path.basename(os.path.dirname(url)) + # allow some skipped tests, but nothing else + passed = exitcode in [0, 2] + + self.logger.info('Fetched test result for %s/%s/%s %s (triggers: %s): %s', + src, ver, arch, stamp, result_triggers, passed and 'pass' or 'fail') + + # remove matching test requests + for trigger in result_triggers: + self.remove_from_pending(trigger, src, arch) + + # add this result + for trigger in result_triggers: + self.add_trigger_to_results(trigger, src, ver, arch, stamp, passed) + + def remove_from_pending(self, trigger, src, arch): + try: + arch_list = self.pending_tests[trigger][src] + arch_list.remove(arch) + if not arch_list: + del self.pending_tests[trigger][src] + if not self.pending_tests[trigger]: + del self.pending_tests[trigger] + self.logger.info('-> matches pending request %s/%s for trigger %s', src, arch, trigger) + except (KeyError, ValueError): + self.logger.info('-> does not match any pending request for %s/%s', src, arch) + + def add_trigger_to_results(self, trigger, src, ver, arch, stamp, passed): + # If a test runs because of its own package (newer version), ensure + # that we got a new enough version; FIXME: this should be done more + # generically by matching against testpkg-versions + (trigsrc, trigver) = trigger.split('/', 1) + if trigsrc == src and apt_pkg.version_compare(ver, trigver) < 0: + self.logger.error('test trigger %s, but run for older version %s, ignoring', trigger, ver) + return + + result = self.test_results.setdefault(trigger, {}).setdefault( + src, 
{}).setdefault(arch, [False, None, '']) + + # don't clobber existing passed results with failures from re-runs + # except for reference updates + if passed or not result[0] or (self.options.adt_baseline == 'reference' and trigger == REF_TRIG): + result[0] = passed + result[1] = ver + result[2] = stamp + + if self.options.adt_baseline == 'reference' and trigsrc != src: + self.test_results.setdefault(REF_TRIG, {}).setdefault( + src, {}).setdefault(arch, [passed, ver, stamp]) + + def send_test_request(self, src, arch, trigger, huge=False): + '''Send out AMQP request for testing src/arch for trigger + + If huge is true, then the request will be put into the -huge instead of + normal queue. + ''' + if self.options.dry_run: + return + + params = {'triggers': [trigger]} + if self.options.adt_ppas: + params['ppas'] = self.options.adt_ppas + qname = 'debci-ppa-%s-%s' % (self.options.series, arch) + elif huge: + qname = 'debci-huge-%s-%s' % (self.options.series, arch) + else: + qname = 'debci-%s-%s' % (self.options.series, arch) + params = json.dumps(params) + + if self.amqp_channel: + self.amqp_channel.basic_publish(amqp.Message(src + '\n' + params), routing_key=qname) + else: + assert self.amqp_file + with open(self.amqp_file, 'a') as f: + f.write('%s:%s %s\n' % (qname, src, params)) + + def pkg_test_request(self, src, arch, trigger, huge=False): + '''Request one package test for one particular trigger + + trigger is "pkgname/version" of the package that triggers the testing + of src. If huge is true, then the request will be put into the -huge + instead of normal queue. + + This will only be done if that test wasn't already requested in a + previous run (i. e. not already in self.pending_tests) or there already + is a result for it. This ensures to download current results for this + package before requesting any test. 
+ ''' + # Don't re-request if we already have a result + try: + passed = self.test_results[trigger][src][arch][0] + if self.options.adt_swift_url.startswith('file://'): + return + if passed: + self.logger.info('%s/%s triggered by %s already passed', src, arch, trigger) + return + self.logger.info('Checking for new results for failed %s/%s for trigger %s', src, arch, trigger) + raise KeyError # fall through + except KeyError: + # Without swift we don't expect new results + if not self.options.adt_swift_url.startswith('file://'): + self.fetch_swift_results(self.options.adt_swift_url, src, arch) + # do we have one now? + try: + self.test_results[trigger][src][arch] + return + except KeyError: + pass + + # Don't re-request if it's already pending + arch_list = self.pending_tests.setdefault(trigger, {}).setdefault(src, []) + if arch in arch_list: + self.logger.info('Test %s/%s for %s is already pending, not queueing', src, arch, trigger) + else: + self.logger.info('Requesting %s autopkgtest on %s to verify %s', src, arch, trigger) + arch_list.append(arch) + arch_list.sort() + self.send_test_request(src, arch, trigger, huge=huge) + if self.options.adt_baseline == 'reference': + # Check if we already have a reference for this src on this + # arch (or pending). 
+ try: + self.test_results[REF_TRIG][src][arch] + except KeyError: + try: + arch_list = self.pending_tests[REF_TRIG][src] + if arch not in arch_list: + raise KeyError # fall through + except KeyError: + self.logger.info('Requesting %s autopkgtest on %s to set a reference', + src, arch) + self.send_test_request(src, arch, REF_TRIG, huge=huge) + + def passed_in_baseline(self, src, arch): + '''Check if tests for src passed on arch in the baseline + + The baseline is optionally all data or a reference set) + ''' + + # this requires iterating over all cached results and thus is expensive; + # cache the results + try: + return self.passed_in_baseline._cache[src][arch] + except KeyError: + pass + + passed_reference = False + if self.options.adt_baseline == 'reference': + try: + passed_reference = self.test_results[REF_TRIG][src][arch][0] + self.logger.info('Found result for src %s in reference: pass=%s', src, passed_reference) + except KeyError: + self.logger.info('Found NO result for src %s in reference: pass=%s', src, passed_reference) + pass + self.passed_in_baseline._cache[arch] = passed_reference + return passed_reference + + passed_ever = False + for srcmap in self.test_results.values(): + try: + if srcmap[src][arch][0]: + passed_ever = True + break + except KeyError: + pass + + self.passed_in_baseline._cache[arch] = passed_ever + self.logger.info('Result for src %s ever: pass=%s', src, passed_ever) + return passed_ever + + passed_in_baseline._cache = collections.defaultdict(dict) + + def pkg_test_result(self, src, ver, arch, trigger): + '''Get current test status of a particular package + + Return (status, real_version, run_id, log_url) tuple; status is a key in + EXCUSES_LABELS. run_id is None if the test is still running. 
+ ''' + # determine current test result status + ever_passed = self.passed_in_baseline(src, arch) + url = None + run_id = None + try: + r = self.test_results[trigger][src][arch] + ver = r[1] + run_id = r[2] + if r[0]: + result = 'PASS' + else: + # Special-case triggers from linux-meta*: we cannot compare + # results against different kernels, as e. g. a DKMS module + # might work against the default kernel but fail against a + # different flavor; so for those, ignore the "ever + # passed" check; FIXME: check against trigsrc only + if trigger.startswith('linux-meta') or trigger.startswith('linux/'): + ever_passed = False + + if ever_passed: + if self.has_force_badtest(src, ver, arch): + result = 'IGNORE-FAIL' + else: + result = 'REGRESSION' + else: + result = 'ALWAYSFAIL' + + if self.options.adt_swift_url.startswith('file://'): + url = os.path.join(self.options.adt_ci_url, + 'data', + 'autopkgtest', + self.options.series, + arch, + srchash(src), + src, + run_id, + 'log.gz') + else: + url = os.path.join(self.options.adt_swift_url, + self.swift_container, + self.options.series, + arch, + srchash(src), + src, + run_id, + 'log.gz') + except KeyError: + # no result for src/arch; still running? + if arch in self.pending_tests.get(trigger, {}).get(src, []): + if ever_passed and not self.has_force_badtest(src, ver, arch): + result = 'RUNNING' + else: + result = 'RUNNING-ALWAYSFAIL' + url = self.options.adt_ci_url + 'status/pending' + else: + raise RuntimeError('Result for %s/%s/%s (triggered by %s) is neither known nor pending!' 
% + (src, ver, arch, trigger)) + + return (result, ver, run_id, url) + + def has_force_badtest(self, src, ver, arch): + '''Check if src/ver/arch has a force-badtest hint''' + + hints = self.britney.hints.search('force-badtest', package=src) + if hints: + self.logger.info('Checking hints for %s/%s/%s: %s', src, ver, arch, [str(h) for h in hints]) + for hint in hints: + if [mi for mi in hint.packages if mi.architecture in ['source', arch] and + (mi.version == 'all' or apt_pkg.version_compare(ver, mi.version) <= 0)]: + return True + + return False + diff --git a/britney2/policies/policy.py b/britney2/policies/policy.py index c3cd34f..c066784 100644 --- a/britney2/policies/policy.py +++ b/britney2/policies/policy.py @@ -219,6 +219,36 @@ class AgePolicy(BasePolicy): days_old = self._date_now - self._dates[source_name][1] min_days = self._min_days[urgency] + for bounty in excuse.bounty: + self.logger.info('Applying bounty for %s granted by %s: %d days', + source_name, bounty, excuse.bounty[bounty]) + excuse.addhtml('Required age reduced by %d days because of %s' % + (excuse.bounty[bounty], bounty)) + min_days -= excuse.bounty[bounty] + if not hasattr(self.options, 'no_penalties') or \ + urgency not in self.options.no_penalties: + for penalty in excuse.penalty: + self.logger.info('Applying penalty for %s given by %s: %d days', + source_name, penalty, excuse.penalty[penalty]) + excuse.addhtml('Required age increased by %d days because of %s' % + (excuse.penalty[penalty], penalty)) + min_days += excuse.penalty[penalty] + try: + bounty_min_age = int(self.options.bounty_min_age) + except ValueError: + if self.options.bounty_min_age in self._min_days: + bounty_min_age = self._min_days[self.options.bounty_min_age] + else: + raise ValueError('Please fix BOUNTY_MIN_AGE in the britney configuration') + except AttributeError: + # The option wasn't defined in the configuration + bounty_min_age = 0 + # the age in BOUNTY_MIN_AGE can be higher than the one associated with + # the real 
urgency, so don't forget to take it into account + bounty_min_age = min(bounty_min_age, self._min_days[urgency]) + if min_days < bounty_min_age: + min_days = bounty_min_age + excuse.addhtml('Required age is not allowed to drop below %d days' % min_days) age_info['age-requirement'] = min_days age_info['current-age'] = days_old diff --git a/britney2/utils.py b/britney2/utils.py index 6e976db..105f0de 100644 --- a/britney2/utils.py +++ b/britney2/utils.py @@ -738,6 +738,8 @@ def read_sources_file(filename, sources=None, intern=sys.intern): maint, False, build_deps_arch, + get_field('Testsuite', '').split(), + get_field('Testsuite-Triggers', '').replace(',', '').split(), ) return sources diff --git a/ci/run-everything-and-upload-to-codecov.io.sh b/ci/run-everything-and-upload-to-codecov.io.sh index 13de507..e5e6535 100755 --- a/ci/run-everything-and-upload-to-codecov.io.sh +++ b/ci/run-everything-and-upload-to-codecov.io.sh @@ -9,16 +9,15 @@ echo echo britney2-tests/bin/runtests ./ci/britney-coverage.sh britney2-tests/t test-out || err=$? echo -echo -if [ -n "$CI" ] ; then - echo skipping live-2011-12-13 to prevent time out on Travis of the whole test suite -else - britney2-tests/bin/runtests ./britney.py britney2-tests/live-data test-out-live-data-1 live-2011-12-13 || err=$? -fi +britney2-tests/bin/runtests ./britney.py britney2-tests/live-data test-out-live-data-1 live-2011-12-13 || err=$? echo britney2-tests/bin/runtests ./britney.py britney2-tests/live-data test-out-live-data-2 live-2011-12-20 || err=$? echo -britney2-tests/bin/runtests ./britney.py britney2-tests/live-data test-out-live-data-3 live-2012-01-04 || err=$? +if [ -n "$CI" ] ; then + echo skipping live-2012-01-04 to prevent time out on Travis of the whole test suite +else + britney2-tests/bin/runtests ./britney.py britney2-tests/live-data test-out-live-data-3 live-2012-01-04 || err=$? 
+fi echo britney2-tests/bin/runtests ./britney.py britney2-tests/live-data test-out-live-data-4 live-2012-05-09 || err=$? echo @@ -32,7 +31,7 @@ if [ $err = 0 ] ; then echo python3-coverage xml -i || true echo - bash <(curl -s https://codecov.io/bash) || true + mv .coverage shared fi exit $err diff --git a/doc/solutions-to-common-policy-issues.rst b/doc/solutions-to-common-policy-issues.rst index b82651a..29b90a1 100644 --- a/doc/solutions-to-common-policy-issues.rst +++ b/doc/solutions-to-common-policy-issues.rst @@ -116,3 +116,66 @@ piuparts, the package needs to be fixed first to install and purge cleanly in the non-interactive debconf state. An URL to the relevant piuparts results is provided in the excuses. +Britney complains about "autopkgtest" +------------------------------------- + +Maintainers can add autopkgtest test cases to their packages. Britney can be +configured to request a test runner instance (in the case of Debian, this is +debci) to run relevant tests. The idea is that a package that is a candidate +for migration is updated in the target suite to its candidate version and that +the autopkgtest case(s) of the package (if it has any) *and* those of all +reverse dependencies are run. Regression in the results with respect to the +current situation in the target suite can influence migration in the following +ways, depending on britney's configuration: + + * migration is blocked + + * regression adds to the required time a package needs to be in the source + suite before migration is considered (via the age policy). This time can + then be used to investigate the situation and potentially block migration + via other policies (e.g. the bug policy). + +Regression in the autopkgtest of the candidate package just needs to be fixed +in the package itself. However, due to the addition of test cases from reverse +dependencies, regression in this policy may come from a test case that the +package does not control. 
If that is the case, the maintainers of the package +and the maintainers of the regressing test case typically need to discuss and +solve the issue together. The maintainers of the package have the knowledge of +what changed, while the maintainers of the reverse dependency with the failing +test case know what and how the test is actually testing. After all, a +regression in a reverse dependency can come due to one of the following reasons +(of course not complete): + + * new bug in the candidate package (fix the package) + + * bug in the test case that only gets triggered due to the update (fix the + reverse dependency, but see below) + + * out-of-date reference data in the test case that captures a former bug in + the candidate package (fix the reverse dependency, but see below) + + * deprecation of functionality that is used in the reverse dependency and/or + its test case (discussion needed) + +Unfortunately sometimes a regression is only intermittent. Ideally this should +be fixed, but it may be OK to just have the autopkgtest retried (how this is to +be achieved depends on the setup that is being used). + +There are cases where it is required to have multiple packages migrate together +to have the test cases pass, e.g. when there was a bug in a regressing test +case of a reverse dependency and that got fixed. In that case the test cases +need to be triggered with both packages from the source suite in the target +suite (again, how this is done depends on the setup). + +If britney is configured to add time to the age policy in case of regression, a +test case that hasn't been run (but ran successfully in the past) will also +cause the penalty to be added. This is harmless, because once the results come +in, the penalty will no longer be effective. Similarly, a missing build will +also cause the (harmless) penalty. + +A failing test that has never succeeded in britney's memory will be treated as +if the test case doesn't exist. 
+ +On top of the penalties for regressions, britney can be configured to reward +bounties for packages that have a successful test case. + diff --git a/tests/__init__.py b/tests/__init__.py index 6a9bbc7..d5bc809 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,3 +1,14 @@ +# This file is merged from Debian's tests and Ubuntu's autopktest implementation +# For Ubuntu's part Canonical is the original copyright holder. +# +# (C) 2015 Canonical Ltd. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +## Debian's part from britney2 import BinaryPackageId from britney2.installability.builder import InstallabilityTesterBuilder @@ -5,6 +16,18 @@ TEST_HINTER = 'test-hinter' HINTS_ALL = ('ALL') DEFAULT_URGENCY = 'medium' +## autopkgtest part +import os +import shutil +import subprocess +import tempfile +import unittest + +PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + +architectures = ['amd64', 'arm64', 'armhf', 'i386', 'powerpc', 'ppc64el'] +## + def new_pkg_universe_builder(): return UniverseBuilder() @@ -123,3 +146,299 @@ class UniverseBuilder(object): if pkg_id not in self._packages: raise ValueError("Package %s has not been added yet" % pkg_id) return self._packages[pkg_id] + +# autopkgtest classes +class TestData: + + def __init__(self): + '''Construct local test package indexes. + + The archive is initially empty. You can create new packages with + create_deb(). self.path contains the path of the archive, and + self.apt_source provides an apt source "deb" line. + + It is kept in a temporary directory which gets removed when the Archive + object gets deleted. 
+ ''' + self.path = tempfile.mkdtemp(prefix='testarchive.') + self.apt_source = 'deb file://%s /' % self.path + self.suite_testing = 'testing' + self.suite_unstable = 'unstable' + self.compute_migrations = '' + self.dirs = {False: os.path.join(self.path, 'data', self.suite_testing), + True: os.path.join(self.path, 'data', self.suite_unstable)} + os.makedirs(self.dirs[False]) + os.mkdir(self.dirs[True]) + self.added_sources = {False: set(), True: set()} + self.added_binaries = {False: set(), True: set()} + + # pre-create all files for all architectures + for arch in architectures: + for dir in self.dirs.values(): + with open(os.path.join(dir, 'Packages_' + arch), 'w'): + pass + for dir in self.dirs.values(): + for fname in ['Dates', 'Blocks', 'Urgency', 'BugsV']: + with open(os.path.join(dir, fname), 'w'): + pass + os.mkdir(os.path.join(self.path, 'data', 'hints')) + shutil.copytree(os.path.join(PROJECT_DIR, 'tests', 'policy-test-data', 'piuparts', 'basic'), os.path.join(self.dirs[False], 'state')) + + os.mkdir(os.path.join(self.path, 'output')) + + # create temporary home dir for proposed-migration autopktest status + self.home = os.path.join(self.path, 'home') + os.environ['HOME'] = self.home + os.makedirs(os.path.join(self.home, 'proposed-migration', + 'autopkgtest', 'work')) + + def __del__(self): + shutil.rmtree(self.path) + + def add(self, name, unstable, fields={}, add_src=True, testsuite=None, srcfields=None): + '''Add a binary package to the index file. + + You need to specify at least the package name and in which list to put + it (unstable==True for unstable/proposed, or False for + testing/release). fields specifies all additional entries, e. g. + {'Depends': 'foo, bar', 'Conflicts: baz'}. There are defaults for most + fields. + + Unless add_src is set to False, this will also automatically create a + source record, based on fields['Source'] and name. In that case, the + "Testsuite:" field is set to the testsuite argument. 
+ ''' + assert (name not in self.added_binaries[unstable]) + self.added_binaries[unstable].add(name) + + fields.setdefault('Architecture', 'any') + fields.setdefault('Version', '1') + fields.setdefault('Priority', 'optional') + fields.setdefault('Section', 'devel') + fields.setdefault('Description', 'test pkg') + if fields['Architecture'] == 'any': + fields_local_copy = fields.copy() + for a in architectures: + fields_local_copy['Architecture'] = a + self._append(name, unstable, 'Packages_' + a, fields_local_copy) + elif fields['Architecture'] == 'all': + for a in architectures: + self._append(name, unstable, 'Packages_' + a, fields) + else: + self._append(name, unstable, 'Packages_' + fields['Architecture'], + fields) + + if add_src: + src = fields.get('Source', name) + if src not in self.added_sources[unstable]: + if srcfields is None: + srcfields = {} + srcfields['Version'] = fields['Version'] + srcfields['Section'] = fields['Section'] + if testsuite: + srcfields['Testsuite'] = testsuite + self.add_src(src, unstable, srcfields) + + def add_src(self, name, unstable, fields={}): + '''Add a source package to the index file. + + You need to specify at least the package name and in which list to put + it (unstable==True for unstable/proposed, or False for + testing/release). fields specifies all additional entries, which can be + Version (default: 1), Section (default: devel), Testsuite (default: + none), and Extra-Source-Only. 
+ ''' + assert (name not in self.added_sources[unstable]) + self.added_sources[unstable].add(name) + + fields.setdefault('Version', '1') + fields.setdefault('Section', 'devel') + self._append(name, unstable, 'Sources', fields) + + def _append(self, name, unstable, file_name, fields): + with open(os.path.join(self.dirs[unstable], file_name), 'a') as f: + f.write('''Package: %s +Maintainer: Joe +''' % name) + + for k, v in fields.items(): + f.write('%s: %s\n' % (k, v)) + f.write('\n') + + def remove_all(self, unstable): + '''Remove all added packages''' + + self.added_binaries[unstable] = set() + self.added_sources[unstable] = set() + for a in architectures: + open(os.path.join(self.dirs[unstable], 'Packages_' + a), 'w').close() + open(os.path.join(self.dirs[unstable], 'Sources'), 'w').close() + + def add_default_packages(self, libc6=True, green=True, lightgreen=True, darkgreen=True, blue=True, black=True, grey=True): + '''To avoid duplication, add packages we need all the time''' + + # libc6 (always) + self.add('libc6', False) + if (libc6 is True): + self.add('libc6', True) + + # src:green + self.add('libgreen1', False, {'Source': 'green', + 'Depends': 'libc6 (>= 0.9)'}, + testsuite='autopkgtest') + if (green is True): + self.add('libgreen1', True, {'Source': 'green', + 'Depends': 'libc6 (>= 0.9)'}, + testsuite='autopkgtest') + self.add('green', False, {'Depends': 'libc6 (>= 0.9), libgreen1', + 'Conflicts': 'blue'}, + testsuite='autopkgtest') + if (green is True): + self.add('green', True, {'Depends': 'libc6 (>= 0.9), libgreen1', + 'Conflicts': 'blue'}, + testsuite='autopkgtest') + + # lightgreen + self.add('lightgreen', False, {'Depends': 'libgreen1'}, + testsuite='autopkgtest') + if (lightgreen is True): + self.add('lightgreen', True, {'Depends': 'libgreen1'}, + testsuite='autopkgtest') + + ## autodep8 or similar test + # darkgreen + self.add('darkgreen', False, {'Depends': 'libgreen1'}, + testsuite='autopkgtest-pkg-foo') + if (darkgreen is True): + 
self.add('darkgreen', True, {'Depends': 'libgreen1'}, + testsuite='autopkgtest-pkg-foo') + + # blue + self.add('blue', False, {'Depends': 'libc6 (>= 0.9)', + 'Conflicts': 'green'}, + testsuite='specialtest') + if blue is True: + self.add('blue', True, {'Depends': 'libc6 (>= 0.9)', + 'Conflicts': 'green'}, + testsuite='specialtest') + + # black + self.add('black', False, {}, + testsuite='autopkgtest') + if black is True: + self.add('black', True, {}, + testsuite='autopkgtest') + + # grey + self.add('grey', False, {}, + testsuite='autopkgtest') + if grey is True: + self.add('grey', True, {}, + testsuite='autopkgtest') + + +class TestBase(unittest.TestCase): + + def setUp(self): + super(TestBase, self).setUp() + self.maxDiff = None + self.data = TestData() + self.britney = os.path.join(PROJECT_DIR, 'britney.py') + # create temporary config so that tests can hack it + self.britney_conf = os.path.join(self.data.path, 'britney.conf') + with open(self.britney_conf, 'w') as f: + f.write(''' +TESTING = data/testing +UNSTABLE = data/unstable + +NONINST_STATUS = data/testing/non-installable-status +EXCUSES_OUTPUT = output/excuses.html +EXCUSES_YAML_OUTPUT = output/excuses.yaml +UPGRADE_OUTPUT = output/output.txt +HEIDI_OUTPUT = output/HeidiResult + +STATIC_INPUT_DIR = data/testing/input +STATE_DIR = data/testing/state + +ARCHITECTURES = amd64 arm64 armhf i386 powerpc ppc64el +NOBREAKALL_ARCHES = amd64 arm64 armhf i386 powerpc ppc64el +OUTOFSYNC_ARCHES = +BREAK_ARCHES = +NEW_ARCHES = + +MINDAYS_LOW = 0 +MINDAYS_MEDIUM = 0 +MINDAYS_HIGH = 0 +MINDAYS_CRITICAL = 0 +MINDAYS_EMERGENCY = 0 +DEFAULT_URGENCY = medium +NO_PENALTIES = high critical emergency +BOUNTY_MIN_AGE = 8 + +HINTSDIR = data/hints + +HINTS_AUTOPKGTEST = ALL +HINTS_FREEZE = block block-all block-udeb +HINTS_FREEZE-EXCEPTION = unblock unblock-udeb +HINTS_SATBRITNEY = easy +HINTS_AUTO-REMOVALS = remove + +SMOOTH_UPDATES = badgers + +IGNORE_CRUFT = 0 + +REMOVE_OBSOLETE = no + +ADT_ENABLE = yes +ADT_ARCHES = amd64 i386 
+ADT_AMQP = file://output/debci.input +ADT_PPAS = +ADT_SHARED_RESULTS_CACHE = + +ADT_SWIFT_URL = http://localhost:18085 +ADT_CI_URL = https://autopkgtest.ubuntu.com/ +ADT_HUGE = 20 + +ADT_SUCCESS_BOUNTY = +ADT_REGRESSION_PENALTY = +ADT_BASELINE = +''') + assert os.path.exists(self.britney) + + + def tearDown(self): + del self.data + + def run_britney(self, args=[]): + '''Run britney. + + Assert that it succeeds and does not produce anything on stderr. + Return (excuses.yaml, excuses.html, britney_out). + ''' + britney = subprocess.Popen([self.britney, '-v', '-c', self.britney_conf, + '%s' % self.data.compute_migrations], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + cwd=self.data.path, + universal_newlines=True) + (out, err) = britney.communicate() + self.assertEqual(britney.returncode, 0, out + err) + self.assertEqual(err, '') + + with open(os.path.join(self.data.path, 'output', + 'excuses.yaml'), encoding='utf-8') as f: + yaml = f.read() + with open(os.path.join(self.data.path, 'output', + 'excuses.html'), encoding='utf-8') as f: + html = f.read() + + return (yaml, html, out) + + def create_hint(self, username, content): + '''Create a hint file for the given username and content''' + + hints_path = os.path.join( + self.data.path, 'data', 'hints', username) + with open(hints_path, 'a') as fd: + fd.write(content) + fd.write('\n') diff --git a/tests/mock_swift.py b/tests/mock_swift.py new file mode 100644 index 0000000..b33c65a --- /dev/null +++ b/tests/mock_swift.py @@ -0,0 +1,170 @@ +# Mock a Swift server with autopkgtest results +# Author: Martin Pitt + +import os +import tarfile +import io +import sys +import socket +import time +import tempfile +import json + +try: + from http.server import HTTPServer, BaseHTTPRequestHandler + from urllib.parse import urlparse, parse_qs +except ImportError: + # Python 2 + from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler + from urlparse import urlparse, parse_qs + + +class 
SwiftHTTPRequestHandler(BaseHTTPRequestHandler): + '''Mock swift container with autopkgtest results + + This accepts retrieving a particular result.tar (e. g. + /container/path/result.tar) or listing the container contents + (/container/?prefix=foo&delimiter=@&marker=foo/bar). + ''' + # map container -> result.tar path -> (exitcode, testpkg-version[, testinfo]) + results = {} + + def do_GET(self): + p = urlparse(self.path) + path_comp = p.path.split('/') + container = path_comp[1] + path = '/'.join(path_comp[2:]) + if path: + self.serve_file(container, path) + else: + self.list_container(container, parse_qs(p.query)) + + def serve_file(self, container, path): + if os.path.basename(path) != 'result.tar': + self.send_error(404, 'File not found (only result.tar supported)') + return + try: + fields = self.results[container][os.path.dirname(path)] + try: + (exitcode, pkgver, testinfo) = fields + except ValueError: + (exitcode, pkgver) = fields + testinfo = None + except KeyError: + self.send_error(404, 'File not found') + return + + self.send_response(200) + self.send_header('Content-type', 'application/octet-stream') + self.end_headers() + + tar = io.BytesIO() + with tarfile.open('result.tar', 'w', tar) as results: + # add exitcode + contents = ('%i' % exitcode).encode() + ti = tarfile.TarInfo('exitcode') + ti.size = len(contents) + results.addfile(ti, io.BytesIO(contents)) + # add testpkg-version + if pkgver is not None: + contents = pkgver.encode() + ti = tarfile.TarInfo('testpkg-version') + ti.size = len(contents) + results.addfile(ti, io.BytesIO(contents)) + # add testinfo.json + if testinfo: + contents = json.dumps(testinfo).encode() + ti = tarfile.TarInfo('testinfo.json') + ti.size = len(contents) + results.addfile(ti, io.BytesIO(contents)) + + self.wfile.write(tar.getvalue()) + + def list_container(self, container, query): + try: + objs = set(['%s/result.tar' % r for r in self.results[container]]) + except KeyError: + self.send_error(401, 'Container does not 
exist') + return + if 'prefix' in query: + p = query['prefix'][-1] + objs = set([o for o in objs if o.startswith(p)]) + if 'delimiter' in query: + d = query['delimiter'][-1] + # if find() returns a value, we want to include the delimiter, thus + # bump its result; for "not found" return None + find_adapter = lambda i: (i >= 0) and (i + 1) or None + objs = set([o[:find_adapter(o.find(d))] for o in objs]) + if 'marker' in query: + m = query['marker'][-1] + objs = set([o for o in objs if o > m]) + + self.send_response(objs and 200 or 204) # 204: "No Content" + self.send_header('Content-type', 'text/plain') + self.end_headers() + self.wfile.write(('\n'.join(sorted(objs)) + '\n').encode('UTF-8')) + + +class AutoPkgTestSwiftServer: + def __init__(self, port=8080): + self.port = port + self.server_pid = None + self.log = None + + def __del__(self): + if self.server_pid: + self.stop() + + @classmethod + def set_results(klass, results): + '''Set served results. + + results is a map: container -> result.tar path -> + (exitcode, testpkg-version, testinfo) + ''' + SwiftHTTPRequestHandler.results = results + + def start(self): + assert self.server_pid is None, 'already started' + if self.log: + self.log.close() + self.log = tempfile.TemporaryFile() + p = os.fork() + if p: + # parent: wait until server starts + self.server_pid = p + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + while True: + if s.connect_ex(('127.0.0.1', self.port)) == 0: + break + time.sleep(0.1) + s.close() + return + + # child; quiesce logging on stderr + os.dup2(self.log.fileno(), sys.stderr.fileno()) + srv = HTTPServer(('', self.port), SwiftHTTPRequestHandler) + srv.serve_forever() + sys.exit(0) + + def stop(self): + assert self.server_pid, 'not running' + os.kill(self.server_pid, 15) + os.waitpid(self.server_pid, 0) + self.server_pid = None + self.log.close() + +if __name__ == '__main__': + srv = AutoPkgTestSwiftServer() + srv.set_results({'autopkgtest-testing': { + 
'testing/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1'), + 'testing/i386/g/green/20150101_100000@': (0, 'green 1', {'custom_environment': ['ADT_TEST_TRIGGERS=green']}), + 'testing/i386/l/lightgreen/20150101_100000@': (0, 'lightgreen 1'), + 'testing/i386/l/lightgreen/20150101_100101@': (4, 'lightgreen 2'), + 'testing/i386/l/lightgreen/20150101_100102@': (0, 'lightgreen 3'), + }}) + srv.start() + print('Running on http://localhost:8080/autopkgtest-testing') + print('Press Enter to quit.') + sys.stdin.readline() + srv.stop() diff --git a/tests/test_autopkgtest.py b/tests/test_autopkgtest.py new file mode 100644 index 0000000..ecee28e --- /dev/null +++ b/tests/test_autopkgtest.py @@ -0,0 +1,2619 @@ +#!/usr/bin/python3 +# (C) 2014 - 2015 Canonical Ltd. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
+ +import os +import sys +import fileinput +import unittest +import json +import pprint +import urllib.parse + +import apt_pkg +import yaml + +PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +sys.path.insert(0, PROJECT_DIR) + +from tests import TestBase, mock_swift + +apt_pkg.init() + + +# shortcut for test triggers +def tr(s): + return {'custom_environment': ['ADT_TEST_TRIGGERS=%s' % s]} + +ON_ALL_ARCHES = {'on-architectures': ['amd64', 'arm64', 'armhf', 'i386', 'powerpc', 'ppc64el'], + 'on-unimportant-architectures': []} + + +class T(TestBase): + '''AMQP/cloud interface''' + + ################################################################ + # Common test code + ################################################################ + + def setUp(self): + super().setUp() + self.fake_amqp = os.path.join(self.data.path, 'amqp') + + # Set fake AMQP and Swift server + for line in fileinput.input(self.britney_conf, inplace=True): + if 'ADT_AMQP' in line: + print('ADT_AMQP = file://%s' % self.fake_amqp) + else: + sys.stdout.write(line) + + # Set up sourceppa cache for testing + self.sourceppa_cache = { + 'gcc-5': {'2': ''}, + 'gcc-snapshot': {'2': ''}, + 'green': {'2': '', '1.1': '', '3': ''}, + 'lightgreen': {'2': '', '1.1~beta': '', '3': ''}, + 'linux-meta-64only': {'1': ''}, + 'linux-meta-lts-grumpy': {'1': ''}, + 'linux-meta': {'0.2': '', '1': '', '2': ''}, + 'linux': {'2': ''}, + 'newgreen': {'2': ''}, + } + + self.email_cache = {} + for pkg, vals in self.sourceppa_cache.items(): + for version, empty in vals.items(): + self.email_cache.setdefault(pkg, {}) + self.email_cache[pkg][version] = True + + # create mock Swift server (but don't start it yet, as tests first need + # to poke in results) + self.swift = mock_swift.AutoPkgTestSwiftServer(port=18085) + self.swift.set_results({}) + + def tearDown(self): + del self.swift + + def run_it(self, unstable_add, expect_status, expect_excuses={}): + '''Run britney with some unstable packages and verify 
excuses. + + unstable_add is a list of (binpkgname, field_dict, testsuite_value) + passed to TestData.add for "unstable". + + expect_status is a dict sourcename → (is_candidate, testsrc → arch → status) + that is checked against the excuses YAML. + + expect_excuses is a dict sourcename → [(key, value), ...] + matches that are checked against the excuses YAML. + + Return (output, excuses_dict, excuses_html). + ''' + for (pkg, fields, testsuite) in unstable_add: + self.data.add(pkg, True, fields, True, testsuite) + self.sourceppa_cache.setdefault(pkg, {}) + if fields['Version'] not in self.sourceppa_cache[pkg]: + self.sourceppa_cache[pkg][fields['Version']] = '' + self.email_cache.setdefault(pkg, {}) + self.email_cache[pkg][fields['Version']] = True + + # Set up sourceppa cache for testing + sourceppa_path = os.path.join(self.data.dirs[True], 'SourcePPA') + with open(sourceppa_path, 'w', encoding='utf-8') as sourceppa: + sourceppa.write(json.dumps(self.sourceppa_cache)) + + email_path = os.path.join(self.data.dirs[True], 'EmailCache') + with open(email_path, 'w', encoding='utf-8') as email: + email.write(json.dumps(self.email_cache)) + + self.swift.start() + (excuses_yaml, excuses_html, out) = self.run_britney() + self.swift.stop() + + # convert excuses to source indexed dict + excuses_dict = {} + for s in yaml.load(excuses_yaml)['sources']: + excuses_dict[s['source']] = s + + if 'SHOW_EXCUSES' in os.environ: + print('------- excuses -----') + pprint.pprint(excuses_dict, width=200) + if 'SHOW_HTML' in os.environ: + print('------- excuses.html -----\n%s\n' % excuses_html) + if 'SHOW_OUTPUT' in os.environ: + print('------- output -----\n%s\n' % out) + + for src, (is_candidate, testmap) in expect_status.items(): + self.assertEqual(excuses_dict[src]['is-candidate'], is_candidate, + src + ': ' + pprint.pformat(excuses_dict[src])) + for testsrc, archmap in testmap.items(): + for arch, status in archmap.items(): + 
self.assertEqual(excuses_dict[src]['policy_info']['autopkgtest'][testsrc][arch][0], + status, + excuses_dict[src]['policy_info']['autopkgtest'][testsrc]) + + for src, matches in expect_excuses.items(): + for k, v in matches: + if isinstance(excuses_dict[src][k], list): + self.assertIn(v, excuses_dict[src][k]) + else: + self.assertEqual(excuses_dict[src][k], v) + + self.amqp_requests = set() + try: + with open(self.fake_amqp) as f: + for line in f: + self.amqp_requests.add(line.strip()) + os.unlink(self.fake_amqp) + except IOError: + pass + + try: + with open(os.path.join(self.data.path, 'data/testing/state/autopkgtest-pending.json')) as f: + self.pending_requests = json.load(f) + except IOError: + self.pending_requests = None + + self.assertNotIn('FIXME', out) + + return (out, excuses_dict, excuses_html) + + ################################################################ + # Tests for generic packages + ################################################################ + + def test_no_request_for_uninstallable(self): + '''Does not request a test for an uninstallable package''' + + self.data.add_default_packages(lightgreen=False) + + exc = self.run_it( + # uninstallable unstable version + [('lightgreen', {'Version': '1.1~beta', 'Depends': 'libc6 (>= 0.9), libgreen1 (>= 2)'}, 'autopkgtest')], + {'lightgreen': (False, {})}, + {'lightgreen': [('old-version', '1'), ('new-version', '1.1~beta'), + ('reason', 'depends'), + ('excuses', 'lightgreen/amd64 unsatisfiable Depends: libgreen1 (>= 2)') + ] + })[1] + # autopkgtest should not be triggered for uninstallable pkg + self.assertEqual(exc['lightgreen']['policy_info']['autopkgtest'], {'verdict': 'REJECTED_TEMPORARILY'}) + + self.assertEqual(self.pending_requests, {}) + self.assertEqual(self.amqp_requests, set()) + + with open(os.path.join(self.data.path, 'output','output.txt')) as f: + upgrade_out = f.read() + self.assertNotIn('accepted:', upgrade_out) + self.assertIn('SUCCESS (0/0)', upgrade_out) + + def 
test_no_wait_for_always_failed_test(self): + '''We do not need to wait for results for tests which have always failed''' + + self.data.add_default_packages(darkgreen=False) + + # The package has failed before, and with a trigger too on amd64 + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/d/darkgreen/20150101_100000@': (4, 'green 1'), + 'testing/amd64/d/darkgreen/20150101_100000@': (4, 'green 1', tr('failedbefore/1')), + }}) + + exc = self.run_it( + [('darkgreen', {'Version': '2'}, 'autopkgtest')], + {'darkgreen': (True, {'darkgreen': {'i386': 'RUNNING-ALWAYSFAIL', 'amd64': 'RUNNING-ALWAYSFAIL'}})}, + )[1] + + # the test should still be triggered though + self.assertEqual(exc['darkgreen']['policy_info']['autopkgtest'], + {'darkgreen': { + 'amd64': ['RUNNING-ALWAYSFAIL', + 'https://autopkgtest.ubuntu.com/status/pending', + 'https://autopkgtest.ubuntu.com/packages/d/darkgreen/testing/amd64', + None, + None], + 'i386': ['RUNNING-ALWAYSFAIL', + 'https://autopkgtest.ubuntu.com/status/pending', + 'https://autopkgtest.ubuntu.com/packages/d/darkgreen/testing/i386', + None, + None]}, + 'verdict': 'PASS'}) + + self.assertEqual(self.pending_requests, + {'darkgreen/2': {'darkgreen': ['amd64', 'i386']}}) + + self.assertEqual( + self.amqp_requests, + set(['debci-testing-amd64:darkgreen {"triggers": ["darkgreen/2"]}', + 'debci-testing-i386:darkgreen {"triggers": ["darkgreen/2"]}'])) + + with open(os.path.join(self.data.path, 'output', 'output.txt')) as f: + upgrade_out = f.read() + self.assertIn('accepted: darkgreen', upgrade_out) + self.assertIn('SUCCESS (1/0)', upgrade_out) + + def test_dropped_test_not_run(self): + '''New version of a package drops its autopkgtest''' + + self.data.add_default_packages(green=False) + + # green has passed on amd64 before + # lightgreen has passed on i386, therefore we should block on it returning + self.swift.set_results({'autopkgtest-testing': { + 'testing/amd64/g/green/20150101_100000@': (0, 'green 4', tr('green/1')), + 
'testing/i386/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/1')), + }}) + + self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'green'}, None)], + {'green': (False, {'lightgreen': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING'}}) + }, + {'green': [('old-version', '1'), ('new-version', '2'), + ('reason', 'autopkgtest')]}) + + # we expect the package's reverse dependencies' tests to get triggered, + # but *not* the package itself since it has no autopkgtest any more + self.assertEqual( + self.amqp_requests, + set(['debci-testing-i386:lightgreen {"triggers": ["green/2"]}', + 'debci-testing-amd64:lightgreen {"triggers": ["green/2"]}', + 'debci-testing-i386:darkgreen {"triggers": ["green/2"]}', + 'debci-testing-amd64:darkgreen {"triggers": ["green/2"]}'])) + + # ... and that they get recorded as pending + expected_pending = {'green/2': {'darkgreen': ['amd64', 'i386'], + 'lightgreen': ['amd64', 'i386']}} + self.assertEqual(self.pending_requests, expected_pending) + + def test_multi_rdepends_with_tests_all_running(self): + '''Multiple reverse dependencies with tests (all running)''' + + self.data.add_default_packages(green=False) + + # green has passed before on i386 only, therefore ALWAYSFAIL on amd64 + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/g/green/20150101_100000@': (0, 'green 1', tr('passedbefore/1')), + }}) + + self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], + {'green': (False, {'green': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING'}, + 'lightgreen': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, + 'darkgreen': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, + }) + }, + {'green': [('old-version', '1'), ('new-version', '2'), + ('reason', 'autopkgtest')]}) + + # we expect the package's and its reverse dependencies' tests to get + # triggered + self.assertEqual( + self.amqp_requests, + set(['debci-testing-i386:green {"triggers": 
["green/2"]}', + 'debci-testing-amd64:green {"triggers": ["green/2"]}', + 'debci-testing-i386:lightgreen {"triggers": ["green/2"]}', + 'debci-testing-amd64:lightgreen {"triggers": ["green/2"]}', + 'debci-testing-i386:darkgreen {"triggers": ["green/2"]}', + 'debci-testing-amd64:darkgreen {"triggers": ["green/2"]}'])) + + # ... and that they get recorded as pending + expected_pending = {'green/2': {'darkgreen': ['amd64', 'i386'], + 'green': ['amd64', 'i386'], + 'lightgreen': ['amd64', 'i386']}} + self.assertEqual(self.pending_requests, expected_pending) + + # if we run britney again this should *not* trigger any new tests + self.run_it([], {'green': (False, {})}) + self.assertEqual(self.amqp_requests, set()) + # but the set of pending tests doesn't change + self.assertEqual(self.pending_requests, expected_pending) + + def test_multi_rdepends_with_tests_all_pass(self): + '''Multiple reverse dependencies with tests (all pass)''' + + self.data.add_default_packages(green=False) + + # green has passed before on i386 only, therefore ALWAYSFAIL on amd64 + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/g/green/20150101_100000@': (0, 'green 1', tr('passedbefore/1')), + }}) + + # first run requests tests and marks them as pending + exc = self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest'), + # a reverse dep that does not exist in testing should not be triggered + ('brittle', {'Depends': 'libgreen1'}, 'autopkgtest')], + {'green': (False, {'green': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING'}, + 'lightgreen': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, + 'darkgreen': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, + }) + }, + {'green': [('old-version', '1'), ('new-version', '2')]})[1] + self.assertNotIn('brittle', exc['green']['policy_info']['autopkgtest']) + + # second run collects the results + self.swift.set_results({'autopkgtest-testing': { + 
'testing/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), + 'testing/amd64/d/darkgreen/20150101_100001@': (0, 'darkgreen 1', tr('green/2')), + 'testing/i386/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/2')), + 'testing/amd64/l/lightgreen/20150101_100101@': (0, 'lightgreen 1', tr('green/2')), + # version in testing fails + 'testing/i386/g/green/20150101_020000@': (4, 'green 1', tr('green/1')), + 'testing/amd64/g/green/20150101_020000@': (4, 'green 1', tr('green/1')), + # version in unstable succeeds + 'testing/i386/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), + 'testing/amd64/g/green/20150101_100201@': (0, 'green 2', tr('green/2')), + # new "brittle" succeeds + 'testing/i386/b/brittle/20150101_100200@': (0, 'brittle 1', tr('brittle/1')), + 'testing/amd64/b/brittle/20150101_100201@': (0, 'brittle 1', tr('brittle/1')), + }}) + + out = self.run_it( + [], + {'green': (True, {'green/2': {'amd64': 'PASS', 'i386': 'PASS'}, + 'lightgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + 'darkgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + }), + 'brittle': (True, {'brittle/1': {'amd64': 'PASS', 'i386': 'PASS'}}) + }, + {'green': [('old-version', '1'), ('new-version', '2')]} + )[0] + + # all tests ran, there should be no more pending ones + self.assertEqual(self.pending_requests, {}) + + # not expecting any failures to retrieve from swift + self.assertNotIn('Failure', out, out) + + # caches the results and triggers + with open(os.path.join(self.data.path, 'data/testing/state/autopkgtest-results.cache')) as f: + res = json.load(f) + self.assertEqual(res['green/1']['green']['amd64'], + [False, '1', '20150101_020000@']) + self.assertEqual(set(res['green/2']), {'darkgreen', 'green', 'lightgreen'}) + self.assertEqual(res['green/2']['lightgreen']['i386'], + [True, '1', '20150101_100100@']) + + # third run should not trigger any new tests, should all be in the + # cache + self.swift.set_results({}) + out = self.run_it( + [], + {'green': 
(True, {'green/2': {'amd64': 'PASS', 'i386': 'PASS'}, + 'lightgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + 'darkgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + }) + })[0] + self.assertEqual(self.amqp_requests, set()) + self.assertEqual(self.pending_requests, {}) + self.assertNotIn('Failure', out, out) + + def test_multi_rdepends_with_tests_mixed(self): + '''Multiple reverse dependencies with tests (mixed results)''' + + self.data.add_default_packages(green=False) + + # green has passed before on i386 only, therefore ALWAYSFAIL on amd64 + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/g/green/20150101_100000@': (0, 'green 1', tr('passedbefore/1')), + }}) + + # first run requests tests and marks them as pending + self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], + {'green': (False, {'green': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING'}, + 'lightgreen': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, + 'darkgreen': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, + }) + }, + {'green': [('old-version', '1'), ('new-version', '2')]}) + + # second run collects the results + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), + 'testing/amd64/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/1')), + 'testing/amd64/l/lightgreen/20150101_100101@': (4, 'lightgreen 1', tr('green/2')), + 'testing/i386/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), + 'testing/amd64/g/green/20150101_100201@': (4, 'green 2', tr('green/2')), + # unrelated results (wrong trigger), ignore this! 
+ 'testing/amd64/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/1')), + 'testing/i386/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('blue/1')), + }}) + + out = self.run_it( + [], + {'green': (False, {'green/2': {'amd64': 'ALWAYSFAIL', 'i386': 'PASS'}, + 'lightgreen/1': {'amd64': 'REGRESSION', 'i386': 'RUNNING'}, + 'darkgreen/1': {'amd64': 'RUNNING', 'i386': 'PASS'}, + }) + })[0] + + self.assertIn('Update Excuses generation completed', out) + # not expecting any failures to retrieve from swift + self.assertNotIn('Failure', out) + + # there should be some pending ones + self.assertEqual(self.pending_requests, + {'green/2': {'darkgreen': ['amd64'], 'lightgreen': ['i386']}}) + + def test_results_without_triggers(self): + '''Old results without recorded triggers''' + + self.data.add_default_packages(green=False) + + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1'), + 'testing/amd64/l/lightgreen/20150101_100100@': (0, 'lightgreen 1'), + 'testing/amd64/l/lightgreen/20150101_100101@': (4, 'lightgreen 1'), + 'testing/i386/g/green/20150101_100100@': (0, 'green 1', tr('passedbefore/1')), + 'testing/i386/g/green/20150101_100200@': (0, 'green 2'), + 'testing/amd64/g/green/20150101_100201@': (4, 'green 2'), + }}) + + # none of the above results should be accepted + self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], + {'green': (False, {'green': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING'}, + 'lightgreen': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, + 'darkgreen': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, + }) + }) + + # there should be some pending ones + self.assertEqual(self.pending_requests, + {'green/2': {'lightgreen': ['amd64', 'i386'], + 'green': ['amd64', 'i386'], + 'darkgreen': ['amd64', 'i386']}}) + + def test_multi_rdepends_with_tests_regression(self): + '''Multiple reverse 
dependencies with tests (regression)'''
+
+        self.data.add_default_packages(green=False)
+
+        self.swift.set_results({'autopkgtest-testing': {
+            'testing/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')),
+            'testing/amd64/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')),
+            'testing/i386/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/1')),
+            'testing/i386/l/lightgreen/20150101_100101@': (4, 'lightgreen 1', tr('green/2')),
+            'testing/amd64/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/1')),
+            'testing/amd64/l/lightgreen/20150101_100101@': (4, 'lightgreen 1', tr('green/2')),
+            'testing/i386/g/green/20150101_100200@': (0, 'green 2', tr('green/2')),
+            'testing/amd64/g/green/20150101_100200@': (0, 'green 2', tr('green/1')),
+            'testing/amd64/g/green/20150101_100201@': (4, 'green 2', tr('green/2')),
+        }})
+
+        out, exc, _ = self.run_it(
+            [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')],
+            {'green': (False, {'green/2': {'amd64': 'REGRESSION', 'i386': 'PASS'},
+                               'lightgreen/1': {'amd64': 'REGRESSION', 'i386': 'REGRESSION'},
+                               'darkgreen/1': {'amd64': 'PASS', 'i386': 'PASS'},
+                               })
+             },
+            {'green': [('old-version', '1'), ('new-version', '2')]}
+        )
+
+        # should have links to log and history, but no artifacts (as this is
+        # not a PPA)
+        self.assertEqual(exc['green']['policy_info']['autopkgtest']['lightgreen/1']['amd64'][:4],
+                         ['REGRESSION',
+                          'http://localhost:18085/autopkgtest-testing/testing/amd64/l/lightgreen/20150101_100101@/log.gz',
+                          'https://autopkgtest.ubuntu.com/packages/l/lightgreen/testing/amd64',
+                          None])
+
+        # should have retry link for the regressions (not a stable URL, test
+        # separately)
+        link = urllib.parse.urlparse(exc['green']['policy_info']['autopkgtest']['lightgreen/1']['amd64'][4])
+        self.assertEqual(link.netloc, 'autopkgtest.ubuntu.com')
+        self.assertEqual(link.path, '/request.cgi')
+        self.assertEqual(urllib.parse.parse_qs(link.query),
+                         {'release': 
['testing'], 'arch': ['amd64'], + 'package': ['lightgreen'], 'trigger': ['green/2']}) + + # we already had all results before the run, so this should not trigger + # any new requests + self.assertEqual(self.amqp_requests, set()) + self.assertEqual(self.pending_requests, {}) + + # not expecting any failures to retrieve from swift + self.assertNotIn('Failure', out, out) + + def test_multi_rdepends_with_tests_regression_last_pass(self): + '''Multiple reverse dependencies with tests (regression), last one passes + + This ensures that we don't just evaluate the test result of the last + test, but all of them. + ''' + + self.data.add_default_packages(green=False) + + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), + 'testing/amd64/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), + 'testing/i386/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/2')), + 'testing/amd64/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/2')), + 'testing/i386/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), + 'testing/amd64/g/green/20150101_100200@': (0, 'green 2', tr('green/1')), + 'testing/amd64/g/green/20150101_100201@': (4, 'green 2', tr('green/2')), + }}) + + out = self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], + {'green': (False, {'green/2': {'amd64': 'REGRESSION', 'i386': 'PASS'}, + 'lightgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + 'darkgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + }) + }, + {'green': [('old-version', '1'), ('new-version', '2')]} + )[0] + + self.assertEqual(self.pending_requests, {}) + # not expecting any failures to retrieve from swift + self.assertNotIn('Failure', out, out) + + def test_multi_rdepends_with_tests_always_failed(self): + '''Multiple reverse dependencies with tests (always failed)''' + + self.data.add_default_packages(green=False) + + 
self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), + 'testing/amd64/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), + 'testing/i386/l/lightgreen/20150101_100100@': (4, 'lightgreen 1', tr('green/1')), + 'testing/i386/l/lightgreen/20150101_100101@': (4, 'lightgreen 1', tr('green/2')), + 'testing/amd64/l/lightgreen/20150101_100100@': (4, 'lightgreen 1', tr('green/1')), + 'testing/amd64/l/lightgreen/20150101_100101@': (4, 'lightgreen 1', tr('green/2')), + 'testing/i386/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), + 'testing/amd64/g/green/20150101_100200@': (4, 'green 2', tr('green/1')), + 'testing/amd64/g/green/20150101_100201@': (4, 'green 2', tr('green/2')), + }}) + + out = self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], + {'green': (True, {'green/2': {'amd64': 'ALWAYSFAIL', 'i386': 'PASS'}, + 'lightgreen/1': {'amd64': 'ALWAYSFAIL', 'i386': 'ALWAYSFAIL'}, + 'darkgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + }) + }, + {'green': [('old-version', '1'), ('new-version', '2')]} + )[0] + + self.assertEqual(self.pending_requests, {}) + # not expecting any failures to retrieve from swift + self.assertNotIn('Failure', out, out) + + def test_multi_rdepends_arch_specific(self): + '''Multiple reverse dependencies with arch specific tests''' + + self.data.add_default_packages(green=False) + + # green has passed before on amd64, doesn't exist on i386 + self.swift.set_results({'autopkgtest-testing': { + 'testing/amd64/g/green64/20150101_100000@': (0, 'green64 0.1', tr('passedbefore/1')), + }}) + + self.data.add('green64', False, {'Depends': 'libc6 (>= 0.9), libgreen1', + 'Architecture': 'amd64'}, + testsuite='autopkgtest') + + # first run requests tests and marks them as pending + self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], + {'green': (False, {'green': 
{'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, + 'lightgreen': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, + 'darkgreen': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, + 'green64': {'amd64': 'RUNNING'}, + }) + }) + + self.assertEqual( + self.amqp_requests, + set(['debci-testing-i386:green {"triggers": ["green/2"]}', + 'debci-testing-amd64:green {"triggers": ["green/2"]}', + 'debci-testing-i386:lightgreen {"triggers": ["green/2"]}', + 'debci-testing-amd64:lightgreen {"triggers": ["green/2"]}', + 'debci-testing-i386:darkgreen {"triggers": ["green/2"]}', + 'debci-testing-amd64:darkgreen {"triggers": ["green/2"]}', + 'debci-testing-amd64:green64 {"triggers": ["green/2"]}'])) + + self.assertEqual(self.pending_requests, + {'green/2': {'lightgreen': ['amd64', 'i386'], + 'darkgreen': ['amd64', 'i386'], + 'green64': ['amd64'], + 'green': ['amd64', 'i386']}}) + + # second run collects the results + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), + 'testing/amd64/d/darkgreen/20150101_100001@': (0, 'darkgreen 1', tr('green/2')), + 'testing/i386/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/2')), + 'testing/amd64/l/lightgreen/20150101_100101@': (0, 'lightgreen 1', tr('green/2')), + # version in testing fails + 'testing/i386/g/green/20150101_020000@': (4, 'green 1', tr('green/1')), + 'testing/amd64/g/green/20150101_020000@': (4, 'green 1', tr('green/1')), + # version in unstable succeeds + 'testing/i386/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), + 'testing/amd64/g/green/20150101_100201@': (0, 'green 2', tr('green/2')), + # only amd64 result for green64 + 'testing/amd64/g/green64/20150101_100200@': (0, 'green64 1', tr('green/2')), + }}) + + out = self.run_it( + [], + {'green': (True, {'green/2': {'amd64': 'PASS', 'i386': 'PASS'}, + 'lightgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + 'darkgreen/1': {'amd64': 'PASS', 
'i386': 'PASS'}, + 'green64/1': {'amd64': 'PASS'}, + }) + }, + {'green': [('old-version', '1'), ('new-version', '2')]} + )[0] + + # all tests ran, there should be no more pending ones + self.assertEqual(self.amqp_requests, set()) + self.assertEqual(self.pending_requests, {}) + + # not expecting any failures to retrieve from swift + self.assertNotIn('Failure', out, out) + + def test_unbuilt(self): + '''Unbuilt package should not trigger tests or get considered''' + + self.data.add_default_packages(green=False) + + self.data.add_src('green', True, {'Version': '2', 'Testsuite': 'autopkgtest'}) + self.data.add('libgreen1', True, {'Source': 'green', + 'Depends': 'libc6 (>= 0.9)'}, + testsuite='autopkgtest', add_src=False) + self.data.add('green', True, {'Depends': 'libc6 (>= 0.9), libgreen1', + 'Conflicts': 'blue'}, + testsuite='autopkgtest', add_src=False) + + exc = self.run_it( + # uninstallable unstable version + [], + {'green': (False, {})}, + {'green': [('old-version', '1'), ('new-version', '2'), + ('missing-builds', ON_ALL_ARCHES), + ] + })[1] + # autopkgtest should not be triggered for unbuilt pkg + self.assertEqual(exc['green']['policy_info']['autopkgtest'], {'verdict': 'REJECTED_TEMPORARILY'}) + self.assertEqual(self.amqp_requests, set()) + self.assertEqual(self.pending_requests, {}) + + def test_unbuilt_not_in_testing(self): + '''Unbuilt package should not trigger tests or get considered (package not in testing)''' + + self.data.add_default_packages(green=False) + + self.sourceppa_cache['lime'] = {'1': ''} + + self.data.add_src('lime', True, {'Version': '1', 'Testsuite': 'autopkgtest'}) + exc = self.run_it( + # unbuilt unstable version + [], + {'lime': (False, {})}, + {'lime': [('old-version', '-'), ('new-version', '1'), + ('reason', 'no-binaries'), + ] + })[1] + # autopkgtest should not be triggered for unbuilt pkg + self.assertEqual(exc['lime']['policy_info']['autopkgtest'], {'verdict': 'REJECTED_TEMPORARILY'}) + self.assertEqual(self.amqp_requests, set()) + 
self.assertEqual(self.pending_requests, {}) + + def test_partial_unbuilt(self): + '''Unbuilt package on some arches should not trigger tests on those arches''' + + self.data.add_default_packages(green=False) + + self.data.add_src('green', True, {'Version': '2', 'Testsuite': 'autopkgtest'}) + self.data.add('libgreen1', True, {'Version': '2', 'Source': 'green', 'Architecture': 'i386'}, add_src=False) + self.data.add('green', True, {'Depends': 'libc6 (>= 0.9), libgreen1', + 'Conflicts': 'blue'}, + testsuite='autopkgtest', add_src=False) + + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), + 'testing/i386/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/2')), + 'testing/i386/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), + }}) + + exc = self.run_it( + [], + {'green': (False, {})}, + {'green': [('old-version', '1'), ('new-version', '2'), + ('missing-builds', {'on-architectures': ['amd64', 'arm64', 'armhf', 'powerpc', 'ppc64el'], + 'on-unimportant-architectures': []}) + ] + })[1] + # autopkgtest should not be triggered on arches with unbuilt pkg + self.assertEqual(exc['green']['policy_info']['autopkgtest']['verdict'], 'REJECTED_TEMPORARILY') + self.assertEqual(self.amqp_requests, set()) + self.assertEqual(self.pending_requests, {}) + + def test_partial_unbuilt_block(self): + '''Unbuilt blocked package on some arches should not trigger tests on those arches''' + + self.data.add_default_packages(green=False) + + self.create_hint('freeze', 'block-all source') + + self.data.add_src('green', True, {'Version': '2', 'Testsuite': 'autopkgtest'}) + self.data.add('libgreen1', True, {'Version': '2', 'Source': 'green', 'Architecture': 'i386'}, add_src=False) + self.data.add('green', True, {'Depends': 'libc6 (>= 0.9), libgreen1', + 'Conflicts': 'blue'}, + testsuite='autopkgtest', add_src=False) + + self.swift.set_results({'autopkgtest-testing': { + 
'testing/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), + 'testing/i386/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/2')), + 'testing/i386/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), + }}) + + exc = self.run_it( + [], + {'green': (False, {})}, + {'green': [('old-version', '1'), ('new-version', '2'), + ('missing-builds', {'on-architectures': ['amd64', 'arm64', 'armhf', 'powerpc', 'ppc64el'], + 'on-unimportant-architectures': []}) + ] + })[1] + # autopkgtest should not be triggered on arches with unbuilt pkg + self.assertEqual(exc['green']['policy_info']['autopkgtest']['verdict'], 'REJECTED_TEMPORARILY') + self.assertEqual(self.amqp_requests, set()) + self.assertEqual(self.pending_requests, {}) + + def test_rdepends_unbuilt(self): + '''Unbuilt reverse dependency''' + + self.data.add_default_packages(green=False, lightgreen=False) + + # old lightgreen fails, thus new green should be held back + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/1.1')), + 'testing/amd64/d/darkgreen/20150101_100001@': (0, 'darkgreen 1', tr('green/1.1')), + 'testing/i386/l/lightgreen/20150101_100000@': (0, 'lightgreen 1', tr('green/1')), + 'testing/i386/l/lightgreen/20150101_100100@': (4, 'lightgreen 1', tr('green/1.1')), + 'testing/amd64/l/lightgreen/20150101_100000@': (0, 'lightgreen 1', tr('green/1')), + 'testing/amd64/l/lightgreen/20150101_100100@': (4, 'lightgreen 1', tr('green/1.1')), + 'testing/i386/g/green/20150101_020000@': (0, 'green 1', tr('green/1')), + 'testing/amd64/g/green/20150101_020000@': (0, 'green 1', tr('green/1')), + 'testing/i386/g/green/20150101_100200@': (0, 'green 1.1', tr('green/1.1')), + 'testing/amd64/g/green/20150101_100201@': (0, 'green 1.1', tr('green/1.1')), + }}) + + # add unbuilt lightgreen; should run tests against the old version + self.data.add_src('lightgreen', True, {'Version': '2', 'Testsuite': 'autopkgtest'}) + 
self.data.add('lightgreen', True, {'Depends': 'libgreen1'}, + testsuite='autopkgtest', add_src=False) + + self.run_it( + [('libgreen1', {'Version': '1.1', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], + {'green': (False, {'green/1.1': {'amd64': 'PASS', 'i386': 'PASS'}, + 'lightgreen/1': {'amd64': 'REGRESSION', 'i386': 'REGRESSION'}, + 'darkgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + }), + 'lightgreen': (False, {}), + }, + {'green': [('old-version', '1'), ('new-version', '1.1')], + 'lightgreen': [('old-version', '1'), ('new-version', '2'), + ('missing-builds', ON_ALL_ARCHES)], + } + ) + + self.assertEqual(self.amqp_requests, set()) + self.assertEqual(self.pending_requests, {}) + + # next run should not trigger any new requests + self.run_it([], {'green': (False, {}), 'lightgreen': (False, {})}) + self.assertEqual(self.amqp_requests, set()) + self.assertEqual(self.pending_requests, {}) + + # now lightgreen 2 gets built, should trigger a new test run + self.data.remove_all(True) + self.data.add('libc6', True) + self.data.add('darkgreen', True, {'Depends': 'libgreen1'}, + testsuite='autopkgtest-pkg-foo') + + self.data.add('blue', True, {'Depends': 'libc6 (>= 0.9)', + 'Conflicts': 'green'}, + testsuite='specialtest') + self.data.add('black', True, {}, + testsuite='autopkgtest') + self.data.add('grey', True, {}, + testsuite='autopkgtest') + + self.run_it( + [('libgreen1', {'Version': '1.1', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest'), + ('lightgreen', {'Version': '2'}, 'autopkgtest')], + {}) + self.assertEqual(self.amqp_requests, + set(['debci-testing-amd64:lightgreen {"triggers": ["lightgreen/2"]}', + 'debci-testing-i386:lightgreen {"triggers": ["lightgreen/2"]}'])) + + # next run collects the results + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/l/lightgreen/20150101_100200@': (0, 'lightgreen 2', tr('lightgreen/2')), + 'testing/amd64/l/lightgreen/20150101_102000@': (0, 'lightgreen 2', tr('lightgreen/2')), + }}) + 
self.run_it( + [], + # green hasn't changed, the above re-run was for trigger lightgreen/2 + {'green': (False, {'green/1.1': {'amd64': 'PASS', 'i386': 'PASS'}, + 'lightgreen/1': {'amd64': 'REGRESSION', 'i386': 'REGRESSION'}, + 'darkgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + }), + 'lightgreen': (True, {'lightgreen/2': {'amd64': 'PASS', 'i386': 'PASS'}}), + }, + {'green': [('old-version', '1'), ('new-version', '1.1')], + 'lightgreen': [('old-version', '1'), ('new-version', '2')], + } + ) + self.assertEqual(self.amqp_requests, set()) + self.assertEqual(self.pending_requests, {}) + + def test_rdepends_unbuilt_unstable_only(self): + '''Unbuilt reverse dependency which is not in testing''' + + self.data.add_default_packages(green=False) + + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), + 'testing/amd64/d/darkgreen/20150101_100001@': (0, 'darkgreen 1', tr('green/2')), + 'testing/i386/l/lightgreen/20150101_100000@': (0, 'lightgreen 1', tr('green/2')), + 'testing/amd64/l/lightgreen/20150101_100000@': (0, 'lightgreen 1', tr('green/2')), + 'testing/i386/g/green/20150101_020000@': (0, 'green 1', tr('green/1')), + 'testing/amd64/g/green/20150101_020000@': (0, 'green 1', tr('green/1')), + 'testing/i386/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), + 'testing/amd64/g/green/20150101_100201@': (0, 'green 2', tr('green/2')), + }}) + # run britney once to pick up previous results + self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], + {'green': (True, {'green/2': {'amd64': 'PASS', 'i386': 'PASS'}})}) + + # add new uninstallable brokengreen; should not run test at all + exc = self.run_it( + [('brokengreen', {'Version': '1', 'Depends': 'libgreen1, nonexisting'}, 'autopkgtest')], + {'green': (True, {'green/2': {'amd64': 'PASS', 'i386': 'PASS'}}), + 'brokengreen': (False, {}), + }, + {'green': [('old-version', '1'), ('new-version', 
'2')], + 'brokengreen': [('old-version', '-'), ('new-version', '1'), + ('reason', 'depends'), + ('excuses', 'brokengreen/amd64 unsatisfiable Depends: nonexisting')], + })[1] + # autopkgtest should not be triggered for uninstallable pkg + self.assertEqual(exc['brokengreen']['policy_info']['autopkgtest'], {'verdict': 'REJECTED_TEMPORARILY'}) + + self.assertEqual(self.amqp_requests, set()) + + def test_rdepends_unbuilt_new_version_result(self): + '''Unbuilt reverse dependency gets test result for newer version + + This might happen if the autopkgtest infrastructure runs the unstable + source tests against the testing binaries. Even if that gets done + properly it might still happen that at the time of the britney run the + package isn't built yet, but it is once the test gets run. + ''' + + self.data.add_default_packages(green=False, lightgreen=False) + + # old lightgreen fails, thus new green should be held back + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/1.1')), + 'testing/amd64/d/darkgreen/20150101_100001@': (0, 'darkgreen 1', tr('green/1.1')), + 'testing/i386/l/lightgreen/20150101_100000@': (0, 'lightgreen 1', tr('green/1')), + 'testing/i386/l/lightgreen/20150101_100100@': (4, 'lightgreen 1', tr('green/1.1')), + 'testing/amd64/l/lightgreen/20150101_100000@': (0, 'lightgreen 1', tr('green/1')), + 'testing/amd64/l/lightgreen/20150101_100100@': (4, 'lightgreen 1', tr('green/1.1')), + 'testing/i386/g/green/20150101_020000@': (0, 'green 1', tr('green/1')), + 'testing/amd64/g/green/20150101_020000@': (0, 'green 1', tr('green/1')), + 'testing/i386/g/green/20150101_100200@': (0, 'green 1.1', tr('green/1.1')), + 'testing/amd64/g/green/20150101_100201@': (0, 'green 1.1', tr('green/1.1')), + }}) + + # add unbuilt lightgreen; should run tests against the old version + self.data.add_src('lightgreen', True, {'Version': '2', 'Testsuite': 'autopkgtest'}) + self.data.add('lightgreen', True, 
{'Depends': 'libgreen1'}, + testsuite='autopkgtest', add_src=False) + + self.run_it( + [('libgreen1', {'Version': '1.1', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], + {'green': (False, {'green/1.1': {'amd64': 'PASS', 'i386': 'PASS'}, + 'lightgreen/1': {'amd64': 'REGRESSION', 'i386': 'REGRESSION'}, + 'darkgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + }), + 'lightgreen': (False, {}), + }, + {'green': [('old-version', '1'), ('new-version', '1.1')], + 'lightgreen': [('old-version', '1'), ('new-version', '2'), + ('missing-builds', ON_ALL_ARCHES)] + } + ) + self.assertEqual(self.amqp_requests, set()) + self.assertEqual(self.pending_requests, {}) + + # lightgreen 2 stays unbuilt in britney, but we get a test result for it + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/l/lightgreen/20150101_100200@': (0, 'lightgreen 2', tr('green/1.1')), + 'testing/amd64/l/lightgreen/20150101_102000@': (0, 'lightgreen 2', tr('green/1.1')), + }}) + self.run_it( + [], + {'green': (True, {'green/1.1': {'amd64': 'PASS', 'i386': 'PASS'}, + 'lightgreen/2': {'amd64': 'PASS', 'i386': 'PASS'}, + 'darkgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + }), + 'lightgreen': (False, {}), + }, + {'green': [('old-version', '1'), ('new-version', '1.1')], + 'lightgreen': [('old-version', '1'), ('new-version', '2'), + ('missing-builds', ON_ALL_ARCHES)] + } + ) + self.assertEqual(self.amqp_requests, set()) + self.assertEqual(self.pending_requests, {}) + + # next run should not trigger any new requests + self.run_it([], {'green': (True, {}), 'lightgreen': (False, {})}) + self.assertEqual(self.amqp_requests, set()) + self.assertEqual(self.pending_requests, {}) + + def test_rdepends_unbuilt_new_version_fail(self): + '''Unbuilt reverse dependency gets failure for newer version''' + + self.data.add_default_packages(green=False, lightgreen=False) + + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/l/lightgreen/20150101_100101@': (0, 'lightgreen 1', tr('lightgreen/1')), 
+ }}) + + # add unbuilt lightgreen; should request tests against the old version + self.data.add_src('lightgreen', True, {'Version': '2', 'Testsuite': 'autopkgtest'}) + self.data.add('lightgreen', True, {'Depends': 'libgreen1'}, + testsuite='autopkgtest', add_src=False) + + self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], + {'green': (False, {'green': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, + 'lightgreen': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING'}, + 'darkgreen': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, + }), + 'lightgreen': (False, {}), + }, + {'green': [('old-version', '1'), ('new-version', '2')], + 'lightgreen': [('old-version', '1'), ('new-version', '2'), + ('missing-builds', ON_ALL_ARCHES)], + } + ) + self.assertEqual(len(self.amqp_requests), 6) + + # we only get a result for lightgreen 2, not for the requested 1 + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), + 'testing/amd64/d/darkgreen/20150101_100001@': (0, 'darkgreen 1', tr('green/2')), + 'testing/i386/l/lightgreen/20150101_100100@': (0, 'lightgreen 0.5', tr('green/1')), + 'testing/amd64/l/lightgreen/20150101_100100@': (0, 'lightgreen 0.5', tr('green/1')), + 'testing/i386/l/lightgreen/20150101_100200@': (4, 'lightgreen 2', tr('green/2')), + 'testing/amd64/l/lightgreen/20150101_100200@': (4, 'lightgreen 2', tr('green/2')), + 'testing/i386/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), + 'testing/amd64/g/green/20150101_100201@': (0, 'green 2', tr('green/2')), + }}) + self.run_it( + [], + {'green': (False, {'green/2': {'amd64': 'PASS', 'i386': 'PASS'}, + 'lightgreen/2': {'amd64': 'REGRESSION', 'i386': 'REGRESSION'}, + 'darkgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + }), + 'lightgreen': (False, {}), + }, + {'green': [('old-version', '1'), ('new-version', '2')], + 'lightgreen': [('old-version', '1'), 
('new-version', '2'), + ('missing-builds', ON_ALL_ARCHES)], + } + ) + self.assertEqual(self.amqp_requests, set()) + self.assertEqual(self.pending_requests, {}) + + # next run should not trigger any new requests + self.run_it([], {'green': (False, {}), 'lightgreen': (False, {})}) + self.assertEqual(self.pending_requests, {}) + self.assertEqual(self.amqp_requests, set()) + +### def test_same_version_binary_in_unstable(self): +### '''binary from new architecture in unstable with testing version''' +### +### # Invalid dataset in Debian and Ubuntu: ... ARCHITECTURE all != i386 +### self.data.add('lightgreen', False) +### +### # i386 is in testing already, but amd64 just recently built and is in unstable +### self.data.add_src('brown', False, {'Testsuite': 'autopkgtest'}) +### self.data.add_src('brown', True, {'Testsuite': 'autopkgtest'}) +### self.data.add('brown', False, {'Architecture': 'i386'}, add_src=False) +### self.data.add('brown', True, {}, add_src=False) +### +### exc = self.run_it( +### # we need some other package to create unstable Sources +### [('lightgreen', {'Version': '2'}, 'autopkgtest')], +### {'brown': (True, {})} +### )[1] +### self.assertEqual(exc['brown']['item-name'], 'brown/amd64') + + def test_package_pair_running(self): + '''Two packages in unstable that need to go in together (running)''' + + self.data.add_default_packages(green=False, lightgreen=False) + + # green has passed before on i386 only, therefore ALWAYSFAIL on amd64 + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/g/green/20150101_100000@': (0, 'green 1', tr('passedbefore/1')), + }}) + + self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest'), + ('lightgreen', {'Version': '2', 'Depends': 'libgreen1 (>= 2)'}, 'autopkgtest')], + {'green': (False, {'green': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING'}, + 'lightgreen': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, + 'darkgreen': {'amd64': 
'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, + }), + 'lightgreen': (False, {'lightgreen': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}}), + }, + {'green': [('old-version', '1'), ('new-version', '2')], + 'lightgreen': [('old-version', '1'), ('new-version', '2')], + }) + + # we expect the package's and its reverse dependencies' tests to get + # triggered; lightgreen should be triggered for each trigger + self.assertEqual( + self.amqp_requests, + set(['debci-testing-i386:green {"triggers": ["green/2"]}', + 'debci-testing-amd64:green {"triggers": ["green/2"]}', + 'debci-testing-i386:lightgreen {"triggers": ["green/2"]}', + 'debci-testing-amd64:lightgreen {"triggers": ["green/2"]}', + 'debci-testing-i386:lightgreen {"triggers": ["lightgreen/2"]}', + 'debci-testing-amd64:lightgreen {"triggers": ["lightgreen/2"]}', + 'debci-testing-i386:darkgreen {"triggers": ["green/2"]}', + 'debci-testing-amd64:darkgreen {"triggers": ["green/2"]}'])) + + # ... and that they get recorded as pending + self.assertEqual(self.pending_requests, + {'lightgreen/2': {'lightgreen': ['amd64', 'i386']}, + 'green/2': {'darkgreen': ['amd64', 'i386'], + 'green': ['amd64', 'i386'], + 'lightgreen': ['amd64', 'i386']}}) + + def test_binary_from_new_source_package_running(self): + '''building an existing binary for a new source package (running)''' + + self.data.add_default_packages(green=False) + + self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'newgreen', 'Depends': 'libc6'}, 'autopkgtest')], + {'newgreen': (True, {'newgreen': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, + 'lightgreen': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, + 'darkgreen': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, + }), + }, + {'newgreen': [('old-version', '-'), ('new-version', '2')]}) + + self.assertEqual(len(self.amqp_requests), 8) + self.assertEqual(self.pending_requests, + {'newgreen/2': {'darkgreen': ['amd64', 'i386'], + 'green': 
['amd64', 'i386'], + 'lightgreen': ['amd64', 'i386'], + 'newgreen': ['amd64', 'i386']}}) + + def test_blacklisted_fail(self): + '''blacklisted packages return exit code 99 and version blacklisted, + check they are handled correctly''' + + self.data.add_default_packages(black=False, grey=False) + self.data.add('brown', False, {'Depends': 'grey'}, testsuite='autopkgtest') + self.data.add('brown', True, {'Depends': 'grey'}, testsuite='autopkgtest') + + self.swift.set_results({'autopkgtest-testing': { + 'testing/amd64/b/black/20150101_100000@': (0, 'black 1', tr('black/1')), + 'testing/amd64/b/black/20150102_100000@': (99, 'black blacklisted', tr('black/2')), + 'testing/amd64/g/grey/20150101_100000@': (99, 'grey blacklisted', tr('grey/1')), + 'testing/amd64/b/brown/20150101_100000@': (99, 'brown blacklisted', tr('grey/2')), + }}) + + self.run_it( + [('black', {'Version': '2'}, 'autopkgtest'), + ('grey', {'Version': '2'}, 'autopkgtest')], + {'black': (False, {'black/blacklisted': {'amd64': 'REGRESSION'}, + 'black': {'i386': 'RUNNING-ALWAYSFAIL'}}), + 'grey': (True, {'grey': {'amd64': 'RUNNING-ALWAYSFAIL'}, + 'brown/blacklisted': {'amd64': 'ALWAYSFAIL'}, + 'brown': {'i386': 'RUNNING-ALWAYSFAIL'}}) + }) + + self.assertEqual(len(self.amqp_requests), 4) + self.assertEqual(self.pending_requests, + {'black/2': {'black': ['i386']}, + 'grey/2': {'grey': ['amd64', 'i386'], + 'brown': ['i386']}}) + + def test_blacklisted_force(self): + '''blacklisted packages return exit code 99 and version all, check they + are handled correctly''' + + self.data.add_default_packages(black=False) + + self.swift.set_results({'autopkgtest-testing': { + 'testing/amd64/b/black/20150101_100000@': (0, 'black 1', tr('black/1')), + 'testing/amd64/b/black/20150102_100000@': (99, 'black blacklisted', tr('black/2')), + 'testing/i386/b/black/20150101_100000@': (0, 'black 1', tr('black/1')), + 'testing/i386/b/black/20150102_100000@': (99, 'black blacklisted', tr('black/2')), + }}) + + 
self.create_hint('autopkgtest', 'force-badtest black/blacklisted') + + self.run_it( + [('black', {'Version': '2'}, 'autopkgtest')], + {'black': (True, {'black/blacklisted': {'amd64': 'IGNORE-FAIL', + 'i386': 'IGNORE-FAIL'}}) + }, + {'black': [('old-version', '1'), ('new-version', '2')]}) + + self.assertEqual(len(self.amqp_requests), 0) + + def test_binary_from_new_source_package_pass(self): + '''building an existing binary for a new source package (pass)''' + + self.data.add_default_packages(green=False) + + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('newgreen/2')), + 'testing/amd64/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('newgreen/2')), + 'testing/i386/g/green/20150101_100000@': (0, 'green 1', tr('newgreen/2')), + 'testing/amd64/g/green/20150101_100000@': (0, 'green 1', tr('newgreen/2')), + 'testing/i386/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('newgreen/2')), + 'testing/amd64/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('newgreen/2')), + 'testing/i386/n/newgreen/20150101_100200@': (0, 'newgreen 2', tr('newgreen/2')), + 'testing/amd64/n/newgreen/20150101_100201@': (0, 'newgreen 2', tr('newgreen/2')), + }}) + + self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'newgreen', 'Depends': 'libc6'}, 'autopkgtest')], + {'newgreen': (True, {'newgreen/2': {'amd64': 'PASS', 'i386': 'PASS'}, + 'lightgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + 'darkgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + 'green/1': {'amd64': 'PASS', 'i386': 'PASS'}, + }), + }, + {'newgreen': [('old-version', '-'), ('new-version', '2')]}) + + self.assertEqual(self.amqp_requests, set()) + self.assertEqual(self.pending_requests, {}) + + def test_result_from_older_version(self): + '''test result from older version than the uploaded one''' + + self.data.add_default_packages(darkgreen=False) + + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/d/darkgreen/20150101_100000@': 
(0, 'darkgreen 1', tr('darkgreen/1')), + 'testing/amd64/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('darkgreen/1')), + }}) + + self.run_it( + [('darkgreen', {'Version': '2', 'Depends': 'libc6 (>= 0.9), libgreen1'}, 'autopkgtest')], + {'darkgreen': (False, {'darkgreen': {'amd64': 'RUNNING', 'i386': 'RUNNING'}})}) + + self.assertEqual( + self.amqp_requests, + set(['debci-testing-i386:darkgreen {"triggers": ["darkgreen/2"]}', + 'debci-testing-amd64:darkgreen {"triggers": ["darkgreen/2"]}'])) + self.assertEqual(self.pending_requests, + {'darkgreen/2': {'darkgreen': ['amd64', 'i386']}}) + + # second run gets the results for darkgreen 2 + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/d/darkgreen/20150101_100010@': (0, 'darkgreen 2', tr('darkgreen/2')), + 'testing/amd64/d/darkgreen/20150101_100010@': (0, 'darkgreen 2', tr('darkgreen/2')), + }}) + self.run_it( + [], + {'darkgreen': (True, {'darkgreen/2': {'amd64': 'PASS', 'i386': 'PASS'}})}) + self.assertEqual(self.amqp_requests, set()) + self.assertEqual(self.pending_requests, {}) + + # next run sees a newer darkgreen, should re-run tests + self.data.remove_all(True) + self.data.add('libc6', True) + self.data.add('libgreen1', True, {'Source': 'green', + 'Depends': 'libc6 (>= 0.9)'}, + testsuite='autopkgtest') + self.data.add('green', True, {'Depends': 'libc6 (>= 0.9), libgreen1', + 'Conflicts': 'blue'}, + testsuite='autopkgtest') + self.data.add('lightgreen', True, {'Depends': 'libgreen1'}, + testsuite='autopkgtest') + self.data.add('blue', True, {'Depends': 'libc6 (>= 0.9)', + 'Conflicts': 'green'}, + testsuite='specialtest') + self.data.add('black', True, {}, + testsuite='autopkgtest') + self.data.add('grey', True, {}, + testsuite='autopkgtest') + + self.run_it( + [('darkgreen', {'Version': '3', 'Depends': 'libc6 (>= 0.9), libgreen1'}, 'autopkgtest')], + {'darkgreen': (False, {'darkgreen': {'amd64': 'RUNNING', 'i386': 'RUNNING'}})}) + self.assertEqual( + self.amqp_requests, + 
set(['debci-testing-i386:darkgreen {"triggers": ["darkgreen/3"]}', + 'debci-testing-amd64:darkgreen {"triggers": ["darkgreen/3"]}'])) + self.assertEqual(self.pending_requests, + {'darkgreen/3': {'darkgreen': ['amd64', 'i386']}}) + + def test_old_result_from_rdep_version(self): + '''re-runs reverse dependency test on new versions''' + + self.data.add_default_packages(green=False) + + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/g/green/20150101_100000@': (0, 'green 1', tr('green/1')), + 'testing/amd64/g/green/20150101_100000@': (0, 'green 1', tr('green/1')), + 'testing/i386/g/green/20150101_100010@': (0, 'green 2', tr('green/2')), + 'testing/amd64/g/green/20150101_100010@': (0, 'green 2', tr('green/2')), + 'testing/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), + 'testing/amd64/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), + 'testing/i386/l/lightgreen/20150101_100000@': (0, 'lightgreen 1', tr('green/2')), + 'testing/amd64/l/lightgreen/20150101_100000@': (0, 'lightgreen 1', tr('green/2')), + }}) + + self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], + {'green': (True, {'green/2': {'amd64': 'PASS', 'i386': 'PASS'}, + 'lightgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + 'darkgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + }), + }) + + self.assertEqual(self.amqp_requests, set()) + self.assertEqual(self.pending_requests, {}) + self.data.remove_all(True) + + # second run: new version re-triggers all tests + self.run_it( + [('libgreen1', {'Version': '3', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], + {'green': (False, {'green': {'amd64': 'RUNNING', 'i386': 'RUNNING'}, + 'lightgreen': {'amd64': 'RUNNING', 'i386': 'RUNNING'}, + 'darkgreen': {'amd64': 'RUNNING', 'i386': 'RUNNING'}, + }), + }) + + self.assertEqual(len(self.amqp_requests), 6) + self.assertEqual(self.pending_requests, + {'green/3': {'darkgreen': ['amd64', 'i386'], + 'green': ['amd64', 
'i386'], + 'lightgreen': ['amd64', 'i386']}}) + + # third run gets the results for green and lightgreen, darkgreen is + # still running + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/g/green/20150101_100020@': (0, 'green 3', tr('green/3')), + 'testing/amd64/g/green/20150101_100020@': (0, 'green 3', tr('green/3')), + 'testing/i386/l/lightgreen/20150101_100010@': (0, 'lightgreen 1', tr('green/3')), + 'testing/amd64/l/lightgreen/20150101_100010@': (0, 'lightgreen 1', tr('green/3')), + }}) + self.run_it( + [], + {'green': (False, {'green/3': {'amd64': 'PASS', 'i386': 'PASS'}, + 'lightgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + 'darkgreen': {'amd64': 'RUNNING', 'i386': 'RUNNING'}, + }), + }) + self.assertEqual(self.amqp_requests, set()) + self.assertEqual(self.pending_requests, + {'green/3': {'darkgreen': ['amd64', 'i386']}}) + + # fourth run finally gets the new darkgreen result + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/d/darkgreen/20150101_100010@': (0, 'darkgreen 1', tr('green/3')), + 'testing/amd64/d/darkgreen/20150101_100010@': (0, 'darkgreen 1', tr('green/3')), + }}) + self.run_it( + [], + {'green': (True, {'green/3': {'amd64': 'PASS', 'i386': 'PASS'}, + 'lightgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + 'darkgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + }), + }) + self.assertEqual(self.amqp_requests, set()) + self.assertEqual(self.pending_requests, {}) + + def test_different_versions_on_arches(self): + '''different tested package versions on different architectures''' + + self.data.add_default_packages(green=False) + + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('passedbefore/1')), + 'testing/amd64/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('passedbefore/1')), + }}) + + # first run: no results yet + self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'green'}, 'autopkgtest')], + {'green': (False, {'darkgreen': {'amd64': 
'RUNNING', 'i386': 'RUNNING'}})}) + + # second run: i386 result has version 1.1 + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/d/darkgreen/20150101_100010@': (0, 'darkgreen 1.1', tr('green/2')) + }}) + self.run_it( + [], + {'green': (False, {'darkgreen': {'amd64': 'RUNNING'}, + 'darkgreen/1.1': {'i386': 'PASS'}, + })}) + + # third run: amd64 result has version 1.2 + self.swift.set_results({'autopkgtest-testing': { + 'testing/amd64/d/darkgreen/20150101_100010@': (0, 'darkgreen 1.2', tr('green/2')), + }}) + self.run_it( + [], + {'green': (True, {'darkgreen/1.2': {'amd64': 'PASS'}, + 'darkgreen/1.1': {'i386': 'PASS'}, + })}) + + def test_tmpfail(self): + '''tmpfail results''' + + self.data.add_default_packages(lightgreen=False) + + # one tmpfail result without testpkg-version, should be ignored + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/l/lightgreen/20150101_100000@': (0, 'lightgreen 1', tr('lightgreen/1')), + 'testing/i386/l/lightgreen/20150101_100101@': (16, None, tr('lightgreen/2')), + 'testing/amd64/l/lightgreen/20150101_100000@': (0, 'lightgreen 1', tr('lightgreen/1')), + 'testing/amd64/l/lightgreen/20150101_100101@': (16, 'lightgreen 2', tr('lightgreen/2')), + }}) + + self.run_it( + [('lightgreen', {'Version': '2', 'Depends': 'libgreen1 (>= 1)'}, 'autopkgtest')], + {'lightgreen': (False, {'lightgreen/2': {'amd64': 'REGRESSION', 'i386': 'RUNNING'}})}) + self.assertEqual(self.pending_requests, + {'lightgreen/2': {'lightgreen': ['i386']}}) + + # one more tmpfail result, should not confuse britney with None version + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/l/lightgreen/20150101_100201@': (16, None, tr('lightgreen/2')), + }}) + self.run_it( + [], + {'lightgreen': (False, {'lightgreen/2': {'amd64': 'REGRESSION', 'i386': 'RUNNING'}})}) + with open(os.path.join(self.data.path, 'data/testing/state/autopkgtest-results.cache')) as f: + contents = f.read() + self.assertNotIn('null', contents) + 
self.assertNotIn('None', contents) + + def test_rerun_failure(self): + '''manually re-running failed tests gets picked up''' + + self.data.add_default_packages(green=False) + + # first run fails + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/g/green/20150101_100000@': (0, 'green 2', tr('green/1')), + 'testing/i386/g/green/20150101_100101@': (4, 'green 2', tr('green/2')), + 'testing/amd64/g/green/20150101_100000@': (0, 'green 2', tr('green/1')), + 'testing/amd64/g/green/20150101_100101@': (4, 'green 2', tr('green/2')), + 'testing/i386/l/lightgreen/20150101_100000@': (0, 'lightgreen 1', tr('green/1')), + 'testing/i386/l/lightgreen/20150101_100101@': (4, 'lightgreen 1', tr('green/2')), + 'testing/amd64/l/lightgreen/20150101_100000@': (0, 'lightgreen 1', tr('green/1')), + 'testing/amd64/l/lightgreen/20150101_100101@': (4, 'lightgreen 1', tr('green/2')), + 'testing/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), + 'testing/amd64/d/darkgreen/20150101_100001@': (0, 'darkgreen 1', tr('green/2')), + }}) + + self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], + {'green': (False, {'green/2': {'amd64': 'REGRESSION', 'i386': 'REGRESSION'}, + 'lightgreen/1': {'amd64': 'REGRESSION', 'i386': 'REGRESSION'}, + 'darkgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + }), + }) + self.assertEqual(self.pending_requests, {}) + + # re-running test manually succeeded (note: darkgreen result should be + # cached already) + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/g/green/20150101_100201@': (0, 'green 2', tr('green/2')), + 'testing/amd64/g/green/20150101_100201@': (0, 'green 2', tr('green/2')), + 'testing/i386/l/lightgreen/20150101_100201@': (0, 'lightgreen 1', tr('green/2')), + 'testing/amd64/l/lightgreen/20150101_100201@': (0, 'lightgreen 1', tr('green/2')), + }}) + self.run_it( + [], + {'green': (True, {'green/2': {'amd64': 'PASS', 'i386': 'PASS'}, + 'lightgreen/1': {'amd64': 
'PASS', 'i386': 'PASS'}, + 'darkgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + }), + }) + self.assertEqual(self.pending_requests, {}) + + def test_new_runs_dont_clobber_pass(self): + '''passing once is sufficient + + If a test succeeded once for a particular version and trigger, + subsequent failures (which might be triggered by other unstable + uploads) should not invalidate the PASS, as that new failure is the + fault of the new upload, not the original one. + ''' + + self.data.add_default_packages(libc6=False) + + # new libc6 works fine with green + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/g/green/20150101_100000@': (0, 'green 1', tr('libc6/2')), + 'testing/amd64/g/green/20150101_100000@': (0, 'green 1', tr('libc6/2')), + }}) + + self.run_it( + [('libc6', {'Version': '2'}, None)], + {'libc6': (True, {'green/1': {'amd64': 'PASS', 'i386': 'PASS'}})}) + self.assertEqual(self.pending_requests, {}) + + self.data.remove_all(True) + self.data.add('libc6', True, {'Version': '2'}) + self.data.add('lightgreen', True, {'Depends': 'libgreen1'}, + testsuite='autopkgtest') + self.data.add('blue', True, {'Depends': 'libc6 (>= 0.9)', + 'Conflicts': 'green'}, + testsuite='specialtest') + self.data.add('black', True, {}, + testsuite='autopkgtest') + self.data.add('grey', True, {}, + testsuite='autopkgtest') + + # new green fails; that's not libc6's fault though, so it should stay + # valid + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/g/green/20150101_100100@': (4, 'green 2', tr('green/2')), + 'testing/amd64/g/green/20150101_100100@': (4, 'green 2', tr('green/2')), + }}) + self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], + {'green': (False, {'green/2': {'amd64': 'REGRESSION', 'i386': 'REGRESSION'}}), + 'libc6': (True, {'green/1': {'amd64': 'PASS', 'i386': 'PASS'}}), + }) + self.assertEqual( + self.amqp_requests, + set(['debci-testing-i386:darkgreen {"triggers": ["green/2"]}', + 
'debci-testing-amd64:darkgreen {"triggers": ["green/2"]}', + 'debci-testing-i386:lightgreen {"triggers": ["green/2"]}', + 'debci-testing-amd64:lightgreen {"triggers": ["green/2"]}', + ])) + + def test_remove_from_unstable(self): + '''broken package gets removed from unstable''' + + self.data.add_default_packages(green=False, lightgreen=False) + + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/g/green/20150101_100101@': (0, 'green 1', tr('green/1')), + 'testing/amd64/g/green/20150101_100101@': (0, 'green 1', tr('green/1')), + 'testing/i386/g/green/20150101_100201@': (0, 'green 2', tr('green/2')), + 'testing/amd64/g/green/20150101_100201@': (0, 'green 2', tr('green/2')), + 'testing/i386/l/lightgreen/20150101_100101@': (0, 'lightgreen 1', tr('green/1')), + 'testing/amd64/l/lightgreen/20150101_100101@': (0, 'lightgreen 1', tr('green/1')), + 'testing/i386/l/lightgreen/20150101_100201@': (4, 'lightgreen 2', tr('green/2 lightgreen/2')), + 'testing/amd64/l/lightgreen/20150101_100201@': (4, 'lightgreen 2', tr('green/2 lightgreen/2')), + 'testing/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), + 'testing/amd64/d/darkgreen/20150101_100001@': (0, 'darkgreen 1', tr('green/2')), + }}) + + self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest'), + ('lightgreen', {'Version': '2', 'Depends': 'libgreen1 (>= 2)'}, 'autopkgtest')], + {'green': (False, {'green/2': {'amd64': 'PASS', 'i386': 'PASS'}, + 'lightgreen/2': {'amd64': 'REGRESSION', 'i386': 'REGRESSION'}, + }), + }) + self.assertEqual(self.pending_requests, {}) + self.assertEqual(self.amqp_requests, set()) + + # remove new lightgreen by resetting archive indexes, and re-adding + # green + self.data.remove_all(True) + + self.swift.set_results({'autopkgtest-testing': { + # add new result for lightgreen 1 + 'testing/i386/l/lightgreen/20150101_100301@': (0, 'lightgreen 1', tr('green/2')), + 'testing/amd64/l/lightgreen/20150101_100301@': (0, 
'lightgreen 1', tr('green/2')), + }}) + + # next run should re-trigger lightgreen 1 to test against green/2 + exc = self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], + {'green': (True, {'green/2': {'amd64': 'PASS', 'i386': 'PASS'}, + 'lightgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + }), + })[1] + self.assertNotIn('lightgreen 2', exc['green']['policy_info']['autopkgtest']) + + # should not trigger new requests + self.assertEqual(self.pending_requests, {}) + self.assertEqual(self.amqp_requests, set()) + + # but the next run should not trigger anything new + self.run_it( + [], + {'green': (True, {'green/2': {'amd64': 'PASS', 'i386': 'PASS'}, + 'lightgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + }), + }) + self.assertEqual(self.pending_requests, {}) + self.assertEqual(self.amqp_requests, set()) + +### def test_multiarch_dep(self): +### '''multi-arch dependency''' +### # needs changes in britney2/installability/builder.py +### +### self.data.add_default_packages(lightgreen=False) +### +### # lightgreen has passed before on i386 only, therefore ALWAYSFAIL on amd64 +### self.swift.set_results({'autopkgtest-testing': { +### 'testing/i386/l/lightgreen/20150101_100000@': (0, 'lightgreen 1', tr('passedbefore/1')), +### }}) +### +### self.data.add('rainbow', False, {'Depends': 'lightgreen:any'}, +### testsuite='autopkgtest') +### self.data.add('rainbow', True, {'Depends': 'lightgreen:any'}, +### testsuite='autopkgtest') +### +### self.run_it( +### [('lightgreen', {'Version': '2'}, 'autopkgtest')], +### {'lightgreen': (False, {'lightgreen': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING'}, +### 'rainbow': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, +### }), +### }, +### {'lightgreen': [('old-version', '1'), ('new-version', '2')]} +### ) + + def test_nbs(self): + '''source-less binaries do not cause harm''' + + self.data.add_default_packages(green=False) + + # NBS in testing + 
self.data.add('liboldgreen0', False, add_src=False) + # NBS in unstable + self.data.add('liboldgreen1', True, add_src=False) + self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'green'}, 'autopkgtest')], + {'green': (True, {'green': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, + 'lightgreen': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, + 'darkgreen': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, + }), + }, + {'green': [('old-version', '1'), ('new-version', '2')]}) + + def test_newer_version_in_testing(self): + '''Testing version is newer than in unstable''' + + self.data.add_default_packages(lightgreen=False) + + exc = self.run_it( + [('lightgreen', {'Version': '0.9~beta'}, 'autopkgtest')], + {'lightgreen': (False, {})}, + {'lightgreen': [('old-version', '1'), ('new-version', '0.9~beta'), + ('reason', 'newerintesting'), + ('excuses', 'ALERT: lightgreen is newer in testing (1 0.9~beta)') + ] + })[1] + + # autopkgtest should not be triggered + self.assertNotIn('autopkgtest', exc['lightgreen'].get('policy_info', {})) + self.assertEqual(self.pending_requests, {}) + self.assertEqual(self.amqp_requests, set()) + + def test_testsuite_triggers(self): + '''Testsuite-Triggers''' + + self.data.add_default_packages(lightgreen=False) + + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/r/rainbow/20150101_100000@': (0, 'rainbow 1', tr('passedbefore/1')), + }}) + + self.data.add('rainbow', False, testsuite='autopkgtest', + srcfields={'Testsuite-Triggers': 'unicorn, lightgreen, sugar'}) + + self.run_it( + [('lightgreen', {'Version': '2'}, 'autopkgtest')], + {'lightgreen': (False, {'lightgreen': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, + 'rainbow': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING'}, + }), + } + ) + + def test_huge_number_of_tests(self): + '''package triggers huge number of tests''' + + self.data.add_default_packages(green=False) + + for i in range(30): + 
self.data.add('green%i' % i, False, {'Depends': 'libgreen1'}, testsuite='autopkgtest') + + self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'green'}, 'autopkgtest')], + {'green': (True, {'green': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, + 'green0': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, + 'green29': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, + }) + }, + ) + + # requests should all go into the -huge queues + self.assertEqual([x for x in self.amqp_requests if 'huge' not in x], []) + for i in range(30): + for arch in ['i386', 'amd64']: + self.assertIn('debci-huge-testing-%s:green%i {"triggers": ["green/2"]}' % + (arch, i), self.amqp_requests) + + ################################################################ + # Tests for hint processing + ################################################################ + + def test_hint_force_badtest(self): + '''force-badtest hint''' + + self.data.add_default_packages(green=False) + + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), + 'testing/amd64/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), + 'testing/i386/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/1')), + 'testing/i386/l/lightgreen/20150101_100101@': (4, 'lightgreen 1', tr('green/2')), + 'testing/amd64/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/1')), + 'testing/amd64/l/lightgreen/20150101_100101@': (4, 'lightgreen 1', tr('green/2')), + 'testing/i386/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), + 'testing/amd64/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), + }}) + + self.create_hint('autopkgtest', 'force-badtest lightgreen/1') + + self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], + {'green': (True, {'green/2': {'amd64': 'PASS', 'i386': 'PASS'}, + 'lightgreen/1': {'amd64': 'IGNORE-FAIL', 
'i386': 'IGNORE-FAIL'}, + 'darkgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + }), + }, + {'green': [('old-version', '1'), ('new-version', '2')] + }) + + def test_hint_force_badtest_multi_version(self): + '''force-badtest hint''' + + self.data.add_default_packages(green=False) + + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), + 'testing/amd64/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), + 'testing/i386/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/1')), + 'testing/i386/l/lightgreen/20150101_100101@': (4, 'lightgreen 1', tr('green/2')), + 'testing/amd64/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/1')), + 'testing/amd64/l/lightgreen/20150101_100101@': (4, 'lightgreen 2', tr('green/2')), + 'testing/i386/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), + 'testing/amd64/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), + }}) + + self.create_hint('autopkgtest', 'force-badtest lightgreen/1') + + self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], + {'green': (False, {'green/2': {'amd64': 'PASS', 'i386': 'PASS'}, + 'lightgreen/1': {'i386': 'IGNORE-FAIL'}, + 'lightgreen/2': {'amd64': 'REGRESSION'}, + 'darkgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + }), + }, + {'green': [('old-version', '1'), ('new-version', '2')] + }) + + # hint the version on amd64 too + self.create_hint('autopkgtest', 'force-badtest lightgreen/2') + + self.run_it( + [], + {'green': (True, {'green/2': {'amd64': 'PASS', 'i386': 'PASS'}, + 'lightgreen/1': {'i386': 'IGNORE-FAIL'}, + 'lightgreen/2': {'amd64': 'IGNORE-FAIL'}, + 'darkgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + }), + }, + {'green': [('old-version', '1'), ('new-version', '2')] + }) + + def test_hint_force_badtest_different_version(self): + '''force-badtest hint with non-matching version''' + + self.data.add_default_packages(green=False) 
+ + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), + 'testing/amd64/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), + 'testing/i386/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/1')), + 'testing/i386/l/lightgreen/20150101_100101@': (4, 'lightgreen 1', tr('green/2')), + 'testing/amd64/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/1')), + 'testing/amd64/l/lightgreen/20150101_100101@': (4, 'lightgreen 1', tr('green/2')), + 'testing/i386/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), + 'testing/amd64/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), + }}) + + # lower hint version should not apply + self.create_hint('autopkgtest', 'force-badtest lightgreen/0.1') + + exc = self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], + {'green': (False, {'green/2': {'amd64': 'PASS', 'i386': 'PASS'}, + 'lightgreen/1': {'amd64': 'REGRESSION', 'i386': 'REGRESSION'}, + 'darkgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + }), + }, + {'green': [('reason', 'autopkgtest')]} + )[1] + self.assertNotIn('forced-reason', exc['green']) + + # higher hint version should apply + self.create_hint('autopkgtest', 'force-badtest lightgreen/3') + self.run_it( + [], + {'green': (True, {'green/2': {'amd64': 'PASS', 'i386': 'PASS'}, + 'lightgreen/1': {'amd64': 'IGNORE-FAIL', 'i386': 'IGNORE-FAIL'}, + 'darkgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + }), + }, + {} + ) + + def test_hint_force_badtest_arch(self): + '''force-badtest hint for architecture instead of version''' + + self.data.add_default_packages(green=False) + + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), + 'testing/amd64/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), + 'testing/i386/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', 
tr('green/1')), + 'testing/i386/l/lightgreen/20150101_100101@': (4, 'lightgreen 1', tr('green/2')), + 'testing/amd64/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/1')), + 'testing/amd64/l/lightgreen/20150101_100101@': (4, 'lightgreen 1', tr('green/2')), + 'testing/i386/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), + 'testing/amd64/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), + }}) + + self.create_hint('autopkgtest', 'force-badtest lightgreen/amd64/all') + + self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], + {'green': (False, {'green/2': {'amd64': 'PASS', 'i386': 'PASS'}, + 'lightgreen/1': {'amd64': 'IGNORE-FAIL', 'i386': 'REGRESSION'}, + 'darkgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + }), + }, + {'green': [('old-version', '1'), ('new-version', '2')] + }) + + # hint i386 too, then it should become valid + self.create_hint('autopkgtest', 'force-badtest lightgreen/i386/all') + + self.run_it( + [], + {'green': (True, {'green/2': {'amd64': 'PASS', 'i386': 'PASS'}, + 'lightgreen/1': {'amd64': 'IGNORE-FAIL', 'i386': 'IGNORE-FAIL'}, + 'darkgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + }), + }, + {'green': [('old-version', '1'), ('new-version', '2')] + }) + + def test_hint_force_badtest_running(self): + '''force-badtest hint on running test''' + + self.data.add_default_packages(green=False) + + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), + 'testing/amd64/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), + 'testing/i386/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/1')), + 'testing/amd64/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/1')), + 'testing/i386/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), + 'testing/amd64/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), + }}) + + self.create_hint('autopkgtest', 'force-badtest 
lightgreen/1') + + self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], + {'green': (True, {'green/2': {'amd64': 'PASS', 'i386': 'PASS'}, + 'lightgreen': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, + 'darkgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + }), + }, + {'green': [('old-version', '1'), ('new-version', '2')] + }) + + def test_hint_force_skiptest(self): + '''force-skiptest hint''' + + self.data.add_default_packages(green=False) + + self.create_hint('autopkgtest', 'force-skiptest green/2') + + # regression of green, darkgreen ok, lightgreen running + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/g/green/20150101_100000@': (0, 'green 1', tr('passedbefore/1')), + 'testing/i386/g/green/20150101_100200@': (4, 'green 2', tr('green/2')), + 'testing/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), + 'testing/amd64/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), + }}) + self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], + {'green': (True, {'green/2': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'REGRESSION'}, + 'lightgreen': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, + 'darkgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + }), + }, + {'green': [('old-version', '1'), ('new-version', '2'), + ('reason', 'skiptest'), + ('excuses', 'Should wait for tests relating to green 2, but forced by autopkgtest')] + }) + + def test_hint_force_skiptest_different_version(self): + '''force-skiptest hint with non-matching version''' + + self.data.add_default_packages(green=False) + + # green has passed before on i386 only, therefore ALWAYSFAIL on amd64 + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/g/green/20150101_100000@': (0, 'green 1', tr('passedbefore/1')), + }}) + + self.create_hint('autopkgtest', 'force-skiptest green/1') + exc = self.run_it( + [('libgreen1', 
{'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], + {'green': (False, {'green': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING'}, + 'lightgreen': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, + 'darkgreen': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, + }), + }, + {'green': [('reason', 'autopkgtest')]} + )[1] + self.assertNotIn('forced-reason', exc['green']) + + def test_hint_blockall_runs_tests(self): + '''block-all hint still runs tests''' + + self.data.add_default_packages(lightgreen=False) + + self.create_hint('freeze', 'block-all source') + + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/l/lightgreen/20150101_100000@': (0, 'lightgreen 1', tr('passedbefore/1')), + 'testing/amd64/l/lightgreen/20150101_100000@': (0, 'lightgreen 1', tr('passedbefore/1')), + }}) + + self.run_it( + [('lightgreen', {'Version': '2'}, 'autopkgtest')], + {'lightgreen': (False, {'lightgreen': {'amd64': 'RUNNING', 'i386': 'RUNNING'}})} + ) + + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/l/lightgreen/20150101_100100@': (0, 'lightgreen 2', tr('lightgreen/2')), + 'testing/amd64/l/lightgreen/20150101_100100@': (0, 'lightgreen 2', tr('lightgreen/2')), + }}) + + self.run_it( + [], + {'lightgreen': (False, {'lightgreen/2': {'amd64': 'PASS', 'i386': 'PASS'}})}, + {'lightgreen': [('reason', 'block')]} + ) + + + ################################################################ + # Tests for non-hint policies (Ubuntu only) + ################################################################ + +### def test_lp_bug_block(self): +### self.data.add_default_packages(darkgreen=False) +### +### with open(os.path.join(self.data.path, 'data/unstable/Blocks'), 'w') as f: +### f.write('darkgreen 12345 1471505000\ndarkgreen 98765 1471500000\n') +### +### exc = self.run_it( +### [('darkgreen', {'Version': '2'}, 'autopkgtest')], +### {'darkgreen': (False, {'darkgreen': {'i386': 'RUNNING-ALWAYSFAIL', 'amd64': 
'RUNNING-ALWAYSFAIL'}})}, +### {'darkgreen': [('reason', 'block'), +### ('excuses', 'Not touching package as requested in bug 12345 on Thu Aug 18 07:23:20 2016'), +### ('is-candidate', False), +### ] +### } +### )[1] +### self.assertEqual(exc['darkgreen']['policy_info']['block-bugs'], +### {'12345': 1471505000, '98765': 1471500000}) + + + ################################################################ + # Kernel related tests + ################################################################ + + def test_detect_dkms_autodep8(self): + '''DKMS packages are autopkgtested (via autodep8)''' + + self.data.add('dkms', False, {}) + self.data.add('fancy-dkms', False, {'Source': 'fancy', 'Depends': 'dkms (>= 1)'}) + + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/f/fancy/20150101_100101@': (0, 'fancy 0.1', tr('passedbefore/1')) + }}) + + self.run_it( + [('dkms', {'Version': '2'}, None)], + {'dkms': (False, {'fancy': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING'}})}, + {'dkms': [('old-version', '1'), ('new-version', '2')]}) + + def test_kernel_triggers_dkms(self): + '''DKMS packages get triggered by kernel uploads''' + + self.data.add('dkms', False, {}) + self.data.add('fancy-dkms', False, {'Source': 'fancy', 'Depends': 'dkms (>= 1)'}) + + self.run_it( + [('linux-image-generic', {'Source': 'linux-meta'}, None), + ('linux-image-grumpy-generic', {'Source': 'linux-meta-lts-grumpy'}, None), + ('linux-image-64only', {'Source': 'linux-meta-64only', 'Architecture': 'amd64'}, None), + ], + {'linux-meta': (True, {'fancy': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}}), + 'linux-meta-lts-grumpy': (True, {'fancy': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}}), + 'linux-meta-64only': (True, {'fancy': {'amd64': 'RUNNING-ALWAYSFAIL'}}), + }) + + # one separate test should be triggered for each kernel + self.assertEqual( + self.amqp_requests, + set(['debci-testing-i386:fancy {"triggers": ["linux-meta/1"]}', + 
'debci-testing-amd64:fancy {"triggers": ["linux-meta/1"]}', + 'debci-testing-i386:fancy {"triggers": ["linux-meta-lts-grumpy/1"]}', + 'debci-testing-amd64:fancy {"triggers": ["linux-meta-lts-grumpy/1"]}', + 'debci-testing-amd64:fancy {"triggers": ["linux-meta-64only/1"]}'])) + + # ... and that they get recorded as pending + self.assertEqual(self.pending_requests, + {'linux-meta-lts-grumpy/1': {'fancy': ['amd64', 'i386']}, + 'linux-meta/1': {'fancy': ['amd64', 'i386']}, + 'linux-meta-64only/1': {'fancy': ['amd64']}}) + + def test_dkms_results_per_kernel(self): + '''DKMS results get mapped to the triggering kernel version''' + + self.data.add('dkms', False, {}) + self.data.add('fancy-dkms', False, {'Source': 'fancy', 'Depends': 'dkms (>= 1)'}) + + # works against linux-meta and -64only, fails against grumpy i386, no + # result yet for grumpy amd64 + self.swift.set_results({'autopkgtest-testing': { + 'testing/amd64/f/fancy/20150101_100301@': (0, 'fancy 0.5', tr('passedbefore/1')), + 'testing/i386/f/fancy/20150101_100101@': (0, 'fancy 1', tr('linux-meta/1')), + 'testing/amd64/f/fancy/20150101_100101@': (0, 'fancy 1', tr('linux-meta/1')), + 'testing/amd64/f/fancy/20150101_100201@': (0, 'fancy 1', tr('linux-meta-64only/1')), + 'testing/i386/f/fancy/20150101_100301@': (4, 'fancy 1', tr('linux-meta-lts-grumpy/1')), + }}) + + self.run_it( + [('linux-image-generic', {'Source': 'linux-meta'}, None), + ('linux-image-grumpy-generic', {'Source': 'linux-meta-lts-grumpy'}, None), + ('linux-image-64only', {'Source': 'linux-meta-64only', 'Architecture': 'amd64'}, None), + ], + {'linux-meta': (True, {'fancy/1': {'amd64': 'PASS', 'i386': 'PASS'}}), + 'linux-meta-lts-grumpy': (False, {'fancy/1': {'amd64': 'RUNNING', 'i386': 'ALWAYSFAIL'}}), + 'linux-meta-64only': (True, {'fancy/1': {'amd64': 'PASS'}}), + }) + + self.assertEqual(self.pending_requests, + {'linux-meta-lts-grumpy/1': {'fancy': ['amd64']}}) + + def test_dkms_results_per_kernel_old_results(self): + '''DKMS results get mapped 
to the triggering kernel version, old results''' + + self.data.add('dkms', False, {}) + self.data.add('fancy-dkms', False, {'Source': 'fancy', 'Depends': 'dkms (>= 1)'}) + + # works against linux-meta and -64only, fails against grumpy i386, no + # result yet for grumpy amd64 + self.swift.set_results({'autopkgtest-testing': { + # old results without trigger info + 'testing/i386/f/fancy/20140101_100101@': (0, 'fancy 1', {}), + 'testing/amd64/f/fancy/20140101_100101@': (8, 'fancy 1', {}), + # current results with triggers + 'testing/i386/f/fancy/20150101_100101@': (0, 'fancy 1', tr('linux-meta/1')), + 'testing/amd64/f/fancy/20150101_100101@': (0, 'fancy 1', tr('linux-meta/1')), + 'testing/amd64/f/fancy/20150101_100201@': (0, 'fancy 1', tr('linux-meta-64only/1')), + 'testing/i386/f/fancy/20150101_100301@': (4, 'fancy 1', tr('linux-meta-lts-grumpy/1')), + }}) + + self.run_it( + [('linux-image-generic', {'Source': 'linux-meta'}, None), + ('linux-image-grumpy-generic', {'Source': 'linux-meta-lts-grumpy'}, None), + ('linux-image-64only', {'Source': 'linux-meta-64only', 'Architecture': 'amd64'}, None), + ], + {'linux-meta': (True, {'fancy/1': {'amd64': 'PASS', 'i386': 'PASS'}}), + # we don't have an explicit result for amd64 + 'linux-meta-lts-grumpy': (False, {'fancy/1': {'amd64': 'RUNNING', 'i386': 'ALWAYSFAIL'}}), + 'linux-meta-64only': (True, {'fancy/1': {'amd64': 'PASS'}}), + }) + + self.assertEqual(self.pending_requests, + {'linux-meta-lts-grumpy/1': {'fancy': ['amd64']}}) + +### def test_kernel_triggered_tests(self): +### '''linux, lxc, glibc, systemd, snapd tests get triggered by linux-meta* uploads''' +### +### self.data.add('libc6-dev', False, {'Source': 'glibc', 'Depends': 'linux-libc-dev'}, +### testsuite='autopkgtest') +### self.data.add('libc6-dev', True, {'Source': 'glibc', 'Depends': 'linux-libc-dev'}, +### testsuite='autopkgtest') +### self.data.add('lxc', False, {}, testsuite='autopkgtest') +### self.data.add('lxc', True, {}, testsuite='autopkgtest') +### 
self.data.add('systemd', False, {}, testsuite='autopkgtest') +### self.data.add('systemd', True, {}, testsuite='autopkgtest') +### self.data.add('snapd', False, {}, testsuite='autopkgtest') +### self.data.add('snapd', True, {}, testsuite='autopkgtest') +### self.data.add('linux-image-1', False, {'Source': 'linux'}, testsuite='autopkgtest') +### self.data.add('linux-image-1', True, {'Source': 'linux'}, testsuite='autopkgtest') +### self.data.add('linux-libc-dev', False, {'Source': 'linux'}, testsuite='autopkgtest') +### self.data.add('linux-image', False, {'Source': 'linux-meta', 'Depends': 'linux-image-1'}) +### +### self.swift.set_results({'autopkgtest-testing': { +### 'testing/amd64/l/lxc/20150101_100101@': (0, 'lxc 0.1', tr('passedbefore/1')) +### }}) +### +### exc = self.run_it( +### [('linux-image', {'Version': '2', 'Depends': 'linux-image-2', 'Source': 'linux-meta'}, None), +### ('linux-image-64only', {'Source': 'linux-meta-64only', 'Architecture': 'amd64'}, None), +### ('linux-image-2', {'Version': '2', 'Source': 'linux'}, 'autopkgtest'), +### ('linux-libc-dev', {'Version': '2', 'Source': 'linux'}, 'autopkgtest'), +### ], +### {'linux-meta': (False, {'lxc': {'amd64': 'RUNNING', 'i386': 'RUNNING-ALWAYSFAIL'}, +### 'glibc': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, +### 'linux': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, +### 'systemd': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, +### 'snapd': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, +### }), +### 'linux-meta-64only': (False, {'lxc': {'amd64': 'RUNNING'}}), +### 'linux': (False, {}), +### })[1] +### # the kernel itself should not trigger tests; we want to trigger +### # everything from -meta +### self.assertEqual(exc['linux']['policy_info']['autopkgtest'], {}) + +### def test_kernel_waits_on_meta(self): +### '''linux waits on linux-meta''' +### +### self.data.add('dkms', False, {}) +### self.data.add('dkms', True, {}) +### 
self.data.add('fancy-dkms', False, {'Source': 'fancy', 'Depends': 'dkms (>= 1)'}) +### self.data.add('fancy-dkms', True, {'Source': 'fancy', 'Depends': 'dkms (>= 1)'}) +### self.data.add('linux-image-generic', False, {'Version': '0.1', 'Source': 'linux-meta', 'Depends': 'linux-image-1'}) +### self.data.add('linux-image-1', False, {'Source': 'linux'}, testsuite='autopkgtest') +### self.data.add('linux-firmware', False, {'Source': 'linux-firmware'}, testsuite='autopkgtest') +### +### self.swift.set_results({'autopkgtest-testing': { +### 'testing/i386/f/fancy/20150101_090000@': (0, 'fancy 0.5', tr('passedbefore/1')), +### 'testing/i386/l/linux/20150101_100000@': (0, 'linux 2', tr('linux-meta/0.2')), +### 'testing/amd64/l/linux/20150101_100000@': (0, 'linux 2', tr('linux-meta/0.2')), +### 'testing/i386/l/linux-firmware/20150101_100000@': (0, 'linux-firmware 2', tr('linux-firmware/2')), +### 'testing/amd64/l/linux-firmware/20150101_100000@': (0, 'linux-firmware 2', tr('linux-firmware/2')), +### }}) +### +### self.run_it( +### [('linux-image-generic', {'Version': '0.2', 'Source': 'linux-meta', 'Depends': 'linux-image-2'}, None), +### ('linux-image-2', {'Version': '2', 'Source': 'linux'}, 'autopkgtest'), +### ('linux-firmware', {'Version': '2', 'Source': 'linux-firmware'}, 'autopkgtest'), +### ], +### {'linux-meta': (False, {'fancy': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING'}, +### 'linux/2': {'amd64': 'PASS', 'i386': 'PASS'} +### }), +### # no tests, but should wait on linux-meta +### 'linux': (False, {}), +### # this one does not have a -meta, so don't wait +### 'linux-firmware': (True, {'linux-firmware/2': {'amd64': 'PASS', 'i386': 'PASS'}}), +### }, +### {'linux': [('reason', 'depends'), +### ('excuses', 'Invalidated by dependency'), +### ('dependencies', {'blocked-by': ['linux-meta']})] +### } +### ) +### +### # now linux-meta is ready to go +### self.swift.set_results({'autopkgtest-testing': { +### 'testing/i386/f/fancy/20150101_100000@': (0, 'fancy 1', 
tr('linux-meta/0.2')), +### 'testing/amd64/f/fancy/20150101_100000@': (0, 'fancy 1', tr('linux-meta/0.2')), +### }}) +### self.run_it( +### [], +### {'linux-meta': (True, {'fancy/1': {'amd64': 'PASS', 'i386': 'PASS'}, +### 'linux/2': {'amd64': 'PASS', 'i386': 'PASS'}}), +### 'linux': (True, {}), +### 'linux-firmware': (True, {'linux-firmware/2': {'amd64': 'PASS', 'i386': 'PASS'}}), +### }, +### {'linux': [('dependencies', {'migrate-after': ['linux-meta']})] +### } +### ) + + ################################################################ + # Tests for special-cased packages + ################################################################ + + def test_gcc(self): + '''gcc only triggers some key packages''' + + self.data.add('binutils', False, {}, testsuite='autopkgtest') + self.data.add('linux', False, {}, testsuite='autopkgtest') + self.data.add('notme', False, {'Depends': 'libgcc1'}, testsuite='autopkgtest') + + # binutils has passed before on i386 only, therefore ALWAYSFAIL on amd64 + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/b/binutils/20150101_100000@': (0, 'binutils 1', tr('passedbefore/1')), + }}) + + exc = self.run_it( + [('libgcc1', {'Source': 'gcc-5', 'Version': '2'}, None)], + {'gcc-5': (False, {'binutils': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING'}, + 'linux': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}})})[1] + self.assertNotIn('notme 1', exc['gcc-5']['policy_info']['autopkgtest']) + + def test_alternative_gcc(self): + '''alternative gcc does not trigger anything''' + + self.data.add('binutils', False, {}, testsuite='autopkgtest') + self.data.add('notme', False, {'Depends': 'libgcc1'}, testsuite='autopkgtest') + + exc = self.run_it( + [('libgcc1', {'Source': 'gcc-snapshot', 'Version': '2'}, None)], + {'gcc-snapshot': (True, {})})[1] + self.assertEqual(exc['gcc-snapshot']['policy_info']['autopkgtest'], {'verdict': 'PASS'}) + + ################################################################ + # Tests for 
non-default ADT_* configuration modes + ################################################################ + + def test_disable_adt(self): + '''Run without autopkgtest requests''' + + # Disable AMQP server config, to ensure we don't touch them with ADT + # disabled + for line in fileinput.input(self.britney_conf, inplace=True): + if line.startswith('ADT_ENABLE'): + print('ADT_ENABLE = no') + elif not line.startswith('ADT_AMQP') and not line.startswith('ADT_SWIFT_URL'): + sys.stdout.write(line) + + self.data.add_default_packages(green=False) + + exc = self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], + {'green': (True, {})}, + {'green': [('old-version', '1'), ('new-version', '2')]})[1] + self.assertNotIn('autopkgtest', exc['green']['policy_info']) + + self.assertEqual(self.amqp_requests, set()) + self.assertEqual(self.pending_requests, None) + + def test_ppas(self): + '''Run test requests with additional PPAs''' + + self.data.add_default_packages(lightgreen=False) + + for line in fileinput.input(self.britney_conf, inplace=True): + if line.startswith('ADT_PPAS'): + print('ADT_PPAS = joe/foo awesome-developers/staging') + else: + sys.stdout.write(line) + + exc = self.run_it( + [('lightgreen', {'Version': '2'}, 'autopkgtest')], + {'lightgreen': (True, {'lightgreen': {'amd64': 'RUNNING-ALWAYSFAIL'}})}, + {'lightgreen': [('old-version', '1'), ('new-version', '2')]} + )[1] + self.assertEqual(exc['lightgreen']['policy_info']['autopkgtest'], + {'lightgreen': { + 'amd64': ['RUNNING-ALWAYSFAIL', + 'https://autopkgtest.ubuntu.com/status/pending', + None, + None, + None], + 'i386': ['RUNNING-ALWAYSFAIL', + 'https://autopkgtest.ubuntu.com/status/pending', + None, + None, + None]}, + 'verdict': 'PASS'}) + + for arch in ['i386', 'amd64']: + self.assertTrue('debci-ppa-testing-%s:lightgreen {"triggers": ["lightgreen/2"], "ppas": ["joe/foo", "awesome-developers/staging"]}' % arch in self.amqp_requests or + 
'debci-ppa-testing-%s:lightgreen {"ppas": ["joe/foo", "awesome-developers/staging"], "triggers": ["lightgreen/2"]}' % arch in self.amqp_requests, + self.amqp_requests) + self.assertEqual(len(self.amqp_requests), 2) + + # add results to PPA specific swift container + self.swift.set_results({'autopkgtest-testing-awesome-developers-staging': { + 'testing/i386/l/lightgreen/20150101_100000@': (0, 'lightgreen 1', tr('passedbefore/1')), + 'testing/i386/l/lightgreen/20150101_100100@': (4, 'lightgreen 2', tr('lightgreen/2')), + 'testing/amd64/l/lightgreen/20150101_100101@': (0, 'lightgreen 2', tr('lightgreen/2')), + }}) + + exc = self.run_it( + [], + {'lightgreen': (False, {'lightgreen/2': {'i386': 'REGRESSION', 'amd64': 'PASS'}})}, + {'lightgreen': [('old-version', '1'), ('new-version', '2')]} + )[1] + self.assertEqual(exc['lightgreen']['policy_info']['autopkgtest'], + {'lightgreen/2': { + 'amd64': ['PASS', + 'http://localhost:18085/autopkgtest-testing-awesome-developers-staging/testing/amd64/l/lightgreen/20150101_100101@/log.gz', + None, + 'http://localhost:18085/autopkgtest-testing-awesome-developers-staging/testing/amd64/l/lightgreen/20150101_100101@/artifacts.tar.gz', + None], + 'i386': ['REGRESSION', + 'http://localhost:18085/autopkgtest-testing-awesome-developers-staging/testing/i386/l/lightgreen/20150101_100100@/log.gz', + None, + 'http://localhost:18085/autopkgtest-testing-awesome-developers-staging/testing/i386/l/lightgreen/20150101_100100@/artifacts.tar.gz', + 'https://autopkgtest.ubuntu.com/request.cgi?release=testing&arch=i386&package=lightgreen&' + 'trigger=lightgreen%2F2&ppa=joe%2Ffoo&ppa=awesome-developers%2Fstaging']}, + 'verdict': 'REJECTED_PERMANENTLY'}) + self.assertEqual(self.amqp_requests, set()) + self.assertEqual(self.pending_requests, {}) + + def test_disable_upgrade_tester(self): + '''Run without second stage upgrade tester''' + + self.data.add_default_packages(green=False) + + self.data.add('green', True, {'Depends': 'libc6 (>= 0.9), libgreen1', + 
'Conflicts': 'blue', 'Version': '2'}, + testsuite='autopkgtest') + + self.data.compute_migrations='--no-compute-migrations' + + self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], + {})[1] + + self.assertFalse(os.path.exists(os.path.join(self.data.path, 'output', 'output.txt'))) + self.assertNotEqual(self.amqp_requests, set()) + # must still record pending tests +#### Not sure why this doesn't work in the debian env. +### self.assertEqual(self.pending_requests, {'green/2': {'green': ['amd64', 'i386'], +### 'darkgreen': ['amd64', 'i386'], +### 'lightgreen': ['amd64', 'i386']}}) + + def test_shared_results_cache(self): + '''Run with shared r/o autopkgtest-results.cache''' + + self.data.add_default_packages(lightgreen=False) + + # first run to create autopkgtest-results.cache + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/l/lightgreen/20150101_100000@': (0, 'lightgreen 2', tr('lightgreen/2')), + 'testing/amd64/l/lightgreen/20150101_100000@': (0, 'lightgreen 2', tr('lightgreen/2')), + }}) + + self.run_it( + [('lightgreen', {'Version': '2', 'Depends': 'libc6'}, 'autopkgtest')], + {'lightgreen': (True, {'lightgreen/2': {'i386': 'PASS', 'amd64': 'PASS'}})}, + ) + + # move and remember original contents + local_path = os.path.join(self.data.path, 'data/testing/state/autopkgtest-results.cache') + shared_path = os.path.join(self.data.path, 'shared_results.cache') + os.rename(local_path, shared_path) + with open(shared_path) as f: + orig_contents = f.read() + + # enable shared cache + for line in fileinput.input(self.britney_conf, inplace=True): + if 'ADT_SHARED_RESULTS_CACHE' in line: + print('ADT_SHARED_RESULTS_CACHE = %s' % shared_path) + else: + sys.stdout.write(line) + + # second run, should now not update cache + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/l/lightgreen/20150101_100100@': (0, 'lightgreen 3', tr('lightgreen/3')), + 'testing/amd64/l/lightgreen/20150101_100100@': 
(0, 'lightgreen 3', tr('lightgreen/3')), + }}) + + self.data.remove_all(True) + self.run_it( + [('lightgreen', {'Version': '3', 'Depends': 'libc6'}, 'autopkgtest')], + {'lightgreen': (True, {'lightgreen/3': {'i386': 'PASS', 'amd64': 'PASS'}})}, + ) + + # leaves autopkgtest-results.cache untouched + self.assertFalse(os.path.exists(local_path)) + with open(shared_path) as f: + self.assertEqual(orig_contents, f.read()) + + ################################################################ + # Tests for source ppa grouping + ################################################################ + +### def test_sourceppa_policy(self): +### '''Packages from same source PPA get rejected for failed peer policy''' +### +### self.data.add_default_packages(green=False) +### +### ppa = 'devel/~ci-train-ppa-service/+archive/NNNN' +### self.sourceppa_cache['green'] = {'2': ppa} +### self.sourceppa_cache['red'] = {'2': ppa} +### with open(os.path.join(self.data.path, 'data/unstable/Blocks'), 'w') as f: +### f.write('green 12345 1471505000\ndarkgreen 98765 1471500000\n') +### +### exc = self.run_it( +### [('green', {'Version': '2'}, 'autopkgtest'), +### ('red', {'Version': '2'}, 'autopkgtest'), +### ('gcc-5', {}, 'autopkgtest')], +### {'green': (False, {'green': {'i386': 'RUNNING-ALWAYSFAIL', 'amd64': 'RUNNING-ALWAYSFAIL'}}), +### 'red': (False, {'red': {'i386': 'RUNNING-ALWAYSFAIL', 'amd64': 'RUNNING-ALWAYSFAIL'}}), +### 'gcc-5': (True, {}), +### }, +### {'green': [('reason', 'block')], +### 'red': [('reason', 'source-ppa')]} +### )[1] +### self.assertEqual(exc['red']['policy_info']['source-ppa'], {'red': ppa, 'green': ppa}) +### +### with open(os.path.join(self.data.path, 'data/unstable/SourcePPA')) as f: +### res = json.load(f) +### self.assertEqual(res, {'red': {'2': ppa}, +### 'green': {'2': ppa}, +### 'gcc-5': {'1': ''}}) + +### def test_sourceppa_missingbuild(self): +### '''Packages from same source PPA get rejected for failed peer FTBFS''' +### +### 
self.data.add_default_packages(green=False) +### +### ppa = 'devel/~ci-train-ppa-service/+archive/ZZZZ' +### self.sourceppa_cache['green'] = {'2': ppa} +### self.sourceppa_cache['red'] = {'2': ppa} +### +### self.data.add_src('green', True, {'Version': '2', 'Testsuite': 'autopkgtest'}) +### self.data.add('libgreen1', True, {'Version': '2', 'Source': 'green', 'Architecture': 'i386'}, add_src=False) +### self.data.add('green', True, {'Version': '2', 'Source': 'green'}, add_src=False) +### +### exc = self.run_it( +### [('red', {'Version': '2'}, 'autopkgtest')], +### {'green': (False, {}), 'red': (False, {})}, +### {'green': [('missing-builds', {'on-architectures': ['amd64', 'arm64', 'armhf', 'powerpc', 'ppc64el'], +### 'on-unimportant-architectures': []})], +### 'red': [('reason', 'source-ppa')]} +### )[1] +### self.assertEqual(exc['red']['policy_info']['source-ppa'], {'red': ppa, 'green': ppa}) + + + def test_swift_url_is_file(self): + '''Run without swift but with debci file (as Debian does)''' + '''Based on test_multi_rdepends_with_tests_regression''' + '''Multiple reverse dependencies with tests (regression)''' + + debci_file = os.path.join(self.data.path, 'debci.output') + + # Don't use swift but debci output file + for line in fileinput.input(self.britney_conf, inplace=True): + if line.startswith('ADT_SWIFT_URL'): + print('ADT_SWIFT_URL = file://%s' % debci_file) + else: + sys.stdout.write(line) + + with open(debci_file, 'w') as f: + f.write(''' +{ + "until": 12345, + "results": [ + {"trigger": "green/2", "package": "darkgreen", "arch": "i386", "version": "1", "status": "pass", "run_id": "100000"}, + {"trigger": "green/2", "package": "darkgreen", "arch": "amd64", "version": "1", "status": "pass", "run_id": "100000"}, + {"trigger": "green/1", "package": "lightgreen", "arch": "i386", "version": "1", "status": "pass", "run_id": "101000"}, + {"trigger": "green/2", "package": "lightgreen", "arch": "i386", "version": "1", "status": "fail", "run_id": "101001"}, + 
{"trigger": "green/1", "package": "lightgreen", "arch": "amd64", "version": "1", "status": "pass", "run_id": "101000"}, + {"trigger": "green/2", "package": "lightgreen", "arch": "amd64", "version": "1", "status": "fail", "run_id": "101001"}, + {"trigger": "green/2", "package": "green", "arch": "i386", "version": "2", "status": "pass", "run_id": "102000"}, + {"trigger": "green/1", "package": "green", "arch": "amd64", "version": "2", "status": "pass", "run_id": "102000"}, + {"trigger": "green/2", "package": "green", "arch": "amd64", "version": "2", "status": "fail", "run_id": "102001"} + ] +} +''') + + self.data.add_default_packages(green=False) + + out, exc, _ = self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], + {'green': (False, {'green/2': {'amd64': 'REGRESSION', 'i386': 'PASS'}, + 'lightgreen/1': {'amd64': 'REGRESSION', 'i386': 'REGRESSION'}, + 'darkgreen/1': {'amd64': 'PASS', 'i386': 'PASS'}, + }) + }, + {'green': [('old-version', '1'), ('new-version', '2')]} + ) + + # should have links to log and history, but no artifacts (as this is + # not a PPA) + self.assertEqual(exc['green']['policy_info']['autopkgtest']['lightgreen/1']['amd64'][0], + 'REGRESSION') + link = urllib.parse.urlparse(exc['green']['policy_info']['autopkgtest']['lightgreen/1']['amd64'][1]) + self.assertEqual(link.path[-53:], '/autopkgtest/testing/amd64/l/lightgreen/101001/log.gz') + self.assertEqual(exc['green']['policy_info']['autopkgtest']['lightgreen/1']['amd64'][2:4], + ['https://autopkgtest.ubuntu.com/packages/l/lightgreen/testing/amd64', + None]) + + # should have retry link for the regressions (not a stable URL, test + # separately) + link = urllib.parse.urlparse(exc['green']['policy_info']['autopkgtest']['lightgreen/1']['amd64'][4]) + self.assertEqual(link.netloc, 'autopkgtest.ubuntu.com') + self.assertEqual(link.path, '/request.cgi') + self.assertEqual(urllib.parse.parse_qs(link.query), + {'release': ['testing'], 'arch': ['amd64'], + 
'package': ['lightgreen'], 'trigger': ['green/2']}) + + # we already had all results before the run, so this should not trigger + # any new requests + self.assertEqual(self.amqp_requests, set()) + self.assertEqual(self.pending_requests, {}) + + # not expecting any failures to retrieve from swift + self.assertNotIn('Failure', out, out) + + def test_multi_rdepends_with_tests_mixed_penalty(self): + '''Bounty/penalty system instead of blocking + based on "Multiple reverse dependencies with tests (mixed results)"''' + + # Don't use policy verdicts, but age packages appropriately + for line in fileinput.input(self.britney_conf, inplace=True): + if line.startswith('MINDAYS_MEDIUM'): + print('MINDAYS_MEDIUM = 13') + elif line.startswith('ADT_SUCCESS_BOUNTY'): + print('ADT_SUCCESS_BOUNTY = 6') + elif line.startswith('ADT_REGRESSION_PENALTY'): + print('ADT_REGRESSION_PENALTY = 27') + else: + sys.stdout.write(line) + + self.data.add_default_packages(green=False) + + # green has passed before on i386 only, therefore ALWAYSFAIL on amd64 + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/g/green/20150101_100000@': (0, 'green 1', tr('passedbefore/1')), + }}) + + # first run requests tests and marks them as pending + exc = self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], + {'green': (False, {'green': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING'}, + 'lightgreen': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, + 'darkgreen': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'RUNNING-ALWAYSFAIL'}, + }) + }, + {'green': [('old-version', '1'), ('new-version', '2')]})[1] + + # while no autopkgtest results are known, penalty applies + self.assertEqual(exc['green']['policy_info']['age']['age-requirement'], 40) + + # second run collects the results + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), + 
'testing/amd64/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/1')), + 'testing/amd64/l/lightgreen/20150101_100101@': (4, 'lightgreen 1', tr('green/2')), + 'testing/i386/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), + 'testing/amd64/g/green/20150101_100201@': (4, 'green 2', tr('green/2')), + # unrelated results (wrong trigger), ignore this! + 'testing/amd64/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/1')), + 'testing/i386/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('blue/1')), + }}) + + res = self.run_it( + [], + {'green': (False, {'green/2': {'amd64': 'ALWAYSFAIL', 'i386': 'PASS'}, + 'lightgreen/1': {'amd64': 'REGRESSION', 'i386': 'RUNNING'}, + 'darkgreen/1': {'amd64': 'RUNNING', 'i386': 'PASS'}, + }) + }) + out = res[0] + exc = res[1] + + self.assertIn('Update Excuses generation completed', out) + # not expecting any failures to retrieve from swift + self.assertNotIn('Failure', out) + + # there should be some pending ones + self.assertEqual(self.pending_requests, + {'green/2': {'darkgreen': ['amd64'], 'lightgreen': ['i386']}}) + + # autopkgtest should not cause the package to be blocked + self.assertEqual(exc['green']['policy_info']['autopkgtest']['verdict'], 'PASS') + # instead, it should cause the age to sky-rocket + self.assertEqual(exc['green']['policy_info']['age']['age-requirement'], 40) + + def test_multi_rdepends_with_tests_no_penalty(self): + '''Check that penalties are not applied for "urgency >= high"''' + + # Don't use policy verdicts, but age packages appropriately + for line in fileinput.input(self.britney_conf, inplace=True): + if line.startswith('MINDAYS_MEDIUM'): + print('MINDAYS_MEDIUM = 13') + elif line.startswith('ADT_SUCCESS_BOUNTY'): + print('ADT_SUCCESS_BOUNTY = 6') + elif line.startswith('ADT_REGRESSION_PENALTY'): + print('ADT_REGRESSION_PENALTY = 27') + elif line.startswith('NO_PENALTIES'): + print('NO_PENALTIES = medium') + else: + sys.stdout.write(line) + + 
self.data.add_default_packages(green=False) + + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), + 'testing/amd64/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/1')), + 'testing/amd64/l/lightgreen/20150101_100101@': (4, 'lightgreen 1', tr('green/2')), + 'testing/i386/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), + 'testing/amd64/g/green/20150101_100201@': (4, 'green 2', tr('green/2')), + }}) + + exc = self.run_it( + [('libgreen1', {'Version': '2', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest')], + {'green': (False, {'green/2': {'amd64': 'ALWAYSFAIL', 'i386': 'PASS'}, + 'lightgreen/1': {'amd64': 'REGRESSION', 'i386': 'RUNNING-ALWAYSFAIL'}, + 'darkgreen/1': {'amd64': 'RUNNING-ALWAYSFAIL', 'i386': 'PASS'}, + }) + })[1] + + # age-requirement should remain the same despite regression + self.assertEqual(exc['green']['policy_info']['age']['age-requirement'], 13) + + def test_passing_package_receives_bounty(self): + '''Test bounty system (instead of policy verdict)''' + + # Don't use policy verdicts, but age packages appropriately + for line in fileinput.input(self.britney_conf, inplace=True): + if line.startswith('MINDAYS_MEDIUM'): + print('MINDAYS_MEDIUM = 13') + elif line.startswith('ADT_SUCCESS_BOUNTY'): + print('ADT_SUCCESS_BOUNTY = 6') + elif line.startswith('ADT_REGRESSION_PENALTY'): + print('ADT_REGRESSION_PENALTY = 27') + else: + sys.stdout.write(line) + + self.data.add_default_packages(green=False) + + self.swift.set_results({'autopkgtest-testing': { + 'testing/i386/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), + 'testing/amd64/d/darkgreen/20150101_100000@': (0, 'darkgreen 1', tr('green/2')), + 'testing/i386/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/2')), + 'testing/amd64/l/lightgreen/20150101_100100@': (0, 'lightgreen 1', tr('green/2')), + 'testing/i386/g/green/20150101_100200@': (0, 'green 2', tr('green/2')), 
+ 'testing/amd64/g/green/20150101_100201@': (0, 'green 2', tr('green/2')), + }}) + + exc = self.run_it( + [('green', {'Version': '2'}, 'autopkgtest')], + {'green': (False, {})}, + {})[1] + + # it should cause the age to drop + self.assertEqual(exc['green']['policy_info']['age']['age-requirement'], 8) + self.assertEqual(exc['green']['excuses'][-1], 'Required age is not allowed to drop below 8 days') + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_policy.py b/tests/test_policy.py index efc6b04..7dc0ec0 100644 --- a/tests/test_policy.py +++ b/tests/test_policy.py @@ -39,7 +39,7 @@ def create_excuse(name): def create_source_package(version, section='devel', binaries=None): if binaries is None: binaries = [] - return SourcePackage(version, section, binaries, 'Random tester', False, None) + return SourcePackage(version, section, binaries, 'Random tester', False, None, '', '') def create_policy_objects(source_name, target_version, source_version):