mirror of https://git.launchpad.net/~ubuntu-release/britney/+git/britney2-ubuntu
synced 2025-03-12 03:41:08 +00:00

commit 32f33baf09: Merge with trunk, port to Python 3
@@ -16,8 +16,6 @@
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

from __future__ import print_function

import os
import time
import json
@@ -25,7 +23,8 @@ import tarfile
import io
import copy
import itertools
from urllib import urlencode, urlopen
from urllib.parse import urlencode
from urllib.request import urlopen

import apt_pkg
import kombu
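Note on the import hunk above: Python 3 split the old urllib module, so urlencode now lives in urllib.parse and urlopen in urllib.request. A minimal sketch of the new spelling (illustrative only, not part of this commit; the URL and query values are made up):

    from urllib.parse import urlencode
    from urllib.request import urlopen

    query = urlencode({'release': 'vivid', 'arch': 'amd64'})   # hypothetical parameters
    with urlopen('https://autopkgtest.example.com/results?' + query) as resp:   # hypothetical URL
        payload = resp.read()   # bytes under Python 3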
@@ -209,7 +208,7 @@ class AutoPackageTest(object):
tests.append((rdep_src, rdep_src_info[VERSION]))
reported_pkgs.add(rdep_src)

tests.sort(key=lambda (s, v): s)
tests.sort(key=lambda s_v: s_v[0])
return tests

#
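The sort-key rewrite above is forced by PEP 3113: Python 3 removed tuple unpacking in function and lambda parameters, so lambda (s, v): s is a syntax error there. A short illustrative sketch with made-up data:

    tests = [('b-src', '2.0'), ('a-src', '1.5')]   # hypothetical (source, version) pairs
    # Python 2 only:  tests.sort(key=lambda (s, v): s)
    tests.sort(key=lambda s_v: s_v[0])             # Python 3: index into the tuple
    # equivalently:
    from operator import itemgetter
    tests.sort(key=itemgetter(0))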
@@ -321,7 +320,7 @@ class AutoPackageTest(object):
try:
f = urlopen(url)
if f.getcode() == 200:
result_paths = f.read().strip().splitlines()
result_paths = f.read().decode().strip().splitlines()
elif f.getcode() == 204: # No content
result_paths = []
else:
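The added .decode() above is needed because urlopen(...).read() returns bytes in Python 3 while text processing is wanted here. A hedged sketch of the same pattern (the URL is hypothetical):

    from urllib.request import urlopen

    f = urlopen('https://swift.example.com/container?delimiter=/')   # hypothetical URL
    if f.getcode() == 200:
        # decode the byte payload before doing text processing
        result_paths = f.read().decode().strip().splitlines()
    elif f.getcode() == 204:   # no content
        result_paths = []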
@@ -417,9 +416,9 @@ class AutoPackageTest(object):
'''Return (src, arch) set for failed tests for given trigger pkg'''

result = set()
for src, srcinfo in self.test_results.iteritems():
for arch, (stamp, vermap, ever_passed) in srcinfo.iteritems():
for ver, (passed, triggers) in vermap.iteritems():
for src, srcinfo in self.test_results.items():
for arch, (stamp, vermap, ever_passed) in srcinfo.items():
for ver, (passed, triggers) in vermap.items():
if not passed:
# triggers might contain tuples or lists (after loading
# from json), so iterate/check manually
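dict.iteritems() no longer exists in Python 3; dict.items() returns a lazy view, so the straight rename above keeps the behaviour. Tiny illustrative sketch with made-up data:

    results = {'src-a': {'amd64': 'PASS'}, 'src-b': {'i386': 'FAIL'}}
    for src, archinfo in results.items():          # items() is a view, not a list copy
        for arch, status in archinfo.items():
            print(src, arch, status)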
@@ -491,7 +490,7 @@ class AutoPackageTest(object):
# update results from swift for all packages that we are waiting
# for, and remove pending tests that we have results for on all
# arches
for pkg, verinfo in copy.deepcopy(self.pending_tests.items()):
for pkg, verinfo in copy.deepcopy(self.pending_tests).items():
for archinfo in verinfo.values():
for arch in archinfo:
self.fetch_swift_results(self.britney.options.adt_swift_url, pkg, arch)
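The reordering of deepcopy and .items() above matters because the surrounding code removes entries from self.pending_tests while looping: iterating a snapshot (a deep copy of the dict) is safe, whereas mutating a dict while iterating its live view raises RuntimeError in Python 3. Illustrative sketch with hypothetical data:

    import copy

    pending = {'pkg-a': {'1.0': ['amd64']}, 'pkg-b': {'2.0': ['i386']}}
    for pkg, verinfo in copy.deepcopy(pending).items():   # iterate a snapshot
        if verinfo:
            pending.pop(pkg, None)   # safe: the live dict is not the one being iterated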
boottest.py (10 lines changed)
@@ -11,7 +11,7 @@
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from __future__ import print_function


from collections import defaultdict
from contextlib import closing
@@ -20,7 +20,7 @@ import subprocess
import tempfile
from textwrap import dedent
import time
import urllib
import urllib.request

import apt_pkg
@@ -90,7 +90,7 @@ class TouchManifest(object):
print("I: [%s] - saving it to %s" %
(time.asctime(), self.path))
try:
response = urllib.urlopen(url)
response = urllib.request.urlopen(url)
if response.code == 200:
# Only [re]create the manifest file if one was successfully
# downloaded. This allows for an existing image to be used
@@ -98,7 +98,7 @@ class TouchManifest(object):
path_dir = os.path.dirname(self.path)
if not os.path.exists(path_dir):
os.makedirs(path_dir)
with open(self.path, 'w') as fp:
with open(self.path, 'wb') as fp:
fp.write(response.read())
success = True
break
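Since the downloaded manifest is bytes in Python 3, the file is opened in binary mode ('wb') before response.read() is written out. A minimal sketch of the pattern (path and URL are hypothetical):

    import os
    from urllib.request import urlopen

    path = '/tmp/images/touch.manifest'                          # hypothetical path
    response = urlopen('https://cdimage.example.com/manifest')   # hypothetical URL
    if response.code == 200:
        os.makedirs(os.path.dirname(path), exist_ok=True)
        with open(path, 'wb') as fp:     # binary mode: the payload is bytes
            fp.write(response.read())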
@@ -266,7 +266,7 @@ class BootTest(object):
if not self.britney.options.verbose:
return
for src in sorted(self.pkglist):
for ver in sorted(self.pkglist[src], cmp=apt_pkg.version_compare):
for ver in sorted(self.pkglist[src]):
status = self.pkglist[src][ver]
print("I: [%s] - Collected boottest status for %s_%s: "
"%s" % (time.asctime(), src, ver, status))
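sorted() lost its cmp= argument in Python 3. The commit simply drops the comparator here, falling back to plain string ordering of the version keys; if the apt version ordering is still wanted, the usual port is functools.cmp_to_key. A hedged sketch (assumes the python3-apt binding is available):

    from functools import cmp_to_key
    import apt_pkg

    apt_pkg.init_system()
    versions = ['1.0~beta', '1.0', '0.9']
    # Python 2: sorted(versions, cmp=apt_pkg.version_compare)
    ordered = sorted(versions, key=cmp_to_key(apt_pkg.version_compare))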
britney.py (524 lines changed; file diff suppressed because it is too large)
@@ -24,7 +24,7 @@
import apt_pkg
from functools import partial
from datetime import datetime
from itertools import chain, ifilter, ifilterfalse, izip, repeat
from itertools import chain, repeat, filterfalse
import os
import re
import time
@@ -76,8 +76,8 @@ def ifilter_except(container, iterable=None):
iterators that are not known on beforehand.
"""
if iterable is not None:
return ifilterfalse(container.__contains__, iterable)
return partial(ifilterfalse, container.__contains__)
return filterfalse(container.__contains__, iterable)
return partial(filterfalse, container.__contains__)


def ifilter_only(container, iterable=None):
@@ -89,8 +89,8 @@ def ifilter_only(container, iterable=None):
iterators that are not known on beforehand.
"""
if iterable is not None:
return ifilter(container.__contains__, iterable)
return partial(ifilter, container.__contains__)
return filter(container.__contains__, iterable)
return partial(filter, container.__contains__)


# iter_except is from the "itertools" recipe
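itertools.ifilter, ifilterfalse and izip are gone in Python 3: the builtins filter() and zip() are lazy now, and ifilterfalse became itertools.filterfalse, which is what the rewritten helpers above rely on. Tiny illustrative sketch:

    from itertools import filterfalse

    container = {'a', 'b'}
    items = ['a', 'b', 'c', 'd']
    list(filterfalse(container.__contains__, items))   # ['c', 'd']
    list(filter(container.__contains__, items))        # ['a', 'b']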
@@ -120,7 +120,7 @@ def iter_except(func, exception, first=None):


def undo_changes(lundo, inst_tester, sources, binaries,
BINARIES=BINARIES, PROVIDES=PROVIDES):
BINARIES=BINARIES):
"""Undoes one or more changes to testing

* lundo is a list of (undo, item)-tuples
@@ -225,7 +225,7 @@ def register_reverses(packages, provides, check_doubles=True, iterator=None,
the loops.
"""
if iterator is None:
iterator = packages.iterkeys()
iterator = packages.keys()
else:
iterator = ifilter_only(packages, iterator)
@@ -303,7 +303,7 @@ def compute_reverse_tree(packages_s, pkg, arch,
# generate the next iteration, which is the reverse-dependencies of
# the current iteration
rev_deps = set(revfilt(flatten( binaries[x][RDEPENDS] for x in binfilt(rev_deps) )))
return izip(seen, repeat(arch))
return zip(seen, repeat(arch))


def write_nuninst(filename, nuninst):
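The izip to zip change above keeps the laziness of the Python 2 code, because zip() itself returns an iterator in Python 3; callers that need a concrete list must wrap it explicitly. Sketch with made-up data:

    from itertools import repeat

    seen = {('pkg', '1.0', 'amd64')}      # hypothetical reverse-dependency set
    pairs = zip(seen, repeat('amd64'))    # lazy, like izip on Python 2
    as_list = list(pairs)                 # materialise only when needed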
@@ -312,7 +312,7 @@ def write_nuninst(filename, nuninst):
Write the non-installable report derived from "nuninst" to the
file denoted by "filename".
"""
with open(filename, 'w') as f:
with open(filename, 'w', encoding='utf-8') as f:
# Having two fields with (almost) identical dates seems a bit
# redundant.
f.write("Built on: " + time.strftime("%Y.%m.%d %H:%M:%S %z", time.gmtime(time.time())) + "\n")
@@ -329,7 +329,7 @@ def read_nuninst(filename, architectures):
will be included in the report.
"""
nuninst = {}
with open(filename) as f:
with open(filename, encoding='ascii') as f:
for r in f:
if ":" not in r: continue
arch, packages = r.strip().split(":", 1)
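Text-mode open() in Python 3 otherwise falls back to the locale's preferred encoding, so this series of hunks pins an explicit encoding ('utf-8' or 'ascii') on every report file Britney reads or writes. Minimal sketch (the filename is hypothetical):

    with open('nuninst.report', 'w', encoding='utf-8') as f:    # hypothetical file
        f.write("Built on: 2015.01.01 00:00:00 +0000\n")

    with open('nuninst.report', encoding='ascii') as f:
        lines = f.read().splitlines()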
@@ -387,7 +387,7 @@ def write_heidi(filename, sources_t, packages_t,
The "X=X" parameters are optimizations to avoid "load global" in
the loops.
"""
with open(filename, 'w') as f:
with open(filename, 'w', encoding='ascii') as f:

# write binary packages
for arch in sorted(packages_t):
@@ -426,7 +426,7 @@ def write_heidi_delta(filename, all_selected):

The order corresponds to that shown in update_output.
"""
with open(filename, "w") as fd:
with open(filename, "w", encoding='ascii') as fd:

fd.write("#HeidiDelta\n")

@@ -463,7 +463,7 @@ def write_excuses(excuses, dest_file, output_format="yaml"):
"""
if output_format == "yaml":
ensuredir(os.path.dirname(dest_file))
with open(dest_file, 'w') as f:
with open(dest_file, 'w', encoding='utf-8') as f:
excuselist = []
for e in excuses:
excuselist.append(e.excusedata())
@@ -473,7 +473,7 @@ def write_excuses(excuses, dest_file, output_format="yaml"):
f.write(yaml.dump(excusesdata, default_flow_style=False, allow_unicode=True))
elif output_format == "legacy-html":
ensuredir(os.path.dirname(dest_file))
with open(dest_file, 'w') as f:
with open(dest_file, 'w', encoding='utf-8') as f:
f.write("<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n")
f.write("<html><head><title>excuses...</title>")
f.write("<meta http-equiv=\"Content-Type\" content=\"text/html;charset=utf-8\"></head><body>\n")
@@ -491,13 +491,13 @@ def write_sources(sources_s, filename):
"""Write a sources file from Britney's state for a given suite

Britney discards fields she does not care about, so the resulting
file omitts a lot of regular fields.
file omits a lot of regular fields.
"""

key_pairs = ((VERSION, 'Version'), (SECTION, 'Section'),
(MAINTAINER, 'Maintainer'))

with open(filename, 'w') as f:
with open(filename, 'w', encoding='utf-8') as f:
for src in sources_s:
src_data = sources_s[src]
output = "Package: %s\n" % src
@@ -528,7 +528,7 @@ def write_controlfiles(sources, packages, suite, basedir):
for arch in packages_s:
filename = os.path.join(basedir, 'Packages_%s' % arch)
binaries = packages_s[arch][0]
with open(filename, 'w') as f:
with open(filename, 'w', encoding='utf-8') as f:
for pkg in binaries:
output = "Package: %s\n" % pkg
bin_data = binaries[pkg]
@@ -605,3 +605,18 @@ def is_nuninst_asgood_generous(architectures, old, new, break_arches=frozenset()
continue
diff = diff + (len(new[arch]) - len(old[arch]))
return diff <= 0


def clone_nuninst(nuninst, packages_s, architectures):
"""Selectively deep clone nuninst

Given nuninst table, the package table for a given suite and
a list of architectures, this function will clone the nuninst
table. Only the listed architectures will be deep cloned -
the rest will only be shallow cloned.
"""
clone = nuninst.copy()
for arch in architectures:
clone[arch] = set(x for x in nuninst[arch] if x in packages_s[arch][0])
clone[arch + "+all"] = set(x for x in nuninst[arch + "+all"] if x in packages_s[arch][0])
return clone
@@ -39,7 +39,6 @@ class Completer(object):
complete = []
tpu = []
for e in britney.excuses:
ver = None
pkg = e.name
suite = 'unstable'
if pkg[0] == '-':
excuse.py (25 lines changed)
@@ -15,8 +15,6 @@
# GNU General Public License for more details.

import re
import string


class Excuse(object):
"""Excuse class
@@ -65,6 +63,11 @@ class Excuse(object):
self.reason = {}
self.htmlline = []

def sortkey(self):
if self.daysold == None:
return (-1, self.name)
return (self.daysold, self.name)

@property
def is_valid(self):
return self._is_valid
@@ -151,7 +154,7 @@ class Excuse(object):
(self.name, self.name, lp_pkg, self.name, lp_old, lp_new))
if self.maint:
res = res + "<li>Maintainer: %s\n" % (self.maint)
if self.section and string.find(self.section, "/") > -1:
if self.section and self.section.find("/") > -1:
res = res + "<li>Section: %s\n" % (self.section)
if self.daysold != None:
if self.mindays == 0:
@@ -165,7 +168,7 @@ class Excuse(object):
for x in self.htmlline:
res = res + "<li>" + x + "\n"
lastdep = ""
for x in sorted(self.deps, lambda x,y: cmp(x.split('/')[0], y.split('/')[0])):
for x in sorted(self.deps, key=lambda x: x.split('/')[0]):
dep = x.split('/')[0]
if dep == lastdep: continue
lastdep = dep
@@ -198,14 +201,8 @@ class Excuse(object):
(self.name, self.ver[0], self.ver[1]))
if self.maint:
maint = self.maint
# ugly hack to work around strange encoding in pyyaml
# should go away with pyyaml in python 3
try:
maint.decode('ascii')
except UnicodeDecodeError:
maint = unicode(self.maint,'utf-8')
res.append("Maintainer: %s" % maint)
if self.section and string.find(self.section, "/") > -1:
if self.section and self.section.find("/") > -1:
res.append("Section: %s" % (self.section))
if self.daysold != None:
if self.mindays == 0:
@@ -219,7 +216,7 @@ class Excuse(object):
for x in self.htmlline:
res.append("" + x + "")
lastdep = ""
for x in sorted(self.deps, lambda x,y: cmp(x.split('/')[0], y.split('/')[0])):
for x in sorted(self.deps, key=lambda x: x.split('/')[0]):
dep = x.split('/')[0]
if dep == lastdep: continue
lastdep = dep
@@ -246,10 +243,10 @@ class Excuse(object):
excusedata["new-bugs"] = sorted(self.newbugs)
excusedata["old-bugs"] = sorted(self.oldbugs)
if self.forced:
excusedata["forced-reason"] = self.reason.keys()
excusedata["forced-reason"] = list(self.reason.keys())
excusedata["reason"] = []
else:
excusedata["reason"] = self.reason.keys()
excusedata["reason"] = list(self.reason.keys())
excusedata["is-candidate"] = self.is_valid
return excusedata
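In Python 3 dict.keys() returns a view object rather than a list, and the excusedata dictionary above feeds the YAML excuses output, where a dict_keys view is presumably not serialisable as-is, hence the explicit list(...) wrappers. Sketch:

    reason = {'autopkgtest': True, 'block': True}   # hypothetical reason table
    excusedata = {}
    excusedata["reason"] = list(reason.keys())      # a plain, serialisable list
    # reason.keys() alone would be a dict_keys view, not a list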
hints.py (6 lines changed)
@@ -12,6 +12,8 @@
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

from __future__ import print_function

from migrationitem import MigrationItem

class HintCollection(object):
@@ -22,7 +24,7 @@ class HintCollection(object):
return self.search(type)

def search(self, type=None, onlyactive=True, package=None, \
version=None, days=None, removal=None):
version=None, removal=None):

return [ hint for hint in self._hints if
(type is None or type == hint.type) and
@@ -36,7 +38,7 @@ class HintCollection(object):
try:
self._hints.append(Hint(hint, user))
except AssertionError:
print "Ignoring broken hint %r from %s" % (hint, user)
print("Ignoring broken hint %r from %s" % (hint, user))

class Hint(object):
NO_VERSION = [ 'block', 'block-all', 'block-udeb' ]
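print is a function rather than a statement in Python 3; combined with the __future__ import added at the top of the file, the same call also runs under Python 2 during the transition. Sketch:

    from __future__ import print_function   # harmless no-op on Python 3

    hint, user = 'force-badtest foo/1.0', 'someone'   # hypothetical values
    print("Ignoring broken hint %r from %s" % (hint, user))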
@@ -29,7 +29,7 @@ class _RelationBuilder(object):
self._new_breaks = set(binary_data[1])


def add_dependency_clause(self, or_clause, frozenset=frozenset):
def add_dependency_clause(self, or_clause):
"""Add a dependency clause

The clause must be a sequence of (name, version, architecture)
@@ -48,7 +48,6 @@ class _RelationBuilder(object):
clause = self._itbuilder._intern_set(or_clause)
binary = self._binary
itbuilder = self._itbuilder
package_table = itbuilder._package_table
okay = False
for dep_tuple in clause:
okay = True
@@ -388,7 +387,7 @@ class InstallabilityTesterBuilder(object):
ekey = (deps, con, rdeps)
find_eqv_table[ekey].append(pkg)

for pkg_list in find_eqv_table.itervalues():
for pkg_list in find_eqv_table.values():
if len(pkg_list) < 2:
continue
@@ -14,7 +14,8 @@
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

from functools import partial
from __future__ import print_function

import os

from installability.tester import InstallabilityTester
@@ -43,8 +44,8 @@ class InstallabilitySolver(InstallabilityTester):
- NB: arch:all packages are "re-mapped" to given architecture.
(simplifies caches and dependency checking)
"""
InstallabilityTester.__init__(self, universe, revuniverse, testing,
broken, essentials, safe_set, eqv_table)
super().__init__(universe, revuniverse, testing,
broken, essentials, safe_set, eqv_table)


def solve_groups(self, groups):
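Python 3 permits the argument-free super() used above instead of naming the base class explicitly, which is less fragile when the hierarchy changes. Illustrative sketch, not the real class bodies:

    class Tester(object):
        def __init__(self, universe):
            self.universe = universe

    class Solver(Tester):
        def __init__(self, universe):
            # Python 2 style: Tester.__init__(self, universe)
            super().__init__(universe)   # Python 3: no class/instance arguments needed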
@@ -106,7 +107,7 @@ class InstallabilitySolver(InstallabilityTester):
# "Self-conflicts" => ignore
continue
if debug_solver and other not in order[key]['before']:
print "N: Conflict induced order: %s before %s" % (key, other)
print("N: Conflict induced order: %s before %s" % (key, other))
order[key]['before'].add(other)
order[other]['after'].add(key)

@@ -125,7 +126,7 @@ class InstallabilitySolver(InstallabilityTester):
# "Self-dependency" => ignore
continue
if debug_solver and other not in order[key]['after']:
print "N: Removal induced order: %s before %s" % (key, other)
print("N: Removal induced order: %s before %s" % (key, other))
order[key]['after'].add(other)
order[other]['before'].add(key)

@@ -162,13 +163,13 @@ class InstallabilitySolver(InstallabilityTester):

for other in (other_adds - other_rms):
if debug_solver and other != key and other not in order[key]['after']:
print "N: Dependency induced order (add): %s before %s" % (key, other)
print("N: Dependency induced order (add): %s before %s" % (key, other))
order[key]['after'].add(other)
order[other]['before'].add(key)

for other in (other_rms - other_adds):
if debug_solver and other != key and other not in order[key]['before']:
print "N: Dependency induced order (remove): %s before %s" % (key, other)
print("N: Dependency induced order (remove): %s before %s" % (key, other))
order[key]['before'].add(other)
order[other]['after'].add(key)

@@ -208,7 +209,7 @@ class InstallabilitySolver(InstallabilityTester):
merged[n] = scc_id
del order[n]
if debug_solver:
print "N: SCC: %s -- %s" % (scc_id, str(sorted(com)))
print("N: SCC: %s -- %s" % (scc_id, str(sorted(com))))

for com in comps:
node = com[0]
@@ -223,27 +224,27 @@ class InstallabilitySolver(InstallabilityTester):


if debug_solver:
print "N: -- PARTIAL ORDER --"
print("N: -- PARTIAL ORDER --")

for com in sorted(order):
if debug_solver and order[com]['before']:
print "N: %s <= %s" % (com, str(sorted(order[com]['before'])))
print("N: %s <= %s" % (com, str(sorted(order[com]['before']))))
if not order[com]['after']:
# This component can be scheduled immediately, add it
# to "check"
check.add(com)
elif debug_solver:
print "N: %s >= %s" % (com, str(sorted(order[com]['after'])))
print("N: %s >= %s" % (com, str(sorted(order[com]['after']))))

if debug_solver:
print "N: -- END PARTIAL ORDER --"
print "N: -- LINEARIZED ORDER --"
print("N: -- END PARTIAL ORDER --")
print("N: -- LINEARIZED ORDER --")

for cur in iter_except(check.pop, KeyError):
if order[cur]['after'] <= emitted:
# This item is ready to be emitted right now
if debug_solver:
print "N: %s -- %s" % (cur, sorted(scc[cur]))
print("N: %s -- %s" % (cur, sorted(scc[cur])))
emitted.add(cur)
result.append([key2item[x] for x in scc[cur]])
if order[cur]['before']:
@@ -254,7 +255,7 @@ class InstallabilitySolver(InstallabilityTester):
check.update(order[cur]['before'] - emitted)

if debug_solver:
print "N: -- END LINEARIZED ORDER --"
print("N: -- END LINEARIZED ORDER --")

return result

@@ -301,8 +302,8 @@ class InstallabilitySolver(InstallabilityTester):
return result

def _dump_groups(self, groups):
print "N: === Groups ==="
print("N: === Groups ===")
for (item, adds, rms) in groups:
print "N: %s => A: %s, R: %s" % (str(item), str(adds), str(rms))
print "N: === END Groups ==="
print("N: %s => A: %s, R: %s" % (str(item), str(adds), str(rms)))
print("N: === END Groups ===")
@@ -12,11 +12,13 @@
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

from collections import defaultdict
from functools import partial
from itertools import ifilter, ifilterfalse
from itertools import chain, filterfalse

from britney_util import iter_except


class InstallabilityTester(object):

def __init__(self, universe, revuniverse, testing, broken, essentials,
@@ -52,6 +54,7 @@ class InstallabilityTester(object):
self._revuniverse = revuniverse
self._safe_set = safe_set
self._eqv_table = eqv_table
self._stats = InstallabilityStats()

# Cache of packages known to be broken - we deliberately do not
# include "broken" in it. See _optimize for more info.
@@ -84,7 +87,7 @@ class InstallabilityTester(object):
eqv_table = self._eqv_table
testing = self._testing
tcopy = [x for x in testing]
for t in ifilterfalse(cache_inst.__contains__, tcopy):
for t in filterfalse(cache_inst.__contains__, tcopy):
if t in cbroken:
continue
res = check_inst(t)
@@ -97,13 +100,16 @@ class InstallabilityTester(object):
testing -= eqv_set
cbroken |= eqv_set

@property
def stats(self):
return self._stats

def are_equivalent(self, p1, p2):
"""Test if p1 and p2 are equivalent

Returns True if p1 and p2 have the same "signature" in
the package dependency graph (i.e. relations can not tell
them appart sematically except for their name)
them apart semantically except for their name)
"""
eqv_table = self._eqv_table
return p1 in eqv_table and p2 in eqv_table[p1]
@@ -113,7 +119,7 @@ class InstallabilityTester(object):
"""Add a binary package to "testing"

If the package is not known, this method will throw an
Keyrror.
KeyError.
"""

t = (pkg_name, pkg_version, pkg_arch)
@@ -125,6 +131,8 @@ class InstallabilityTester(object):
self._testing.add(t)
elif t not in self._testing:
self._testing.add(t)
if self._cache_inst:
self._stats.cache_drops += 1
self._cache_inst = set()
if self._cache_broken:
# Re-add broken packages as some of them may now be installable
@@ -163,6 +171,7 @@ class InstallabilityTester(object):
if t not in self._broken and t in self._cache_inst:
# It is in our cache (and not guaranteed to be broken) - throw out the cache
self._cache_inst = set()
self._stats.cache_drops += 1

return True

@@ -176,17 +185,21 @@ class InstallabilityTester(object):
Returns False otherwise.
"""

self._stats.is_installable_calls += 1
t = (pkg_name, pkg_version, pkg_arch)

if t not in self._universe:
raise KeyError(str(t))

if t not in self._testing or t in self._broken:
self._stats.cache_hits += 1
return False

if t in self._cache_inst:
self._stats.cache_hits += 1
return True

self._stats.cache_misses += 1
return self._check_inst(t)
@@ -194,8 +207,9 @@
# See the explanation of musts, never and choices below.

cache_inst = self._cache_inst
stats = self._stats

if t in cache_inst and not never:
if musts and t in cache_inst and not never:
# use the inst cache only for direct queries/simple queries.
cache = True
if choices:
@@ -213,7 +227,6 @@
if cache:
return True


universe = self._universe
testing = self._testing
cbroken = self._cache_broken
@@ -254,15 +267,14 @@
# set conflicts with t - either way, t is f***ed
cbroken.add(t)
testing.remove(t)
stats.conflicts_essential += 1
return False
musts.update(start)
never.update(ess_never)

# curry check_loop
check_loop = partial(self._check_loop, universe, testing,
eqv_table, musts, never, choices,
cbroken)

eqv_table, stats, musts, never, cbroken)

# Useful things to remember:
#
||||
#
|
||||
# * check never includes choices (these are always in choices)
|
||||
#
|
||||
# * A package is installable if never and musts are disjoined
|
||||
# * A package is installable if never and musts are disjointed
|
||||
# and both check and choices are empty.
|
||||
# - exception: _pick_choice may determine the installability
|
||||
# of t via recursion (calls _check_inst). In this case
|
||||
@ -297,9 +309,9 @@ class InstallabilityTester(object):
|
||||
rebuild.
|
||||
"""
|
||||
|
||||
# We already satisfied/chosen at least one of the litterals
|
||||
# We already satisfied/chosen at least one of the literals
|
||||
# in the choice, so the choice is gone
|
||||
for choice in ifilter(musts.isdisjoint, choices):
|
||||
for choice in filter(musts.isdisjoint, choices):
|
||||
# cbroken is needed here because (in theory) it could
|
||||
# have changed since the choice was discovered and it
|
||||
# is smaller than testing (so presumably faster)
|
||||
@ -307,7 +319,7 @@ class InstallabilityTester(object):
|
||||
|
||||
if len(remain) > 1 and not remain.isdisjoint(safe_set):
|
||||
first = None
|
||||
for r in ifilter(safe_set.__contains__, remain):
|
||||
for r in filter(safe_set.__contains__, remain):
|
||||
# don't bother giving extra arguments to _check_inst. "safe" packages are
|
||||
# usually trivial to satisfy on their own and will not involve conflicts
|
||||
# (so never will not help)
|
||||
@ -317,6 +329,7 @@ class InstallabilityTester(object):
|
||||
if first:
|
||||
musts.add(first)
|
||||
check.add(first)
|
||||
stats.choice_resolved_using_safe_set += 1
|
||||
continue
|
||||
# None of the safe set choices are installable, so drop them
|
||||
remain -= safe_set
|
||||
@ -325,11 +338,13 @@ class InstallabilityTester(object):
|
||||
# the choice was reduced to one package we haven't checked - check that
|
||||
check.update(remain)
|
||||
musts.update(remain)
|
||||
stats.choice_presolved += 1
|
||||
continue
|
||||
|
||||
if not remain:
|
||||
# all alternatives would violate the conflicts or are uninstallable
|
||||
# => package is not installable
|
||||
stats.choice_presolved += 1
|
||||
return None
|
||||
|
||||
# The choice is still deferred
|
||||
@ -346,8 +361,8 @@ class InstallabilityTester(object):
|
||||
choices_tmp = set()
|
||||
check_tmp = set([p])
|
||||
if not self._check_loop(universe, testing, eqv_table,
|
||||
musts_copy, never_tmp,
|
||||
choices_tmp, cbroken,
|
||||
stats, musts_copy, never_tmp,
|
||||
cbroken, choices_tmp,
|
||||
check_tmp):
|
||||
# p cannot be chosen/is broken (unlikely, but ...)
|
||||
continue
|
||||
@ -363,6 +378,7 @@ class InstallabilityTester(object):
|
||||
# routine, but to conserve stack-space, we return
|
||||
# and expect to be called again later.
|
||||
musts.update(musts_copy)
|
||||
stats.choice_resolved_without_restore_point += 1
|
||||
return False
|
||||
|
||||
if not musts.isdisjoint(never_tmp):
|
||||
@ -370,6 +386,7 @@ class InstallabilityTester(object):
|
||||
# t uninstallable, so p is a no-go.
|
||||
continue
|
||||
|
||||
stats.backtrace_restore_point_created += 1
|
||||
# We are not sure that p is safe, setup a backtrack
|
||||
# point and recurse.
|
||||
never_tmp |= never
|
||||
@ -386,6 +403,7 @@ class InstallabilityTester(object):
|
||||
# to satisfy the dependencies, so pretend to conflict
|
||||
# with it - hopefully it will reduce future choices.
|
||||
never.add(p)
|
||||
stats.backtrace_restore_point_used += 1
|
||||
|
||||
# Optimization for the last case; avoid the recursive call
|
||||
# and just assume the last will lead to a solution. If it
|
||||
@ -393,11 +411,12 @@ class InstallabilityTester(object):
|
||||
# have to back-track anyway.
|
||||
check.add(last)
|
||||
musts.add(last)
|
||||
stats.backtrace_last_option += 1
|
||||
return False
|
||||
# END _pick_choice
|
||||
|
||||
while check:
|
||||
if not check_loop(check):
|
||||
if not check_loop(choices, check):
|
||||
verdict = False
|
||||
break
|
||||
|
||||
@ -417,12 +436,14 @@ class InstallabilityTester(object):
|
||||
if verdict:
|
||||
# if t is installable, then so are all packages in musts
|
||||
self._cache_inst.update(musts)
|
||||
stats.solved_installable += 1
|
||||
else:
|
||||
stats.solved_uninstallable += 1
|
||||
|
||||
return verdict
|
||||
|
||||
|
||||
def _check_loop(self, universe, testing, eqv_table, musts, never,
|
||||
choices, cbroken, check, len=len,
|
||||
def _check_loop(self, universe, testing, eqv_table, stats, musts, never,
|
||||
cbroken, choices, check, len=len,
|
||||
frozenset=frozenset):
|
||||
"""Finds all guaranteed dependencies via "check".
|
||||
|
||||
@ -431,7 +452,7 @@ class InstallabilityTester(object):
|
||||
returns True, then t is installable.
|
||||
"""
|
||||
# Local variables for faster access...
|
||||
not_satisfied = partial(ifilter, musts.isdisjoint)
|
||||
not_satisfied = partial(filter, musts.isdisjoint)
|
||||
|
||||
# While we have guaranteed dependencies (in check), examine all
|
||||
# of them.
|
||||
@@ -453,7 +474,7 @@
# so "obviously" we can never choose any of its conflicts
never.update(cons & testing)

# depgroup can be satisifed by picking something that is
# depgroup can be satisfied by picking something that is
# already in musts - lets pick that (again). :)
for depgroup in not_satisfied(deps):

@@ -492,13 +513,19 @@
# _build_eqv_packages_table method for more
# information on how this works.
new_cand = set(x for x in candidates if x not in possible_eqv)
stats.eqv_table_times_used += 1

for chosen in iter_except(possible_eqv.pop, KeyError):
new_cand.add(chosen)
possible_eqv -= eqv_table[chosen]
stats.eqv_table_total_number_of_alternatives_eliminated += len(candidates) - len(new_cand)
if len(new_cand) == 1:
check.update(new_cand)
musts.update(new_cand)
stats.eqv_table_reduced_to_one += 1
continue
elif len(candidates) == len(new_cand):
stats.eqv_table_reduced_by_zero += 1
candidates = frozenset(new_cand)
# defer this choice till later
choices.add(candidates)
@@ -513,18 +540,19 @@
eqv_table = self._eqv_table
cbroken = self._cache_broken
universe = self._universe
stats = self._stats
safe_set = self._safe_set

ess_base = set(x for x in self._essentials if x[2] == arch and x in testing)
start = set(ess_base)
ess_never = set()
ess_choices = set()
not_satisified = partial(ifilter, start.isdisjoint)
not_satisified = partial(filter, start.isdisjoint)

while ess_base:
self._check_loop(universe, testing, eqv_table,
start, ess_never, ess_choices,
cbroken, ess_base)
self._check_loop(universe, testing, eqv_table, stats,
start, ess_never, cbroken,
ess_choices, ess_base)
if ess_choices:
# Try to break choices where possible
nchoice = set()
@@ -548,3 +576,118 @@

return self._cache_ess[arch]

def compute_stats(self):
universe = self._universe
eqv_table = self._eqv_table
graph_stats = defaultdict(ArchStats)
seen_eqv = defaultdict(set)

for pkg in universe:
(pkg_name, pkg_version, pkg_arch) = pkg
deps, con = universe[pkg]
arch_stats = graph_stats[pkg_arch]

arch_stats.nodes += 1

if pkg in eqv_table and pkg not in seen_eqv[pkg_arch]:
eqv = [e for e in eqv_table[pkg] if e[2] == pkg_arch]
arch_stats.eqv_nodes += len(eqv)

arch_stats.add_dep_edges(deps)
arch_stats.add_con_edges(con)

for stat in graph_stats.values():
stat.compute_all()

return graph_stats


class InstallabilityStats(object):

def __init__(self):
self.cache_hits = 0
self.cache_misses = 0
self.cache_drops = 0
self.backtrace_restore_point_created = 0
self.backtrace_restore_point_used = 0
self.backtrace_last_option = 0
self.choice_presolved = 0
self.choice_resolved_using_safe_set = 0
self.choice_resolved_without_restore_point = 0
self.is_installable_calls = 0
self.solved_installable = 0
self.solved_uninstallable = 0
self.conflicts_essential = 0
self.eqv_table_times_used = 0
self.eqv_table_reduced_to_one = 0
self.eqv_table_reduced_by_zero = 0
self.eqv_table_total_number_of_alternatives_eliminated = 0

def stats(self):
formats = [
"Requests - is_installable: {is_installable_calls}",
"Cache - hits: {cache_hits}, misses: {cache_misses}, drops: {cache_drops}",
"Choices - pre-solved: {choice_presolved}, safe-set: {choice_resolved_using_safe_set}, No RP: {choice_resolved_without_restore_point}",
"Backtrace - RP created: {backtrace_restore_point_created}, RP used: {backtrace_restore_point_used}, reached last option: {backtrace_last_option}",
"Solved - installable: {solved_installable}, uninstallable: {solved_uninstallable}, conflicts essential: {conflicts_essential}",
"Eqv - times used: {eqv_table_times_used}, perfect reductions: {eqv_table_reduced_to_one}, failed reductions: {eqv_table_reduced_by_zero}, total no. of alternatives pruned: {eqv_table_total_number_of_alternatives_eliminated}",
]
return [x.format(**self.__dict__) for x in formats]

class ArchStats(object):

def __init__(self):
self.nodes = 0
self.eqv_nodes = 0
self.dep_edges = []
self.con_edges = []
self.stats = defaultdict(lambda: defaultdict(int))

def stat(self, statname):
return self.stats[statname]

def stat_summary(self):
text = []
for statname in ['nodes', 'dependency-clauses', 'dependency-clause-alternatives', 'negative-dependency-clauses']:
stat = self.stats[statname]
if statname != 'nodes':
format_str = "%s, max: %d, min: %d, median: %d, average: %f (%d/%d)"
values = [statname, stat['max'], stat['min'], stat['median'], stat['average'], stat['sum'], stat['size']]
if 'average-per-node' in stat:
format_str += ", average-per-node: %f"
values.append(stat['average-per-node'])
else:
format_str = "nodes: %d, eqv-nodes: %d"
values = (self.nodes, self.eqv_nodes)
text.append(format_str % tuple(values))
return text

def add_dep_edges(self, edges):
self.dep_edges.append(edges)

def add_con_edges(self, edges):
self.con_edges.append(edges)

def _list_stats(self, stat_name, sorted_list, average_per_node=False):
if sorted_list:
stats = self.stats[stat_name]
stats['max'] = sorted_list[-1]
stats['min'] = sorted_list[0]
stats['sum'] = sum(sorted_list)
stats['size'] = len(sorted_list)
stats['average'] = float(stats['sum'])/len(sorted_list)
stats['median'] = sorted_list[len(sorted_list)//2]
if average_per_node:
stats['average-per-node'] = float(stats['sum'])/self.nodes

def compute_all(self):
dep_edges = self.dep_edges
con_edges = self.con_edges
sorted_no_dep_edges = sorted(len(x) for x in dep_edges)
sorted_size_dep_edges = sorted(len(x) for x in chain.from_iterable(dep_edges))
sorted_no_con_edges = sorted(len(x) for x in con_edges)
self._list_stats('dependency-clauses', sorted_no_dep_edges)
self._list_stats('dependency-clause-alternatives', sorted_size_dep_edges, average_per_node=True)
self._list_stats('negative-dependency-clauses', sorted_no_con_edges)
@@ -54,6 +54,9 @@ class MigrationItem(object):
def __hash__(self):
return hash((self.uvname, self.version))

def __lt__(self, other):
return (self.uvname, self.version) < (other.uvname, other.version)

@property
def name(self):
return self._name
@@ -142,5 +145,5 @@ class MigrationItem(object):
return self._uvname

class UnversionnedMigrationItem(MigrationItem):
def __init__(self, name = None):
MigrationItem.__init__(self, name = name, versionned = False)
def __init__(self, name=None):
super().__init__(name=name, versionned=False)
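The new __lt__ above is needed because Python 3 no longer provides a default ordering between arbitrary objects: sorting MigrationItem instances requires the class to define rich comparison itself. A hedged sketch of the pattern, not the real class:

    class Item(object):
        def __init__(self, uvname, version):
            self.uvname, self.version = uvname, version

        def __lt__(self, other):
            return (self.uvname, self.version) < (other.uvname, other.version)

    ordered = sorted([Item('b', '1'), Item('a', '2')])   # works because __lt__ exists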
@@ -113,6 +113,8 @@ class AutoPkgTestSwiftServer:

def start(self):
assert self.server_pid is None, 'already started'
if self.log:
self.log.close()
self.log = tempfile.TemporaryFile()
p = os.fork()
if p:
@@ -123,6 +125,7 @@ class AutoPkgTestSwiftServer:
if s.connect_ex(('127.0.0.1', self.port)) == 0:
break
time.sleep(0.1)
s.close()
return

# child; quiesce logging on stderr
@@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/python3
# (C) 2014 Canonical Ltd.
#
# This program is free software; you can redistribute it and/or modify
@@ -87,10 +87,10 @@ class TestAutoPkgTest(TestBase):

if excuses_expect:
for re in excuses_expect:
self.assertRegexpMatches(excuses, re, excuses)
self.assertRegex(excuses, re, excuses)
if excuses_no_expect:
for re in excuses_no_expect:
self.assertNotRegexpMatches(excuses, re, excuses)
self.assertNotRegex(excuses, re, excuses)

self.amqp_requests = set()
try:
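assertRegexpMatches and assertNotRegexpMatches were renamed to assertRegex and assertNotRegex in Python 3.2's unittest, with the old names kept only as deprecated aliases. Minimal sketch of a hypothetical test case:

    import unittest

    class ExampleTest(unittest.TestCase):
        def test_excuses(self):
            excuses = "green 1.1~beta RUNNING"
            self.assertRegex(excuses, r"green.*RUNNING")
            self.assertNotRegex(excuses, r"REGRESSION")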
@@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/python3
# (C) 2014 Canonical Ltd.
#
# This program is free software; you can redistribute it and/or modify
@@ -48,7 +48,7 @@ class TestTouchManifest(unittest.TestCase):
self.imagesdir = os.path.join(self.path, 'boottest/images')
os.makedirs(self.imagesdir)
self.addCleanup(shutil.rmtree, self.path)
_p = mock.patch('urllib.urlopen')
_p = mock.patch('urllib.request.urlopen')
self.mocked_urlopen = _p.start()
self.mocked_urlopen.side_effect = [
FakeResponse(code=404),
@@ -71,7 +71,7 @@ class TestTouchManifest(unittest.TestCase):
def test_fetch(self):
# Missing manifest file is fetched dynamically
self.mocked_urlopen.side_effect = [
FakeResponse(code=200, content='foo 1.0'),
FakeResponse(code=200, content=b'foo 1.0'),
]
manifest = boottest.TouchManifest('ubuntu-touch', 'vivid')
self.assertNotEqual([], manifest._manifest)
@@ -244,10 +244,10 @@ args.func()
# print('-------\nout: %s\n-----' % out)
if expect:
for re in expect:
self.assertRegexpMatches(excuses, re)
self.assertRegex(excuses, re)
if no_expect:
for re in no_expect:
self.assertNotRegexpMatches(excuses, re)
self.assertNotRegex(excuses, re)

def test_runs(self):
# `Britney` runs and considers binary packages for boottesting
@@ -283,8 +283,9 @@ args.func()
# '<source> <version>\n'
test_input_path = os.path.join(
self.data.path, 'boottest/work/test_input')
self.assertEqual(
['green 1.1~beta\n'], open(test_input_path).readlines())
with open(test_input_path) as f:
self.assertEqual(
['green 1.1~beta\n'], f.readlines())

def test_pass(self):
# `Britney` updates boottesting information in excuses when the