mirror of https://git.launchpad.net/~ubuntu-release/britney/+git/britney2-ubuntu
synced 2025-04-22 08:31:17 +00:00

Merge trunk up to 2014-07-13

commit 24e20560e7

britney.py — 303 changed lines
@@ -276,8 +276,6 @@ class Britney(object):
            return

        # read the source and binary packages for the involved distributions
        # if this takes a very long time, try increasing SIZEOFHASHMAP in
        # lib/dpkg.c and rebuilding
        if 'testing' not in self.sources:
            self.sources['testing'] = self.read_sources(self.options.testing)
        self.sources['unstable'] = self.read_sources(self.options.unstable)
@@ -1255,10 +1253,10 @@ class Britney(object):
        if not (ssrc and suite != 'unstable'):
            # for every binary package produced by this source in testing for this architecture
            source_data = self.sources['testing'][src]
            _, smoothbins = self.find_upgraded_binaries(src,
                                                        source_data,
                                                        arch,
                                                        suite)
            _, _, smoothbins = self._compute_groups(src,
                                                    "unstable",
                                                    arch,
                                                    False)

            for pkg in sorted(x.split("/")[0] for x in source_data[BINARIES] if x.endswith("/"+arch)):
                # if the package is architecture-independent, then ignore it
@@ -1277,7 +1275,7 @@ class Britney(object):
                # it "interesting" on its own. This case happens quite often with smooth updatable
                # packages, where the old binary "survives" a full run because it still has
                # reverse dependencies.
                name = pkg + "/" + tpkg_data[ARCHITECTURE]
                name = (pkg, tpkg_data[VERSION], tpkg_data[ARCHITECTURE])
                if name not in smoothbins:
                    anyworthdoing = True
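The hunk above switches the smoothbins membership key from a "name/arch" string to a (name, version, architecture) tuple. A minimal sketch of the two key formats, with invented values and placeholder field indices (not britney's real ARCHITECTURE/VERSION constants):

    ARCHITECTURE, VERSION = 0, 1
    tpkg_data = ("amd64", "1.0-1")        # made-up binary record

    pkg = "libfoo1"
    old_key = pkg + "/" + tpkg_data[ARCHITECTURE]                  # old style key
    new_key = (pkg, tpkg_data[VERSION], tpkg_data[ARCHITECTURE])   # new style key
    assert old_key == "libfoo1/amd64"
    assert new_key == ("libfoo1", "1.0-1", "amd64")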
@@ -1982,117 +1980,182 @@ class Britney(object):
        return diff <= 0


    def find_upgraded_binaries(self, source_name, source_data,
                               architecture, suite):
        # XXX: not the best name - really.
        """Find smooth and non-smooth updatable binaries for upgrades
    def _compute_groups(self, source_name, suite, migration_architecture,
                        is_removal, include_hijacked=False,
                        allow_smooth_updates=True,
                        removals=frozenset()):
        """Compute the groups of binaries being migrated by item

        This method will compute the binaries that will be replaced in
        testing and which of them are smooth updatable.
        This method will compute the binaries that will be added,
        replaced in testing and which of them are smooth updatable.

        Parameters:
        * "source_name" is the name of the source package whose
          binaries are migrating.
        * "source_data" is the fields of that source package from
          testing.
        * "architecture" determines the architecture of the migrating
          binaries (can be "source" for a "source"-migration, meaning
          all binaries regardless of architecture).
        * "suite" is the suite from which the binaries are migrating.
          [Same as item.suite, where available]
        * "migration_architecture" determines the architecture of the
          migrating binaries (can be "source" for a "source"-migration,
          meaning all binaries regardless of architecture).
          [Same as item.architecture, where available]
        * "is_removal" is a boolean determining if this is a removal
          or not [Same as item.is_removal, where available]
        * "include_hijacked" determines whether hijacked binaries should
          be included in results or not. (defaults: False)
        * "allow_smooth_updates" is a boolean determining whether smooth-
          updates are permitted in this migration. When set to False,
          the "smoothbins" return value will always be the empty set.
          Any value that would have been there will now be in "rms"
          instead. (defaults: True)
        * "removals" is a set of binaries that is assumed to be
          removed at the same time as this migration (e.g. in the same
          "easy"-hint). This may affect whether some binaries are
          smooth updated or not. (defaults: empty-set)
          - Binaries must be given as ("package-name", "version",
            "architecture") tuples.

        Returns a tuple (adds, rms, smoothbins). "adds" is a set of
        binaries that will be updated in or appear after the migration.
        "rms" is a set of binaries that are not smooth-updatable (or
        binaries that could be, but there is no reason to let them be
        smooth updated). "smoothbins" is the set of binaries that are to
        be smooth-updated.

        Each "binary" in "adds", "rms" and "smoothbins" will be a
        tuple of ("package-name", "version", "architecture") and is
        thus suitable for passing on to the
        InstallabilityTester.

        Returns a tuple (bins, smoothbins). "bins" is a set of binaries
        that are not smooth-updatable (or binaries that could be, but
        there is no reason to let them be smooth updated).
        "smoothbins" is the set of binaries that are to be smooth-updated.

        Pre-Conditions: The source package must be in testing and this
        should only be used when considering an upgrade migration from
        the input suite. (e.g. do not use this for removals).

        Unlike doop_source, this will not modify any data structure.
        """
        bins = set()
        smoothbins = set()
        check = []

        # local copies for better performance
        sources = self.sources
        binaries_t = self.binaries['testing']
        # first, build a list of eligible binaries
        for p in source_data[BINARIES]:
            binary, parch = p.split("/")
            if architecture != 'source':
                # for a binary migration, binaries should not be removed:
                # - unless they are for the correct architecture
                if parch != architecture:

        adds = set()
        rms = set()
        smoothbins = {}

        # remove all binary packages (if the source already exists)
        if migration_architecture == 'source' or not is_removal:
            if source_name in sources['testing']:
                source_data = sources['testing'][source_name]

                bins = []
                check = {}
                # remove all the binaries

                # first, build a list of eligible binaries
                for p in source_data[BINARIES]:
                    binary, parch = p.split("/")
                    if (migration_architecture != 'source'
                        and parch != migration_architecture):
                        continue

                    if (not include_hijacked
                        and binaries_t[parch][0][binary][SOURCE] != source_name):
                        continue

                    bins.append(p)

                for p in bins:
                    binary, parch = p.split("/")
                    # if a smooth update is possible for the package, skip it
                    if allow_smooth_updates and suite == 'unstable' and \
                       binary not in self.binaries[suite][parch][0] and \
                       ('ALL' in self.options.smooth_updates or \
                        binaries_t[parch][0][binary][SECTION] in self.options.smooth_updates):

                        # if the package has reverse-dependencies which are
                        # built from other sources, it's a valid candidate for
                        # a smooth update. if not, it may still be a valid
                        # candidate if one of its r-deps is itself a candidate,
                        # so note it for checking later
                        bin_data = binaries_t[parch][0][binary]
                        rdeps = bin_data[RDEPENDS]

                        # the list of reverse-dependencies may be outdated
                        # if, for example, we're processing a hint and
                        # a new version of one of the apparent reverse-dependencies
                        # migrated earlier in the hint. walk the list to make
                        # sure that at least one of the entries is still
                        # valid
                        rrdeps = [x for x in rdeps if x not in [y.split("/")[0] for y in bins]]
                        if rrdeps:
                            for dep in rrdeps:
                                if dep in binaries_t[parch][0]:
                                    bin = binaries_t[parch][0][dep]
                                    deps = []
                                    # If the package is being removed
                                    # together with dep, then it is
                                    # not a reason to smooth update
                                    # the binary
                                    t = (dep, bin[VERSION], parch)
                                    if t in removals:
                                        continue

                                    if bin[DEPENDS] is not None:
                                        deps.extend(apt_pkg.parse_depends(bin[DEPENDS], False))
                                    if any(binary == entry[0] for deplist in deps for entry in deplist):
                                        smoothbins[p] = (binary, bin_data[VERSION], parch)
                                        break
                        else:
                            check[p] = (binary, bin_data[VERSION], parch)

                # check whether we should perform a smooth update for
                # packages which are candidates but do not have r-deps
                # outside of the current source
                for p in check:
                    ptuple = check[p]
                    binary, _, parch = ptuple
                    rdeps = [ bin for bin in binaries_t[parch][0][binary][RDEPENDS] \
                              if bin in [y[0] for y in smoothbins.itervalues()] ]
                    if rdeps:
                        smoothbins[p] = ptuple

                # remove all the binaries which aren't being smooth updated
                for p in ( bin for bin in bins if bin not in smoothbins ):
                    binary, parch = p.split("/")
                    version = binaries_t[parch][0][binary][VERSION]
                    rms.add((binary, version, parch))

            # single binary removal; used for clearing up after smooth
            # updates but not supported as a manual hint
            elif source_name in binaries_t[migration_architecture][0]:
                version = binaries_t[migration_architecture][0][source_name][VERSION]
                rms.add((source_name, version, migration_architecture))

        # add the new binary packages (if we are not removing)
        if not is_removal:
            source_data = sources[suite][source_name]
            for p in source_data[BINARIES]:
                binary, parch = p.split("/")
                if migration_architecture not in ['source', parch]:
                    continue
                # - if they are arch:all and the migration is via *pu,
                #   as the packages will not have been rebuilt and the
                #   source suite will not contain them
                if binaries_t[parch][0][binary][ARCHITECTURE] == 'all' and \
                   suite != 'unstable':
                    continue
                # do not remove binaries which have been hijacked by other sources
                if binaries_t[parch][0][binary][SOURCE] != source_name:
                    continue
                bins.add(p)
                version = self.binaries[suite][parch][0][binary][VERSION]
                adds.add((binary, version, parch))

        if suite != 'unstable':
            # We only allow smooth updates from unstable, so if we
            # are not migrating from unstable just exit now.
            return (bins, smoothbins)
        return (adds, rms, set(smoothbins.itervalues()))

        for p in bins:
            binary, parch = p.split("/")
            # if a smooth update is possible for the package, skip it
            if binary not in self.binaries[suite][parch][0] and \
               ('ALL' in self.options.smooth_updates or \
                binaries_t[parch][0][binary][SECTION] in self.options.smooth_updates):

                # if the package has reverse-dependencies which are
                # built from other sources, it's a valid candidate for
                # a smooth update. if not, it may still be a valid
                # candidate if one of its r-deps is itself a candidate,
                # so note it for checking later
                rdeps = binaries_t[parch][0][binary][RDEPENDS]

                # the list of reverse-dependencies may be outdated
                # if, for example, we're processing a hint and
                # a new version of one of the apparent reverse-dependencies
                # migrated earlier in the hint. walk the list to make
                # sure that at least one of the entries is still
                # valid
                rrdeps = [x for x in rdeps if x not in [y.split("/")[0] for y in bins]]
                if rrdeps:
                    for dep in rrdeps:
                        if dep in binaries_t[parch][0]:
                            bin = binaries_t[parch][0][dep]
                            deps = []
                            if bin[DEPENDS] is not None:
                                deps.extend(apt_pkg.parse_depends(bin[DEPENDS], False))
                            if any(binary == entry[0] for deplist in deps for entry in deplist):
                                smoothbins.add(p)
                                break
                else:
                    check.append(p)


        # check whether we should perform a smooth update for
        # packages which are candidates but do not have r-deps
        # outside of the current source
        for p in check:
            binary, parch = p.split("/")
            if any(bin for bin in binaries_t[parch][0][binary][RDEPENDS] \
                   if bin in [y.split("/")[0] for y in smoothbins]):
                smoothbins.add(p)

        bins -= smoothbins
        return (bins, smoothbins)

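As a reading aid rather than part of the commit, a rough sketch of how the new _compute_groups() is meant to be called inside a Britney method and what its three return values hold according to the docstring above; the package names and versions are invented.

    adds, rms, smoothbins = self._compute_groups("foo",        # source package name (made up)
                                                 "unstable",   # suite it migrates from
                                                 "source",     # whole-source migration
                                                 False)        # not a removal
    # Every member of the three sets is a ("package-name", "version", "architecture") tuple:
    #   adds       e.g. {("foo", "1.1-1", "amd64")}      binaries added/updated in testing
    #   rms        e.g. {("foo", "1.0-1", "amd64")}      binaries removed outright
    #   smoothbins e.g. {("libfoo1", "1.0-1", "amd64")}  old binaries kept for a smooth update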

    def doop_source(self, item, hint_undo=[]):
    def doop_source(self, item, hint_undo=[], removals=frozenset()):
        """Apply a change to the testing distribution as requested by `pkg`

        An optional list of undo actions related to packages processed earlier
        in a hint may be passed in `hint_undo`.

        An optional set of binaries may be passed in "removals". Binaries listed
        in this set will be assumed to be removed at the same time as the "item"
        will migrate. This may change which binaries will be smooth-updated.
        - Binaries in this set must be ("package-name", "version", "architecture")
          tuples.

        This method applies the changes required by the action `item` tracking
        them so it will be possible to revert them.

@@ -2113,14 +2176,16 @@ class Britney(object):
        if item.package in sources['testing']:
            source = sources['testing'][item.package]

            bins, _ = self.find_upgraded_binaries(item.package,
                                                  source,
                                                  item.architecture,
                                                  item.suite)
            _, bins, _ = self._compute_groups(item.package,
                                              item.suite,
                                              item.architecture,
                                              item.is_removal,
                                              removals=removals)

            # remove all the binaries which aren't being smooth updated
            for p in bins:
                binary, parch = p.split("/")
            for bin_data in bins:
                binary, _, parch = bin_data
                p = binary + "/" + parch
                # save the old binary for undo
                undo['binaries'][p] = binaries[parch][0][binary]
                # all the reverse dependencies are affected by the change
@@ -2291,8 +2356,16 @@ class Britney(object):
        # pre-process a hint batch
        pre_process = {}
        if selected and hint:
            removals = set()
            for item in selected:
                _, rms, _ = self._compute_groups(item.package, item.suite,
                                                 item.architecture,
                                                 item.is_removal,
                                                 allow_smooth_updates=False)
                removals.update(rms)
            for package in selected:
                pkg, affected, undo = self.doop_source(package)
                pkg, affected, undo = self.doop_source(package,
                                                       removals=removals)
                pre_process[package] = (pkg, affected, undo)

        if lundo is None:
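A minimal self-contained sketch (not britney code) of the two-pass shape the hunk above introduces for hint batches, with hypothetical compute_groups/doop_source callables standing in for the real methods:

    def apply_hint_batch(selected, compute_groups, doop_source):
        # pass 1: collect everything the whole batch will remove from testing
        # (smooth updates disabled, so this is a superset of the final removals)
        removals = set()
        for item in selected:
            _, rms, _ = compute_groups(item, allow_smooth_updates=False)
            removals.update(rms)
        # pass 2: apply each item, letting it see the batch-wide removals so it
        # does not smooth-update a binary whose only r-deps are also going away
        return [doop_source(item, removals=removals) for item in selected]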
@@ -2789,10 +2862,34 @@ class Britney(object):
        excuses relationships. If they build a circular dependency, which we already
        know as not-working with the standard do_all algorithm, try to `easy` them.
        """
        self.__log("> Processing hints from the auto hinter", type="I")
        self.__log("> Processing hints from the auto hinter [Partial-ordering]",
                   type="I")

        # consider only excuses which are valid candidates
        excuses = dict((x.name, x) for x in self.excuses if x.name in [y.uvname for y in self.upgrade_me])
        sources_t = self.sources['testing']

        groups = set()
        for y in sorted((y for y in self.upgrade_me if y.uvname in excuses), key=attrgetter('uvname')):
            if y.is_removal and y.package not in sources_t:
                # Already removed
                continue
            if not y.is_removal:
                excuse = excuses[y.uvname]
                if y.architecture == 'source' and y.uvname in sources_t and sources_t[y.uvname][VERSION] == excuse.ver[1]:
                    # Already migrated
                    continue
            adds, rms, _ = self._compute_groups(y.package, y.suite,
                                                y.architecture, y.is_removal,
                                                include_hijacked=True)
            groups.add((y, frozenset(adds), frozenset(rms)))

        for comp in self._inst_tester.solve_groups(groups):
            if len(comp) > 1:
                self.do_hint("easy", "autohinter", comp)

        self.__log("> Processing hints from the auto hinter [Original]",
                   type="I")

        def find_related(e, hint, circular_first=False):
            if e not in excuses:
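For orientation (a sketch, not from the commit itself): each entry added to groups above is an (item, adds, rms) tuple, and solve_groups() hands back lists of items in a workable order; any list with more than one entry is then attempted as a single "easy" hint. A made-up example of one group entry:

    group = (item,                                     # the MigrationItem for a source "foo"
             frozenset([("foo", "1.1-1", "amd64")]),   # adds: binaries entering testing
             frozenset([("foo", "1.0-1", "amd64")]))   # rms: binaries leaving testing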
@@ -15,7 +15,7 @@
from contextlib import contextmanager

from britney_util import ifilter_except, iter_except
from installability.tester import InstallabilityTester
from installability.solver import InstallabilitySolver

class _RelationBuilder(object):
    """Private helper class to "build" relations"""
@@ -302,7 +302,7 @@ class InstallabilityTesterBuilder(object):
        check.update(reverse_package_table[pkg][0] - safe_set)


        return InstallabilityTester(package_table,
                                    frozenset(reverse_package_table),
        return InstallabilitySolver(package_table,
                                    reverse_package_table,
                                    self._testing, self._broken,
                                    self._essentials, safe_set)
installability/solver.py (new file) — 308 lines
@@ -0,0 +1,308 @@
# -*- coding: utf-8 -*-

# Copyright (C) 2012 Niels Thykier <niels@thykier.net>
# - Includes code by Paul Harrison
#   (http://www.logarithmic.net/pfh-files/blog/01208083168/sort.py)

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

from functools import partial
import os

from installability.tester import InstallabilityTester
from britney_util import (ifilter_only, iter_except)


class InstallabilitySolver(InstallabilityTester):

    def __init__(self, universe, revuniverse, testing, broken, essentials,
                 safe_set):
        """Create a new installability solver

        universe is a dict mapping package tuples to their
        dependencies and conflicts.

        revuniverse is a dict mapping package tuples to their reverse
        dependencies and reverse conflicts.

        testing is a (mutable) set of package tuples that determines
        which of the packages in universe are currently in testing.

        broken is a (mutable) set of package tuples that are known to
        be uninstallable.

        Package tuple: (pkg_name, pkg_version, pkg_arch)
          - NB: arch:all packages are "re-mapped" to given architecture.
            (simplifies caches and dependency checking)
        """
        InstallabilityTester.__init__(self, universe, revuniverse, testing,
                                      broken, essentials, safe_set)

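A sketch (not part of the file) of the input shapes the docstring above describes, as the solve_groups() code below consumes them: universe[pkg] is (dependency clauses, conflicts) and revuniverse[pkg] is (reverse dependencies, reverse conflicts); the package names and versions are invented.

    pkg_a = ("foo", "1.0-1", "amd64")          # hypothetical package tuples
    pkg_b = ("libbar0", "2.0-1", "amd64")

    universe = {
        # each dependency clause is a set of acceptable alternatives
        pkg_a: (frozenset([frozenset([pkg_b])]), frozenset()),   # foo depends on libbar0
        pkg_b: (frozenset(), frozenset()),                       # libbar0 has no deps/conflicts
    }
    revuniverse = {
        pkg_b: (frozenset([pkg_a]), frozenset()),                # libbar0 is depended on by foo
    }
    testing = {pkg_a, pkg_b}
    broken = set()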
    def solve_groups(self, groups):
        sat_in_testing = self._testing.isdisjoint
        universe = self._universe
        revuniverse = self._revuniverse
        result = []
        emitted = set()
        check = set()
        order = {}
        ptable = {}
        key2item = {}
        going_out = set()
        going_in = set()
        debug_solver = 0

        try:
            debug_solver = int(os.environ.get('BRITNEY_DEBUG', '0'))
        except:
            pass

        # Build the tables
        for (item, adds, rms) in groups:
            key = str(item)
            key2item[key] = item
            order[key] = {'before': set(), 'after': set()}
            going_in.update(adds)
            going_out.update(rms)
            for a in adds:
                ptable[a] = key
            for r in rms:
                ptable[r] = key

        if debug_solver > 1:
            self._dump_groups(groups)

        # This large loop will add ordering constraints on each "item"
        # that migrates based on various rules.
        for (item, adds, rms) in groups:
            key = str(item)
            oldcons = set()
            newcons = set()
            for r in rms:
                oldcons.update(universe[r][1])
            for a in adds:
                newcons.update(universe[a][1])
            current = newcons & oldcons
            oldcons -= current
            newcons -= current
            if oldcons:
                # Some of the old binaries have "conflicts" that will
                # be removed.
                for o in ifilter_only(ptable, oldcons):
                    # "key" removes a conflict with one of
                    # "other"'s binaries, so it is probably a good
                    # idea to migrate "key" before "other"
                    other = ptable[o]
                    if other == key:
                        # "Self-conflicts" => ignore
                        continue
                    if debug_solver and other not in order[key]['before']:
                        print "N: Conflict induced order: %s before %s" % (key, other)
                    order[key]['before'].add(other)
                    order[other]['after'].add(key)

            for r in ifilter_only(revuniverse, rms):
                # The binaries have reverse dependencies in testing;
                # check if we can/should migrate them first.
                for rdep in revuniverse[r][0]:
                    for depgroup in universe[rdep][0]:
                        rigid = depgroup - going_out
                        if not sat_in_testing(rigid):
                            # (partly) satisfied by testing, assume it is okay
                            continue
                        if rdep in ptable:
                            other = ptable[rdep]
                            if other == key:
                                # "Self-dependency" => ignore
                                continue
                            if debug_solver and other not in order[key]['after']:
                                print "N: Removal induced order: %s before %s" % (key, other)
                            order[key]['after'].add(other)
                            order[other]['before'].add(key)

            for a in adds:
                # Check if this item should migrate before others
                # (e.g. because they depend on a new [version of a]
                # binary provided by this item).
                for depgroup in universe[a][0]:
                    rigid = depgroup - going_out
                    if not sat_in_testing(rigid):
                        # (partly) satisfied by testing, assume it is okay
                        continue
                    # okay - we got three cases now.
                    # - "swap" (replace existing binary with a newer version)
                    # - "addition" (add new binary without removing any)
                    # - "removal" (remove binary without providing a new)
                    #
                    # The problem is that only the latter two require
                    # an ordering.  A "swap" (in itself) should not
                    # affect us.
                    other_adds = set()
                    other_rms = set()
                    for d in ifilter_only(ptable, depgroup):
                        if d in going_in:
                            # "other" provides something "key" needs,
                            # schedule accordingly.
                            other = ptable[d]
                            other_adds.add(other)
                        else:
                            # "other" removes something "key" needs,
                            # schedule accordingly.
                            other = ptable[d]
                            other_rms.add(other)

                    for other in (other_adds - other_rms):
                        if debug_solver and other != key and other not in order[key]['after']:
                            print "N: Dependency induced order (add): %s before %s" % (key, other)
                        order[key]['after'].add(other)
                        order[other]['before'].add(key)

                    for other in (other_rms - other_adds):
                        if debug_solver and other != key and other not in order[key]['before']:
                            print "N: Dependency induced order (remove): %s before %s" % (key, other)
                        order[key]['before'].add(other)
                        order[other]['after'].add(key)

        ### MILESTONE: Partial-order constraints computed ###

        # At this point, we have computed all the partial-order
        # constraints needed.  Some of these may have created strongly
        # connected components (SCCs) [of size 2 or greater], which
        # represent a group of items that (we believe) must migrate
        # together.
        #
        # Each one of those components will become an "easy" hint.

        comps = self._compute_scc(order, ptable)
        merged = {}
        scc = {}
        # Now that we got the SCCs (in comps), we select one item from
        # each SCC to represent the group and become an ID for that
        # SCC.
        # * scc[scc_id] => All the items in that SCC
        # * merged[item] => The ID of the SCC to which the item belongs.
        #
        # We also "repair" the ordering, so we know in which order the
        # hints should be emitted.
        for com in comps:
            scc_id = com[0]
            scc[scc_id] = com
            merged[scc_id] = scc_id
            if len(com) > 1:
                so_before = order[scc_id]['before']
                so_after = order[scc_id]['after']
                for n in com:
                    if n == scc_id:
                        continue
                    so_before.update(order[n]['before'])
                    so_after.update(order[n]['after'])
                    merged[n] = scc_id
                    del order[n]
                if debug_solver:
                    print "N: SCC: %s -- %s" % (scc_id, str(sorted(com)))

        for com in comps:
            node = com[0]
            nbefore = set(merged[b] for b in order[node]['before'])
            nafter = set(merged[b] for b in order[node]['after'])

            # Drop self-relations (usually caused by the merging)
            nbefore.discard(node)
            nafter.discard(node)
            order[node]['before'] = nbefore
            order[node]['after'] = nafter


        if debug_solver:
            print "N: -- PARTIAL ORDER --"

        for com in sorted(order):
            if debug_solver and order[com]['before']:
                print "N: %s <= %s" % (com, str(sorted(order[com]['before'])))
            if not order[com]['after']:
                # This component can be scheduled immediately, add it
                # to "check"
                check.add(com)
            elif debug_solver:
                print "N: %s >= %s" % (com, str(sorted(order[com]['after'])))

        if debug_solver:
            print "N: -- END PARTIAL ORDER --"
            print "N: -- LINEARIZED ORDER --"

        for cur in iter_except(check.pop, KeyError):
            if order[cur]['after'] <= emitted:
                # This item is ready to be emitted right now
                if debug_solver:
                    print "N: %s -- %s" % (cur, sorted(scc[cur]))
                emitted.add(cur)
                result.append([key2item[x] for x in scc[cur]])
                if order[cur]['before']:
                    # There are components that come after this one.
                    # Add it to "check":
                    # - if it is ready, it will be emitted.
                    # - else, it will be dropped and re-added later.
                    check.update(order[cur]['before'] - emitted)

        if debug_solver:
            print "N: -- END LINEARIZED ORDER --"

        return result

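A toy version (not part of the file) of the final linearization pass above, using the same {'before': ..., 'after': ...} order shape; SCC merging is omitted and the item keys are invented.

    order = {
        'foo/amd64': {'before': {'bar/amd64'}, 'after': set()},     # foo must migrate first
        'bar/amd64': {'before': set(), 'after': {'foo/amd64'}},
    }
    emitted, result = set(), []
    check = {k for k, v in order.items() if not v['after']}         # ready immediately
    while check:
        cur = check.pop()
        if order[cur]['after'] <= emitted:       # all prerequisites already emitted
            emitted.add(cur)
            result.append(cur)
            check.update(order[cur]['before'] - emitted)
    print(result)    # ['foo/amd64', 'bar/amd64']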

    def _compute_scc(self, order, ptable):
        """
        Tarjan's algorithm and topological sorting implementation in Python

        Find the strongly connected components in a graph using
        Tarjan's algorithm.

        by Paul Harrison

        Public domain, do with it as you will
        """

        result = [ ]
        stack = [ ]
        low = { }

        def visit(node):
            if node in low:
                return

            num = len(low)
            low[node] = num
            stack_pos = len(stack)
            stack.append(node)

            for successor in order[node]['before']:
                visit(successor)
                low[node] = min(low[node], low[successor])

            if num == low[node]:
                component = tuple(stack[stack_pos:])
                del stack[stack_pos:]
                result.append(component)
                for item in component:
                    low[item] = len(ptable)

        for node in order:
            visit(node)

        return result

    def _dump_groups(self, groups):
        print "N: === Groups ==="
        for (item, adds, rms) in groups:
            print "N: %s => A: %s, R: %s" % (str(item), str(adds), str(rms))
        print "N: === END Groups ==="

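A standalone sketch (not part of the file) of the same Tarjan SCC pass applied to a toy order graph: a two-item cycle plus an independent item, with invented node names; len(order) plays the role of len(ptable) as an out-of-range sentinel.

    def compute_scc(order):
        result, stack, low = [], [], {}
        def visit(node):
            if node in low:
                return
            num = len(low)
            low[node] = num
            stack_pos = len(stack)
            stack.append(node)
            for successor in order[node]['before']:
                visit(successor)
                low[node] = min(low[node], low[successor])
            if num == low[node]:
                component = tuple(stack[stack_pos:])
                del stack[stack_pos:]
                result.append(component)
                for item in component:
                    low[item] = len(order)     # sentinel: never merged again
        for node in order:
            visit(node)
        return result

    order = {'foo': {'before': {'bar'}},       # foo and bar form a cycle
             'bar': {'before': {'foo'}},
             'baz': {'before': set()}}         # baz is independent
    print(compute_scc(order))                  # e.g. [('foo', 'bar'), ('baz',)]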