Move to MM and rename doop_source to apply_item_to_target_suite

Signed-off-by: Niels Thykier <niels@thykier.net>
Branch: ubuntu/rebased
Author: Niels Thykier, 6 years ago
parent 4ae6a34a27, commit 28c7a052fa
GPG Key ID: A65B78DBE67C7AAC (no known key found for this signature in database)

@@ -204,8 +204,7 @@ from britney2.policies import PolicyVerdict
 from britney2.policies.policy import AgePolicy, RCBugPolicy, PiupartsPolicy, BuildDependsPolicy
 from britney2.policies.autopkgtest import AutopkgtestPolicy
 from britney2.transaction import start_transaction
-from britney2.utils import (log_and_format_old_libraries,
-                            compute_reverse_tree, get_dependency_solvers,
+from britney2.utils import (log_and_format_old_libraries, get_dependency_solvers,
                             read_nuninst, write_nuninst, write_heidi,
                             format_and_log_uninst, newly_uninst, make_migrationitem,
                             write_excuses, write_heidi_delta, write_controlfiles,
@@ -1490,169 +1489,6 @@ class Britney(object):
             res.append("%s-%d" % (arch[0], n))

         return "%d+%d: %s" % (total, totalbreak, ":".join(res))

-    def doop_source(self, item, transaction, removals=frozenset()):
-        """Apply a change to the target suite as requested by `item`
-
-        A transaction in which all changes will be recorded. Can be None (e.g.
-        during a "force-hint"), when the changes will not be rolled back.
-
-        An optional set of binaries may be passed in "removals". Binaries listed
-        in this set will be assumed to be removed at the same time as the "item"
-        will migrate. This may change what binaries will be smooth-updated.
-        - Binaries in this set must be instances of BinaryPackageId.
-
-        This method applies the changes required by the action `item`, tracking
-        them so it will be possible to revert them.
-
-        The method returns a tuple of two sets: the binaries directly affected
-        by the change and that set extended with all transitive reverse
-        dependencies. The undo information needed to roll the change back is
-        registered with the `transaction` (when one is given).
-        """
-        undo = {'binaries': {}, 'sources': {}, 'virtual': {}, 'nvirtual': []}
-
-        affected_direct = set()
-        updated_binaries = set()
-
-        # local copies for better performance
-        source_suite = item.suite
-        target_suite = self.suite_info.target_suite
-        packages_t = target_suite.binaries
-        provides_t = target_suite.provides_table
-        pkg_universe = self.pkg_universe
-        eqv_set = set()
-        mm = self._migration_manager
-
-        updates, rms, _ = mm._compute_groups(item, removals=removals)
-
-        # Handle the source package
-        if item.architecture == 'source':
-            sources_t = target_suite.sources
-            if item.package in sources_t:
-                source = sources_t[item.package]
-                undo['sources'][item.package] = source
-                del sources_t[item.package]
-            else:
-                # the package didn't exist, so we mark it as to-be-removed in case of undo
-                undo['sources']['-' + item.package] = True
-
-            # add/update the source package
-            if not item.is_removal:
-                sources_t[item.package] = source_suite.sources[item.package]
-
-        # If we are removing *and* updating packages, then check for eqv. packages
-        if rms and updates:
-            eqv_table = {}
-
-            for rm_pkg_id in rms:
-                binary, _, parch = rm_pkg_id
-                key = (binary, parch)
-                eqv_table[key] = rm_pkg_id
-
-            for new_pkg_id in updates:
-                binary, _, parch = new_pkg_id
-                key = (binary, parch)
-                old_pkg_id = eqv_table.get(key)
-                if old_pkg_id is not None:
-                    if pkg_universe.are_equivalent(new_pkg_id, old_pkg_id):
-                        eqv_set.add(key)
-
-        # remove all the binaries which aren't being smooth updated
-        for rm_pkg_id in rms:
-            binary, version, parch = rm_pkg_id
-            pkey = (binary, parch)
-            binaries_t_a = packages_t[parch]
-            provides_t_a = provides_t[parch]
-
-            pkg_data = binaries_t_a[binary]
-            # save the old binary for undo
-            undo['binaries'][pkey] = rm_pkg_id
-            if pkey not in eqv_set:
-                # all the reverse dependencies are affected by
-                # the change
-                affected_direct.update(pkg_universe.reverse_dependencies_of(rm_pkg_id))
-                affected_direct.update(pkg_universe.negative_dependencies_of(rm_pkg_id))
-
-            # remove the provided virtual packages
-            for provided_pkg, prov_version, _ in pkg_data.provides:
-                key = (provided_pkg, parch)
-                if key not in undo['virtual']:
-                    undo['virtual'][key] = provides_t_a[provided_pkg].copy()
-                provides_t_a[provided_pkg].remove((binary, prov_version))
-                if not provides_t_a[provided_pkg]:
-                    del provides_t_a[provided_pkg]
-            # finally, remove the binary package
-            del binaries_t_a[binary]
-            target_suite.remove_binary(rm_pkg_id)
-
-        # Add/Update binary packages in testing
-        if updates:
-            packages_s = source_suite.binaries
-
-            for updated_pkg_id in updates:
-                binary, new_version, parch = updated_pkg_id
-                key = (binary, parch)
-                binaries_t_a = packages_t[parch]
-                provides_t_a = provides_t[parch]
-                equivalent_replacement = key in eqv_set
-
-                # obviously, added/modified packages are affected
-                if not equivalent_replacement:
-                    affected_direct.add(updated_pkg_id)
-
-                # if the binary already exists in testing, it is currently
-                # built by another source package. we therefore remove the
-                # version built by the other source package, after marking
-                # all of its reverse dependencies as affected
-                if binary in binaries_t_a:
-                    old_pkg_data = binaries_t_a[binary]
-                    old_pkg_id = old_pkg_data.pkg_id
-
-                    # save the old binary package
-                    undo['binaries'][key] = old_pkg_id
-                    if not equivalent_replacement:
-                        # all the reverse conflicts
-                        affected_direct.update(pkg_universe.reverse_dependencies_of(old_pkg_id))
-                    target_suite.remove_binary(old_pkg_id)
-                elif transaction and transaction.parent_transaction:
-                    # the binary isn't in the target suite, but it may have been at
-                    # the start of the current hint and have been removed
-                    # by an earlier migration. if that's the case then we
-                    # will have a record of the older instance of the binary
-                    # in the undo information. we can use that to ensure
-                    # that the reverse dependencies of the older binary
-                    # package are also checked.
-                    # reverse dependencies built from this source can be
-                    # ignored as their reverse trees are already handled
-                    # by this function
-                    for (tundo, tpkg) in transaction.parent_transaction.undo_items:
-                        if key in tundo['binaries']:
-                            tpkg_id = tundo['binaries'][key]
-                            affected_direct.update(pkg_universe.reverse_dependencies_of(tpkg_id))
-
-                # add/update the binary package from the source suite
-                new_pkg_data = packages_s[parch][binary]
-                binaries_t_a[binary] = new_pkg_data
-                target_suite.add_binary(updated_pkg_id)
-                updated_binaries.add(updated_pkg_id)
-
-                # register new provided packages
-                for provided_pkg, prov_version, _ in new_pkg_data.provides:
-                    key = (provided_pkg, parch)
-                    if provided_pkg not in provides_t_a:
-                        undo['nvirtual'].append(key)
-                        provides_t_a[provided_pkg] = set()
-                    elif key not in undo['virtual']:
-                        undo['virtual'][key] = provides_t_a[provided_pkg].copy()
-                    provides_t_a[provided_pkg].add((binary, prov_version))
-
-                if not equivalent_replacement:
-                    # all the reverse dependencies are affected by the change
-                    affected_direct.add(updated_pkg_id)
-                    affected_direct.update(pkg_universe.negative_dependencies_of(updated_pkg_id))
-
-        # Also include the transitive rdeps of the packages found so far
-        affected_all = affected_direct.copy()
-        compute_reverse_tree(pkg_universe, affected_all)
-        if transaction:
-            transaction.add_undo_item(undo, updated_binaries)
-        # return the affected packages (direct, then all)
-        return (affected_direct, affected_all)
-
     def try_migration(self, actions, nuninst_now, transaction, stop_on_first_regression=True):
         is_accepted = True
         affected_architectures = set()
@@ -1664,11 +1500,12 @@ class Britney(object):
         new_arches = self.options.new_arches
         break_arches = self.options.break_arches
         arch = None
+        mm = self._migration_manager

         if len(actions) == 1:
             item = actions[0]
             # apply the changes
-            affected_direct, affected_all = self.doop_source(item, transaction)
+            affected_direct, affected_all = mm._apply_item_to_suite(item, transaction)
             if item.architecture == 'source':
                 affected_architectures = set(self.options.architectures)
             else:
@@ -1677,7 +1514,6 @@ class Britney(object):
             removals = set()
             affected_direct = set()
             affected_all = set()
-            mm = self._migration_manager
             for item in actions:
                 _, rms, _ = mm._compute_groups(item, allow_smooth_updates=False)
                 removals.update(rms)
@@ -1687,9 +1523,9 @@ class Britney(object):
                 affected_architectures = set(self.options.architectures)

             for item in actions:
-                item_affected_direct, item_affected_all = self.doop_source(item,
-                                                                           transaction,
-                                                                           removals=removals)
+                item_affected_direct, item_affected_all = mm._apply_item_to_suite(item,
+                                                                                  transaction,
+                                                                                  removals=removals)
                 affected_direct.update(item_affected_direct)
                 affected_all.update(item_affected_all)

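For readers skimming the diff: after this change, try_migration drives all
migrations through the MigrationManager. The following sketch is not part of
the commit (the helper name apply_actions is invented, and the architecture
bookkeeping and hint machinery are omitted); it only summarizes the two call
paths shown in the hunks above:

    # Simplified sketch of try_migration's use of the migration manager.
    def apply_actions(mm, actions, transaction):
        if len(actions) == 1:
            # single item: apply directly, smooth updates allowed
            return mm._apply_item_to_suite(actions[0], transaction)
        # multiple items: first collect every binary some item will remove
        # (without smooth updates), so each application knows about the
        # removals pending from the other items
        removals = set()
        for item in actions:
            _, rms, _ = mm._compute_groups(item, allow_smooth_updates=False)
            removals.update(rms)
        affected_direct = set()
        affected_all = set()
        for item in actions:
            direct, transitive = mm._apply_item_to_suite(item, transaction,
                                                         removals=removals)
            affected_direct.update(direct)
            affected_all.update(transitive)
        return (affected_direct, affected_all)
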
@@ -1,6 +1,6 @@
 import apt_pkg

-from britney2.utils import MigrationConstraintException, find_smooth_updateable_binaries
+from britney2.utils import MigrationConstraintException, compute_reverse_tree, find_smooth_updateable_binaries


 class MigrationManager(object):
@@ -159,3 +159,165 @@ class MigrationManager(object):
                 adds.add(pkg_id)

         return (adds, rms, smoothbins)
+
+    def _apply_item_to_suite(self, item, transaction, removals=frozenset()):
+        """Apply a change to the target suite as requested by `item`
+
+        A transaction in which all changes will be recorded. Can be None (e.g.
+        during a "force-hint"), when the changes will not be rolled back.
+
+        An optional set of binaries may be passed in "removals". Binaries listed
+        in this set will be assumed to be removed at the same time as the "item"
+        will migrate. This may change what binaries will be smooth-updated.
+        - Binaries in this set must be instances of BinaryPackageId.
+
+        This method applies the changes required by the action `item`, tracking
+        them so it will be possible to revert them.
+
+        The method returns a tuple of two sets: the binaries directly affected
+        by the change and that set extended with all transitive reverse
+        dependencies. The undo information needed to roll the change back is
+        registered with the `transaction` (when one is given).
+        """
+        undo = {'binaries': {}, 'sources': {}, 'virtual': {}, 'nvirtual': []}
+
+        affected_direct = set()
+        updated_binaries = set()
+
+        # local copies for better performance
+        source_suite = item.suite
+        target_suite = self.suite_info.target_suite
+        packages_t = target_suite.binaries
+        provides_t = target_suite.provides_table
+        pkg_universe = self.pkg_universe
+        eqv_set = set()
+
+        updates, rms, _ = self._compute_groups(item, removals=removals)
+
+        # Handle the source package
+        if item.architecture == 'source':
+            sources_t = target_suite.sources
+            if item.package in sources_t:
+                source = sources_t[item.package]
+                undo['sources'][item.package] = source
+                del sources_t[item.package]
+            else:
+                # the package didn't exist, so we mark it as to-be-removed in case of undo
+                undo['sources']['-' + item.package] = True
+
+            # add/update the source package
+            if not item.is_removal:
+                sources_t[item.package] = source_suite.sources[item.package]
+
+        # If we are removing *and* updating packages, then check for eqv. packages
+        if rms and updates:
+            eqv_table = {}
+
+            for rm_pkg_id in rms:
+                binary, _, parch = rm_pkg_id
+                key = (binary, parch)
+                eqv_table[key] = rm_pkg_id
+
+            for new_pkg_id in updates:
+                binary, _, parch = new_pkg_id
+                key = (binary, parch)
+                old_pkg_id = eqv_table.get(key)
+                if old_pkg_id is not None:
+                    if pkg_universe.are_equivalent(new_pkg_id, old_pkg_id):
+                        eqv_set.add(key)
+
+        # remove all the binaries which aren't being smooth updated
+        for rm_pkg_id in rms:
+            binary, version, parch = rm_pkg_id
+            pkey = (binary, parch)
+            binaries_t_a = packages_t[parch]
+            provides_t_a = provides_t[parch]
+
+            pkg_data = binaries_t_a[binary]
+            # save the old binary for undo
+            undo['binaries'][pkey] = rm_pkg_id
+            if pkey not in eqv_set:
+                # all the reverse dependencies are affected by
+                # the change
+                affected_direct.update(pkg_universe.reverse_dependencies_of(rm_pkg_id))
+                affected_direct.update(pkg_universe.negative_dependencies_of(rm_pkg_id))
+
+            # remove the provided virtual packages
+            for provided_pkg, prov_version, _ in pkg_data.provides:
+                key = (provided_pkg, parch)
+                if key not in undo['virtual']:
+                    undo['virtual'][key] = provides_t_a[provided_pkg].copy()
+                provides_t_a[provided_pkg].remove((binary, prov_version))
+                if not provides_t_a[provided_pkg]:
+                    del provides_t_a[provided_pkg]
+            # finally, remove the binary package
+            del binaries_t_a[binary]
+            target_suite.remove_binary(rm_pkg_id)
+
+        # Add/Update binary packages in testing
+        if updates:
+            packages_s = source_suite.binaries
+
+            for updated_pkg_id in updates:
+                binary, new_version, parch = updated_pkg_id
+                key = (binary, parch)
+                binaries_t_a = packages_t[parch]
+                provides_t_a = provides_t[parch]
+                equivalent_replacement = key in eqv_set
+
+                # obviously, added/modified packages are affected
+                if not equivalent_replacement:
+                    affected_direct.add(updated_pkg_id)
+
+                # if the binary already exists in testing, it is currently
+                # built by another source package. we therefore remove the
+                # version built by the other source package, after marking
+                # all of its reverse dependencies as affected
+                if binary in binaries_t_a:
+                    old_pkg_data = binaries_t_a[binary]
+                    old_pkg_id = old_pkg_data.pkg_id
+
+                    # save the old binary package
+                    undo['binaries'][key] = old_pkg_id
+                    if not equivalent_replacement:
+                        # all the reverse conflicts
+                        affected_direct.update(pkg_universe.reverse_dependencies_of(old_pkg_id))
+                    target_suite.remove_binary(old_pkg_id)
+                elif transaction and transaction.parent_transaction:
+                    # the binary isn't in the target suite, but it may have been at
+                    # the start of the current hint and have been removed
+                    # by an earlier migration. if that's the case then we
+                    # will have a record of the older instance of the binary
+                    # in the undo information. we can use that to ensure
+                    # that the reverse dependencies of the older binary
+                    # package are also checked.
+                    # reverse dependencies built from this source can be
+                    # ignored as their reverse trees are already handled
+                    # by this function
+                    for (tundo, tpkg) in transaction.parent_transaction.undo_items:
+                        if key in tundo['binaries']:
+                            tpkg_id = tundo['binaries'][key]
+                            affected_direct.update(pkg_universe.reverse_dependencies_of(tpkg_id))
+
+                # add/update the binary package from the source suite
+                new_pkg_data = packages_s[parch][binary]
+                binaries_t_a[binary] = new_pkg_data
+                target_suite.add_binary(updated_pkg_id)
+                updated_binaries.add(updated_pkg_id)
+
+                # register new provided packages
+                for provided_pkg, prov_version, _ in new_pkg_data.provides:
+                    key = (provided_pkg, parch)
+                    if provided_pkg not in provides_t_a:
+                        undo['nvirtual'].append(key)
+                        provides_t_a[provided_pkg] = set()
+                    elif key not in undo['virtual']:
+                        undo['virtual'][key] = provides_t_a[provided_pkg].copy()
+                    provides_t_a[provided_pkg].add((binary, prov_version))
+
+                if not equivalent_replacement:
+                    # all the reverse dependencies are affected by the change
+                    affected_direct.add(updated_pkg_id)
+                    affected_direct.update(pkg_universe.negative_dependencies_of(updated_pkg_id))
+
+        # Also include the transitive rdeps of the packages found so far
+        affected_all = affected_direct.copy()
+        compute_reverse_tree(pkg_universe, affected_all)
+        if transaction:
+            transaction.add_undo_item(undo, updated_binaries)
+        # return the affected packages (direct, then all)
+        return (affected_direct, affected_all)
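
As a side note on the undo machinery: the undo dictionary assembled above has
four slots, each holding just enough state to reverse one kind of mutation. A
hypothetical example of its shape (all package names are invented for
illustration, and BinaryPackageId values are shown as plain
(name, version, arch) tuples):

    undo = {
        # old binaries that were removed or replaced, keyed by (name, arch);
        # each value is the BinaryPackageId of the binary to restore
        'binaries': {('libfoo1', 'amd64'): ('libfoo1', '1.0-1', 'amd64')},
        # the previous source record; a '-<name>': True entry marks a source
        # that was new to the target suite and must be deleted on rollback
        'sources': {'-foo': True},
        # previous provider sets for virtual packages whose providers changed
        'virtual': {('foo-api', 'amd64'): {('libfoo0', '0.9-3')}},
        # virtual packages that did not exist before and must be dropped
        'nvirtual': [('foo-api-2', 'amd64')],
    }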
