mirror of
https://git.launchpad.net/~ubuntu-release/britney/+git/britney2-ubuntu
synced 2025-04-15 13:11:16 +00:00
Autopkgtest: Collect results for requested tests before submitting
When we need to blow away and rebuild results.cache we want to avoid re-triggering all tests. Thus collect already existing results for requested tests before submitting new requests. This is rather hackish, as fetch_one_result() now has to deal with both self.requested_tests and self.pending_tests; the code should be refactored to eliminate one of these maps.
This commit is contained in:
parent
19cd69cb47
commit
9b70fe361d
@ -418,38 +418,39 @@ class AutoPackageTest(object):
|
||||
|
||||
# remove matching test requests, remember triggers
|
||||
satisfied_triggers = set()
|
||||
for pending_ver, pending_archinfo in self.pending_tests.get(src, {}).copy().items():
|
||||
# don't consider newer requested versions
|
||||
if apt_pkg.version_compare(pending_ver, ver) > 0:
|
||||
continue
|
||||
for request_map in [self.requested_tests, self.pending_tests]:
|
||||
for pending_ver, pending_archinfo in request_map.get(src, {}).copy().items():
|
||||
# don't consider newer requested versions
|
||||
if apt_pkg.version_compare(pending_ver, ver) > 0:
|
||||
continue
|
||||
|
||||
if result_triggers:
|
||||
# explicitly recording/retrieving test triggers is the
|
||||
# preferred (and robust) way of matching results to pending
|
||||
# requests
|
||||
for result_trigger in result_triggers:
|
||||
if result_triggers:
|
||||
# explicitly recording/retrieving test triggers is the
|
||||
# preferred (and robust) way of matching results to pending
|
||||
# requests
|
||||
for result_trigger in result_triggers:
|
||||
try:
|
||||
request_map[src][pending_ver][arch].remove(result_trigger)
|
||||
self.log_verbose('-> matches pending request %s/%s/%s for trigger %s' %
|
||||
(src, pending_ver, arch, str(result_trigger)))
|
||||
satisfied_triggers.add(result_trigger)
|
||||
except (KeyError, ValueError):
|
||||
self.log_verbose('-> does not match any pending request for %s/%s/%s' %
|
||||
(src, pending_ver, arch))
|
||||
else:
|
||||
# ... but we still need to support results without
|
||||
# testinfo.json and recorded triggers until we stop caring about
|
||||
# existing wily and trusty results; match the latest result to all
|
||||
# triggers for src that have at least the requested version
|
||||
try:
|
||||
self.pending_tests[src][pending_ver][arch].remove(result_trigger)
|
||||
self.log_verbose('-> matches pending request %s/%s/%s for trigger %s' %
|
||||
(src, pending_ver, arch, str(result_trigger)))
|
||||
satisfied_triggers.add(result_trigger)
|
||||
except (KeyError, ValueError):
|
||||
self.log_verbose('-> does not match any pending request for %s/%s/%s' %
|
||||
(src, pending_ver, arch))
|
||||
else:
|
||||
# ... but we still need to support results without
|
||||
# testinfo.json and recorded triggers until we stop caring about
|
||||
# existing wily and trusty results; match the latest result to all
|
||||
# triggers for src that have at least the requested version
|
||||
try:
|
||||
t = pending_archinfo[arch]
|
||||
self.log_verbose('-> matches pending request %s/%s for triggers %s' %
|
||||
(src, pending_ver, str(t)))
|
||||
satisfied_triggers.update(t)
|
||||
del self.pending_tests[src][pending_ver][arch]
|
||||
except KeyError:
|
||||
self.log_verbose('-> does not match any pending request for %s/%s' %
|
||||
(src, pending_ver))
|
||||
t = pending_archinfo[arch]
|
||||
self.log_verbose('-> matches pending request %s/%s for triggers %s' %
|
||||
(src, pending_ver, str(t)))
|
||||
satisfied_triggers.update(t)
|
||||
del request_map[src][pending_ver][arch]
|
||||
except KeyError:
|
||||
self.log_verbose('-> does not match any pending request for %s/%s' %
|
||||
(src, pending_ver))
|
||||
|
||||
# FIXME: this is a hack that mostly applies to re-running tests
|
||||
# manually without giving a trigger. Tests which don't get
|
||||
@ -581,10 +582,24 @@ class AutoPackageTest(object):
|
||||
# mark them as pending now
|
||||
self.update_pending_tests()
|
||||
|
||||
def collect_requested(self):
    '''Fetch swift results for every test we have requested.

    Normally redundant with collect(), but lets us pick up results that
    already exist in swift without actually submitting new test requests.
    The common case is rebuilding results.cache from scratch after it was
    blown away.
    '''
    # Snapshot the map first: fetch_swift_results() mutates
    # self.requested_tests while we iterate.
    snapshot = copy.deepcopy(self.requested_tests)
    swift_url = self.britney.options.adt_swift_url
    for src, version_map in snapshot.items():
        for arch_map in version_map.values():
            for arch in arch_map:
                self.fetch_swift_results(swift_url, src, arch)
|
||||
|
||||
def collect(self, packages):
|
||||
# update results from swift for all packages that we are waiting
|
||||
# for, and remove pending tests that we have results for on all
|
||||
# arches
|
||||
'''Update results from swift for all pending packages
|
||||
|
||||
Remove pending tests for which we have results.
|
||||
'''
|
||||
for pkg, verinfo in copy.deepcopy(self.pending_tests).items():
|
||||
for archinfo in verinfo.values():
|
||||
for arch in archinfo:
|
||||
|
@ -1902,6 +1902,7 @@ class Britney(object):
|
||||
autopkgtest_packages.append((e.name, e.ver[1]))
|
||||
autopkgtest.request(autopkgtest_packages, autopkgtest_excludes)
|
||||
if not self.options.dry_run:
|
||||
autopkgtest.collect_requested()
|
||||
autopkgtest.submit()
|
||||
autopkgtest.collect(autopkgtest_packages)
|
||||
cloud_url = "http://autopkgtest.ubuntu.com/packages/%(h)s/%(s)s/%(r)s/%(a)s"
|
||||
|
@ -365,7 +365,11 @@ lightgreen 1 i386 green 2
|
||||
{'green': [('old-version', '1'), ('new-version', '2')]}
|
||||
)[0]
|
||||
|
||||
# we already had all results before the run, so this should not trigger
|
||||
# any new requests
|
||||
self.assertEqual(self.amqp_requests, set())
|
||||
self.assertEqual(self.pending_requests, '')
|
||||
|
||||
# not expecting any failures to retrieve from swift
|
||||
self.assertNotIn('Failure', out, out)
|
||||
|
||||
@ -542,7 +546,7 @@ lightgreen 1 i386 green 2
|
||||
}
|
||||
)
|
||||
|
||||
self.assertEqual(len(self.amqp_requests), 6)
|
||||
self.assertEqual(self.amqp_requests, set())
|
||||
self.assertEqual(self.pending_requests, '')
|
||||
|
||||
# next run should not trigger any new requests
|
||||
@ -551,14 +555,22 @@ lightgreen 1 i386 green 2
|
||||
self.assertEqual(self.pending_requests, '')
|
||||
|
||||
# now lightgreen 2 gets built, should trigger a new test run
|
||||
self.swift.set_results({'autopkgtest-series': {
|
||||
'series/i386/l/lightgreen/20150101_100200@': (0, 'lightgreen 2'),
|
||||
'series/amd64/l/lightgreen/20150101_102000@': (0, 'lightgreen 2'),
|
||||
}})
|
||||
self.data.remove_all(True)
|
||||
self.do_test(
|
||||
[('libgreen1', {'Version': '1.1', 'Source': 'green', 'Depends': 'libc6'}, 'autopkgtest'),
|
||||
('lightgreen', {'Version': '2'}, 'autopkgtest')],
|
||||
{})
|
||||
self.assertEqual(self.amqp_requests,
|
||||
set(['debci-series-amd64:lightgreen {"triggers": ["lightgreen/2"]}',
|
||||
'debci-series-i386:lightgreen {"triggers": ["lightgreen/2"]}']))
|
||||
|
||||
# next run collects the results
|
||||
self.swift.set_results({'autopkgtest-series': {
|
||||
'series/i386/l/lightgreen/20150101_100200@': (0, 'lightgreen 2'),
|
||||
'series/amd64/l/lightgreen/20150101_102000@': (0, 'lightgreen 2'),
|
||||
}})
|
||||
self.do_test(
|
||||
[],
|
||||
{'green': (True, {'green 1.1': {'amd64': 'PASS', 'i386': 'PASS'},
|
||||
# FIXME: expecting a lightgreen test here
|
||||
# 'lightgreen 2': {'amd64': 'PASS', 'i386': 'PASS'},
|
||||
@ -570,9 +582,7 @@ lightgreen 1 i386 green 2
|
||||
'lightgreen': [('old-version', '1'), ('new-version', '2')],
|
||||
}
|
||||
)
|
||||
self.assertEqual(self.amqp_requests,
|
||||
set(['debci-series-amd64:lightgreen {"triggers": ["lightgreen/2"]}',
|
||||
'debci-series-i386:lightgreen {"triggers": ["lightgreen/2"]}']))
|
||||
self.assertEqual(self.amqp_requests, set())
|
||||
self.assertEqual(self.pending_requests, '')
|
||||
|
||||
def test_rdepends_unbuilt_unstable_only(self):
|
||||
@ -646,7 +656,7 @@ lightgreen 1 i386 green 2
|
||||
('excuses', 'lightgreen has no up-to-date binaries on any arch')]
|
||||
}
|
||||
)
|
||||
self.assertEqual(len(self.amqp_requests), 6)
|
||||
self.assertEqual(self.amqp_requests, set())
|
||||
self.assertEqual(self.pending_requests, '')
|
||||
|
||||
# lightgreen 2 stays unbuilt in britney, but we get a test result for it
|
||||
@ -896,7 +906,7 @@ newgreen 2 i386 newgreen 2
|
||||
},
|
||||
{'newgreen': [('old-version', '-'), ('new-version', '2')]})
|
||||
|
||||
self.assertEqual(len(self.amqp_requests), 6)
|
||||
self.assertEqual(self.amqp_requests, set())
|
||||
self.assertEqual(self.pending_requests, '')
|
||||
|
||||
def test_result_from_older_version(self):
|
||||
@ -965,7 +975,7 @@ newgreen 2 i386 newgreen 2
|
||||
}),
|
||||
})
|
||||
|
||||
self.assertEqual(len(self.amqp_requests), 6)
|
||||
self.assertEqual(self.amqp_requests, set())
|
||||
self.assertEqual(self.pending_requests, '')
|
||||
self.data.remove_all(True)
|
||||
|
||||
@ -1129,6 +1139,7 @@ lightgreen 1 i386 green 3
|
||||
}),
|
||||
})
|
||||
self.assertEqual(self.pending_requests, '')
|
||||
self.assertEqual(self.amqp_requests, set())
|
||||
|
||||
# remove new lightgreen by resetting archive indexes, and re-adding
|
||||
# green
|
||||
@ -1161,9 +1172,7 @@ lightgreen 1 i386 green 3
|
||||
|
||||
# should not trigger new requests
|
||||
self.assertEqual(self.pending_requests, '')
|
||||
self.assertEqual(self.amqp_requests,
|
||||
set(['debci-series-amd64:lightgreen {"triggers": ["green/2"]}',
|
||||
'debci-series-i386:lightgreen {"triggers": ["green/2"]}']))
|
||||
self.assertEqual(self.amqp_requests, set())
|
||||
|
||||
# but the next run should not trigger anything new
|
||||
self.do_test(
|
||||
|
Loading…
x
Reference in New Issue
Block a user