mirror of
https://git.launchpad.net/livecd-rootfs
synced 2025-08-21 13:44:08 +00:00
Compare commits
235 Commits
ubuntu/mas
...
2.408.56
Author | SHA1 | Date | |
---|---|---|---|
|
1f4b996077 | ||
|
baf2b5bcdd | ||
|
2a5370e7b6 | ||
|
53e8eeaee3 | ||
|
24e1ea3bc2 | ||
|
27ef9b07b4 | ||
|
684c0c6888 | ||
|
6049019a8b | ||
|
ca8e2b2c7b | ||
|
ca945b9a34 | ||
|
566b3f3a3e | ||
|
badff9dcb7 | ||
|
ae9b91d124 | ||
|
c3d1a92933 | ||
|
8f8ea8922e | ||
|
9bea8296ee | ||
|
313fd0af9b | ||
|
2d1ee4da19 | ||
|
fa98de9a7d | ||
|
39db5175d6 | ||
|
2a568381e3 | ||
|
6e73a2b3a6 | ||
|
76527b27cf | ||
|
63b3bd10bc | ||
|
4c113d4906 | ||
|
c10042acb0 | ||
|
996d7e20d5 | ||
|
75a3b3e6ae | ||
|
aafa682254 | ||
|
77ae8d704f | ||
|
9fd7370758 | ||
|
6db40c7d93 | ||
|
64985baafd | ||
|
8003f3a395 | ||
|
4e77d07a6c | ||
|
518b18db0f | ||
|
5ca9cb6c3c | ||
|
ce058ad359 | ||
|
493035c7d2 | ||
|
a3a7f047d0 | ||
|
d7b59e5df3 | ||
|
def18aa7f7 | ||
|
8414b699da | ||
|
086c93aa28 | ||
|
6c9fc6f3ec | ||
|
6e9d7f35ca | ||
|
aec5f3666a | ||
|
5fca228f83 | ||
|
657500f59f | ||
|
205a77bbd9 | ||
|
e3a00b65b1 | ||
|
f4a3f469d7 | ||
|
1f05fbea69 | ||
|
c381065c00 | ||
|
fef73e9a90 | ||
|
fd02a30830 | ||
|
b1aa0279f6 | ||
|
f292ab1847 | ||
|
1f990b1bf2 | ||
|
29324714c0 | ||
|
0ae8b1fc0e | ||
|
1ed68b0b6d | ||
|
f64e3c9981 | ||
|
91ba932fec | ||
|
8e8fb0bf09 | ||
|
49d1dce530 | ||
|
872d4f6e7f | ||
|
3291e971de | ||
|
64e90c0cca | ||
|
b8b2e34382 | ||
|
706685a5a2 | ||
|
11a9a25fbb | ||
|
903ec4659a | ||
|
f7eed7977c | ||
|
fe7226cff2 | ||
|
21b2f1fa46 | ||
|
badc9aa7d1 | ||
|
aadb032e68 | ||
|
b882fed343 | ||
|
928a9e6c71 | ||
|
7349c0b322 | ||
|
4a759235a8 | ||
|
6a8dcc2bfe | ||
|
67d979da0d | ||
|
809028f239 | ||
|
8e38e80cd6 | ||
|
d6c5d54dfa | ||
|
b925874fe0 | ||
|
8e53c77a11 | ||
|
c7878bdc76 | ||
|
f4424e223e | ||
|
7757190789 | ||
|
2400f4cfec | ||
|
c482ce867f | ||
|
4fe56fe700 | ||
|
0c41250141 | ||
|
e8a9bb58ab | ||
|
18c8049a23 | ||
|
3a068b6b62 | ||
|
6e65da4968 | ||
|
1198bb65d4 | ||
|
94314872ba | ||
|
4e83114045 | ||
|
8044452929 | ||
|
92026e3a26 | ||
|
51bff098b7 | ||
|
8cc5fb43fa | ||
|
8a230baa24 | ||
|
972a9dcc7d | ||
|
e10db2de53 | ||
|
f260665f8e | ||
|
7854028092 | ||
|
382038c703 | ||
|
7e458f4f15 | ||
|
9777b39aea | ||
|
cbf0a7c417 | ||
|
646a88b5ee | ||
|
7cc9576845 | ||
|
4a7b5f2f77 | ||
|
580aa70969 | ||
|
03e8249758 | ||
|
492200ba16 | ||
|
9ba235936b | ||
|
38a7739cf5 | ||
|
9f56606604 | ||
|
67dab61787 | ||
|
5da7c23c81 | ||
|
6c56522961 | ||
|
1c42ecba1b | ||
|
cb73e2475e | ||
|
3d6660594d | ||
|
d5c4f11daa | ||
|
813eb816ca | ||
|
22a8f817ca | ||
|
13c0c8ffd5 | ||
|
cc3cb4e0d8 | ||
|
b2fba90356 | ||
|
b66cc28507 | ||
|
2a70314f27 | ||
|
0987c6a7ea | ||
|
04ffcc07dc | ||
|
60df0277fd | ||
|
4a4bac20f6 | ||
|
dfb4c593b1 | ||
|
2e3ca4b5a9 | ||
|
87d69d902a | ||
|
aee9079732 | ||
|
53f5c1c79c | ||
|
06cdc3f46d | ||
|
413a53d482 | ||
|
0900d20265 | ||
|
88d854ffb5 | ||
|
b90b04a7b7 | ||
|
d33ab3825f | ||
|
febe06642c | ||
|
4d7509f570 | ||
|
3f9753b2c0 | ||
|
86ed851b5a | ||
|
b32298ede1 | ||
|
1ca59f1c3a | ||
|
a40bd1d55b | ||
|
8244beb6d1 | ||
|
93fc7c56f2 | ||
|
e5cbd2384a | ||
|
98f27745b7 | ||
|
e9fc1d8c9f | ||
|
5e1d0f0ee7 | ||
|
9dd178a0e5 | ||
|
c2d66c7d96 | ||
|
bd2a11774f | ||
|
cd15b730f7 | ||
|
5a6f68a1a9 | ||
|
86108d69d9 | ||
|
95b06ff341 | ||
|
527a7b165e | ||
|
9c9e8c4e4f | ||
|
f16611774f | ||
|
2b208d7287 | ||
|
13e9dc0089 | ||
|
d330c595ae | ||
|
410703eae5 | ||
|
a623618e2c | ||
|
cf1b95b854 | ||
|
6219443b4c | ||
|
0acda846dd | ||
|
f6897fae71 | ||
|
a99dd8bf9f | ||
|
2c98112f93 | ||
|
11a92795fe | ||
|
b81173b1b5 | ||
|
3fdf3933ed | ||
|
314a6b95a3 | ||
|
880d4f8b89 | ||
|
95aabdf668 | ||
|
a4988ccf13 | ||
|
a75544e0c3 | ||
|
f48670cec5 | ||
|
e3b44822d7 | ||
|
0cabfc3781 | ||
|
02223103c1 | ||
|
8d72b588a1 | ||
|
2569995de8 | ||
|
0fcff1923c | ||
|
2685b836bd | ||
|
95c239f301 | ||
|
e25936cc61 | ||
|
2048477b0d | ||
|
b5bb3c3a6e | ||
|
5081c333bb | ||
|
2c9f9dd323 | ||
|
a0983db864 | ||
|
d50b3ba529 | ||
|
e6793cfa3a | ||
|
28e14a0b77 | ||
|
dacc1ea41e | ||
|
ececc9789d | ||
|
92a10c0df0 | ||
|
4bc9def5d6 | ||
|
188d485441 | ||
|
f2f8665598 | ||
|
40a4d16f3e | ||
|
66aaa4ab40 | ||
|
58dd50b464 | ||
|
f10c47f913 | ||
|
a4f597c84b | ||
|
24c2b2773f | ||
|
8f735f316b | ||
|
5efb2007af | ||
|
8bcc5ca1c2 | ||
|
455d0a6af4 | ||
|
6b04c07371 | ||
|
4079a1acce | ||
|
d51fef0426 | ||
|
b0e649a83c | ||
|
caedaee057 |
536
debian/changelog
vendored
536
debian/changelog
vendored
@ -1,3 +1,539 @@
|
||||
livecd-rootfs (2.408.56) xenial; urgency=medium
|
||||
|
||||
* Backport enabling of adding extra snaps to ubuntu-core images via the
|
||||
EXTRA_SNAPS environment (LP: #1849491).
|
||||
|
||||
-- Łukasz 'sil2100' Zemczak <lukasz.zemczak@ubuntu.com> Wed, 30 Oct 2019 17:28:57 +0100
|
||||
|
||||
livecd-rootfs (2.408.55) xenial; urgency=medium
|
||||
|
||||
* magic-proxy: dump proxy log to stdout on failure (LP: #1847300)
|
||||
|
||||
-- Robert C Jennings <robert.jennings@canonical.com> Tue, 08 Oct 2019 11:02:08 -0500
|
||||
|
||||
livecd-rootfs (2.408.54) xenial; urgency=medium
|
||||
|
||||
* Revert exclusion of makedev from buildd chroots, as it turned out not to
|
||||
be the problem. Instead, fix up /dev/ptmx to be a character device node
|
||||
rather than a symlink to /dev/pts/ptmx, in line with the discussion in
|
||||
https://bugs.debian.org/817236; I think this is safer than
|
||||
cherry-picking the fix to debootstrap at this point in a stable release
|
||||
cycle (LP: #1844504).
|
||||
|
||||
-- Colin Watson <cjwatson@ubuntu.com> Thu, 26 Sep 2019 10:53:23 +0100
|
||||
|
||||
livecd-rootfs (2.408.53) xenial; urgency=medium
|
||||
|
||||
* Fix exclusion of makedev from buildd chroots; debootstrap doesn't
|
||||
respect --exclude for "Priority: required" packages, so we have to purge
|
||||
makedev later (LP: #1844504).
|
||||
|
||||
-- Colin Watson <cjwatson@ubuntu.com> Tue, 24 Sep 2019 15:05:26 +0100
|
||||
|
||||
livecd-rootfs (2.408.52) xenial; urgency=medium
|
||||
|
||||
* Exclude makedev from buildd chroots, since it was historically excluded
|
||||
and apparently breaks some builds (e.g. mir; LP: #1844504).
|
||||
|
||||
-- Colin Watson <cjwatson@ubuntu.com> Wed, 18 Sep 2019 11:01:45 +0200
|
||||
|
||||
livecd-rootfs (2.408.51) xenial; urgency=medium
|
||||
|
||||
* ubuntu-cpc: Only produce explicitly specified artifacts (LP: #1837254)
|
||||
|
||||
-- Robert C Jennings <robert.jennings@canonical.com> Mon, 26 Aug 2019 16:32:41 -0500
|
||||
|
||||
livecd-rootfs (2.408.50) xenial; urgency=medium
|
||||
|
||||
* Actually, do not depend on snapd on powerpc as well. Snaps are not
|
||||
supported on this platform at all. (LP: #1830823)
|
||||
|
||||
-- Łukasz 'sil2100' Zemczak <lukasz.zemczak@ubuntu.com> Tue, 25 Jun 2019 22:54:10 +0200
|
||||
|
||||
livecd-rootfs (2.408.49) xenial; urgency=medium
|
||||
|
||||
* Add explicit dependency on python3-yaml, which is used in the code but
|
||||
was previously pulled in indirectly via ubuntu-image. LP: #1830823.
|
||||
|
||||
-- Steve Langasek <steve.langasek@ubuntu.com> Thu, 30 May 2019 14:20:30 -0700
|
||||
|
||||
livecd-rootfs (2.408.48) xenial; urgency=medium
|
||||
|
||||
* Do not depend on ubuntu-image on powerpc as it is not a supported platform
|
||||
for building ubuntu-core images (and we don't build classic preinstalled
|
||||
images on xenial). (LP: #1830823)
|
||||
|
||||
-- Łukasz 'sil2100' Zemczak <lukasz.zemczak@ubuntu.com> Wed, 29 May 2019 18:50:31 +0200
|
||||
|
||||
livecd-rootfs (2.408.47) xenial; urgency=medium
|
||||
|
||||
[ Robert C Jennings ]
|
||||
* ubuntu-cpc: parallel builds (LP: #1829938)
|
||||
- Inject a proxy into the build providing a snapshot view of the package repo.
|
||||
- Use series files with dependency handling to generate hook symlinks dynamically
|
||||
* ubuntu-cpc: Don't ignore failures to find a base VMDK for OVAs
|
||||
- Backport of c1a36eb3cdd07b5180d7f9cfe1748fcdb21261c7
|
||||
|
||||
-- Steve Langasek <steve.langasek@ubuntu.com> Tue, 21 May 2019 17:19:42 -0700
|
||||
|
||||
livecd-rootfs (2.408.46) xenial; urgency=medium
|
||||
|
||||
* Build WSL rootfs tarball (LP: #1827930)
|
||||
|
||||
-- Balint Reczey <rbalint@ubuntu.com> Fri, 17 May 2019 15:14:38 +0200
|
||||
|
||||
livecd-rootfs (2.408.45) xenial; urgency=medium
|
||||
|
||||
* Drop /etc/update-motd.d/51-cloudguest from cloud images; this is not
|
||||
consistent with current Ubuntu Advantage product language. Any future
|
||||
customizations to update-motd for cloud images should be done via a
|
||||
package instead. LP: #1823776.
|
||||
|
||||
-- Steve Langasek <steve.langasek@ubuntu.com> Thu, 14 Mar 2019 14:11:57 -0700
|
||||
|
||||
livecd-rootfs (2.408.44) xenial; urgency=medium
|
||||
|
||||
* Make sure buildd images have a /usr/sbin/policy-rc.d symlink
|
||||
(LP: #1815251).
|
||||
|
||||
-- Colin Watson <cjwatson@ubuntu.com> Mon, 18 Feb 2019 16:05:32 +0000
|
||||
|
||||
livecd-rootfs (2.408.43) xenial; urgency=medium
|
||||
|
||||
* Add a buildd subproject (LP: #1815251).
|
||||
* Add a LXD image to builds for the buildd subproject.
|
||||
* Move buildd image building to binary hooks.
|
||||
|
||||
-- Colin Watson <cjwatson@ubuntu.com> Fri, 08 Feb 2019 22:56:03 +0000
|
||||
|
||||
livecd-rootfs (2.408.42) xenial; urgency=medium
|
||||
|
||||
[ Cody Shepherd ]
|
||||
* Backport adding snaps to image manifests (LP: #1805497)
|
||||
|
||||
-- Steve Langasek <steve.langasek@ubuntu.com> Mon, 10 Dec 2018 10:21:47 -0800
|
||||
|
||||
livecd-rootfs (2.408.41) xenial; urgency=medium
|
||||
|
||||
* Disable checksum generation (LP: #1799773)
|
||||
|
||||
-- Robert C Jennings <robert.jennings@canonical.com> Fri, 16 Nov 2018 08:37:57 -0800
|
||||
|
||||
livecd-rootfs (2.408.40) xenial; urgency=medium
|
||||
|
||||
[ Cody Shepherd ]
|
||||
* Increasing modularity in relocation of /etc/resolv.conf to enable users
|
||||
of live-build/functions to manipulate /etc/resolv.conf in a more
|
||||
granular and future-proof way. LP: #1801134.
|
||||
|
||||
-- Steve Langasek <steve.langasek@ubuntu.com> Thu, 01 Nov 2018 13:46:47 -0700
|
||||
|
||||
livecd-rootfs (2.408.39) xenial; urgency=medium
|
||||
|
||||
* Backport support for building ubuntu-core images with ubuntu-image (using
|
||||
IMAGEFORMAT=ubuntu-image).
|
||||
* Decide what model assertion series to fetch depending on the suite. Use 16
|
||||
for xenial and 18 for other series (bionic+). This enables core18 image
|
||||
builds (LP: #1799736).
|
||||
|
||||
-- Łukasz 'sil2100' Zemczak <lukasz.zemczak@ubuntu.com> Thu, 04 Oct 2018 19:30:17 +0200
|
||||
|
||||
livecd-rootfs (2.408.38) xenial; urgency=medium
|
||||
|
||||
* ubuntu-cpc: Handle a pre-existing /lib/modules in 030-root-tarball.hook
|
||||
(LP: #1797170).
|
||||
|
||||
-- Daniel Watkins <oddbloke@ubuntu.com> Wed, 10 Oct 2018 11:55:30 -0400
|
||||
|
||||
livecd-rootfs (2.408.37) xenial; urgency=medium
|
||||
|
||||
* debian/dirs: add debian/dirs entry for empty includes.chroot dir
|
||||
(LP: #1794383)
|
||||
|
||||
-- Robert C Jennings <robert.jennings@canonical.com> Tue, 25 Sep 2018 20:24:49 -0500
|
||||
|
||||
livecd-rootfs (2.408.36) xenial; urgency=medium
|
||||
|
||||
* Update unminimize script text and install ubuntu-standard when
|
||||
unminimizing a minimal image (LP: #1778777)
|
||||
|
||||
-- Francis Ginther <francis.ginther@canonical.com> Mon, 02 Jul 2018 16:42:18 -0500
|
||||
|
||||
livecd-rootfs (2.408.35) xenial; urgency=medium
|
||||
|
||||
* Fix the install command for pollinate which is added back to minimal
|
||||
images (LP: #1779406)
|
||||
|
||||
-- Francis Ginther <francis.ginther@canonical.com> Mon, 02 Jul 2018 14:44:34 -0500
|
||||
|
||||
livecd-rootfs (2.408.34) xenial; urgency=medium
|
||||
|
||||
* Add back pollinate for minimal after it is removed due to a dependency
|
||||
on vim-common. LP: #1779406.
|
||||
|
||||
-- Francis Ginther <francis.ginther@canonical.com> Fri, 29 Jun 2018 10:42:06 -0500
|
||||
|
||||
livecd-rootfs (2.408.33) xenial; urgency=medium
|
||||
|
||||
* Zero fill space in UEFI partitions, too
|
||||
* Set LB_ISO_PREPARER to livecd-rootfs to avoid unbound variable in default string
|
||||
|
||||
-- Robert C Jennings <robert.jennings@canonical.com> Fri, 08 Jun 2018 09:12:03 -0700
|
||||
|
||||
livecd-rootfs (2.408.32) xenial; urgency=medium
|
||||
|
||||
* snaps: Fixes for snap pre-seeding (LP: #1775710)
|
||||
* Backport: Refactor functions out of ubuntu-cpc and ubuntu-server hooks.
|
||||
* Add dependency on distro-info for snap preseeding
|
||||
|
||||
-- Robert C Jennings <robert.jennings@canonical.com> Thu, 07 Jun 2018 14:12:26 -0700
|
||||
|
||||
livecd-rootfs (2.408.31) xenial; urgency=medium
|
||||
|
||||
* Backport snap preseeding functions from bionic. (LP: #1771177)
|
||||
|
||||
-- Mathieu Trudel-Lapierre <cyphermox@ubuntu.com> Mon, 14 May 2018 14:16:35 -0400
|
||||
|
||||
livecd-rootfs (2.408.30) xenial; urgency=medium
|
||||
|
||||
* Set the default locale to C.UTF-8 in minimized cloud images.
|
||||
(LP: #1759003)
|
||||
|
||||
-- Robert C Jennings <robert.jennings@canonical.com> Tue, 08 May 2018 11:32:29 -0500
|
||||
|
||||
livecd-rootfs (2.408.29) xenial; urgency=medium
|
||||
|
||||
[ Christopher Glass ]
|
||||
* ubuntu-cpc: When performing a minimized build make sure the
|
||||
/etc/cloud/build.info file says "minimal" (LP: #1759519)
|
||||
|
||||
-- Steve Langasek <steve.langasek@ubuntu.com> Tue, 17 Apr 2018 13:28:50 -0700
|
||||
|
||||
livecd-rootfs (2.408.28) xenial; urgency=medium
|
||||
|
||||
* ubuntu-cpc: When performing a minimized build, don't generate artifacts
|
||||
that won't boot with the linux-kvm kernel (LP: #1757223).
|
||||
|
||||
-- Daniel Watkins <daniel.watkins@canonical.com> Tue, 20 Mar 2018 12:42:25 -0400
|
||||
|
||||
livecd-rootfs (2.408.27) xenial; urgency=medium
|
||||
|
||||
* Don't ask for password and GECOS while creating vagrant user
|
||||
(LP: #1569237)
|
||||
|
||||
-- Balint Reczey <rbalint@ubuntu.com> Thu, 21 Dec 2017 09:20:32 +0100
|
||||
|
||||
livecd-rootfs (2.408.26) xenial; urgency=medium
|
||||
|
||||
* Added a "vagrant" user to the vagrant image in addition to the "ubuntu"
|
||||
user, in accordance with the vagrant community's expectations (LP: #1569237)
|
||||
|
||||
-- Christopher Glass (Ubuntu) <tribaal@ubuntu.com> Thu, 07 Dec 2017 14:00:59 +0100
|
||||
|
||||
livecd-rootfs (2.408.25) xenial; urgency=medium
|
||||
|
||||
* Sort and list dependencies nicely
|
||||
* Depend on packages instead of installing them at run time
|
||||
* Use xz -T4 instead of pxz.
|
||||
Supported xz versions before 5.2 accept the -T4 option but ignore it.
|
||||
Also depend on xz-utils instead of downloading pxz. (LP: #1701132)
|
||||
|
||||
-- Balint Reczey <rbalint@ubuntu.com> Wed, 06 Dec 2017 11:34:04 +0100
|
||||
|
||||
livecd-rootfs (2.408.24) xenial; urgency=medium
|
||||
|
||||
* For the IMAGEFORMAT=none parts to work for ubuntu-image classic builds, we
|
||||
need to make sure IMAGEFORMAT is preserved and not overwritten if defined.
|
||||
Backporting from artful.
|
||||
|
||||
-- Łukasz 'sil2100' Zemczak <lukasz.zemczak@ubuntu.com> Tue, 21 Nov 2017 17:01:14 +0100
|
||||
|
||||
livecd-rootfs (2.408.23) xenial; urgency=medium
|
||||
|
||||
[ Gary Wang ]
|
||||
* Add the IMAGEFORMAT=none to support generating a single rootfs
|
||||
(LP: #1730642)
|
||||
|
||||
[ Christopher Glass (Ubuntu) ]
|
||||
* Backport "minimized round 2" changes from trunk to Xenial (LP: #1731492)
|
||||
* Remove apt, debconf, dpkg cruft files from /var/cache and /var/lib in
|
||||
all our livefses.
|
||||
* Pass --cache false to lb config; otherwise we copy around caches of
|
||||
.debs that are never used properly, and which prevent us from emptying
|
||||
/var/cache/apt in images.
|
||||
* When building minimized cloud images, remove various packages that we
|
||||
don't want installed by default. Some are tools that aren't needed for
|
||||
non-interactive use; some are libraries whose reverse-dependencies
|
||||
will have already been removed; and one, open-vm-tools, should only be
|
||||
included in images that are targeted to VMWare (which is not the case
|
||||
for any of the current minimal images), rather than being included
|
||||
directly in the cloud-image seed.
|
||||
|
||||
-- Steve Langasek <steve.langasek@ubuntu.com> Tue, 14 Nov 2017 11:21:48 -0800
|
||||
|
||||
livecd-rootfs (2.408.22) xenial; urgency=medium
|
||||
|
||||
* Make sure to remove any initramfs that was generated in a minimized
|
||||
image.
|
||||
* divert /usr/bin/systemd-detect-virt as part of the grub diversions, so
|
||||
that the grub kernel postinst hook is operational when we're building
|
||||
in a container. LP: #1729034.
|
||||
|
||||
-- Steve Langasek <steve.langasek@ubuntu.com> Sat, 21 Oct 2017 20:53:02 -0700
|
||||
|
||||
livecd-rootfs (2.408.21) xenial; urgency=medium
|
||||
|
||||
[ Balint Reczey ]
|
||||
* Use kvm kernel only on amd64.
|
||||
* Make non-x86 minimized images consistent with x86 by not explicitly
|
||||
installing the server task.
|
||||
* Clean up dangling /boot/initrd.img symlink left behind on minimized
|
||||
builds.
|
||||
|
||||
-- Steve Langasek <steve.langasek@ubuntu.com> Fri, 20 Oct 2017 23:10:29 -0700
|
||||
|
||||
livecd-rootfs (2.408.20) xenial; urgency=medium
|
||||
|
||||
* Now that grub-related diversions have been factored out in 2.466
|
||||
instead of having bogus root=stuff arg generated in grub.cfg, it is
|
||||
actually empty. Therefore update the sed command to make the arg in
|
||||
the root= token optional. This should resolve non-booting livecd cpc
|
||||
images.
|
||||
|
||||
-- Dimitri John Ledkov <xnox@ubuntu.com> Tue, 10 Oct 2017 13:49:49 +0100
|
||||
|
||||
livecd-rootfs (2.408.19) xenial; urgency=medium
|
||||
|
||||
[ Nishanth Aravamudan ]
|
||||
* live-build/ubuntu-cpc/hooks/061-open-iscsi.chroot: generate iSCSI
|
||||
Initiator Name at first iscsid run for cloud images to ensure it is
|
||||
unique (LP: #1444992).
|
||||
|
||||
[ Steve Langasek ]
|
||||
* Improve teardown_mountpoint to recursively find all submounts and
|
||||
unmount them, instead of working from a hard-coded list. This makes
|
||||
the code resilient against other submounts being added later, including
|
||||
downstream. LP: #1721279.
|
||||
* Also nuke the sleep / udevadm settle calls in the process, which should
|
||||
never be required and slow down the builds.
|
||||
* Fix a reference to an undefined variable in a script that's set -u.
|
||||
* Use /bin/sh, not /bin/bash, for autopkgtest.
|
||||
* debian/tests/default-bootstraps: minor adjustments to shell syntax,
|
||||
syncing with artful where this originated.
|
||||
|
||||
[ Steve Langasek, Balint Reczey ]
|
||||
* Introduce a new project-independent 'minimized' subproject
|
||||
(LP: #1721261):
|
||||
- omit ubuntu-minimal in favor of using only the minbase package set.
|
||||
- boot directly by partuuid, avoiding the use of an initramfs.
|
||||
- Bump needed live-build version which can build images without initrd
|
||||
- drop man pages and most of the documentation from minimized images
|
||||
(/usr/share/doc/*/copyright and changelog.Debian.gz files are still
|
||||
kept)
|
||||
- Add unminimize script for reverting minimization on a running system
|
||||
- Mention unminimize script in motd
|
||||
- Run autopkgtest for SUBPROJECT=minimized
|
||||
- If we're using SUBPROJECT=minimized, and tzdata is not installed,
|
||||
remove files that have been left behind. This is a workaround for a
|
||||
bug that should be fixed in tzdata.
|
||||
* Factor out grub-related diversions and use them consistently, so we
|
||||
don't end up with wrong os-probe output in our grub.cfg.
|
||||
|
||||
[ Balint Reczey ]
|
||||
* Mount using --make-rslave to ensure safe unmounts for rbind mounts
|
||||
* When SUBPROJECT environment variable is not set assume it to be ""
|
||||
|
||||
-- Steve Langasek <steve.langasek@ubuntu.com> Wed, 04 Oct 2017 18:43:48 +0000
|
||||
|
||||
livecd-rootfs (2.408.18) xenial; urgency=medium
|
||||
|
||||
[ Robert C Jennings ]
|
||||
* Install udev before calls to udevadm
|
||||
|
||||
-- Steve Langasek <steve.langasek@ubuntu.com> Wed, 13 Sep 2017 22:10:11 -0700
|
||||
|
||||
livecd-rootfs (2.408.17) xenial; urgency=medium
|
||||
|
||||
[ Robert C Jennings ]
|
||||
* live-build/ubuntu-cpc/functions: Add a function, teardown_mountpoint,
|
||||
to reverse the work done in setup_mountpoint. Lack of this function
|
||||
has forced users of setup_mountpoint to implement this separately
|
||||
and the implementations have diverged. (LP: #1716992)
|
||||
* live-build/ubuntu-cpc/functions: Remove umount_settle function.
|
||||
The was only used where teardown_mountpoint was lacking.
|
||||
|
||||
-- Steve Langasek <steve.langasek@ubuntu.com> Wed, 13 Sep 2017 12:21:30 -0700
|
||||
|
||||
livecd-rootfs (2.408.16) xenial; urgency=medium
|
||||
|
||||
* live-build/ubuntu-cpc/functions: call apt-get update in the chroot
|
||||
after mounting a blank /var/lib/apt, so that further operations work
|
||||
as expected; otherwise, 'apt-get purge ^grub-.*' fails on s390x because
|
||||
no such packages are known to apt.
|
||||
* live-build/ubuntu-cpc/hooks/030-root-tarball.binary: correct a missing
|
||||
unmount of /var/{lib,cache}/apt on cleanup, detected via autopkgtests.
|
||||
|
||||
-- Steve Langasek <steve.langasek@ubuntu.com> Tue, 12 Sep 2017 13:25:58 -0700
|
||||
|
||||
livecd-rootfs (2.408.15) xenial; urgency=medium
|
||||
|
||||
[ Mathieu Trudel-Lapierre ]
|
||||
* Drop preloading of grub modules that are built into the grub signed
|
||||
image. This is functionally a no-op, changed only to clean up the code.
|
||||
|
||||
[ Steve Langasek ]
|
||||
* live-build/ubuntu-cpc/functions: mount tmpfs on /var/cache/apt and
|
||||
/var/lib/apt, so we don't have to leave empty space in our derivative
|
||||
images for packages that have been downloaded/installed/removed. This
|
||||
normally isn't relevant for the installed system, since the root
|
||||
filesystem will auto-expand in place on the target disk, but lets us
|
||||
ship smaller images.
|
||||
* live-build/ubuntu-cpc/hooks/033-disk-image-uefi.binary: call apt-get
|
||||
update *before* installing packages, not after.
|
||||
|
||||
[ Colin Watson ]
|
||||
* Mount and unmount /dev recursively, to cope with setups where there are
|
||||
interesting bind-mounts under /dev (e.g. loop devices bind-mounted by
|
||||
LXD). LP: #1716465.
|
||||
|
||||
[ Balint Reczey ]
|
||||
* Fix suppression of kpartx error. LP: #1684090.
|
||||
|
||||
-- Steve Langasek <steve.langasek@ubuntu.com> Mon, 11 Sep 2017 14:38:42 -0700
|
||||
|
||||
livecd-rootfs (2.408.14) xenial; urgency=medium
|
||||
|
||||
* live-build/auto/config: Filter libgles1-mesa out of tasks, as it is not a
|
||||
dependency of anything in updates anymore, fixing mate/myth (LP: #1704013)
|
||||
|
||||
-- Adam Conrad <adconrad@ubuntu.com> Wed, 12 Jul 2017 16:28:13 -0600
|
||||
|
||||
livecd-rootfs (2.408.13) xenial; urgency=medium
|
||||
|
||||
[ Balint Reczey ]
|
||||
* Source ubuntu-cpc functions from the right place
|
||||
* Use all config hooks from the proper place, not from /build/
|
||||
* Add basic but configurable autopkgtest (LP: #1690440)
|
||||
* sync before calling kpartx to let writing to loop devices finish
|
||||
* wrap kpartx and trap spurious errors, to work around kpartx
|
||||
unreliability.
|
||||
|
||||
-- Steve Langasek <steve.langasek@ubuntu.com> Tue, 06 Jun 2017 21:52:21 -0700
|
||||
|
||||
livecd-rootfs (2.408.12) xenial; urgency=medium
|
||||
|
||||
* live-build/ubuntu-cpc/hooks/999-extras.binary: Exit on first failure.
|
||||
(LP: #1687752)
|
||||
|
||||
-- Robert C Jennings <robert.jennings@canonical.com> Tue, 09 May 2017 13:57:44 -0700
|
||||
|
||||
livecd-rootfs (2.408.11) xenial; urgency=medium
|
||||
|
||||
* Add `apt-get update` to ubuntu-cpc ppc64el builds so they use the new
|
||||
sources.
|
||||
|
||||
-- Daniel Watkins <daniel.watkins@canonical.com> Fri, 07 Apr 2017 16:12:53 -0400
|
||||
|
||||
livecd-rootfs (2.408.10) xenial; urgency=medium
|
||||
|
||||
[ Adam Conrad ]
|
||||
* Fix security mirror sources.list entries for non-x86 architectures.
|
||||
(LP: #1679252)
|
||||
|
||||
-- Daniel Watkins <daniel.watkins@canonical.com> Mon, 03 Apr 2017 14:05:25 -0400
|
||||
|
||||
livecd-rootfs (2.408.9) xenial; urgency=medium
|
||||
|
||||
[ Daniel Watkins ]
|
||||
* Don't overwrite the default sources.list in cloud images.
|
||||
* Replace sources.list generated using COMPONENTS with the sources.list from
|
||||
an Ubuntu Server installation (i.e. with all components enabled, and all
|
||||
deb-src lines commented). LP: #1513529.
|
||||
|
||||
[ Chris Glass ]
|
||||
* Fix the manifest generation in OVA files so that ovf files don't have
|
||||
double extensions. (LP: #1627931)
|
||||
* Fix the OVF's metadata to include Ubuntu specific identifiers and
|
||||
descriptions instead of the generic Linux ones. (LP: #1656293)
|
||||
|
||||
[ Daniel Watkins ]
|
||||
* Add replace_grub_root_with_label function thereby consolidating multiple
|
||||
uses of the same calls to sed.
|
||||
|
||||
[ Robert C Jennings ]
|
||||
* ubuntu-cpc: Remove redundant copy of grub files. (LP: #1637290)
|
||||
|
||||
-- Robert C Jennings <robert.jennings@canonical.com> Thu, 23 Mar 2017 14:40:59 -0400
|
||||
|
||||
livecd-rootfs (2.408.8) xenial; urgency=medium
|
||||
|
||||
* Set device_tree_address for the new kernel in the RPi bootloader config.txt
|
||||
|
||||
-- Adam Conrad <adconrad@ubuntu.com> Thu, 16 Feb 2017 10:21:51 -0700
|
||||
|
||||
livecd-rootfs (2.408.7) xenial; urgency=medium
|
||||
|
||||
[ Łukasz 'sil2100' Zemczak ]
|
||||
* Add additional hints to ubuntu-touch* i386/amd64 image builds to pull in
|
||||
the gles version of the UITK.
|
||||
* Now that we have have systemd working on ubuntu-touch, remove the
|
||||
systemd-sysv- in add_package config to make sure we can build an image with
|
||||
systemd as the init system.
|
||||
|
||||
[ Adam Conrad ]
|
||||
* Forward-port hardware enablement delta from trusty to xenial, with changes:
|
||||
- Implement hwe stack swapping as task filters instead of install hints
|
||||
- Revert ubuntu-desktop and ubuntu-gnome to use tasks again for the above
|
||||
|
||||
-- Adam Conrad <adconrad@ubuntu.com> Wed, 08 Feb 2017 11:50:52 -0700
|
||||
|
||||
livecd-rootfs (2.408.5) xenial; urgency=medium
|
||||
|
||||
* Temporarily change the 60-install-click.chroot script to pull in clicks for
|
||||
arm64 builds from a different location for ubuntu-touch.
|
||||
* Add the new dhcpd user to touch hooks introduced by latest archive changes.
|
||||
* Remove the ubuntu-pd project logic.
|
||||
* Synced ubuntu-touch-custom changes from the vivid branch to xenial. This
|
||||
also adds a new symlink called ubuntu-touch-custom to the ubuntu-touch hooks
|
||||
dir.
|
||||
* Stop using the overlay PPA for touch livecd-rootfs changes (LP: #1628085).
|
||||
|
||||
-- Łukasz 'sil2100' Zemczak <lukasz.zemczak@ubuntu.com> Tue, 27 Sep 2016 13:27:28 +0200
|
||||
|
||||
livecd-rootfs (2.408.4) xenial; urgency=medium
|
||||
|
||||
* live-build/ubuntu-cpc/hooks/042-vagrant.binary: fix unmount handling
|
||||
so that the teardown is done properly /before/ we try to make an
|
||||
image from our filesystem, since otherwise /etc/resolv.conf is broken.
|
||||
LP: #1621393.
|
||||
|
||||
-- Steve Langasek <steve.langasek@ubuntu.com> Fri, 09 Sep 2016 17:04:54 -0700
|
||||
|
||||
livecd-rootfs (2.408.3) xenial-proposed; urgency=medium
|
||||
|
||||
[ Louis Zuckerman ]
|
||||
* Fixes for vagrant box builder in ubuntu-cpc LP: #1565985
|
||||
- Install virtualbox-guest-utils
|
||||
- Don't disable default synced folder
|
||||
- Don't set vm name
|
||||
- Add cloud-init config to manage /etc/hosts LP: #1561250
|
||||
|
||||
-- Brian Murray <brian@ubuntu.com> Tue, 30 Aug 2016 13:17:55 -0700
|
||||
|
||||
livecd-rootfs (2.408.2) xenial; urgency=medium
|
||||
|
||||
* Switch ubuntu-gnome from using tasks to metapackages (LP: #1602035)
|
||||
|
||||
-- Adam Conrad <adconrad@ubuntu.com> Wed, 20 Jul 2016 01:46:41 -0600
|
||||
|
||||
livecd-rootfs (2.408.1) xenial; urgency=medium
|
||||
|
||||
* Rename old ubuntu-core tarballs to ubuntu-base, for consistency with
|
||||
the changes already made to yakkety and the ubuntu-cdimage project.
|
||||
LP: #1579950.
|
||||
|
||||
-- Steve Langasek <steve.langasek@ubuntu.com> Mon, 09 May 2016 17:00:19 -0700
|
||||
|
||||
livecd-rootfs (2.408) xenial; urgency=medium
|
||||
|
||||
* drop linux-firmware-raspi2, it does not actually contain driver
|
||||
|
33
debian/control
vendored
33
debian/control
vendored
@ -4,11 +4,40 @@ Priority: optional
|
||||
Build-Depends: debhelper (>= 7)
|
||||
Maintainer: Ubuntu Developers <ubuntu-devel-discuss@lists.ubuntu.com>
|
||||
Standards-Version: 3.9.6
|
||||
Vcs-Bzr: http://bazaar.launchpad.net/~ubuntu-core-dev/livecd-rootfs/trunk
|
||||
Vcs-Git: https://git.launchpad.net/livecd-rootfs -b ubuntu/xenial
|
||||
|
||||
Package: livecd-rootfs
|
||||
Architecture: any
|
||||
Depends: ${misc:Depends}, debootstrap, rsync, python-minimal | python, procps, squashfs-tools (>= 1:3.3-1), grep-dctrl, lsb-release, lzma, e2fsprogs, germinate (>= 1.25.1), apt-utils, gnupg, live-build (>= 3.0~a57-1ubuntu12~), android-tools-fsutils [armhf], python3-software-properties
|
||||
Depends: ${misc:Depends},
|
||||
android-tools-fsutils [armhf],
|
||||
apt-utils,
|
||||
attr,
|
||||
debootstrap,
|
||||
distro-info,
|
||||
dosfstools,
|
||||
e2fsprogs,
|
||||
gdisk,
|
||||
genisoimage,
|
||||
germinate (>= 1.25.1),
|
||||
gnupg,
|
||||
grep-dctrl,
|
||||
kpartx,
|
||||
live-build (>= 3.0~a57-1ubuntu25.5~),
|
||||
lsb-release,
|
||||
lzma,
|
||||
parted,
|
||||
procps,
|
||||
python-minimal | python,
|
||||
python3-software-properties,
|
||||
python3-yaml,
|
||||
qemu-utils,
|
||||
rsync,
|
||||
snapd [!powerpc],
|
||||
squashfs-tools (>= 1:3.3-1),
|
||||
ubuntu-image [!powerpc],
|
||||
vmdk-stream-converter [amd64 i386],
|
||||
xz-utils,
|
||||
zerofree
|
||||
Suggests: partimage
|
||||
Breaks: ubuntu-defaults-builder (<< 0.32)
|
||||
Description: construction script for the livecd rootfs
|
||||
|
1
debian/dirs
vendored
Normal file
1
debian/dirs
vendored
Normal file
@ -0,0 +1 @@
|
||||
usr/share/livecd-rootfs/live-build/ubuntu-cpc/includes.chroot/etc/network/interfaces.d/
|
2
debian/install
vendored
2
debian/install
vendored
@ -1,2 +1,4 @@
|
||||
live-build usr/share/livecd-rootfs
|
||||
get-ppa-fingerprint usr/share/livecd-rootfs
|
||||
magic-proxy usr/share/livecd-rootfs
|
||||
lp-in-release usr/share/livecd-rootfs
|
||||
|
7
debian/tests/control
vendored
Normal file
7
debian/tests/control
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
Tests: default-bootstraps
|
||||
Depends: @, lsb-release
|
||||
Restrictions: needs-root isolation-machine
|
||||
|
||||
Tests: minimized
|
||||
Depends: @, lsb-release
|
||||
Restrictions: needs-root isolation-machine
|
97
debian/tests/default-bootstraps
vendored
Executable file
97
debian/tests/default-bootstraps
vendored
Executable file
@ -0,0 +1,97 @@
|
||||
#!/bin/sh
|
||||
# autopkgtest check: Build default rootfs for all supported project:subproject pairs
|
||||
# (C) 2017 Canonical Ltd.
|
||||
# Author: Balint Reczey <balint.reczey@canonical.com>
|
||||
|
||||
set -e
|
||||
|
||||
if [ -z "$SUITE" ]; then
|
||||
SUITE=$(lsb_release -c -s)
|
||||
fi
|
||||
|
||||
# Known project:subproject:template combinations.
|
||||
# Listed subprojects can be combined with other projects as well,
|
||||
# but this list gives reasonable coverage.
|
||||
ALL_TRIPLETS="
|
||||
base::
|
||||
edubuntu::
|
||||
edubuntu-dvd::
|
||||
kubuntu::
|
||||
kubuntu-active::
|
||||
kubuntu-dvd::
|
||||
kubuntu-plasma5::
|
||||
lubuntu::
|
||||
lubuntu-next::
|
||||
mythbuntu::
|
||||
ubuntu::
|
||||
ubuntu-base::
|
||||
ubuntu-base:buildd:
|
||||
ubuntu-budgie::
|
||||
ubuntu-budgie-desktop::
|
||||
ubuntu-budgie-live::
|
||||
ubuntu-core:system-image:ubuntu-core
|
||||
ubuntu-cpc::ubuntu-cpc
|
||||
ubuntu-cpc:minimized:ubuntu-cpc
|
||||
ubuntu-desktop-next:system-image:ubuntu-desktop-next
|
||||
ubuntu-desktop-next::ubuntu-desktop-next
|
||||
ubuntu-dvd::
|
||||
ubuntu-gnome::
|
||||
ubuntukylin::
|
||||
ubuntu-mate::
|
||||
ubuntu-mate-core::
|
||||
ubuntu-mate-desktop::
|
||||
ubuntu-mate-live::
|
||||
ubuntu-netbook::
|
||||
ubuntu-server::
|
||||
ubuntu-server:ubuntu-rtm:
|
||||
ubuntu-server:ubuntu-rtm/foo:
|
||||
ubuntu-server:wubi:
|
||||
ubuntu-touch-custom::ubuntu-touch-custom
|
||||
ubuntu-touch::ubuntu-touch
|
||||
xubuntu::"
|
||||
|
||||
if [ -z "$SELECTED_TRIPLETS" ]; then
|
||||
SELECTED_TRIPLETS="
|
||||
ubuntu-base::
|
||||
ubuntu-cpc::ubuntu-cpc
|
||||
"
|
||||
fi
|
||||
|
||||
live_build_rootfs() {
|
||||
PROJECT=${1%%:*}
|
||||
local SUBPROJECT_TMP=${1%:*}
|
||||
SUBPROJECT=${SUBPROJECT_TMP#*:}
|
||||
TEMPLATE=${1##*:}
|
||||
ARCH=$(dpkg --print-architecture)
|
||||
echo "Building rootfs for project: '$PROJECT' subproject: '$SUBPROJECT' template: '$TEMPLATE' in $PWD"
|
||||
cp -a /usr/share/livecd-rootfs/live-build/auto .
|
||||
if [ -n "$TEMPLATE" ]; then
|
||||
cp -a /usr/share/livecd-rootfs/live-build/$TEMPLATE .
|
||||
fi
|
||||
env PROJECT=$PROJECT \
|
||||
SUBPROJECT=$SUBPROJECT \
|
||||
SUITE=$SUITE \
|
||||
ARCH=$ARCH \
|
||||
lb config
|
||||
mkdir chroot
|
||||
# this part needs root rights, but right now the whole script ran as root by autopkgtest
|
||||
env PROJECT=$PROJECT \
|
||||
SUBPROJECT=$SUBPROJECT \
|
||||
ARCH=$ARCH \
|
||||
lb build
|
||||
echo "Build results for project: '$PROJECT' subproject: '$SUBPROJECT' template: '$TEMPLATE' in $PWD"
|
||||
du -sh *
|
||||
echo ""
|
||||
}
|
||||
|
||||
WORKDIR=$(mktemp -d)
|
||||
trap "RET=\$?; rm -rf $WORKDIR; exit \$RET" 0 INT QUIT ABRT PIPE TERM
|
||||
cd $WORKDIR
|
||||
|
||||
for i in $SELECTED_TRIPLETS; do
|
||||
mkdir $i
|
||||
(cd $i && live_build_rootfs $i)
|
||||
# clean up after build to avoid filling the disk, needs root rights
|
||||
rm -rf $i
|
||||
done
|
||||
|
3
debian/tests/minimized
vendored
Normal file
3
debian/tests/minimized
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
#!/bin/sh
|
||||
|
||||
env SELECTED_TRIPLETS=ubuntu-cpc:minimized:ubuntu-cpc debian/tests/default-bootstraps
|
@ -10,6 +10,52 @@ Arguments "${@}"
|
||||
Read_conffiles config/all config/common config/bootstrap config/chroot config/binary config/source
|
||||
Set_defaults
|
||||
|
||||
if [ -z "${PROJECT:-}" ]; then
|
||||
echo "PROJECT environment variable has to be set" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
. config/functions
|
||||
|
||||
if [ -n "$REPO_SNAPSHOT_STAMP" ]; then
|
||||
if [ "`whoami`" != "root" ]; then
|
||||
echo "Magic repo snapshots only work when running as root." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
apt-get -qyy install iptables
|
||||
|
||||
# Redirect all outgoing traffic to port 80 to proxy instead.
|
||||
iptables -t nat -A OUTPUT -p tcp --dport 80 -m owner ! --uid-owner daemon \
|
||||
-j REDIRECT --to 8080
|
||||
|
||||
# Run proxy as "daemon" to avoid infinite loop.
|
||||
/usr/share/livecd-rootfs/magic-proxy \
|
||||
--address="127.0.0.1" \
|
||||
--port=8080 \
|
||||
--run-as=daemon \
|
||||
--cutoff-time="$REPO_SNAPSHOT_STAMP" \
|
||||
--log-file=/build/livecd.magic-proxy.log \
|
||||
--pid-file=config/magic-proxy.pid \
|
||||
--background \
|
||||
--setsid
|
||||
fi
|
||||
|
||||
# Link output files somewhere launchpad-buildd will be able to find them.
|
||||
PREFIX="livecd.$PROJECT${SUBARCH:+-$SUBARCH}"
|
||||
|
||||
if [ "${IMAGEFORMAT:-}" = "ubuntu-image" ]; then
|
||||
# Use ubuntu-image instead of live-build
|
||||
|
||||
CHANNEL="${CHANNEL:-edge}"
|
||||
env SNAPPY_STORE_NO_CDN=1 \
|
||||
ubuntu-image -c "$CHANNEL" $UBUNTU_IMAGE_ARGS \
|
||||
-o "$PREFIX".img "$PREFIX".model-assertion
|
||||
xz -0 -T4 "$PREFIX".img
|
||||
mv seed.manifest "$PREFIX".manifest
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Setup cleanup function
|
||||
Setup_cleanup
|
||||
|
||||
@ -31,6 +77,128 @@ Expire-Date: 0
|
||||
|
||||
lb bootstrap "$@"
|
||||
|
||||
case $PROJECT in
|
||||
ubuntu-cpc)
|
||||
if [ "${SUBPROJECT:-}" = minimized ] ; then
|
||||
# Set locale to C.UTF-8 by default for minimized
|
||||
# images. We can do this for all ubuntu-cpc images
|
||||
# after further testing, however minimized images
|
||||
# lack locale-gen so a change is more urgent.
|
||||
echo "LANG=C.UTF-8" > chroot/etc/default/locale
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
|
||||
if [ "${SUBPROJECT:-}" = minimized ] \
|
||||
&& ! Chroot chroot dpkg -l tzdata 2>&1 |grep -q ^ii; then
|
||||
# workaround for tzdata purge not removing these files
|
||||
rm -f chroot/etc/localtime chroot/etc/timezone
|
||||
fi
|
||||
|
||||
if [ "${SUBPROJECT:-}" = minimized ]; then
|
||||
# set up dpkg filters to skip installing docs on minimized system
|
||||
mkdir -p chroot/etc/dpkg/dpkg.cfg.d
|
||||
cat > chroot/etc/dpkg/dpkg.cfg.d/excludes <<EOF
|
||||
# Drop all man pages
|
||||
path-exclude=/usr/share/man/*
|
||||
|
||||
# Drop all documentation ...
|
||||
path-exclude=/usr/share/doc/*
|
||||
|
||||
# ... except copyright files ...
|
||||
path-include=/usr/share/doc/*/copyright
|
||||
|
||||
# ... and Debian changelogs
|
||||
path-include=/usr/share/doc/*/changelog.Debian.*
|
||||
EOF
|
||||
|
||||
# Remove docs installed by bootstrap
|
||||
Chroot chroot dpkg-query -f '${binary:Package}\n' -W | Chroot chroot xargs apt-get install --reinstall
|
||||
|
||||
# Add unminimizer script which restores default image behavior
|
||||
mkdir -p chroot/usr/local/sbin
|
||||
cat > chroot/usr/local/sbin/unminimize <<'EOF'
|
||||
#!/bin/sh
|
||||
|
||||
set -e
|
||||
|
||||
echo "This system has been minimized by removing packages and content that are"
|
||||
echo "not required on a system that users do not log into."
|
||||
echo ""
|
||||
echo "This script restores content and packages that are found on a default"
|
||||
echo "Ubuntu server system in order to make this system more suitable for"
|
||||
echo "interactive use."
|
||||
echo ""
|
||||
echo "Reinstallation of packages may fail due to changes to the system"
|
||||
echo "configuration, the presence of third-party packages, or for other"
|
||||
echo "reasons."
|
||||
echo ""
|
||||
echo "This operation may take some time."
|
||||
echo ""
|
||||
read -p "Would you like to continue? [y/N]" REPLY
|
||||
echo # (optional) move to a new line
|
||||
if [ "$REPLY" != "y" ] && [ "$REPLY" != "Y" ]
|
||||
then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -f /etc/dpkg/dpkg.cfg.d/excludes ] || [ -f /etc/dpkg/dpkg.cfg.d/excludes.dpkg-tmp ]; then
|
||||
echo "Re-enabling installation of all documentation in dpkg..."
|
||||
if [ -f /etc/dpkg/dpkg.cfg.d/excludes ]; then
|
||||
mv /etc/dpkg/dpkg.cfg.d/excludes /etc/dpkg/dpkg.cfg.d/excludes.dpkg-tmp
|
||||
fi
|
||||
echo "Updating package list and upgrading packages..."
|
||||
apt-get update
|
||||
# apt-get upgrade asks for confirmation before upgrading packages to let the user stop here
|
||||
apt-get upgrade
|
||||
echo "Restoring system documentation..."
|
||||
echo "Reinstalling packages with files in /usr/share/man/ ..."
|
||||
# Reinstallation takes place in two steps because a single dpkg --verified
|
||||
# command generates very long parameter list for "xargs dpkg -S" and may go
|
||||
# over ARG_MAX. Since many packages have man pages the second download
|
||||
# handles a much smaller amount of packages.
|
||||
dpkg -S /usr/share/man/ |sed 's|, |\n|g;s|: [^:]*$||' | DEBIAN_FRONTEND=noninteractive xargs apt-get install --reinstall -y
|
||||
echo "Reinstalling packages with system documentation in /usr/share/doc/ .."
|
||||
# This step processes the packages which still have missing documentation
|
||||
dpkg --verify --verify-format rpm | awk '/..5...... \/usr\/share\/doc/ {print $2}' | sed 's|/[^/]*$||' | sort |uniq \
|
||||
| xargs dpkg -S | sed 's|, |\n|g;s|: [^:]*$||' | uniq | DEBIAN_FRONTEND=noninteractive xargs apt-get install --reinstall -y
|
||||
if dpkg --verify --verify-format rpm | awk '/..5...... \/usr\/share\/doc/ {exit 1}'; then
|
||||
echo "Documentation has been restored successfully."
|
||||
rm /etc/dpkg/dpkg.cfg.d/excludes.dpkg-tmp
|
||||
else
|
||||
echo "There are still files missing from /usr/share/doc/:"
|
||||
dpkg --verify --verify-format rpm | awk '/..5...... \/usr\/share\/doc/ {print " " $2}'
|
||||
echo "You may want to try running this script again or you can remove"
|
||||
echo "/etc/dpkg/dpkg.cfg.d/excludes.dpkg-tmp and restore the files manually."
|
||||
fi
|
||||
fi
|
||||
|
||||
if ! dpkg-query --show --showformat='${db:Status-Status}\n' ubuntu-minimal 2> /dev/null | grep -q '^installed$'; then
|
||||
echo "Installing ubuntu-minimal package to provide the familiar Ubuntu minimal system..."
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -y ubuntu-minimal ubuntu-standard
|
||||
fi
|
||||
|
||||
# unminimization succeeded, there is no need to mention it in motd
|
||||
rm -f /etc/update-motd.d/60-unminimize
|
||||
|
||||
EOF
|
||||
chmod +x chroot/usr/local/sbin/unminimize
|
||||
|
||||
# inform users about the unminimize script
|
||||
cat > "chroot/etc/update-motd.d/60-unminimize" << EOF
|
||||
#!/bin/sh
|
||||
#
|
||||
# This file is not managed by a package. If you no longer want to
|
||||
# see this message you can safely remove the file.
|
||||
echo "This system has been minimized by removing packages and content that are"
|
||||
echo "not required on a system that users do not log into."
|
||||
echo ""
|
||||
echo "To restore this content, you can run the 'unminimize' command."
|
||||
EOF
|
||||
|
||||
chmod +x chroot/etc/update-motd.d/60-unminimize
|
||||
fi
|
||||
|
||||
Chroot chroot "dpkg-divert --quiet --add \
|
||||
--divert /usr/sbin/update-initramfs.REAL --rename \
|
||||
/usr/sbin/update-initramfs"
|
||||
@ -52,122 +220,74 @@ EOF
|
||||
|
||||
lb chroot "$@"
|
||||
|
||||
if [ "${SUBPROJECT:-}" = minimized ]; then
|
||||
# and if initramfs-tools was configured before our kernel,
|
||||
# /etc/kernel/postinst.d/initramfs-tools will have created
|
||||
# an initramfs despite the generic dpkg-divert; so remove it
|
||||
# here.
|
||||
rm -f chroot/boot/initrd.img-*
|
||||
fi
|
||||
|
||||
# remove crufty files that shouldn't be left in an image
|
||||
rm -f chroot/var/cache/debconf/*-old chroot/var/lib/dpkg/*-old
|
||||
Chroot chroot apt clean
|
||||
|
||||
if [ -f config/oem-config-preinstalled ]; then
|
||||
|
||||
# This is cargo-culted almost verbatim (with some syntax changes for
|
||||
# preinstalled being slightly different in what it doesn't ask) from
|
||||
# debian-installer's apt-setup:
|
||||
|
||||
codename=$LB_DISTRIBUTION
|
||||
file="chroot/etc/apt/sources.list"
|
||||
dists="main"
|
||||
alldists="main"
|
||||
if echo "$LB_PARENT_ARCHIVE_AREAS" | grep -q restricted; then
|
||||
dists="$dists restricted"
|
||||
alldists="$alldists restricted"
|
||||
fi
|
||||
if echo "$LB_PARENT_ARCHIVE_AREAS" | grep -q universe; then
|
||||
UNIVERSE=true
|
||||
else
|
||||
UNIVERSE=false
|
||||
fi
|
||||
if echo "$LB_PARENT_ARCHIVE_AREAS" | grep -q multiverse; then
|
||||
MULTIVERSE=true
|
||||
else
|
||||
MULTIVERSE=false
|
||||
fi
|
||||
|
||||
cat > $file <<EOF
|
||||
cat > chroot/etc/apt/sources.list << EOF
|
||||
# See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to
|
||||
# newer versions of the distribution.
|
||||
|
||||
deb $LB_PARENT_MIRROR_BINARY $codename $dists
|
||||
deb-src $LB_PARENT_MIRROR_BINARY $codename $dists
|
||||
deb $LB_PARENT_MIRROR_BINARY $LB_DISTRIBUTION main restricted
|
||||
# deb-src $LB_PARENT_MIRROR_BINARY $LB_DISTRIBUTION main restricted
|
||||
|
||||
## Major bug fix updates produced after the final release of the
|
||||
## distribution.
|
||||
deb $LB_PARENT_MIRROR_BINARY $codename-updates $dists
|
||||
deb-src $LB_PARENT_MIRROR_BINARY $codename-updates $dists
|
||||
EOF
|
||||
deb $LB_PARENT_MIRROR_BINARY $LB_DISTRIBUTION-updates main restricted
|
||||
# deb-src $LB_PARENT_MIRROR_BINARY $LB_DISTRIBUTION-updates main restricted
|
||||
|
||||
# Even if universe isn't enabled, we write example lines for it.
|
||||
echo >> $file
|
||||
if [ "$UNIVERSE" = true ]; then
|
||||
alldists="$alldists universe"
|
||||
COMMENT=
|
||||
else
|
||||
cat >> $file <<EOF
|
||||
## Uncomment the following two lines to add software from the 'universe'
|
||||
## repository.
|
||||
EOF
|
||||
COMMENT='# '
|
||||
fi
|
||||
cat >> $file <<EOF
|
||||
## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu
|
||||
## team. Also, please note that software in universe WILL NOT receive any
|
||||
## review or updates from the Ubuntu security team.
|
||||
${COMMENT}deb $LB_PARENT_MIRROR_BINARY $codename universe
|
||||
${COMMENT}deb-src $LB_PARENT_MIRROR_BINARY $codename universe
|
||||
${COMMENT}deb $LB_PARENT_MIRROR_BINARY $codename-updates universe
|
||||
${COMMENT}deb-src $LB_PARENT_MIRROR_BINARY $codename-updates universe
|
||||
EOF
|
||||
deb $LB_PARENT_MIRROR_BINARY $LB_DISTRIBUTION universe
|
||||
# deb-src $LB_PARENT_MIRROR_BINARY $LB_DISTRIBUTION universe
|
||||
deb $LB_PARENT_MIRROR_BINARY $LB_DISTRIBUTION-updates universe
|
||||
# deb-src $LB_PARENT_MIRROR_BINARY $LB_DISTRIBUTION-updates universe
|
||||
|
||||
# Multiverse is different, don't write anything unless enabled.
|
||||
if [ "$MULTIVERSE" = true ]; then
|
||||
alldists="$alldists multiverse"
|
||||
cat >> $file <<EOF
|
||||
|
||||
## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu
|
||||
## team, and may not be under a free licence. Please satisfy yourself as to
|
||||
## your rights to use the software. Also, please note that software in
|
||||
## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu
|
||||
## team, and may not be under a free licence. Please satisfy yourself as to
|
||||
## your rights to use the software. Also, please note that software in
|
||||
## multiverse WILL NOT receive any review or updates from the Ubuntu
|
||||
## security team.
|
||||
deb $LB_PARENT_MIRROR_BINARY $codename multiverse
|
||||
deb-src $LB_PARENT_MIRROR_BINARY $codename multiverse
|
||||
deb $LB_PARENT_MIRROR_BINARY $codename-updates multiverse
|
||||
deb-src $LB_PARENT_MIRROR_BINARY $codename-updates multiverse
|
||||
EOF
|
||||
fi
|
||||
|
||||
cat >> $file <<EOF
|
||||
deb $LB_PARENT_MIRROR_BINARY $LB_DISTRIBUTION multiverse
|
||||
# deb-src $LB_PARENT_MIRROR_BINARY $LB_DISTRIBUTION multiverse
|
||||
deb $LB_PARENT_MIRROR_BINARY $LB_DISTRIBUTION-updates multiverse
|
||||
# deb-src $LB_PARENT_MIRROR_BINARY $LB_DISTRIBUTION-updates multiverse
|
||||
|
||||
## N.B. software from this repository may not have been tested as
|
||||
## extensively as that contained in the main release, although it includes
|
||||
## newer versions of some applications which may provide useful features.
|
||||
## Also, please note that software in backports WILL NOT receive any review
|
||||
## or updates from the Ubuntu security team.
|
||||
# deb $LB_PARENT_MIRROR_BINARY $codename-backports $alldists
|
||||
# deb-src $LB_PARENT_MIRROR_BINARY $codename-backports $alldists
|
||||
EOF
|
||||
deb $LB_PARENT_MIRROR_BINARY $LB_DISTRIBUTION-backports main restricted universe multiverse
|
||||
# deb-src $LB_PARENT_MIRROR_BINARY $LB_DISTRIBUTION-backports main restricted universe multiverse
|
||||
|
||||
cat >> $file <<EOF
|
||||
## Uncomment the following two lines to add software from Canonical's
|
||||
## 'partner' repository.
|
||||
## This software is not part of Ubuntu, but is offered by Canonical and the
|
||||
## respective vendors as a service to Ubuntu users.
|
||||
# deb http://archive.canonical.com/ubuntu $LB_DISTRIBUTION partner
|
||||
# deb-src http://archive.canonical.com/ubuntu $LB_DISTRIBUTION partner
|
||||
|
||||
deb $LB_PARENT_MIRROR_BINARY $codename-security $dists
|
||||
deb-src $LB_PARENT_MIRROR_BINARY $codename-security $dists
|
||||
EOF
|
||||
|
||||
# Security sources for Ubuntu universe; not used much, but e.g. unsupported
|
||||
# binary packages from a supported source package will end up here.
|
||||
if [ "$UNIVERSE" = true ]; then
|
||||
COMMENT=
|
||||
else
|
||||
COMMENT='# '
|
||||
fi
|
||||
cat >> $file <<EOF
|
||||
${COMMENT}deb $LB_PARENT_MIRROR_BINARY $codename-security universe
|
||||
${COMMENT}deb-src $LB_PARENT_MIRROR_BINARY $codename-security universe
|
||||
EOF
|
||||
|
||||
# Security sources for Ubuntu multiverse, with the same caveats as for
|
||||
# universe.
|
||||
if [ "$MULTIVERSE" = true ]; then
|
||||
COMMENT=
|
||||
else
|
||||
COMMENT='# '
|
||||
fi
|
||||
cat >> $file <<EOF
|
||||
${COMMENT}deb $LB_PARENT_MIRROR_BINARY $codename-security multiverse
|
||||
${COMMENT}deb-src $LB_PARENT_MIRROR_BINARY $codename-security multiverse
|
||||
deb $LB_PARENT_MIRROR_BINARY_SECURITY $LB_DISTRIBUTION-security main restricted
|
||||
# deb-src $LB_PARENT_MIRROR_BINARY_SECURITY $LB_DISTRIBUTION-security main restricted
|
||||
deb $LB_PARENT_MIRROR_BINARY_SECURITY $LB_DISTRIBUTION-security universe
|
||||
# deb-src $LB_PARENT_MIRROR_BINARY_SECURITY $LB_DISTRIBUTION-security universe
|
||||
deb $LB_PARENT_MIRROR_BINARY_SECURITY $LB_DISTRIBUTION-security multiverse
|
||||
# deb-src $LB_PARENT_MIRROR_BINARY_SECURITY $LB_DISTRIBUTION-security multiverse
|
||||
EOF
|
||||
|
||||
fi
|
||||
@ -238,7 +358,7 @@ deb file:/var/lib/preinstalled-pool/ $LB_DISTRIBUTION $LB_PARENT_ARCHIVE_AREAS
|
||||
> chroot/etc/apt/sources.list
|
||||
rm chroot/etc/apt/sources.list.preinstall chroot/etc/apt/sources.list.orig
|
||||
fi
|
||||
if [ "$PROJECT" = "ubuntu-touch" ] || [ "$PROJECT" = "ubuntu-pd" ]; then
|
||||
if [ "$PROJECT" = "ubuntu-touch" ] || [ "$PROJECT" = "ubuntu-touch-custom" ]; then
|
||||
if [ "$ARCH" = "armhf" ]; then
|
||||
INFO_DESC="$(lsb_release -d -s)"
|
||||
echo "$INFO_DESC - $ARCH ($BUILDSTAMP)" >chroot/etc/media-info
|
||||
@ -247,20 +367,15 @@ deb file:/var/lib/preinstalled-pool/ $LB_DISTRIBUTION $LB_PARENT_ARCHIVE_AREAS
|
||||
fi
|
||||
fi
|
||||
if [ "$PROJECT" = "ubuntu-cpc" ]; then
|
||||
if [ "${SUBPROJECT:-}" = minimized ]; then
|
||||
BUILD_NAME=minimal
|
||||
else
|
||||
BUILD_NAME=server
|
||||
fi
|
||||
cat > chroot/etc/cloud/build.info << EOF
|
||||
build_name: server
|
||||
build_name: $BUILD_NAME
|
||||
serial: $BUILDSTAMP
|
||||
EOF
|
||||
cat > chroot/etc/apt/sources.list << EOF
|
||||
deb ${LB_PARENT_MIRROR_BINARY} ${LB_DISTRIBUTION} main restricted universe multiverse
|
||||
deb ${LB_PARENT_MIRROR_BINARY} ${LB_DISTRIBUTION}-updates main restricted universe multiverse
|
||||
deb ${LB_PARENT_MIRROR_BINARY_SECURITY} ${LB_DISTRIBUTION}-security main restricted universe multiverse
|
||||
EOF
|
||||
lb chroot_hosts install
|
||||
lb chroot_resolv install
|
||||
Chroot chroot "apt-get update"
|
||||
lb chroot_resolv remove
|
||||
lb chroot_hosts remove
|
||||
fi
|
||||
|
||||
echo "===== Checking size of /usr/share/doc ====="
|
||||
@ -278,12 +393,15 @@ EOF
|
||||
if [ -e binary.success ]; then
|
||||
rm -f binary.success
|
||||
else
|
||||
# Dump the magic-proxy log to stdout on failure to aid debugging
|
||||
if [ -f /build/livecd.magic-proxy.log ] ; then
|
||||
echo "================= Magic proxy log (start) ================="
|
||||
cat /build/livecd.magic-proxy.log
|
||||
echo "================== Magic proxy log (end) =================="
|
||||
fi
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Link output files somewhere BuildLiveCD will be able to find them.
|
||||
PREFIX="livecd.$PROJECT${SUBARCH:+-$SUBARCH}"
|
||||
|
||||
case $LB_INITRAMFS in
|
||||
casper)
|
||||
INITFS="casper"
|
||||
@ -319,7 +437,7 @@ elif [ -e binary-tar.tar.gz ]; then
|
||||
cp -a binary-tar.tar.gz "$PREFIX.rootfs.tar.gz"
|
||||
fi
|
||||
|
||||
if [ "$PROJECT:$SUBPROJECT" = "ubuntu-core:system-image" ]; then
|
||||
if [ "$PROJECT:${SUBPROJECT:-}" = "ubuntu-core:system-image" ]; then
|
||||
if [ -e "binary/$INITFS/filesystem.dir" ]; then
|
||||
rootfs="binary/$INITFS/filesystem.dir"
|
||||
|
||||
@ -345,7 +463,7 @@ EOF
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$PROJECT" = "ubuntu-touch" ] || [ "$PROJECT" = "ubuntu-pd" ]; then
|
||||
if [ "$PROJECT" = "ubuntu-touch" ] || [ "$PROJECT" = "ubuntu-touch-custom" ]; then
|
||||
(cd "binary/$INITFS/custom.dir/" && tar -c *) | \
|
||||
gzip -9 --rsyncable > "$PREFIX.custom.tar.gz"
|
||||
chmod 644 "$PREFIX.custom.tar.gz"
|
||||
@ -353,6 +471,7 @@ fi
|
||||
|
||||
# '--initramfs none' produces different manifest names.
|
||||
if [ -e "binary/$INITFS/filesystem.packages" ]; then
|
||||
./config/snap-seed-parse "chroot/" "binary/${INITFS}/filesystem.packages"
|
||||
ln "binary/$INITFS/filesystem.packages" "$PREFIX.manifest"
|
||||
chmod 644 "$PREFIX.manifest"
|
||||
fi
|
||||
@ -367,7 +486,7 @@ fi
|
||||
# ubuntu-core and ubuntu-desktop-next splits kernel stuff into a "device" tarball so
|
||||
# at this point we reset it to "none" as all the work to extract it was done already
|
||||
# in a binary hook
|
||||
case $PROJECT:$SUBPROJECT in
|
||||
case $PROJECT:${SUBPROJECT:-} in
|
||||
ubuntu-core:system-image|ubuntu-desktop-next:system-image)
|
||||
|
||||
# create device tarball (for snappy only atm)
|
||||
@ -602,6 +721,7 @@ for FLAVOUR in $LB_LINUX_FLAVOURS; do
|
||||
if [ -z "$LB_LINUX_FLAVOURS" ] || [ "$LB_LINUX_FLAVOURS" = "none" ]; then
|
||||
continue
|
||||
fi
|
||||
FLAVOUR=${FLAVOUR%%-hwe-*}
|
||||
if [ "$FLAVOUR" = "virtual" ]; then
|
||||
# The virtual kernel is named generic in /boot
|
||||
FLAVOUR="generic"
|
||||
@ -635,17 +755,18 @@ done
|
||||
NUMFLAVOURS="$(set -- $LB_LINUX_FLAVOURS; echo $#)"
|
||||
if [ "$NUMFLAVOURS" = 1 ] && [ "$LB_LINUX_FLAVOURS" != "none" ]; then
|
||||
# only one kernel flavour
|
||||
FLAVOUR=${LB_LINUX_FLAVOURS%%-hwe-*}
|
||||
if [ -e "binary/$INITFS/vmlinuz" ]; then
|
||||
ln "binary/$INITFS/vmlinuz" "$PREFIX.kernel"
|
||||
chmod 644 "$PREFIX.kernel"
|
||||
else
|
||||
ln -sf "$PREFIX.kernel-$LB_LINUX_FLAVOURS" "$PREFIX.kernel"
|
||||
ln -sf "$PREFIX.kernel-$FLAVOUR" "$PREFIX.kernel"
|
||||
fi
|
||||
if [ -e "binary/$INITFS/initrd.lz" ]; then
|
||||
ln "binary/$INITFS/initrd.lz" "$PREFIX.initrd"
|
||||
chmod 644 "$PREFIX.initrd"
|
||||
else
|
||||
ln -sf "$PREFIX.initrd-$LB_LINUX_FLAVOURS" "$PREFIX.initrd"
|
||||
ln -sf "$PREFIX.initrd-$FLAVOUR" "$PREFIX.initrd"
|
||||
fi
|
||||
fi
|
||||
|
||||
@ -689,7 +810,7 @@ if [ "$SUBARCH" = "ac100" ] || [ "$SUBARCH" = "nexus7" ]; then
|
||||
|
||||
fi
|
||||
|
||||
if [ "$PROJECT" = "ubuntu-touch" ] || [ "$PROJECT" = "ubuntu-pd" ]; then
|
||||
if [ "$PROJECT" = "ubuntu-touch" ] || [ "$PROJECT" = "ubuntu-touch-custom" ]; then
|
||||
sourceslist="chroot/etc/apt/sources.list"
|
||||
|
||||
lb chroot_proc install "$@"
|
||||
@ -768,3 +889,17 @@ case $PROJECT in
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
|
||||
if [ -f "config/magic-proxy.pid" ]; then
|
||||
kill -TERM $(cat config/magic-proxy.pid)
|
||||
rm -f config/magic-proxy.pid
|
||||
|
||||
# Remove previously-inserted iptables rule.
|
||||
iptables -t nat -D OUTPUT -p tcp --dport 80 -m owner ! --uid-owner daemon \
|
||||
-j REDIRECT --to 8080
|
||||
fi
|
||||
|
||||
case $PROJECT in
|
||||
ubuntu-cpc)
|
||||
config/hooks.d/remove-implicit-artifacts
|
||||
esac
|
||||
|
@ -33,6 +33,10 @@ if [ -z "$MIRROR" ]; then
|
||||
esac
|
||||
fi
|
||||
|
||||
mkdir -p config
|
||||
cp -af /usr/share/livecd-rootfs/live-build/functions config/functions
|
||||
cp -af /usr/share/livecd-rootfs/live-build/snap-seed-parse.py config/snap-seed-parse
|
||||
|
||||
mkdir -p config/package-lists
|
||||
|
||||
add_task ()
|
||||
@ -58,9 +62,15 @@ add_task ()
|
||||
# failure.
|
||||
|
||||
for task; do
|
||||
if [ -z "$HWE_BUILD" ]; then
|
||||
# We need a ridiculous number of backslashes to protect
|
||||
# parentheses from eval.
|
||||
echo "!chroot chroot apt-cache dumpavail | grep-dctrl -nsPackage \\\\\\( -XFArchitecture $ARCH -o -XFArchitecture all \\\\\\) -a -wFTask $task" >> "config/package-lists/livecd-rootfs.list.chroot_$pass"
|
||||
else
|
||||
# If HWE_BUILD is set, we strip out drivers not available
|
||||
# in the HWE stack, then sed the rest with their HWE suffixes
|
||||
echo "!chroot chroot apt-cache dumpavail | grep-dctrl -nsPackage \\\\\\( -XFArchitecture $ARCH -o -XFArchitecture all \\\\\\) -a -wFTask $task | grep -v xserver-xorg-input-vmmouse | grep -v libgles1-mesa | sed -e 's/xserver-xorg.*/&-hwe-16.04/'" >> "config/package-lists/livecd-rootfs.list.chroot_$pass"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
@ -87,6 +97,7 @@ LIVE_TASK=
|
||||
PREINSTALLED=false
|
||||
PREINSTALL_POOL=
|
||||
PREINSTALL_POOL_SEEDS=
|
||||
PREFIX="livecd.$PROJECT${SUBARCH:+-$SUBARCH}"
|
||||
|
||||
CHROOT_HOOKS=
|
||||
BINARY_HOOKS=
|
||||
@ -101,17 +112,19 @@ add_binary_hook ()
|
||||
BINARY_HOOKS="${BINARY_HOOKS:+$BINARY_HOOKS }$1"
|
||||
}
|
||||
|
||||
case $PROJECT in
|
||||
ubuntu-cpc)
|
||||
IMAGEFORMAT=ext4
|
||||
;;
|
||||
esac
|
||||
if [ -z "${IMAGEFORMAT:-}" ]; then
|
||||
case $PROJECT in
|
||||
ubuntu-cpc)
|
||||
IMAGEFORMAT=ext4
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
case $IMAGEFORMAT in
|
||||
ext2|ext3|ext4)
|
||||
OPTS="${OPTS:+$OPTS }--initramfs none --chroot-filesystem $IMAGEFORMAT"
|
||||
PREINSTALLED=true
|
||||
case $SUBPROJECT in
|
||||
case ${SUBPROJECT:-} in
|
||||
wubi)
|
||||
add_package install lupin-support
|
||||
COMPONENTS='main restricted universe multiverse'
|
||||
@ -133,9 +146,62 @@ case $IMAGEFORMAT in
|
||||
PREINSTALLED=true
|
||||
;;
|
||||
|
||||
ubuntu-image)
|
||||
case "$ARCH+${SUBARCH:-}" in
|
||||
amd64+*)
|
||||
MODEL=pc-amd64 ;;
|
||||
i386+*)
|
||||
MODEL=pc-i386 ;;
|
||||
arm64+snapdragon)
|
||||
MODEL=dragonboard ;;
|
||||
armhf+raspi2)
|
||||
MODEL=pi2 ;;
|
||||
armhf+raspi3)
|
||||
MODEL=pi3 ;;
|
||||
armhf+cm3)
|
||||
MODEL=cm3 ;;
|
||||
*)
|
||||
echo "Model $ARCH+${SUBARCH:-} unknown to livecd-rootfs" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
case $MODEL in
|
||||
pc-amd64|pc-i386)
|
||||
UBUNTU_IMAGE_ARGS="--image-size 3700M" ;;
|
||||
*)
|
||||
UBUNTU_IMAGE_ARGS="" ;;
|
||||
esac
|
||||
case $SUITE in
|
||||
xenial)
|
||||
# Ubuntu Core 16
|
||||
;;
|
||||
*)
|
||||
# Ubuntu Core 18
|
||||
MODEL="ubuntu-core-18-${MODEL#pc-}" ;;
|
||||
esac
|
||||
|
||||
for snap in $EXTRA_SNAPS; do
|
||||
UBUNTU_IMAGE_ARGS="$UBUNTU_IMAGE_ARGS --extra-snaps $snap"
|
||||
done
|
||||
echo "IMAGEFORMAT=$IMAGEFORMAT" >> config/common
|
||||
echo "UBUNTU_IMAGE_ARGS=\"$UBUNTU_IMAGE_ARGS\"" >> config/common
|
||||
# Store model assertion in top dir to get it picked up later as a build artifact
|
||||
env SNAPPY_STORE_NO_CDN=1 snap known --remote model series=16 model="$MODEL" brand-id=canonical > "$PREFIX".model-assertion
|
||||
echo "Configured ubuntu-image for the following model assertion:"
|
||||
cat "$PREFIX".model-assertion
|
||||
echo "----------------------------------------------------------"
|
||||
# Fake finished configuration for lb build
|
||||
mkdir -p .build
|
||||
touch .build/config
|
||||
exit 0
|
||||
;;
|
||||
|
||||
none)
|
||||
OPTS="${OPTS:+$OPTS }--chroot-filesystem $IMAGEFORMAT"
|
||||
;;
|
||||
*)
|
||||
case $PROJECT in
|
||||
ubuntu-server|ubuntu-touch|ubuntu-pd)
|
||||
ubuntu-server|ubuntu-touch|ubuntu-touch-custom)
|
||||
;;
|
||||
*)
|
||||
add_package live lupin-casper
|
||||
@ -168,7 +234,7 @@ if [ "$PREINSTALLED" = "true" ] && [ "$SUBPROJECT" != "wubi" ]; then
|
||||
ubuntu-server)
|
||||
add_package live oem-config-debconf ubiquity-frontend-debconf
|
||||
;;
|
||||
ubuntu-core|base|ubuntu-touch|ubuntu-pd|ubuntu-cpc|ubuntu-desktop-next)
|
||||
ubuntu-core|ubuntu-base|base|ubuntu-touch|ubuntu-touch-custom|ubuntu-cpc|ubuntu-desktop-next)
|
||||
;;
|
||||
*)
|
||||
add_package live oem-config-gtk ubiquity-frontend-gtk
|
||||
@ -186,16 +252,24 @@ case $BINARYFORMAT in
|
||||
;;
|
||||
esac
|
||||
|
||||
SIGNED_KERNEL_PACKAGE="linux-signed-generic"
|
||||
UNITY_HWE_HINTS="unity-settings-daemon notify-osd libqt4-sql-sqlite unity gnome-terminal"
|
||||
HWE_X_PACKAGES="xserver-xorg-hwe-16.04 xserver-xorg-video-all-hwe-16.04 xserver-xorg-input-all-hwe-16.04"
|
||||
HWE_KERNEL_FLAVOUR="generic-hwe-16.04"
|
||||
HWE_SIGNED_KERNEL_PACKAGE="linux-signed-$HWE_KERNEL_FLAVOUR"
|
||||
|
||||
if [ "${SUBPROJECT:-}" = minimized ]; then
|
||||
OPTS="${OPTS:+$OPTS }--bootstrap-flavour=minimal --linux-packages=linux-image"
|
||||
fi
|
||||
|
||||
case $PROJECT in
|
||||
ubuntu|ubuntu-dvd)
|
||||
add_package install ubuntu-minimal ubuntu-standard
|
||||
add_package install ubuntu-desktop $UNITY_HWE_HINTS
|
||||
HWE_BUILD="yes"
|
||||
add_task install minimal standard ubuntu-desktop
|
||||
add_package install $HWE_X_PACKAGES
|
||||
LIVE_TASK='ubuntu-live'
|
||||
LIVE_TASK_FILTER='--not -Pe ^linux-\(headers\|image\|signed\)'
|
||||
KERNEL_FLAVOURS="$HWE_KERNEL_FLAVOUR"
|
||||
case $ARCH in
|
||||
amd64) add_package live $SIGNED_KERNEL_PACKAGE ;;
|
||||
amd64) add_package live $HWE_SIGNED_KERNEL_PACKAGE ;;
|
||||
esac
|
||||
;;
|
||||
|
||||
@ -213,17 +287,21 @@ case $PROJECT in
|
||||
# CDIMAGE_PREINSTALLED is not passed from build.py
|
||||
# and PREINSTALLED means something different. So
|
||||
# we use SUBPROJECT to pass on the information
|
||||
if [ "$SUBPROJECT" = "system-image" ]; then
|
||||
if [ "${SUBPROJECT:-}" = "system-image" ]; then
|
||||
OPTS="${OPTS:+$OPTS }--linux-packages=linux-image"
|
||||
fi
|
||||
;;
|
||||
|
||||
kubuntu|kubuntu-dvd)
|
||||
HWE_BUILD="yes"
|
||||
add_task install minimal standard
|
||||
add_task install kubuntu-desktop
|
||||
add_package install $HWE_X_PACKAGES
|
||||
LIVE_TASK='kubuntu-live'
|
||||
LIVE_TASK_FILTER='--not -Pe ^linux-\(headers\|image\|signed\)'
|
||||
KERNEL_FLAVOURS="$HWE_KERNEL_FLAVOUR"
|
||||
case $ARCH in
|
||||
amd64) add_package live linux-signed-generic ;;
|
||||
amd64) add_package live $HWE_SIGNED_KERNEL_PACKAGE ;;
|
||||
esac
|
||||
COMPONENTS='main restricted universe'
|
||||
add_chroot_hook remove-gnome-icon-cache
|
||||
@ -258,16 +336,17 @@ case $PROJECT in
|
||||
;;
|
||||
|
||||
xubuntu)
|
||||
HWE_BUILD="yes"
|
||||
add_task install minimal standard xubuntu-desktop
|
||||
add_package install $HWE_X_PACKAGES
|
||||
add_package install xterm
|
||||
LIVE_TASK='xubuntu-live'
|
||||
LIVE_TASK_FILTER='--not -Pe ^linux-\(headers\|image\|signed\)'
|
||||
KERNEL_FLAVOURS="$HWE_KERNEL_FLAVOUR"
|
||||
case $ARCH in
|
||||
amd64) add_package live linux-signed-generic ;;
|
||||
amd64) add_package live $HWE_SIGNED_KERNEL_PACKAGE ;;
|
||||
esac
|
||||
COMPONENTS='main restricted universe multiverse'
|
||||
case $ARCH in
|
||||
amd64|i386) KERNEL_FLAVOURS=generic ;;
|
||||
esac
|
||||
;;
|
||||
|
||||
ubuntu-netbook)
|
||||
@ -276,44 +355,62 @@ case $PROJECT in
|
||||
;;
|
||||
|
||||
mythbuntu)
|
||||
HWE_BUILD="yes"
|
||||
add_task install minimal standard mythbuntu-desktop
|
||||
add_package install $HWE_X_PACKAGES
|
||||
LIVE_TASK='mythbuntu-live'
|
||||
LIVE_TASK_FILTER='--not -Pe ^linux-\(headers\|image\|signed\)'
|
||||
KERNEL_FLAVOURS="$HWE_KERNEL_FLAVOUR"
|
||||
case $ARCH in
|
||||
amd64) add_package live linux-signed-generic ;;
|
||||
amd64) add_package live $HWE_SIGNED_KERNEL_PACKAGE ;;
|
||||
esac
|
||||
COMPONENTS='main restricted universe multiverse'
|
||||
;;
|
||||
|
||||
lubuntu)
|
||||
HWE_BUILD="yes"
|
||||
add_task install minimal standard lubuntu-desktop
|
||||
LIVE_TASK='lubuntu-live'
|
||||
case $ARCH in
|
||||
amd64) add_package live linux-signed-generic ;;
|
||||
amd64|i386) add_package install thermald ;;
|
||||
esac
|
||||
add_package install $HWE_X_PACKAGES
|
||||
LIVE_TASK='lubuntu-live'
|
||||
LIVE_TASK_FILTER='--not -Pe ^linux-\(headers\|image\|signed\)'
|
||||
KERNEL_FLAVOURS="$HWE_KERNEL_FLAVOUR"
|
||||
case $ARCH in
|
||||
amd64) add_package live $HWE_SIGNED_KERNEL_PACKAGE ;;
|
||||
esac
|
||||
COMPONENTS='main restricted universe multiverse'
|
||||
case $ARCH in
|
||||
amd64|i386) KERNEL_FLAVOURS=generic ;;
|
||||
esac
|
||||
|
||||
# The Lubuntu STRUCTURE file has "feature
|
||||
# no-follow-recommends". Mirror this.
|
||||
export APT_OPTIONS="--yes --no-install-recommends"
|
||||
;;
|
||||
|
||||
ubuntu-gnome)
|
||||
HWE_BUILD="yes"
|
||||
add_task install minimal standard ubuntu-gnome-desktop
|
||||
add_package install $HWE_X_PACKAGES
|
||||
LIVE_TASK='ubuntu-gnome-live'
|
||||
LIVE_TASK_FILTER='--not -Pe ^linux-\(headers\|image\|signed\)'
|
||||
KERNEL_FLAVOURS="$HWE_KERNEL_FLAVOUR"
|
||||
case $ARCH in
|
||||
amd64) add_package live linux-signed-generic ;;
|
||||
amd64) add_package live $HWE_SIGNED_KERNEL_PACKAGE ;;
|
||||
esac
|
||||
COMPONENTS='main restricted universe'
|
||||
;;
|
||||
|
||||
ubuntu-mate)
|
||||
HWE_BUILD="yes"
|
||||
add_task install minimal standard ubuntu-mate-core ubuntu-mate-desktop
|
||||
LIVE_TASK='ubuntu-mate-live'
|
||||
case $ARCH in
|
||||
amd64) add_package live linux-signed-generic ;;
|
||||
amd64|i386) add_package install thermald ;;
|
||||
esac
|
||||
add_package install $HWE_X_PACKAGES
|
||||
LIVE_TASK='ubuntu-mate-live'
|
||||
LIVE_TASK_FILTER='--not -Pe ^linux-\(headers\|image\|signed\)'
|
||||
KERNEL_FLAVOURS="$HWE_KERNEL_FLAVOUR"
|
||||
case $ARCH in
|
||||
amd64) add_package live $HWE_SIGNED_KERNEL_PACKAGE ;;
|
||||
esac
|
||||
COMPONENTS='main restricted universe multiverse'
|
||||
# The Ubuntu MATE STRUCTURE file has "feature
|
||||
@ -322,19 +419,25 @@ case $PROJECT in
|
||||
;;
|
||||
|
||||
ubuntustudio-dvd)
|
||||
HWE_BUILD="yes"
|
||||
add_task install minimal standard ubuntustudio-desktop ubuntustudio-audio ubuntustudio-fonts ubuntustudio-graphics ubuntustudio-video ubuntustudio-publishing ubuntustudio-photography
|
||||
add_package install $HWE_X_PACKAGES linux-generic-hwe-16.04-
|
||||
COMPONENTS='main restricted universe multiverse'
|
||||
case $ARCH in
|
||||
amd64|i386) KERNEL_FLAVOURS=lowlatency ;;
|
||||
amd64|i386) KERNEL_FLAVOURS=lowlatency-hwe-16.04 ;;
|
||||
esac
|
||||
;;
|
||||
|
||||
ubuntukylin)
|
||||
HWE_BUILD="yes"
|
||||
add_task install minimal standard ubuntukylin-desktop
|
||||
add_package install $HWE_X_PACKAGES
|
||||
add_package install ubuntukylin-default-settings
|
||||
LIVE_TASK='ubuntukylin-live'
|
||||
LIVE_TASK_FILTER='--not -Pe ^linux-\(headers\|image\|signed\)'
|
||||
KERNEL_FLAVOURS="$HWE_KERNEL_FLAVOUR"
|
||||
case $ARCH in
|
||||
amd64) add_package live linux-signed-generic ;;
|
||||
amd64) add_package live $HWE_SIGNED_KERNEL_PACKAGE ;;
|
||||
esac
|
||||
COMPONENTS='main restricted universe'
|
||||
;;
|
||||
@ -413,13 +516,18 @@ case $PROJECT in
|
||||
OPTS="${OPTS:+$OPTS }--bootstrap-flavour=minimal"
|
||||
;;
|
||||
|
||||
ubuntu-touch|ubuntu-pd)
|
||||
if [ "$PROJECT" = "ubuntu-touch" ]; then
|
||||
meta_package=ubuntu-touch
|
||||
else
|
||||
meta_package=ubuntu-pocket-desktop
|
||||
fi
|
||||
add_package install ubuntu-minimal $meta_package systemd-sysv- packagekit ubuntu-system-settings-online-accounts
|
||||
ubuntu-base)
|
||||
OPTS="${OPTS:+$OPTS }--bootstrap-flavour=minimal"
|
||||
;;
|
||||
|
||||
ubuntu-touch|ubuntu-touch-custom)
|
||||
HINTS="packagekit ubuntu-system-settings-online-accounts"
|
||||
case $ARCH in
|
||||
amd64|i386)
|
||||
HINTS="$HINTS qml-module-ubuntu-components-gles unity8"
|
||||
;;
|
||||
esac
|
||||
add_package install ubuntu-minimal ubuntu-touch $HINTS
|
||||
|
||||
COMPONENTS='main restricted universe'
|
||||
BOOTAPPEND_LIVE='hostname=ubuntu-phablet username=ubuntu'
|
||||
@ -433,7 +541,7 @@ case $PROJECT in
|
||||
# SUBPROJECT, but it's a handy thing that launchpad-buildd
|
||||
# already passes through to us that we weren't otherwise
|
||||
# using here.
|
||||
case $SUBPROJECT in
|
||||
case ${SUBPROJECT:-} in
|
||||
ubuntu-rtm/dogfood)
|
||||
MIRROR=http://derived-archive.dogfood.content.paddev.net/ubuntu-rtm/
|
||||
OPTS="${OPTS:+$OPTS }--apt-secure false"
|
||||
@ -451,29 +559,47 @@ case $PROJECT in
|
||||
;;
|
||||
|
||||
ubuntu-cpc)
|
||||
add_task install minimal standard cloud-image
|
||||
add_package install ubuntu-minimal
|
||||
if [ "${SUBPROJECT:-}" = minimized ]; then
|
||||
add_task install cloud-image
|
||||
add_package install sudo
|
||||
# linux-kvm currently only exists in xenial, not in
|
||||
# non-LTS suites. Fall back to virtual flavor, which
|
||||
# may or may not boot initramfsless but enables us to
|
||||
# test building and possibly build derivative images
|
||||
# using other kernel flavors.
|
||||
# If you enable an extra ppa, it is assumed that
|
||||
# linux-kvm is available since you control the
|
||||
# archive and can provide this metapackage as
|
||||
# necessary.
|
||||
if [ "$ARCH" != "amd64" ] || ([ -z "$EXTRA_PPAS" ] && [ "$SUITE" != xenial ]); then
|
||||
KERNEL_FLAVOURS=virtual
|
||||
else
|
||||
KERNEL_FLAVOURS=kvm
|
||||
fi
|
||||
else
|
||||
add_task install minimal standard cloud-image
|
||||
add_package install ubuntu-minimal
|
||||
KERNEL_FLAVOURS=virtual
|
||||
case $ARCH in
|
||||
armhf|arm64|ppc64el|powerpc)
|
||||
add_task install server
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
BINARY_REMOVE_LINUX=false
|
||||
OPTS="${OPTS:+$OPTS }--initramfs=none"
|
||||
KERNEL_FLAVOURS=virtual
|
||||
case $ARCH in
|
||||
armhf)
|
||||
KERNEL_FLAVOURS=generic-lpae
|
||||
add_package install flash-kernel
|
||||
add_task install server
|
||||
;;
|
||||
arm64)
|
||||
KERNEL_FLAVOURS=generic
|
||||
add_package install flash-kernel
|
||||
add_task install server
|
||||
;;
|
||||
ppc64el)
|
||||
add_task install server
|
||||
;;
|
||||
powerpc)
|
||||
KERNEL_FLAVOURS=powerpc64-smp
|
||||
add_task install server
|
||||
;;
|
||||
esac
|
||||
OPTS="${OPTS:+$OPTS }--system=normal"
|
||||
@ -488,6 +614,37 @@ case $PROJECT in
|
||||
;;
|
||||
esac
|
||||
|
||||
case $SUBPROJECT in
|
||||
buildd)
|
||||
OPTS="${OPTS:+$OPTS }--archive-areas main"
|
||||
COMPONENTS='main restricted universe multiverse'
|
||||
OPTS="${OPTS:+$OPTS }--apt-recommends false"
|
||||
OPTS="${OPTS:+$OPTS }--apt-secure false"
|
||||
OPTS="${OPTS:+$OPTS }--parent-mirror-binary ${MIRROR}"
|
||||
# XXX cjwatson 2018-04-27: We need to work out how to make
|
||||
# this conditional so that we can do things like building
|
||||
# buildd chroots with -updates. This probably involves
|
||||
# either extending the PROPOSED hack or fixing the strange
|
||||
# way that SUITE is in fact a series; in either case it's
|
||||
# likely to involve work both here and in launchpad-buildd.
|
||||
OPTS="${OPTS:+$OPTS }--security false --volatile false"
|
||||
|
||||
add_package install adduser
|
||||
add_package install policyrcd-script-zg2
|
||||
add_package install pkgbinarymangler
|
||||
add_package install ca-certificates
|
||||
add_package install pkg-create-dbgsym
|
||||
add_package install apt-transport-https
|
||||
add_package install tzdata
|
||||
add_package install fakeroot
|
||||
add_package install build-essential
|
||||
# Needed for LXD-based builds.
|
||||
add_package install init
|
||||
|
||||
cp -af /usr/share/livecd-rootfs/live-build/make-lxd-metadata.py config/make-lxd-metadata
|
||||
;;
|
||||
esac
|
||||
|
||||
if [ "$PREINSTALLED" != "true" ] && [ "$LIVE_TASK" ]; then
|
||||
add_task live "$LIVE_TASK"
|
||||
fi
|
||||
@ -557,7 +714,7 @@ case $ARCH in
|
||||
esac
|
||||
|
||||
case $PROJECT in
|
||||
ubuntu-server|ubuntu-core|ubuntu-touch|ubuntu-pd)
|
||||
ubuntu-server|ubuntu-core|ubuntu-base|ubuntu-touch|ubuntu-touch-custom)
|
||||
case $SUBPROJECT in
|
||||
system-image)
|
||||
# keep the kernel for the system-image build
|
||||
@ -586,7 +743,7 @@ case $PROJECT in
|
||||
;;
|
||||
esac
|
||||
|
||||
case $SUBPROJECT in
|
||||
case ${SUBPROJECT:-} in
|
||||
wubi)
|
||||
add_binary_hook build-wubildr
|
||||
;;
|
||||
@ -595,6 +752,7 @@ esac
|
||||
lb config noauto \
|
||||
--mode ubuntu \
|
||||
--distribution "$SUITE" \
|
||||
--iso-preparer "livecd-rootfs" \
|
||||
--bootstrap-keyring ubuntu-keyring \
|
||||
--binary-images "$BINARY_IMAGES" \
|
||||
--memtest "$MEMTEST" \
|
||||
@ -607,13 +765,19 @@ lb config noauto \
|
||||
--initsystem none \
|
||||
--bootloader "$BOOTLOADER" \
|
||||
--initramfs-compression lzma \
|
||||
--checksums none \
|
||||
--cache false \
|
||||
${BOOTAPPEND_LIVE:+--bootappend-live "$BOOTAPPEND_LIVE"} \
|
||||
$OPTS \
|
||||
"$@"
|
||||
|
||||
echo "LB_CHROOT_HOOKS=\"$CHROOT_HOOKS\"" >> config/chroot
|
||||
echo "SUBPROJECT=\"${SUBPROJECT:-}\"" >> config/chroot
|
||||
echo "LB_DISTRIBUTION=\"$SUITE\"" >> config/chroot
|
||||
echo "LB_BINARY_HOOKS=\"$BINARY_HOOKS\"" >> config/binary
|
||||
echo "BUILDSTAMP=\"$NOW\"" >> config/binary
|
||||
echo "SUBPROJECT=\"${SUBPROJECT:-}\"" >> config/binary
|
||||
echo "LB_DISTRIBUTION=\"$SUITE\"" >> config/binary
|
||||
|
||||
case $ARCH+$SUBARCH in
|
||||
armhf+raspi2)
|
||||
@ -637,6 +801,7 @@ cat > /boot/firmware/config.txt << EOM
|
||||
# Some settings may impact device functionality. See link above for details
|
||||
|
||||
kernel=uboot.bin
|
||||
device_tree_address=0x02000000
|
||||
|
||||
# enable i2c
|
||||
dtparam=i2c_arm=on
|
||||
@ -727,7 +892,7 @@ EOF
|
||||
config/archives/proposed.list.binary
|
||||
fi
|
||||
|
||||
case $PROJECT:$SUBPROJECT in
|
||||
case $PROJECT:${SUBPROJECT:-} in
|
||||
*-dvd:*)
|
||||
. config/bootstrap
|
||||
|
||||
@ -743,9 +908,31 @@ EOF
|
||||
fi
|
||||
;;
|
||||
|
||||
ubuntu-touch:*|ubuntu-pd:*|ubuntu-core:system-image|ubuntu-desktop-next:system-image|ubuntu-cpc:*)
|
||||
ubuntu-touch:*|ubuntu-touch-custom:*|ubuntu-core:system-image|ubuntu-desktop-next:system-image|ubuntu-cpc:*)
|
||||
cp -af /usr/share/livecd-rootfs/live-build/${PROJECT}/* \
|
||||
config/
|
||||
|
||||
if [ "$PROJECT" = "ubuntu-cpc" ]; then
|
||||
case ${IMAGE_TARGETS:-} in
|
||||
"")
|
||||
config/hooks.d/make-hooks --hooks-dir config/hooks all
|
||||
;;
|
||||
*)
|
||||
config/hooks.d/make-hooks --hooks-dir config/hooks \
|
||||
"$IMAGE_TARGETS"
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
if [ "$IMAGEFORMAT" = none ]; then
|
||||
rm -f config/hooks/*.binary*
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
|
||||
case $SUBPROJECT in
|
||||
buildd)
|
||||
cp -af /usr/share/livecd-rootfs/live-build/buildd/* config/
|
||||
;;
|
||||
esac
|
||||
|
||||
@ -813,7 +1000,7 @@ EOF
|
||||
fi
|
||||
fi
|
||||
|
||||
case $SUBPROJECT in
|
||||
case ${SUBPROJECT:-} in
|
||||
ubuntu-rtm|ubuntu-rtm/*)
|
||||
# debootstrap doesn't know about ubuntu-rtm series directly. Rather
|
||||
# than having to teach it, we employ a few hacks to make it use the
|
||||
|
5
live-build/buildd/hooks/00-kernel-img.chroot
Executable file
5
live-build/buildd/hooks/00-kernel-img.chroot
Executable file
@ -0,0 +1,5 @@
|
||||
#! /bin/sh
|
||||
set -e
|
||||
|
||||
# At one point, kernel builds needed this.
|
||||
echo do_initrd = Yes >>/etc/kernel-img.conf
|
12
live-build/buildd/hooks/00-mirror.binary
Executable file
12
live-build/buildd/hooks/00-mirror.binary
Executable file
@ -0,0 +1,12 @@
|
||||
#! /bin/sh
|
||||
set -e
|
||||
|
||||
. config/bootstrap
|
||||
|
||||
# Use a public-facing mirror URL, for the benefit of
|
||||
# sbuild-launchpad-chroot. We deliberately do this only after live-build
|
||||
# has run "apt-get update" for the last time, in order that
|
||||
# /var/lib/apt/lists/ has suitable cached Packages files; this speeds up
|
||||
# builds on buildds.
|
||||
sed -i "s,${LB_PARENT_MIRROR_BINARY},${LB_MIRROR_BINARY},g" \
|
||||
binary/etc/apt/sources.list
|
19
live-build/buildd/hooks/00-ptmx-chardev.chroot_early
Executable file
19
live-build/buildd/hooks/00-ptmx-chardev.chroot_early
Executable file
@ -0,0 +1,19 @@
|
||||
#! /bin/sh
|
||||
set -e
|
||||
|
||||
# debootstrap 1.0.76 started creating /dev/ptmx as a symlink to
|
||||
# /dev/pts/ptmx. Unfortunately, this doesn't work with sbuild, because it
|
||||
# leaves the ptmxmode mount option at its default of 000, which causes
|
||||
# builds to be unable to open /dev/pts/ptmx. To avoid this, debootstrap
|
||||
# 1.0.89 switched to creating it as a device node where possible. See
|
||||
# https://bugs.debian.org/817236 for details and analysis.
|
||||
#
|
||||
# xenial has a version of debootstrap in the range that contains this bug.
|
||||
# It seems too risky to try to cherry-pick the debootstrap change in
|
||||
# question in an SRU at this point; instead, just fix things up here for
|
||||
# buildd images.
|
||||
|
||||
if [ -h /dev/ptmx ]; then
|
||||
mknod -m 666 /dev/ptmx.new c 5 2
|
||||
mv /dev/ptmx.new /dev/ptmx
|
||||
fi
|
10
live-build/buildd/hooks/01-pkgbinarymangler.chroot
Executable file
10
live-build/buildd/hooks/01-pkgbinarymangler.chroot
Executable file
@ -0,0 +1,10 @@
|
||||
#! /bin/sh
|
||||
set -e
|
||||
|
||||
# Configure pkgbinarymangler.
|
||||
sed -i /^enable/s/false/true/ \
|
||||
/etc/pkgbinarymangler/maintainermangler.conf \
|
||||
/etc/pkgbinarymangler/striptranslations.conf || true
|
||||
sed -i /^invalid_current/s/ignore/fail/ \
|
||||
/etc/pkgbinarymangler/maintainermangler.conf \
|
||||
/etc/pkgbinarymangler/striptranslations.conf || true
|
13
live-build/buildd/hooks/01-policy-rc-d.binary
Executable file
13
live-build/buildd/hooks/01-policy-rc-d.binary
Executable file
@ -0,0 +1,13 @@
|
||||
#! /bin/sh
|
||||
set -e
|
||||
|
||||
# Put the /usr/sbin/policy-rc.d alternatives symlink in place. Ordinarily
|
||||
# update-alternatives ought to create this when policyrcd-script-zg2 is
|
||||
# installed, but that doesn't work because live-build has already installed
|
||||
# a dummy one at that point. The simplest approach is to repair the
|
||||
# situation by putting it in place here.
|
||||
if [ -L binary/etc/alternatives/policy-rc.d ] && \
|
||||
[ ! -e binary/usr/sbin/policy-rc.d ] && \
|
||||
[ ! -L binary/usr/sbin/policy-rc.d ]; then
|
||||
ln -s /etc/alternatives/policy-rc.d binary/usr/sbin/policy-rc.d
|
||||
fi
|
9
live-build/buildd/hooks/02-user.chroot
Executable file
9
live-build/buildd/hooks/02-user.chroot
Executable file
@ -0,0 +1,9 @@
|
||||
#! /bin/sh
|
||||
set -e
|
||||
|
||||
# Create the buildd user and group.
|
||||
addgroup --gid 2501 buildd
|
||||
adduser --system --disabled-password --gecos 'Build Daemon user' \
|
||||
--ingroup buildd --uid 2001 --shell /bin/bash buildd
|
||||
mkdir -p /build/buildd
|
||||
chown buildd:buildd /build/buildd
|
10
live-build/buildd/hooks/50-buildd-tar.binary
Executable file
10
live-build/buildd/hooks/50-buildd-tar.binary
Executable file
@ -0,0 +1,10 @@
|
||||
#! /bin/sh
|
||||
# A few things (launchpad-buildd, sbuild-launchpad-chroot) rely on the
|
||||
# top-level directory being "chroot-autobuild", so we have to do this
|
||||
# ourselves.
|
||||
set -e
|
||||
|
||||
# gzip was chosen for fastest decompression speed: it decompresses buildd
|
||||
# chroots about twice as fast as xz and about five times as fast as bzip2.
|
||||
tar --transform='s,^binary,chroot-autobuild,' --sort=name --numeric-owner \
|
||||
-czf "livecd.$PROJECT.rootfs.tar.gz" binary
|
16
live-build/buildd/hooks/51-buildd-lxd.binary
Executable file
16
live-build/buildd/hooks/51-buildd-lxd.binary
Executable file
@ -0,0 +1,16 @@
|
||||
#! /bin/sh
|
||||
# Some build types prefer a LXD image over a traditional chroot tarball.
|
||||
set -e
|
||||
|
||||
. config/bootstrap
|
||||
|
||||
TMPDIR="$(mktemp -d)"
|
||||
config/make-lxd-metadata "${LB_DISTRIBUTION%-*}" "$ARCH" \
|
||||
>"$TMPDIR/metadata.yaml"
|
||||
tar --numeric-owner -cf "livecd.$PROJECT.lxd.tar" -C "$TMPDIR" metadata.yaml
|
||||
rm -rf "$TMPDIR"
|
||||
# When using the combined metadata/rootfs form, the rootfs must be under
|
||||
# rootfs/ rather than under chroot-autobuild/.
|
||||
tar --transform='s,^binary,rootfs,' --sort=name --numeric-owner \
|
||||
-rf "livecd.$PROJECT.lxd.tar" binary
|
||||
gzip -9 "livecd.$PROJECT.lxd.tar"
|
@ -0,0 +1,2 @@
|
||||
DPkg::Options {"--force-unsafe-io";};
|
||||
DPkg::Use-Pty "false";
|
@ -0,0 +1,3 @@
|
||||
Package: *
|
||||
Pin: release a=*-backports
|
||||
Pin-Priority: 500
|
1
live-build/buildd/includes.chroot/etc/fstab
Normal file
1
live-build/buildd/includes.chroot/etc/fstab
Normal file
@ -0,0 +1 @@
|
||||
/dev/root / ext2 noatime,errors=remount-ro 0 1
|
1
live-build/buildd/includes.chroot/etc/hostname
Normal file
1
live-build/buildd/includes.chroot/etc/hostname
Normal file
@ -0,0 +1 @@
|
||||
INVALID
|
9
live-build/buildd/includes.chroot/etc/hosts
Normal file
9
live-build/buildd/includes.chroot/etc/hosts
Normal file
@ -0,0 +1,9 @@
|
||||
127.0.0.1 localhost.localdomain localhost
|
||||
|
||||
# The following lines are desirable for IPv6 capable hosts
|
||||
::1 ip6-localhost ip6-loopback
|
||||
fe00::0 ip6-localnet
|
||||
ff00::0 ip6-mcastprefix
|
||||
ff02::1 ip6-allnodes
|
||||
ff02::2 ip6-allrouters
|
||||
ff02::3 ip6-allhosts
|
0
live-build/buildd/includes.chroot/etc/resolv.conf
Normal file
0
live-build/buildd/includes.chroot/etc/resolv.conf
Normal file
13
live-build/buildd/includes.chroot/usr/local/sbin/policy-rc.d
Executable file
13
live-build/buildd/includes.chroot/usr/local/sbin/policy-rc.d
Executable file
@ -0,0 +1,13 @@
|
||||
#!/bin/sh
|
||||
|
||||
# policy-rc.d script for chroots.
|
||||
# Copyright (c) 2007 Peter Palfrader <peter@palfrader.org>
|
||||
# License: <weasel> MIT, if you want one.
|
||||
|
||||
while true; do
|
||||
case "$1" in
|
||||
-*) shift ;;
|
||||
makedev) exit 0;;
|
||||
*) echo "Not running services in chroot."; exit 101 ;;
|
||||
esac
|
||||
done
|
2
live-build/buildd/preseed/debconf.preseed
Normal file
2
live-build/buildd/preseed/debconf.preseed
Normal file
@ -0,0 +1,2 @@
|
||||
# We never want debconf interaction.
|
||||
debconf debconf/frontend select Noninteractive
|
3
live-build/buildd/preseed/man-db.preseed
Normal file
3
live-build/buildd/preseed/man-db.preseed
Normal file
@ -0,0 +1,3 @@
|
||||
# Avoid unnecessary manual page database builds (see
|
||||
# https://bugs.debian.org/554914).
|
||||
man-db man-db/auto-update boolean false
|
3
live-build/buildd/preseed/sun-java6.preseed
Normal file
3
live-build/buildd/preseed/sun-java6.preseed
Normal file
@ -0,0 +1,3 @@
|
||||
# Pre-accept interactive EULA prompts.
|
||||
sun-java6-bin shared/accepted-sun-dlj-v1-1 boolean true
|
||||
sun-java6-jre shared/accepted-sun-dlj-v1-1 boolean true
|
467
live-build/functions
Normal file
467
live-build/functions
Normal file
@ -0,0 +1,467 @@
|
||||
# vi: ts=4 expandtab syntax=sh
|
||||
|
||||
#imagesize=${IMAGE_SIZE:-$((2252*1024**2))} # 2.2G (the current size we ship)
|
||||
imagesize=${IMAGE_SIZE:-2361393152} # 2.2G (the current size we ship)
|
||||
fs_label="${FS_LABEL:-rootfs}"
|
||||
|
||||
rootfs_dev_mapper=
|
||||
loop_device=
|
||||
loop_raw=
|
||||
backing_img=
|
||||
|
||||
clean_loops() {
|
||||
local kpartx_ret
|
||||
local kpartx_stdout
|
||||
|
||||
if [ -n "${backing_img}" ]; then
|
||||
# sync before removing loop to avoid "Device or resource busy" errors
|
||||
sync
|
||||
kpartx_ret=""
|
||||
kpartx_stdout=$(kpartx -v -d "${backing_img}") || kpartx_ret=$?
|
||||
echo "$kpartx_stdout"
|
||||
if [ -n "$kpartx_ret" ]; then
|
||||
if echo "$kpartx_stdout" | grep -q "loop deleted"; then
|
||||
echo "Suppressing kpartx returning error (#860894)"
|
||||
else
|
||||
exit $kpartx_ret
|
||||
fi
|
||||
fi
|
||||
unset backing_img
|
||||
fi
|
||||
|
||||
if [ -z "${rootfs_dev_mapper}" ]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
unset loop_device
|
||||
unset loop_raw
|
||||
unset rootfs_dev_mapper
|
||||
}
|
||||
|
||||
create_empty_disk_image() {
|
||||
# Prepare an empty disk image
|
||||
dd if=/dev/zero of="$1" bs=1 count=0 seek="${imagesize}"
|
||||
}
|
||||
|
||||
create_manifest() {
|
||||
local chroot_root=${1}
|
||||
local target_file=${2}
|
||||
echo "create_manifest chroot_root: ${chroot_root}"
|
||||
dpkg-query --show --admindir="${chroot_root}/var/lib/dpkg" > ${target_file}
|
||||
echo "create_manifest call to dpkg-query finished."
|
||||
./config/snap-seed-parse "${chroot_root}" "${target_file}"
|
||||
echo "create_manifest call to snap_seed_parse finished."
|
||||
echo "create_manifest finished"
|
||||
}
|
||||
|
||||
make_ext4_partition() {
|
||||
device="$1"
|
||||
label=${fs_label:+-L "${fs_label}"}
|
||||
mkfs.ext4 -F -b 4096 -i 8192 -m 0 ${label} -E resize=536870912 "$device"
|
||||
}
|
||||
|
||||
mount_image() {
|
||||
trap clean_loops EXIT
|
||||
backing_img="$1"
|
||||
local rootpart="$2"
|
||||
kpartx_mapping="$(kpartx -s -v -a ${backing_img})"
|
||||
|
||||
# Find the loop device
|
||||
loop_p1="$(echo -e ${kpartx_mapping} | head -n1 | awk '{print$3}')"
|
||||
loop_device="/dev/${loop_p1%p[0-9]*}"
|
||||
if [ ! -b ${loop_device} ]; then
|
||||
echo "unable to find loop device for ${backing_img}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Find the rootfs location
|
||||
rootfs_dev_mapper="/dev/mapper/${loop_p1%%[0-9]}${rootpart}"
|
||||
if [ ! -b "${rootfs_dev_mapper}" ]; then
|
||||
echo "${rootfs_dev_mapper} is not a block device";
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Add some information to the debug logs
|
||||
echo "Mounted disk image ${backing_img} to ${rootfs_dev_mapper}"
|
||||
blkid ${rootfs_dev_mapper}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
setup_resolvconf() {
|
||||
local mountpoint=${1}
|
||||
mv "${mountpoint}/etc/resolv.conf" resolv.conf.tmp
|
||||
cp /etc/resolv.conf "${mountpoint}/etc/resolv.conf"
|
||||
}
|
||||
|
||||
recover_resolvconf() {
|
||||
local mountpoint=${1}
|
||||
mv resolv.conf.tmp "${mountpoint}/etc/resolv.conf"
|
||||
}
|
||||
|
||||
setup_mountpoint() {
|
||||
local mountpoint="$1"
|
||||
|
||||
mount --rbind --make-rslave /dev "$mountpoint/dev"
|
||||
mount proc-live -t proc "$mountpoint/proc"
|
||||
mount sysfs-live -t sysfs "$mountpoint/sys"
|
||||
mount -t tmpfs none "$mountpoint/tmp"
|
||||
mount -t tmpfs none "$mountpoint/var/lib/apt"
|
||||
mount -t tmpfs none "$mountpoint/var/cache/apt"
|
||||
setup_resolvconf "${mountpoint}"
|
||||
chroot "$mountpoint" apt-get update
|
||||
|
||||
}
|
||||
|
||||
teardown_mountpoint() {
|
||||
# Reverse the operations from setup_mountpoint
|
||||
local mountpoint="$1"
|
||||
|
||||
# ensure we have exactly one trailing slash, and escape all slashes for awk
|
||||
mountpoint_match=$(echo "$mountpoint" | sed -e's,/$,,; s,/,\\/,g;')'\/'
|
||||
# sort -r ensures that deeper mountpoints are unmounted first
|
||||
for submount in $(awk </proc/self/mounts "\$2 ~ /$mountpoint_match/ \
|
||||
{ print \$2 }" | LC_ALL=C sort -r); do
|
||||
umount $submount
|
||||
done
|
||||
recover_resolvconf "${mountpoint}"
|
||||
}
|
||||
|
||||
mount_partition() {
|
||||
partition="$1"
|
||||
mountpoint="$2"
|
||||
|
||||
mount "$partition" "$mountpoint"
|
||||
setup_mountpoint "$mountpoint"
|
||||
}
|
||||
|
||||
|
||||
mount_disk_image() {
|
||||
local disk_image=${1}
|
||||
local mountpoint=${2}
|
||||
mount_image ${disk_image} 1
|
||||
mount_partition "${rootfs_dev_mapper}" $mountpoint
|
||||
|
||||
local uefi_dev="/dev/mapper${loop_device///dev/}p15"
|
||||
if [ -b ${uefi_dev} -a -e $mountpoint/boot/efi ]; then
|
||||
mount "${uefi_dev}" $mountpoint/boot/efi
|
||||
fi
|
||||
|
||||
# This is needed to allow for certain operations
|
||||
# such as updating grub and installing software
|
||||
cat > $mountpoint/usr/sbin/policy-rc.d << EOF
|
||||
#!/bin/sh
|
||||
# ${IMAGE_STR}
|
||||
echo "All runlevel operations denied by policy" >&2
|
||||
exit 101
|
||||
EOF
|
||||
chmod 0755 $mountpoint/usr/sbin/policy-rc.d
|
||||
|
||||
}
|
||||
|
||||
umount_partition() {
|
||||
local mountpoint=${1}
|
||||
teardown_mountpoint $mountpoint
|
||||
umount -R $mountpoint
|
||||
udevadm settle
|
||||
|
||||
if [ -n "${rootfs_dev_mapper}" -a -b "${rootfs_dev_mapper}" ]; then
|
||||
# buildd's don't have /etc/mtab symlinked
|
||||
# /etc/mtab is needed in order zerofree space for ext4 filesystems
|
||||
[ -e /etc/mtab ] || ln -s /proc/mounts /etc/mtab
|
||||
|
||||
# both of these are likely overkill, but it does result in slightly
|
||||
# smaller ext4 filesystem
|
||||
e2fsck -y -E discard ${rootfs_dev_mapper}
|
||||
zerofree ${rootfs_dev_mapper}
|
||||
fi
|
||||
}
|
||||
|
||||
umount_disk_image() {
|
||||
mountpoint="$1"
|
||||
|
||||
local uefi_dev="/dev/mapper${loop_device///dev/}p15"
|
||||
if [ -e "$mountpoint/boot/efi" -a -b "$uefi_dev" ]; then
|
||||
# zero fill free space in UEFI partition
|
||||
cat < /dev/zero > "$mountpoint/boot/efi/bloat_file" 2> /dev/null || true
|
||||
rm "$mountpoint/boot/efi/bloat_file"
|
||||
umount --detach-loop "$mountpoint/boot/efi"
|
||||
fi
|
||||
|
||||
if [ -e $mountpoint/usr/sbin/policy-rc.d ]; then
|
||||
rm $mountpoint/usr/sbin/policy-rc.d
|
||||
fi
|
||||
umount_partition $mountpoint
|
||||
clean_loops
|
||||
}
|
||||
|
||||
modify_vmdk_header() {
|
||||
# Modify the VMDK headers so that both VirtualBox _and_ VMware can
|
||||
# read the vmdk and import them. The vodoo here is _not_ documented
|
||||
# anywhere....so this will have to do. This is undocumented vodoo
|
||||
# that has been learned by the Cloud Image team.
|
||||
|
||||
vmdk_name="${1}"
|
||||
descriptor=$(mktemp)
|
||||
newdescriptor=$(mktemp)
|
||||
|
||||
# Extract the vmdk header for manipulation
|
||||
dd if="${vmdk_name}" of="${descriptor}" bs=1 skip=512 count=1024
|
||||
|
||||
# The sed lines below is where the magic is. Specifically:
|
||||
# ddb.toolsVersion: sets the open-vm-tools so that VMware shows
|
||||
# the tooling as current
|
||||
# ddb.virtualHWVersion: set the version to 7, which covers most
|
||||
# current versions of VMware
|
||||
# createType: make sure its set to stream Optimized
|
||||
# remove the vmdk-stream-converter comment and replace with
|
||||
# # Disk DescriptorFile. This is needed for Virtualbox
|
||||
# remove the comments from vmdk-stream-converter which causes
|
||||
# VirtualBox and others to fail VMDK validation
|
||||
|
||||
sed -e 's|# Description file.*|# Disk DescriptorFile|' \
|
||||
-e '/# Believe this is random*/d' \
|
||||
-e '/# Indicates no parent/d' \
|
||||
-e '/# The Disk Data Base/d' \
|
||||
-e 's|ddb.comment.*|ddb.toolsVersion = "2147483647"|' \
|
||||
"${descriptor}" > "${newdescriptor}"
|
||||
|
||||
# The header is cannot be bigger than 1024
|
||||
expr $(stat --format=%s ${newdescriptor}) \< 1024 > /dev/null 2>&1 || {
|
||||
echo "descriptor is too large, VMDK will be invalid!"; exit 1; }
|
||||
|
||||
# Overwrite the vmdk header with our new, modified one
|
||||
dd conv=notrunc,nocreat \
|
||||
if="${newdescriptor}" of="${vmdk_name}" \
|
||||
bs=1 seek=512 count=1024
|
||||
|
||||
rm ${descriptor} ${newdescriptor}
|
||||
}
|
||||
|
||||
create_vmdk() {
|
||||
# There is no real good way to create a _compressed_ VMDK using open source
|
||||
# tooling that works across multiple VMDK-capable platforms. This functions
|
||||
# uses vmdk-stream-converter and then calls modify_vmdk_header to produce a
|
||||
# compatible VMDK.
|
||||
|
||||
src="$1"
|
||||
destination="$2"
|
||||
size="${3:-10240}"
|
||||
|
||||
streamconverter="/usr/share/pyshared/VMDKstream.py"
|
||||
scratch_d=$(mktemp -d)
|
||||
cp ${src} ${scratch_d}/resize.img
|
||||
|
||||
truncate --size=${size}M ${scratch_d}/resize.img
|
||||
python ${streamconverter} ${scratch_d}/resize.img ${destination}
|
||||
modify_vmdk_header ${destination}
|
||||
|
||||
qemu-img info ${destination}
|
||||
rm -rf ${scratch_d}
|
||||
}
|
||||
|
||||
create_derivative() {
    # Copy the base disk image to a derivative image so it can be
    # modified independently.
    # arg1 is the disk type ("uefi" selects the UEFI base image;
    #      anything else selects the BIOS image)
    # arg2 is the new name component inserted into the file name
    # On success, exports derivative_img with the new image's path.
    unset derivative_img
    case ${1} in
        uefi)
            disk_image="binary/boot/disk-uefi.ext4"
            dname="${disk_image//-uefi/-$2-uefi}";;
        *)
            disk_image="binary/boot/disk.ext4"
            dname="${disk_image//.ext4/-$2.ext4}";;
    esac

    # Quote paths in tests and commands for robustness.
    if [ ! -e "${disk_image}" ]; then
        echo "Did not find ${disk_image}!"; exit 1;
    fi

    cp "${disk_image}" "${dname}"
    export derivative_img=${dname}
}
|
||||
|
||||
convert_to_qcow2() {
    # Convert source image $1 into a compressed qcow2 image at $2.
    # -c compresses; compat=0.10 keeps the output readable by older
    # QEMU releases.  qemu-img info is run purely for the build logs.

    src="$1"
    destination="$2"
    qemu-img convert -c -O qcow2 -o compat=0.10 "$src" "$destination"
    qemu-img info "$destination"
}
|
||||
|
||||
replace_grub_root_with_label() {
    # When update-grub is run, it will detect the disks in the build system.
    # Instead, we want grub to use the right labelled disk, so rewrite any
    # root= kernel argument in the generated grub.cfg to root=LABEL=...
    # $1: chroot root directory containing boot/grub/grub.cfg
    # Uses: ${fs_label} — the filesystem label to boot from.
    CHROOT_ROOT="$1"

    # If boot by partuuid has been requested, don't override.
    # (Paths are quoted so a CHROOT_ROOT containing spaces cannot break
    # the test or the grep.)
    if [ -f "$CHROOT_ROOT/etc/default/grub.d/40-force-partuuid.cfg" ] && \
        grep -q ^GRUB_FORCE_PARTUUID= "$CHROOT_ROOT/etc/default/grub.d/40-force-partuuid.cfg"
    then
        return 0
    fi
    sed -i -e "s,root=[^ ]*,root=LABEL=${fs_label}," \
        "$CHROOT_ROOT/boot/grub/grub.cfg"
}
|
||||
|
||||
|
||||
# When running update-grub in a chroot on a build host, we don't want it to
|
||||
# probe for disks or probe for other installed OSes. Extract common
|
||||
# diversion wrappers, so this isn't reinvented differently for each image.
|
||||
divert_grub() {
    # Neutralize grub's host probing inside the chroot at $1.
    # Must be paired with a later undivert_grub call on the same root.
    CHROOT_ROOT="$1"

    # Divert grub-probe and replace it with an empty executable so
    # update-grub cannot probe the build host's disks.
    chroot "$CHROOT_ROOT" dpkg-divert --local \
        --rename /usr/sbin/grub-probe
    chroot "$CHROOT_ROOT" touch /usr/sbin/grub-probe
    chroot "$CHROOT_ROOT" chmod +x /usr/sbin/grub-probe

    # Divert the os-prober grub hook out of the way entirely so no
    # foreign-OS entries from the build host land in the image's grub.cfg.
    chroot "$CHROOT_ROOT" dpkg-divert --local \
        --divert /etc/grub.d/30_os-prober.dpkg-divert \
        --rename /etc/grub.d/30_os-prober

    # Divert systemd-detect-virt; /etc/kernel/postinst.d/zz-update-grub
    # no-ops if we are in a container, and the launchpad farm runs builds
    # in lxd. We therefore pretend that we're never in a container (by
    # exiting 1).
    chroot "$CHROOT_ROOT" dpkg-divert --local \
        --rename /usr/bin/systemd-detect-virt
    echo "exit 1" > "$CHROOT_ROOT"/usr/bin/systemd-detect-virt
    chmod +x "$CHROOT_ROOT"/usr/bin/systemd-detect-virt
}
|
||||
|
||||
undivert_grub() {
    # Undo the diversions installed by divert_grub on the chroot at $1,
    # restoring the real grub-probe, os-prober hook and systemd-detect-virt.
    CHROOT_ROOT="$1"

    # Remove our stub first, otherwise dpkg-divert cannot rename the
    # diverted original back into place.
    chroot "$CHROOT_ROOT" rm /usr/sbin/grub-probe
    chroot "$CHROOT_ROOT" dpkg-divert --remove --local \
        --rename /usr/sbin/grub-probe

    chroot "$CHROOT_ROOT" dpkg-divert --remove --local \
        --divert /etc/grub.d/30_os-prober.dpkg-divert \
        --rename /etc/grub.d/30_os-prober

    # Same stub-then-undivert dance for systemd-detect-virt.
    rm "$CHROOT_ROOT"/usr/bin/systemd-detect-virt
    chroot "$CHROOT_ROOT" dpkg-divert --remove --local \
        --rename /usr/bin/systemd-detect-virt
}
|
||||
|
||||
release_ver() {
    # Print the numeric release version (e.g. "18.04") of the series
    # being built.  distro-info -r may emit "18.04 LTS"; keep only the
    # first space-separated field.
    distro-info --series="$LB_DISTRIBUTION" -r | cut -d' ' -f1
}
|
||||
|
||||
_snap_preseed() {
    # Download the snap/assertion and add to the preseed
    # $1: chroot root
    # $2: snap spec — "<name>" or "<name>/classic" for classic confinement
    # $3: store channel to download from (required)
    local CHROOT_ROOT=$1
    local SNAP=$2
    # Strip an optional "/classic" suffix to get the bare snap name.
    local SNAP_NAME=${SNAP%/*}
    local CHANNEL=${3:?Snap channel must be specified}

    local seed_dir="$CHROOT_ROOT/var/lib/snapd/seed"
    local snaps_dir="$seed_dir/snaps"
    local seed_yaml="$seed_dir/seed.yaml"
    local assertions_dir="$seed_dir/assertions"

    # Download the snap & assertion.  This runs inside the chroot so the
    # downloaded snap matches the target series; SNAPPY_STORE_NO_CDN
    # avoids stale CDN content on the build farm.
    local snap_download_failed=0
    chroot $CHROOT_ROOT sh -c "
        set -x;
        cd /var/lib/snapd/seed;
        SNAPPY_STORE_NO_CDN=1 snap download \
            --channel=$CHANNEL \"$SNAP_NAME\"" || snap_download_failed=1
    if [ $snap_download_failed = 1 ] ; then
        echo "If the channel ($CHANNEL) includes '*/ubuntu-##.##' track per "
        echo "Ubuntu policy (ex. stable/ubuntu-18.04) the publisher will need "
        echo "to temporarily create the channel/track to allow fallback during"
        echo "download (ex. stable/ubuntu-18.04 falls back to stable if the"
        echo "prior had been created in the past)."
        exit 1
    fi

    # Sort the downloaded artifacts into the layout snapd's seed expects.
    mv -v $seed_dir/*.assert $assertions_dir
    mv -v $seed_dir/*.snap $snaps_dir

    # Add the snap to the seed.yaml (creating the top-level "snaps:" key
    # on first use).  The indentation below is significant YAML.
    ! [ -e $seed_yaml ] && echo "snaps:" > $seed_yaml
    cat <<EOF >> $seed_yaml
 -
   name: ${SNAP_NAME}
   channel: ${CHANNEL}
EOF

    # A "<name>/classic" spec marks the seed entry as classic confinement.
    case ${SNAP} in */classic) echo "   classic: true" >> $seed_yaml;; esac

    # Record the downloaded snap's file name (includes the revision).
    echo -n "   file: " >> $seed_yaml
    (cd $snaps_dir; ls -1 ${SNAP_NAME}_*.snap) >> $seed_yaml
}
|
||||
|
||||
snap_prepare_assertions() {
    # Configure basic snapd assertions
    # $1: chroot root
    # $2: a colon-separated string of brand:model to be used for the
    #     image's model assertion
    local CHROOT_ROOT=$1
    local CUSTOM_BRAND_MODEL=$2

    local seed_dir="$CHROOT_ROOT/var/lib/snapd/seed"
    local snaps_dir="$seed_dir/snaps"
    local assertions_dir="$seed_dir/assertions"
    local model_assertion="$assertions_dir/model"
    local account_key_assertion="$assertions_dir/account-key"
    local account_assertion="$assertions_dir/account"

    mkdir -p "$assertions_dir"
    mkdir -p "$snaps_dir"

    local brand="$(echo $CUSTOM_BRAND_MODEL | cut -d: -f 1)"
    local model="$(echo $CUSTOM_BRAND_MODEL | cut -d: -f 2)"

    # Each assertion is fetched from the store only once (the -e checks
    # make reruns idempotent), and each later assertion is looked up via
    # an id extracted from the previous one.
    if ! [ -e "$model_assertion" ] ; then
        snap known --remote model series=16 \
            model=$model brand-id=$brand \
            > "$model_assertion"
    fi

    if ! [ -e "$account_key_assertion" ] ; then
        # The model assertion names the key that signed it.
        local account_key=$(sed -n -e's/sign-key-sha3-384: //p' \
            < "$model_assertion")
        snap known --remote account-key \
            public-key-sha3-384="$account_key" \
            > "$account_key_assertion"
    fi


    if ! [ -e "$account_assertion" ] ; then
        # The account-key assertion names the owning account.
        local account=$(sed -n -e's/account-id: //p' < "$account_key_assertion")
        snap known --remote account account-id=$account \
            > "$account_assertion"
    fi
}
|
||||
|
||||
snap_prepare() {
    # Configure basic snapd assertions and pre-seeds the 'core' snap
    # $1: chroot root
    # $2: Optional. If set, should be a colon-separated string of
    #     brand:model to be used for the image's model assertion
    #     (defaults to the generic classic model).
    local CHROOT_ROOT=$1
    local CUSTOM_BRAND_MODEL=${2:-generic:generic-classic}

    local seed_dir="$CHROOT_ROOT/var/lib/snapd/seed"
    local snaps_dir="$seed_dir/snaps"

    snap_prepare_assertions "$CHROOT_ROOT" "$CUSTOM_BRAND_MODEL"

    # Download the core snap unless some revision is already seeded
    # (the glob matches core_<revision>.snap).
    if ! [ -f $snaps_dir/core_[0-9]*.snap ] ; then
        _snap_preseed $CHROOT_ROOT core stable
    fi
}
|
||||
|
||||
snap_preseed() {
    # Preseed a snap in the image
    # $1: chroot root
    # $2: snap name, optionally suffixed "/classic"
    # $3: optional channel override.  Per Ubuntu policy, all seeded snaps
    #     (with the exception of the core snap) must pull from
    #     stable/ubuntu-$(release_ver) as their channel.
    local CHROOT_ROOT=$1
    local SNAP=$2
    local CHANNEL=${3:-"stable/ubuntu-$(release_ver)"}

    # Make sure assertions and the core snap are in place first.
    snap_prepare $CHROOT_ROOT
    _snap_preseed $CHROOT_ROOT $SNAP $CHANNEL
}
|
49
live-build/make-lxd-metadata.py
Executable file
49
live-build/make-lxd-metadata.py
Executable file
@ -0,0 +1,49 @@
|
||||
#! /usr/bin/python3
|
||||
|
||||
"""Make a metadata.yaml file for a LXD image."""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import sys
|
||||
import time
|
||||
|
||||
|
||||
# Map dpkg architecture names to the LXD architecture names expected in
# a LXD image's metadata.yaml "architecture" field.
lxd_arches = {
    "amd64": "x86_64",
    "arm64": "aarch64",
    "armhf": "armv7l",
    "i386": "i686",
    "powerpc": "ppc",
    "ppc64el": "ppc64le",
    "s390x": "s390x",
}
|
||||
|
||||
|
||||
def main():
    """Write LXD image metadata for the given series/architecture to stdout."""
    parser = argparse.ArgumentParser()
    parser.add_argument("series", help="Ubuntu series name")
    parser.add_argument("architecture", help="Ubuntu architecture name")
    args = parser.parse_args()

    # Fail with a clear usage error instead of a KeyError traceback when
    # the dpkg architecture has no LXD equivalent.
    try:
        lxd_architecture = lxd_arches[args.architecture]
    except KeyError:
        parser.error("unknown architecture: %s" % args.architecture)

    metadata = {
        "architecture": lxd_architecture,
        "creation_date": int(time.time()),
        "properties": {
            "os": "Ubuntu",
            "series": args.series,
            "architecture": args.architecture,
            "description": "Ubuntu buildd %s %s" % (
                args.series, args.architecture),
        },
    }

    # Encoding this as JSON is good enough, and saves pulling in a YAML
    # library dependency.
    json.dump(
        metadata, sys.stdout, sort_keys=True, indent=4, separators=(",", ": "),
        ensure_ascii=False)


if __name__ == "__main__":
    main()
|
68
live-build/snap-seed-parse.py
Executable file
68
live-build/snap-seed-parse.py
Executable file
@ -0,0 +1,68 @@
|
||||
#!/usr/bin/python3
|
||||
|
||||
"""
|
||||
Usage: snap-seed-parse [${chroot_dir}] <output file>
|
||||
|
||||
This script looks for a seed.yaml path in the given root directory, parsing
|
||||
it and appending the parsed lines to the given output file.
|
||||
|
||||
The $chroot_dir argument is optional and will default to the empty string.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import os.path
|
||||
import re
|
||||
import yaml
|
||||
|
||||
|
||||
def log(msg):
    """Print *msg* on stdout, prefixed with this script's name."""
    print("snap-seed-parse: " + str(msg))
|
||||
|
||||
|
||||
log("Parsing seed.yaml")

parser = argparse.ArgumentParser()
parser.add_argument('chroot', nargs='?', default='',
                    help='root dir for the chroot from which to generate the '
                         'manifest')
parser.add_argument('file', help='Output manifest to this file')

ARGS = parser.parse_args()
CHROOT_ROOT = ARGS.chroot
FNAME = ARGS.file

# Trim any trailing slashes for correct appending
log("CHROOT_ROOT: {}".format(CHROOT_ROOT))
if len(CHROOT_ROOT) > 0 and CHROOT_ROOT[-1] == '/':
    CHROOT_ROOT = CHROOT_ROOT[:-1]

# This is where we expect to find the seed.yaml file
YAML_PATH = CHROOT_ROOT + '/var/lib/snapd/seed/seed.yaml'

# Snaps are prepended with this string in the manifest
LINE_PREFIX = 'snap:'

log("yaml path: {}".format(YAML_PATH))
if not os.path.isfile(YAML_PATH):
    # A chroot with no seeded snaps is a valid build; emit nothing
    # rather than failing.
    log("WARNING: yaml path not found; no seeded snaps found.")
    exit(0)
else:
    log("yaml path found.")

with open(YAML_PATH, 'r') as fh:
    yaml_lines = yaml.safe_load(fh)['snaps']

log('Writing manifest to {}'.format(FNAME))

# Append one "snap:<name>\t<channel>\t<revision>" line per seeded snap.
with open(FNAME, 'a+') as fh:
    for item in yaml_lines:
        filestring = item['file']
        # Pull the revision number off the file name
        # (e.g. "core_1234.snap" -> "1234"); strip any non-digits.
        revision = filestring[filestring.rindex('_')+1:]
        revision = re.sub(r'[^0-9]', '', revision)
        fh.write("{}{}\t{}\t{}\n".format(LINE_PREFIX,
                                         item['name'],
                                         item['channel'],
                                         revision,
                                         ))
log('Manifest output finished.')
|
68
live-build/ubuntu-cpc/README.cpc.md
Normal file
68
live-build/ubuntu-cpc/README.cpc.md
Normal file
@ -0,0 +1,68 @@
|
||||
# TL;DR
|
||||
|
||||
In order to generate the hooks for a specific image target set, call the
|
||||
`make-hooks` script, located in `hooks.d` as
|
||||
|
||||
./make-hooks --hooks-dir ../hooks <image_set>
|
||||
|
||||
where `image_set` is the name of a series file (e.g. "vagrant") without leading
|
||||
path components. Do *not* check in the `hooks` folder, it is automatically
|
||||
generated by `auto/config` during Live Build runs.
|
||||
|
||||
|
||||
# Hook placement and ordering
|
||||
|
||||
Scripts live in subfolders below the `hooks.d` folder. Currently the folders
|
||||
`chroot` and `base` exist. The folder with the name `extra` is reserved for
|
||||
private scripts, which are not included in the source of livecd-rootfs. The
|
||||
scripts are not numbered, instead the order of their execution depends on the
|
||||
order in which they are listed in a *series* file.
|
||||
|
||||
Series files are placed in subfolders `hooks.d/base/series` or
|
||||
`hooks.d/extra/series`. Each series file contains a list of scripts to be
|
||||
executed. Empty lines and lines starting with a `#` are ignored.
|
||||
|
||||
Series files in `extra/series` override files in `base/series` with the same
|
||||
name. For example, if a series file `base/series/cloudA` exists and a series
|
||||
file `extra/series/cloudA`, then the latter will be preferred.
|
||||
|
||||
A series file in `extra/series` may also list scripts that are located in the
|
||||
`chroot` and `base` folders. In addition, series files can *depend* on other
|
||||
series files. For example, the series files for most custom images look similar
|
||||
to this:
|
||||
|
||||
depends disk-image
|
||||
depends extra-settings
|
||||
extra/cloudB.binary
|
||||
|
||||
Where `disk-image` and `extra-settings` may list scripts and dependencies which
|
||||
are to be processed before the script `extra/cloudB.binary` is called.
|
||||
|
||||
ACHTUNG: live build runs scripts with the suffix ".chroot" in a batch separate
|
||||
from scripts ending in ".binary". Even if you arrange them interleaved in your
|
||||
series files, the chroot scripts will be run before the binary scripts.
|
||||
|
||||
# Image set selection for Live Build
|
||||
|
||||
During a Live Build, enumerated symbolic links are generated based on the
|
||||
contents of one or more series files. The series files are selected according
|
||||
to the contents of the `IMAGE_TARGETS` environment variable. For example, in
|
||||
order to trigger the build of `squashfs` and `vagrant`, list them in the
|
||||
`IMAGE_TARGETS` variable as `squashfs,vagrant`. The separator can be a comma,
|
||||
a semi-colon or whitespace.
|
||||
|
||||
The generation of the symbolic links is triggered from the `auto/config` script,
|
||||
from where the contents of the `IMAGE_TARGETS` environment variable are passed
|
||||
on to the `make-hooks` script.
|
||||
|
||||
|
||||
# Symlink generation
|
||||
|
||||
Since Live Build itself does not know about series files, a traditional `hooks`
|
||||
folder is generated using the `make-hooks` script. The script takes as arguments
|
||||
the names of the series files to be processed.
|
||||
|
||||
The script parses the series files and generates enumerated symbolic links for
|
||||
all entries. By default, these are placed into a directory named `hooks` next
|
||||
to the `hooks.d` directory. This can be changed using the `--hooks-dir`
|
||||
parameter.
|
@ -1,252 +0,0 @@
|
||||
# vi: ts=4 expandtab syntax=sh
|
||||
|
||||
CLOUD_IMG_STR="# CLOUD_IMG: This file was created/modified by the Cloud Image build process"
|
||||
IMAGE_SIZE=$((2252*1024**2)) # 2.2G (the current size we ship)
|
||||
|
||||
rootfs_dev_mapper=
|
||||
loop_device=
|
||||
loop_raw=
|
||||
backing_img=
|
||||
|
||||
apt-get -qqy install dosfstools gdisk
|
||||
|
||||
clean_loops() {
    # Tear down the kpartx partition mappings created by mount_image and
    # reset the associated state variables.  Installed as an EXIT trap.

    if [ -n "${backing_img}" ]; then
        kpartx -v -d "${backing_img}"
        unset backing_img
    fi

    # Nothing else to clean if mount_image never found the rootfs.
    if [ -z "${rootfs_dev_mapper}" ]; then
        return 0
    fi

    unset loop_device
    unset loop_raw
    unset rootfs_dev_mapper
}
|
||||
|
||||
create_empty_disk_image() {
    # Prepare an empty disk image of ${IMAGE_SIZE} bytes at path $1.
    # count=0 with seek creates a sparse file: no data blocks allocated.
    dd if=/dev/zero of="$1" bs=1 count=0 seek="${IMAGE_SIZE}"
}
|
||||
|
||||
make_ext4_partition() {
    # Format device $1 as the cloudimg-rootfs ext4 filesystem.
    # -m 0: no reserved blocks; -i 8192: denser inode allocation;
    # -E resize=536870912: reserve metadata so the filesystem can be
    # grown substantially at first boot (value is in blocks — TODO
    # confirm the intended maximum size).
    device="$1"

    mkfs.ext4 -F -b 4096 -i 8192 -m 0 -L cloudimg-rootfs -E resize=536870912 "$device"
}
|
||||
|
||||
mount_image() {
    # Map the partitions of disk image $1 via kpartx and locate the root
    # filesystem's device node.
    # $1: disk image path; $2: partition number of the root filesystem.
    # Sets globals: backing_img, loop_device, rootfs_dev_mapper.
    # Cleanup is registered via the clean_loops EXIT trap.
    apt-get install -qqy kpartx
    trap clean_loops EXIT
    backing_img="$1"
    local rootpart="$2"
    # -s waits for udev so the /dev/mapper nodes exist when kpartx returns.
    kpartx_mapping="$(kpartx -s -v -a ${backing_img})"

    # Find the loop device ("add map loopXpY ..." — field 3 is the map name).
    loop_p1="$(echo -e ${kpartx_mapping} | head -n1 | awk '{print$3}')"
    loop_device="/dev/loop$(echo ${loop_p1} | cut -b5)"
    if [ ! -b ${loop_device} ]; then
        echo "unable to find loop device for ${backing_img}"
        exit 1
    fi

    # Find the rootfs location: strip the partition digit from the first
    # mapping and append the requested root partition number.
    rootfs_dev_mapper="/dev/mapper/${loop_p1%%[0-9]}${rootpart}"
    if [ ! -b "${rootfs_dev_mapper}" ]; then
        echo "${rootfs_dev_mapper} is not a block device";
        exit 1
    fi

    # Add some information to the debug logs
    echo "Mounted disk image ${backing_img} to ${rootfs_dev_mapper}"
    blkid ${rootfs_dev_mapper}

    return 0
}
|
||||
|
||||
setup_mountpoint() {
    # Prepare directory $1 for use as a chroot: bind the host's /dev,
    # mount the kernel virtual filesystems, a tmpfs on /tmp, and give the
    # chroot the build host's resolv.conf.  The original resolv.conf is
    # saved as resolv.conf.tmp in the CWD and restored by umount_partition.
    local mountpoint="$1"

    mount --bind /dev "$mountpoint/dev"
    # Fix: /dev/pts must be mounted as devpts, not proc — the wrong
    # filesystem type left the chroot without usable pseudo-terminals.
    mount devpts-live -t devpts "$mountpoint/dev/pts"
    mount proc-live -t proc "$mountpoint/proc"
    mount sysfs-live -t sysfs "$mountpoint/sys"
    mount -t tmpfs none "$mountpoint/tmp"
    mv "$mountpoint/etc/resolv.conf" resolv.conf.tmp
    cp /etc/resolv.conf "$mountpoint/etc/resolv.conf"

}
|
||||
|
||||
mount_partition() {
    # Mount partition device $1 at directory $2, then prepare the mount
    # for use as a chroot (see setup_mountpoint).
    partition="$1"
    mountpoint="$2"

    mount "$partition" "$mountpoint"
    setup_mountpoint "$mountpoint"
}
|
||||
|
||||
|
||||
mount_disk_image() {
    # Mount partition 1 of disk image $1 at $2, plus the EFI system
    # partition if present, and install a policy-rc.d so that package
    # operations inside the chroot cannot start services.
    local disk_image=${1}
    local mountpoint=${2}
    mount_image ${disk_image} 1
    mount_partition "${rootfs_dev_mapper}" $mountpoint

    # Partition 15 is the EFI system partition on these images — mount it
    # only when both the device and the chroot's /boot/efi exist.
    local uefi_dev="/dev/mapper${loop_device///dev/}p15"
    if [ -b ${uefi_dev} -a -e $mountpoint/boot/efi ]; then
        mount "${uefi_dev}" $mountpoint/boot/efi
    fi

    # This is needed to allow for certain operations
    # such as updating grub and installing software
    cat > $mountpoint/usr/sbin/policy-rc.d << EOF
#!/bin/sh
# ${CLOUD_IMG_STR}
echo "All runlevel operations denied by policy" >&2
exit 101
EOF
    chmod 0755 $mountpoint/usr/sbin/policy-rc.d

}
|
||||
|
||||
umount_settle() {
    # Unmount $1, then let udev settle; the extra sleep gives the kernel
    # time to fully release the device before it is reused.
    umount $1
    udevadm settle
    sleep 3
}
|
||||
|
||||
umount_partition() {
    # Restore the chroot's original resolv.conf, unmount all the virtual
    # filesystems below $1 and then $1 itself, and finally zero the free
    # space on the rootfs so the image compresses well.
    local mountpoint=${1}
    mv resolv.conf.tmp "$mountpoint/etc/resolv.conf"
    # Order matters: dev/pts must come off before dev.
    for submnt in proc sys dev/pts dev tmp;
    do
        umount_settle $mountpoint/$submnt
    done
    umount_settle $mountpoint

    if [ -n "${rootfs_dev_mapper}" -a -b "${rootfs_dev_mapper}" ]; then
        # buildd's don't have /etc/mtab symlinked
        # /etc/mtab is needed in order zerofree space for ext4 filesystems
        [ -e /etc/mtab ] || ln -s /proc/mounts /etc/mtab

        # both of these are likely overkill, but it does result in slightly
        # smaller ext4 filesystem
        apt-get -qqy install zerofree
        e2fsck -y -E discard ${rootfs_dev_mapper}
        zerofree ${rootfs_dev_mapper}
    fi
}
|
||||
|
||||
umount_disk_image() {
    # Reverse of mount_disk_image: unmount the EFI partition if mounted,
    # drop the temporary policy-rc.d, unmount the rootfs chroot and free
    # the loop/kpartx mappings.
    mountpoint="$1"

    local uefi_dev="/dev/mapper${loop_device///dev/}p15"
    if [ -e "$mountpoint/boot/efi" -a -b "$uefi_dev" ]; then
        umount --detach-loop "$mountpoint/boot/efi"
    fi

    if [ -e $mountpoint/usr/sbin/policy-rc.d ]; then
        rm $mountpoint/usr/sbin/policy-rc.d
    fi
    umount_partition $mountpoint
    clean_loops
}
|
||||
|
||||
modify_vmdk_header() {
    # Modify the VMDK headers so that both VirtualBox _and_ VMware can
    # read the vmdk and import them.  The voodoo here is _not_ documented
    # anywhere....so this will have to do.  This is undocumented voodoo
    # that has been learned by the Cloud Image team.
    # $1: path of the VMDK to fix up in place.

    vmdk_name="${1}"
    descriptor=$(mktemp)
    newdescriptor=$(mktemp)

    # Extract the vmdk header (descriptor, 1024 bytes at offset 512)
    # for manipulation.
    dd if="${vmdk_name}" of="${descriptor}" bs=1 skip=512 count=1024

    # The sed lines below are where the magic is. Specifically:
    #   ddb.toolsVersion: sets the open-vm-tools so that VMware shows
    #       the tooling as current
    #   ddb.virtualHWVersion: set the version to 7, which covers most
    #       current versions of VMware
    #   createType: make sure its set to stream Optimized
    #   remove the vmdk-stream-converter comment and replace with
    #       "# Disk DescriptorFile". This is needed for Virtualbox
    #   remove the comments from vmdk-stream-converter which causes
    #       VirtualBox and others to fail VMDK validation

    sed -e 's|# Description file.*|# Disk DescriptorFile|' \
        -e '/# Believe this is random*/d' \
        -e '/# Indicates no parent/d' \
        -e '/# The Disk Data Base/d' \
        -e 's|ddb.comment.*|ddb.toolsVersion = "2147483647"|' \
        "${descriptor}" > "${newdescriptor}"

    # The rewritten descriptor must still fit in the 1024-byte slot.
    expr $(stat --format=%s ${newdescriptor}) \< 1024 > /dev/null 2>&1 || {
        echo "descriptor is too large, VMDK will be invalid!"; exit 1; }

    # Overwrite the vmdk header with our new, modified one
    dd conv=notrunc,nocreat \
        if="${newdescriptor}" of="${vmdk_name}" \
        bs=1 seek=512 count=1024

    rm ${descriptor} ${newdescriptor}
}
|
||||
|
||||
create_vmdk() {
    # There is no real good way to create a _compressed_ VMDK using open source
    # tooling that works across multiple VMDK-capable platforms. This function
    # uses vmdk-stream-converter and then calls modify_vmdk_header to produce a
    # compatible VMDK.
    # $1: source raw image
    # $2: destination VMDK path
    # $3: target image size in MiB (optional; defaults to 10240)

    src="$1"
    destination="$2"
    size="${3:-10240}"

    apt-get install -qqy qemu-utils vmdk-stream-converter
    streamconverter="/usr/share/pyshared/VMDKstream.py"
    scratch_d=$(mktemp -d)
    cp ${src} ${scratch_d}/resize.img

    # Grow the scratch copy to the requested virtual size, then convert.
    truncate --size=${size}M ${scratch_d}/resize.img
    python ${streamconverter} ${scratch_d}/resize.img ${destination}
    modify_vmdk_header ${destination}

    # Log the result for the build logs, then clean up.
    qemu-img info ${destination}
    rm -rf ${scratch_d}
}
|
||||
|
||||
create_derivative() {
    # Copy the base disk image so a derivative can be modified in place.
    # arg1 is the disk type ("uefi" or anything else for the BIOS image)
    # arg2 is the new name component inserted into the file name
    # Exports derivative_img with the new image's path.
    unset derivative_img
    case ${1} in
        uefi) disk_image="binary/boot/disk-uefi.ext4";
              dname="${disk_image//-uefi/-$2-uefi}";;
        *) disk_image="binary/boot/disk.ext4";
           dname="${disk_image//.ext4/-$2.ext4}";;
    esac

    if [ ! -e ${disk_image} ]; then
        echo "Did not find ${disk_image}!"; exit 1;
    fi

    cp ${disk_image} ${dname}
    export derivative_img=${dname}
}
|
||||
|
||||
convert_to_qcow2() {
    # Convert source image $1 into a compressed qcow2 image at $2.
    # compat=0.10 keeps the output readable by older QEMU releases.
    apt-get install -qqy qemu-utils

    src="$1"
    destination="$2"
    qemu-img convert -c -O qcow2 -o compat=0.10 "$src" "$destination"
    qemu-img info "$destination"
}
|
||||
|
||||
|
@ -7,11 +7,15 @@ case $ARCH in
|
||||
;;
|
||||
esac
|
||||
|
||||
. /build/config/functions
|
||||
IMAGE_STR="# CLOUD_IMG: This file was created/modified by the Cloud Image build process"
|
||||
FS_LABEL="cloudimg-rootfs"
|
||||
|
||||
. config/binary
|
||||
|
||||
. config/functions
|
||||
|
||||
create_partitions() {
|
||||
disk_image="$1"
|
||||
apt-get install -qqy gdisk
|
||||
sgdisk "${disk_image}" \
|
||||
--zap-all
|
||||
sgdisk "${disk_image}" \
|
||||
@ -26,13 +30,14 @@ install_grub() {
|
||||
mkdir mountpoint
|
||||
mount_partition "${rootfs_dev_mapper}" mountpoint
|
||||
|
||||
chroot mountpoint apt-get -qqy update
|
||||
chroot mountpoint apt-get -qqy install grub2
|
||||
chroot mountpoint apt-get -qqy remove --purge grub-legacy-ec2
|
||||
|
||||
# set the kernel commandline to use hvc0
|
||||
mkdir -p mountpoint/etc/default/grub.d
|
||||
cat << EOF > mountpoint/etc/default/grub.d/50-cloudimg-settings.cfg
|
||||
${CLOUD_IMG_STR}
|
||||
${IMAGE_STR}
|
||||
|
||||
# Set the recordfail timeout
|
||||
GRUB_RECORDFAIL_TIMEOUT=0
|
||||
@ -49,10 +54,10 @@ EOF
|
||||
--boot-directory=/boot \
|
||||
--target=powerpc-ieee1275
|
||||
|
||||
chroot mountpoint dpkg-divert --local --rename /etc/grub.d/30_os-prober
|
||||
divert_grub mountpoint
|
||||
chroot mountpoint update-grub
|
||||
sed -i "s,root=.* ,root=LABEL=cloudimg-rootfs ,g" mountpoint/boot/grub/grub.cfg
|
||||
chroot mountpoint dpkg-divert --remove --local --rename /etc/grub.d/30_os-prober
|
||||
replace_grub_root_with_label mountpoint
|
||||
undivert_grub mountpoint
|
||||
|
||||
umount_partition mountpoint
|
||||
rmdir mountpoint
|
@ -9,9 +9,12 @@ case $ARCH in
|
||||
;;
|
||||
esac
|
||||
|
||||
. /build/config/functions
|
||||
IMAGE_STR="# CLOUD_IMG: This file was created/modified by the Cloud Image build process"
|
||||
FS_LABEL="cloudimg-rootfs"
|
||||
|
||||
apt-get -qqy install dosfstools gdisk
|
||||
. config/binary
|
||||
|
||||
. config/functions
|
||||
|
||||
create_partitions() {
|
||||
disk_image="$1"
|
||||
@ -61,27 +64,46 @@ install_grub() {
|
||||
efi_boot_dir="/boot/efi/EFI/BOOT"
|
||||
chroot mountpoint mkdir -p "${efi_boot_dir}"
|
||||
|
||||
if [ "${SUBPROJECT:-}" = minimized ] && [ -n "$partuuid" ]; then
|
||||
# FIXME: code duplicated between disk-image.binary
|
||||
# and disk-image-uefi.binary. We want to fix this to not
|
||||
# have initramfs-tools installed at all on these images.
|
||||
echo "partuuid found for root device; omitting initrd"
|
||||
echo "GRUB_FORCE_PARTUUID=$partuuid" >> mountpoint/etc/default/grub.d/40-force-partuuid.cfg
|
||||
fi
|
||||
|
||||
chroot mountpoint apt-get -y update
|
||||
|
||||
# The modules below only make sense on non-Secure Boot UEFI systems.
|
||||
# Otherwise, with Secure Boot enabled GRUB will refuse to load them.
|
||||
# Any modules already in debian/build-efi-images do not need to be listed.
|
||||
# Furthermore, other modules such as terminal, video_* and efi_* are all
|
||||
# already available.
|
||||
case $ARCH in
|
||||
arm64)
|
||||
chroot mountpoint apt-get -qqy install --no-install-recommends grub-efi-arm64 grub-efi-arm64-bin
|
||||
grub_modules="part_gpt fat gzio ext2 normal chain boot configfile linux search_fs_uuid search_label terminal serial video video_fb efi_gop"
|
||||
grub_modules="serial"
|
||||
efi_target=arm64-efi
|
||||
;;
|
||||
amd64)
|
||||
chroot mountpoint apt-get install -qqy grub-efi-amd64-signed grub-efi-amd64 shim-signed
|
||||
grub_modules="part_gpt fat ext2 normal chain boot configfile linux multiboot search_fs_uuid search_label terminal serial video video_fb video_bochs usb usb_keyboard efi_gop efi_uga"
|
||||
chroot mountpoint cp /usr/lib/shim/shim.efi.signed "${efi_boot_dir}/shimx64.efi"
|
||||
chroot mountpoint cp /usr/lib/shim/MokManager.efi.signed "${efi_boot_dir}/MokManager.efi"
|
||||
chroot mountpoint cp /usr/lib/grub/x86_64-efi-signed/grubx64.efi.signed "${efi_boot_dir}/grubx64.efi"
|
||||
grub_modules="multiboot serial usb usb_keyboard"
|
||||
efi_target=x86_64-efi
|
||||
;;
|
||||
esac
|
||||
|
||||
cat << EOF >> mountpoint/etc/default/grub.d/50-cloudimg-settings.cfg
|
||||
${CLOUD_IMG_STR}
|
||||
${IMAGE_STR}
|
||||
# For Cloud Image compatability
|
||||
GRUB_PRELOAD_MODULES="${grub_modules}"
|
||||
GRUB_PRELOAD_MODULES="${GRUB_PRELOAD_MODULES:-$grub_modules}"
|
||||
EOF
|
||||
|
||||
# This call to populate the package manifest is added here to capture
|
||||
# grub-efi packages that otherwise would not make it into the base
|
||||
# manifest. filesystem.packages is moved into place via symlinking to
|
||||
# livecd.ubuntu-cpc.manifest by live-build/auto/build after lb_binary runs
|
||||
create_manifest "mountpoint" "binary/boot/filesystem.packages"
|
||||
|
||||
chroot mountpoint grub-install "${loop_device}" \
|
||||
--boot-directory=/boot \
|
||||
--efi-directory=/boot/efi \
|
||||
@ -93,7 +115,7 @@ EOF
|
||||
|
||||
if [ -f mountpoint/boot/efi/EFI/BOOT/grub.cfg ]; then
|
||||
sed -i "s| root| root hd0,gpt1|" mountpoint/boot/efi/EFI/BOOT/grub.cfg
|
||||
sed -i "1i${CLOUD_IMG_STR}" mountpoint/boot/efi/EFI/BOOT/grub.cfg
|
||||
sed -i "1i${IMAGE_STR}" mountpoint/boot/efi/EFI/BOOT/grub.cfg
|
||||
# For some reason the grub disk is looking for /boot/grub/grub.cfg on
|
||||
# part 15....
|
||||
chroot mountpoint mkdir -p /boot/efi/boot/grub
|
||||
@ -106,19 +128,15 @@ EOF
|
||||
chroot mountpoint grub-install --target=i386-pc "${loop_device}"
|
||||
fi
|
||||
|
||||
chroot mountpoint dpkg-divert --local --rename /etc/grub.d/30_os-prober
|
||||
divert_grub mountpoint
|
||||
chroot mountpoint update-grub
|
||||
sed -i "s,root=.* ,root=LABEL=cloudimg-rootfs ,g" mountpoint/boot/grub/grub.cfg
|
||||
chroot mountpoint dpkg-divert --remove --local --rename /etc/grub.d/30_os-prober
|
||||
replace_grub_root_with_label mountpoint
|
||||
undivert_grub mountpoint
|
||||
|
||||
chroot mountpoint apt-get -y clean
|
||||
chroot mountpoint apt-get -y update
|
||||
|
||||
rm mountpoint/tmp/device.map
|
||||
sync
|
||||
umount mountpoint/boot/efi
|
||||
sleep 5
|
||||
udevadm settle
|
||||
mount
|
||||
umount_partition mountpoint
|
||||
rmdir mountpoint
|
||||
@ -130,6 +148,8 @@ create_empty_disk_image "${disk_image}"
|
||||
create_partitions "${disk_image}"
|
||||
mount_image "${disk_image}" 1
|
||||
|
||||
partuuid=$(blkid -s PARTUUID -o value "$rootfs_dev_mapper")
|
||||
|
||||
# Copy the chroot in to the disk
|
||||
make_ext4_partition "${rootfs_dev_mapper}"
|
||||
mkdir mountpoint
|
@ -1,12 +1,19 @@
|
||||
#!/bin/bash -ex
|
||||
|
||||
. /build/config/functions
|
||||
IMAGE_STR="# CLOUD_IMG: This file was created/modified by the Cloud Image build process"
|
||||
FS_LABEL="cloudimg-rootfs"
|
||||
|
||||
. config/binary
|
||||
|
||||
. config/functions
|
||||
|
||||
BOOTPART_START=
|
||||
BOOTPART_END=
|
||||
BOOT_MOUNTPOINT=
|
||||
ROOTPART_START=1
|
||||
|
||||
my_d=$(dirname $(readlink -f ${0}))
|
||||
|
||||
case $ARCH:$SUBARCH in
|
||||
ppc64el:*|powerpc:*)
|
||||
echo "POWER disk images are handled separately"
|
||||
@ -25,8 +32,6 @@ case $ARCH:$SUBARCH in
|
||||
esac
|
||||
|
||||
create_empty_partition_table() {
|
||||
apt-get install -qqy parted
|
||||
|
||||
parted "$1" --script -- mklabel msdos
|
||||
}
|
||||
|
||||
@ -64,6 +69,8 @@ create_empty_partition "${disk_image}" "$ROOTPART" "$ROOTPART_START" -1 ext2 "$R
|
||||
|
||||
mount_image "${disk_image}" "$ROOTPART"
|
||||
|
||||
partuuid=$(blkid -s PARTUUID -o value "$rootfs_dev_mapper")
|
||||
|
||||
# Copy the chroot in to the disk
|
||||
make_ext4_partition "${rootfs_dev_mapper}"
|
||||
mkdir mountpoint
|
||||
@ -88,7 +95,7 @@ case $ARCH:$SUBARCH in
|
||||
# not the best place for this, but neither flash-kernel nor
|
||||
# u-boot have provisions for installing u-boot via maintainer
|
||||
# script
|
||||
/build/config/hooks/raspi2/mkknlimg --dtok \
|
||||
${my_d}/raspi2/mkknlimg --dtok \
|
||||
mountpoint/usr/lib/u-boot/rpi_2/u-boot.bin \
|
||||
mountpoint/boot/firmware/uboot.bin
|
||||
;;
|
||||
@ -111,6 +118,15 @@ if [ "${should_install_grub}" -eq 1 ]; then
|
||||
${loop_device}
|
||||
|
||||
rm mountpoint/tmp/device.map
|
||||
|
||||
if [ "${SUBPROJECT:-}" = minimized ] && [ -n "$partuuid" ]; then
|
||||
echo "partuuid found for root device; forcing it in Grub"
|
||||
mkdir -p mountpoint/etc/default/grub.d
|
||||
echo "GRUB_FORCE_PARTUUID=$partuuid" >> mountpoint/etc/default/grub.d/40-force-partuuid.cfg
|
||||
divert_grub mountpoint
|
||||
chroot mountpoint update-grub
|
||||
undivert_grub mountpoint
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$ARCH" = "s390x" ]; then
|
@ -18,8 +18,9 @@
|
||||
<VirtualSystem ovf:id="@@NAME@@">
|
||||
<Info>A virtual machine</Info>
|
||||
<Name>@@NAME@@</Name>
|
||||
<OperatingSystemSection ovf:id="100" vmw:osType="other3xLinux64Guest">
|
||||
<OperatingSystemSection ovf:id="@@OVF_ID@@" vmw:osType="@@OVF_OS_TYPE@@">
|
||||
<Info>The kind of installed guest operating system</Info>
|
||||
<Description>Ubuntu Linux (@@OVF_DESC_BITS@@-bit)</Description>
|
||||
</OperatingSystemSection>
|
||||
|
||||
<ProductSection ovf:required="false">
|
@ -16,8 +16,9 @@
|
||||
<VirtualSystem ovf:id="@@NAME@@">
|
||||
<Info>A virtual machine</Info>
|
||||
<Name>@@NAME@@</Name>
|
||||
<OperatingSystemSection ovf:id="100" vmw:osType="other3xLinux64Guest">
|
||||
<OperatingSystemSection ovf:id="@@OVF_ID@@" vmw:osType="@@OVF_OS_TYPE@@">
|
||||
<Info>The kind of installed guest operating system</Info>
|
||||
<Description>Ubuntu Linux (@@OVF_DESC_BITS@@-bit)</Description>
|
||||
</OperatingSystemSection>
|
||||
|
||||
<ProductSection ovf:required="false">
|
@ -4,15 +4,13 @@ case $ARCH:$SUBARCH in
|
||||
# Not sure if any other cloud images use subarch for something that
|
||||
# should take qcow2 format, so only skipping this on raspi2 for now.
|
||||
armhf:raspi2)
|
||||
apt-get install -qqy pxz
|
||||
pxz -T4 -c binary/boot/disk.ext4 > livecd.ubuntu-cpc.disk1.img.xz
|
||||
xz -T4 -c binary/boot/disk.ext4 > livecd.ubuntu-cpc.disk1.img.xz
|
||||
exit 0
|
||||
;;
|
||||
esac
|
||||
|
||||
apt-get install -qqy qemu-utils
|
||||
|
||||
. /build/config/functions
|
||||
. config/functions
|
||||
|
||||
if [ -f binary/boot/disk.ext4 ]; then
|
||||
convert_to_qcow2 binary/boot/disk.ext4 livecd.ubuntu-cpc.disk1.img
|
@ -7,8 +7,6 @@ if [ -n "$SUBARCH" ]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
apt-get -qqy install squashfs-tools
|
||||
|
||||
squashfs_f="${PWD}/livecd.ubuntu-cpc.squashfs"
|
||||
squashfs_f_manifest="${squashfs_f}.manifest"
|
||||
|
@ -5,7 +5,7 @@ if [ -n "$SUBARCH" ]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
. /build/config/functions
|
||||
. config/functions
|
||||
|
||||
mkdir binary/boot/filesystem.dir
|
||||
cp -a chroot/* binary/boot/filesystem.dir
|
||||
@ -19,14 +19,9 @@ chroot binary/boot/filesystem.dir chmod +x /usr/sbin/grub-probe
|
||||
env DEBIAN_FRONTEND=noninteractive chroot binary/boot/filesystem.dir apt-get --purge remove --assume-yes '^linux-.*' 'linux-base+'
|
||||
env DEBIAN_FRONTEND=noninteractive chroot binary/boot/filesystem.dir apt-get --purge remove --assume-yes '^grub-.*'
|
||||
env DEBIAN_FRONTEND=noninteractive chroot binary/boot/filesystem.dir apt-get autoremove --purge --assume-yes
|
||||
chroot binary/boot/filesystem.dir mkdir /lib/modules
|
||||
chroot binary/boot/filesystem.dir mkdir -p /lib/modules
|
||||
|
||||
chroot binary/boot/filesystem.dir rm /usr/sbin/grub-probe
|
||||
chroot binary/boot/filesystem.dir dpkg-divert --remove --local --rename /usr/sbin/grub-probe
|
||||
|
||||
mv resolv.conf.tmp "binary/boot/filesystem.dir/etc/resolv.conf"
|
||||
umount "binary/boot/filesystem.dir/proc"
|
||||
umount "binary/boot/filesystem.dir/sys"
|
||||
umount "binary/boot/filesystem.dir/dev/pts"
|
||||
umount "binary/boot/filesystem.dir/dev"
|
||||
umount "binary/boot/filesystem.dir/tmp"
|
||||
teardown_mountpoint binary/boot/filesystem.dir
|
1
live-build/ubuntu-cpc/hooks.d/base/series/all
Symbolic link
1
live-build/ubuntu-cpc/hooks.d/base/series/all
Symbolic link
@ -0,0 +1 @@
|
||||
base
|
8
live-build/ubuntu-cpc/hooks.d/base/series/base
Normal file
8
live-build/ubuntu-cpc/hooks.d/base/series/base
Normal file
@ -0,0 +1,8 @@
|
||||
depends root-dir
|
||||
depends tarball
|
||||
depends squashfs
|
||||
depends disk-image
|
||||
depends qcow2
|
||||
depends vmdk
|
||||
depends vagrant
|
||||
depends wsl
|
12
live-build/ubuntu-cpc/hooks.d/base/series/disk-image
Normal file
12
live-build/ubuntu-cpc/hooks.d/base/series/disk-image
Normal file
@ -0,0 +1,12 @@
|
||||
base/disk-image.binary
|
||||
base/disk-image-uefi.binary
|
||||
base/disk-image-ppc64el.binary
|
||||
provides livecd.ubuntu-cpc.ext4
|
||||
provides livecd.ubuntu-cpc.initrd-generic
|
||||
provides livecd.ubuntu-cpc.initrd-generic-lpae
|
||||
provides livecd.ubuntu-cpc.initrd-powerpc64-smp
|
||||
provides livecd.ubuntu-cpc.kernel-generic
|
||||
provides livecd.ubuntu-cpc.kernel-generic-lpae
|
||||
provides livecd.ubuntu-cpc.kernel-powerpc64-smp
|
||||
provides livecd.ubuntu-cpc.kernel-kvm
|
||||
provides livecd.ubuntu-cpc.manifest
|
5
live-build/ubuntu-cpc/hooks.d/base/series/qcow2
Normal file
5
live-build/ubuntu-cpc/hooks.d/base/series/qcow2
Normal file
@ -0,0 +1,5 @@
|
||||
depends disk-image
|
||||
base/qcow2-image.binary
|
||||
provides livecd.ubuntu-cpc.disk1.img
|
||||
provides livecd.ubuntu-cpc.uefi1.img
|
||||
provides livecd.ubuntu-cpc.disk1.img.xz
|
1
live-build/ubuntu-cpc/hooks.d/base/series/root-dir
Normal file
1
live-build/ubuntu-cpc/hooks.d/base/series/root-dir
Normal file
@ -0,0 +1 @@
|
||||
base/root-tarball.binary
|
4
live-build/ubuntu-cpc/hooks.d/base/series/squashfs
Normal file
4
live-build/ubuntu-cpc/hooks.d/base/series/squashfs
Normal file
@ -0,0 +1,4 @@
|
||||
depends root-dir
|
||||
base/root-squashfs.binary
|
||||
provides livecd.ubuntu-cpc.squashfs
|
||||
provides livecd.ubuntu-cpc.squashfs.manifest
|
5
live-build/ubuntu-cpc/hooks.d/base/series/tarball
Normal file
5
live-build/ubuntu-cpc/hooks.d/base/series/tarball
Normal file
@ -0,0 +1,5 @@
|
||||
depends root-dir
|
||||
base/root-xz.binary
|
||||
provides livecd.ubuntu-cpc.rootfs.tar.gz
|
||||
provides livecd.ubuntu-cpc.rootfs.tar.xz
|
||||
provides livecd.ubuntu-cpc.rootfs.manifest
|
3
live-build/ubuntu-cpc/hooks.d/base/series/vagrant
Normal file
3
live-build/ubuntu-cpc/hooks.d/base/series/vagrant
Normal file
@ -0,0 +1,3 @@
|
||||
depends disk-image
|
||||
base/vagrant.binary
|
||||
provides livecd.ubuntu-cpc.vagrant.box
|
6
live-build/ubuntu-cpc/hooks.d/base/series/vmdk
Normal file
6
live-build/ubuntu-cpc/hooks.d/base/series/vmdk
Normal file
@ -0,0 +1,6 @@
|
||||
depends disk-image
|
||||
base/vmdk-image.binary
|
||||
base/vmdk-ova-image.binary
|
||||
provides livecd.ubuntu-cpc.disk1.vmdk
|
||||
provides livecd.ubuntu-cpc.uefi.vmdk
|
||||
provides livecd.ubuntu-cpc.ova
|
4
live-build/ubuntu-cpc/hooks.d/base/series/wsl
Normal file
4
live-build/ubuntu-cpc/hooks.d/base/series/wsl
Normal file
@ -0,0 +1,4 @@
|
||||
depends root-dir
|
||||
base/wsl-gz.binary
|
||||
provides livecd.ubuntu-cpc.wsl.rootfs.tar.gz
|
||||
provides livecd.ubuntu-cpc.wsl.rootfs.manifest
|
@ -1,43 +1,101 @@
|
||||
#!/bin/bash -eux
|
||||
#!/bin/bash -ex
|
||||
# vi: ts=4 noexpandtab
|
||||
#
|
||||
# Generate a generic Vagrant Box.
|
||||
#
|
||||
# Vagrant images are essentially nothing more than OVA's with extra-metadata.
|
||||
# Vagrant images are essentially nothing more than OVA's with extra-metadata
|
||||
# and some preinstalled packages.
|
||||
#
|
||||
# We can't use the OVA's for Vagrant since Vagrant uses SSH to modify the instance.
|
||||
# This build step creates a cloud-config ISO so that Cloud-Init will configure
|
||||
# the initial user, creates meta-data that tells Vagrant how to interact with
|
||||
# the cloud-init created users, and finally create the OVA.
|
||||
# We can't use the OVA's for Vagrant since Vagrant uses SSH to modify the
|
||||
# instance. This build step creates a cloud-config ISO so that Cloud-Init
|
||||
# will configure the initial user, creates meta-data that tells Vagrant how
|
||||
# to interact with the cloud-init created users, and finally create the OVA.
|
||||
#
|
||||
# For this step, we re-use the VMDK's made in 040-vmdk-image.binary
|
||||
# For this step, we make a deriviative of binary/boot/disk.ext4 and install
|
||||
# some packages in it, convert it to a vmdk, and then assemble the vagrant
|
||||
# box.
|
||||
|
||||
case ${SUBPROJECT:-} in
|
||||
minimized)
|
||||
echo "Skipping minimized $0 build as images won't boot with linux-kvm"
|
||||
exit 0
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
|
||||
cur_d=${PWD}
|
||||
my_d=$(dirname $(readlink -f ${0}))
|
||||
|
||||
base_vmdk="livecd.ubuntu-cpc.disk1.vmdk"
|
||||
|
||||
# Switch on $ARCH to determine which ID and description to use in the produced
|
||||
# OVF. We have fancy Ubuntu-specific IDs in the OVF specification, we might as
|
||||
# well use them.
|
||||
case $ARCH in
|
||||
amd64|i386) ;;
|
||||
*)
|
||||
echo "Vagrant images are not supported for $ARCH"
|
||||
exit 0
|
||||
amd64)
|
||||
ovf_id=94
|
||||
ovf_os_type="ubuntu64Guest"
|
||||
ovf_desc_bits=64 ;;
|
||||
i386)
|
||||
ovf_id=93
|
||||
ovf_os_type="ubuntu32Guest"
|
||||
ovf_desc_bits=32 ;;
|
||||
*)
|
||||
echo "Vagrant images are not supported for $ARCH yet."
|
||||
exit 0;;
|
||||
esac
|
||||
|
||||
if [ ! -e ${base_vmdk} ]; then
|
||||
echo "Did not find VMDK to produce Vagrant images."
|
||||
exit 0
|
||||
fi
|
||||
IMAGE_STR="# CLOUD_IMG: This file was created/modified by the Cloud Image build process"
|
||||
|
||||
. /build/config/functions
|
||||
|
||||
# Virtualbox is needed for making a small VMDK
|
||||
apt-get -qqy install genisoimage qemu-utils
|
||||
. config/functions
|
||||
|
||||
# Lets be safe about this
|
||||
box_d=$(mktemp -d)
|
||||
seed_d=$(mktemp -d)
|
||||
trap "rm -rf ${box_d} ${seed_d}" EXIT
|
||||
mount_d=$(mktemp -d)
|
||||
|
||||
create_derivative "disk" "vagrant" #sets ${derivative_img}
|
||||
mount_disk_image ${derivative_img} ${mount_d}
|
||||
|
||||
cleanup_vagrant() {
|
||||
if [ -d "$mount_d" ]; then
|
||||
umount_disk_image "$mount_d"
|
||||
fi
|
||||
rm -rf ${box_d} ${seed_d} ${mount_d} ${derivative_img}
|
||||
}
|
||||
trap cleanup_vagrant EXIT
|
||||
|
||||
chroot ${mount_d} apt-get update
|
||||
# virtualbox-guest-utils Recommends: virtualbox-guest-x11, which we want to
|
||||
# avoid pulling into a cloud image.
|
||||
chroot ${mount_d} apt-get install --no-install-recommends -y virtualbox-guest-utils
|
||||
chroot ${mount_d} apt-get clean
|
||||
|
||||
# Create and setup users inside the image.
|
||||
# Vagrant users expect a "vagrant" user with a "vagrant" username.
|
||||
# See https://www.vagrantup.com/docs/boxes/base.html
|
||||
# Note: We decided NOT to allow root login with a default password.
|
||||
# --disabled-password and --gecos need to be passed to avoid adduser
|
||||
# asking questions when running in an interactive shell
|
||||
chroot ${mount_d} adduser vagrant --disabled-password --gecos ""
|
||||
echo "vagrant:vagrant" | chroot ${mount_d} chpasswd
|
||||
|
||||
# The vagrant user should have passwordless sudo.
|
||||
cat << EOF > ${mount_d}/etc/sudoers.d/vagrant
|
||||
vagrant ALL=(ALL) NOPASSWD:ALL
|
||||
EOF
|
||||
|
||||
# Add the insecure vagrant pubkey to the vagrant user, as is expected by the
|
||||
# vagrant ecosystem (https://www.vagrantup.com/docs/boxes/base.html)
|
||||
chroot ${mount_d} chmod 0440 /etc/sudoers.d/vagrant
|
||||
chroot ${mount_d} mkdir -p /home/vagrant/.ssh
|
||||
cat << EOF > ${mount_d}/home/vagrant/.ssh/authorized_keys
|
||||
ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key
|
||||
EOF
|
||||
chroot ${mount_d} chown -R vagrant:vagrant /home/vagrant/.ssh
|
||||
chroot ${mount_d} chmod 700 /home/vagrant/.ssh
|
||||
|
||||
umount_disk_image "$mount_d"
|
||||
rmdir "$mount_d"
|
||||
|
||||
# Used to identify bits
|
||||
suite=$(chroot chroot lsb_release -c -s)
|
||||
@ -47,13 +105,7 @@ distro=$(chroot chroot lsb_release --id --short | tr [:upper:] [:lower:])
|
||||
# Get the VMDK in place
|
||||
prefix="${distro}-${suite}-${version}-cloudimg"
|
||||
vmdk_f="${box_d}/${prefix}.vmdk"
|
||||
cp ${base_vmdk} ${vmdk_f}
|
||||
|
||||
# Vagrant needs a base user. We either inject the well-known SSH key
|
||||
# or use password authentication. Both are ugly. So we'll use a password
|
||||
# and make it random. This obviously is insecure...but at least its
|
||||
# better than the alternatives.
|
||||
ubuntu_user_pass=$(openssl rand -hex 12)
|
||||
create_vmdk ${derivative_img} ${vmdk_f}
|
||||
|
||||
####################################
|
||||
# Create the ConfigDrive
|
||||
@ -67,9 +119,7 @@ cdrom_vmdk_f="${box_d}/${prefix}-configdrive.vmdk"
|
||||
# except via local host.
|
||||
cat > ${seed_d}/user-data <<END
|
||||
#cloud-config
|
||||
password: ${ubuntu_user_pass}
|
||||
chpasswd: { expire: False }
|
||||
ssh_pwauth: True
|
||||
manage_etc_hosts: localhost
|
||||
END
|
||||
|
||||
# Create the fake meta-data
|
||||
@ -109,17 +159,11 @@ load include_vagrantfile if File.exist?(include_vagrantfile)
|
||||
|
||||
Vagrant.configure("2") do |config|
|
||||
config.vm.base_mac = "${macaddr}"
|
||||
config.ssh.username = "ubuntu"
|
||||
config.ssh.password = "${ubuntu_user_pass}"
|
||||
config.vm.synced_folder '.', '/vagrant', disabled: true
|
||||
|
||||
config.vm.provider "virtualbox" do |vb|
|
||||
vb.name = "${prefix}"
|
||||
vb.customize [ "modifyvm", :id, "--uart1", "0x3F8", "4" ]
|
||||
vb.customize [ "modifyvm", :id, "--uartmode1", "file", File.join(Dir.pwd, "%s-console.log" % vb.name) ]
|
||||
vb.customize [ "modifyvm", :id, "--uartmode1", "file", File.join(Dir.pwd, "${prefix}-console.log") ]
|
||||
end
|
||||
|
||||
|
||||
end
|
||||
EOF
|
||||
|
||||
@ -151,16 +195,19 @@ cp ${my_d}/ovf/ubuntu-ova-v1-cloudcfg-vmdk.tmpl ${ovf}
|
||||
serial_stamp=$(date +%Y%m%d)
|
||||
sed -i "${ovf}" \
|
||||
-e "s/@@NAME@@/${prefix}-${serial_stamp}/g" \
|
||||
-e "s/@@FILENAME1@@/${vmdk_f##*/}/g" \
|
||||
-e "s/@@FILENAME1@@/${vmdk_f##*/}/g" \
|
||||
-e "s/@@VMDK_FILE_SIZE@@/${vmdk_size}/g" \
|
||||
-e "s/@@VMDK_CAPACITY@@/${vmdk_capacity}/g" \
|
||||
-e "s/@@FILENAME2@@/${cdrom_vmdk_f##*/}/g" \
|
||||
-e "s/@@FILENAME2@@/${cdrom_vmdk_f##*/}/g" \
|
||||
-e "s/@@VMDK_FILE_SIZE2@@/${cdrom_size}/g" \
|
||||
-e "s/@@VMDK_CAPACITY2@@/${cdrom_capacity}/g" \
|
||||
-e "s/@@NUM_CPUS@@/2/g" \
|
||||
-e "s/@@VERSION@@/${version}/g" \
|
||||
-e "s/@@DATE@@/${serial_stamp}/g" \
|
||||
-e "s/@@MEM_SIZE@@/1024/g"
|
||||
-e "s/@@MEM_SIZE@@/1024/g" \
|
||||
-e "s/@@OVF_ID@@/${ovf_id}/g" \
|
||||
-e "s/@@OVF_OS_TYPE@@/${ovf_os_type}/g" \
|
||||
-e "s/@@OVF_DESC_BITS@@/${ovf_desc_bits}/g"
|
||||
|
||||
ovf_sha256=$(sha256sum ${ovf} | cut -d' ' -f1)
|
||||
|
||||
@ -169,7 +216,7 @@ manifest="${box_d}/${prefix}.mf"
|
||||
cat > "${manifest}" <<EOF
|
||||
SHA256(${vmdk_f##*/})= ${vmdk_sha256}
|
||||
SHA256(${cdrom_vmdk_f##*/})= ${cdrom_sha256}
|
||||
SHA256(${ovf##*/}.ovf)= ${ovf_sha256}
|
||||
SHA256(${ovf##*/})= ${ovf_sha256}
|
||||
EOF
|
||||
|
||||
# Now create the box
|
||||
@ -181,16 +228,16 @@ OVA information:
|
||||
VMDK Name: ${vmdk_f##*/}
|
||||
VMDK Capacity: ${vmdk_capacity}
|
||||
VMDK SHA256: ${vmdk_sha256}
|
||||
CDROM Name: ${cdrom_vmdk_f##*/}
|
||||
CDROM Name: ${cdrom_vmdk_f##*/}
|
||||
CDROM Capacity: ${cdrom_capacity}
|
||||
CDROM SHA256: ${cdrom_sha256}
|
||||
CDROM SHA256: ${cdrom_sha256}
|
||||
EOM
|
||||
|
||||
tar -C ${box_d} \
|
||||
-cf ${cur_d}/livecd.ubuntu-cpc.vagrant.box \
|
||||
box.ovf \
|
||||
Vagrantfile \
|
||||
metadata.json \
|
||||
Vagrantfile \
|
||||
metadata.json \
|
||||
${prefix}.mf \
|
||||
${vmdk_f##*/} \
|
||||
${cdrom_vmdk_f##*/}
|
||||
${cdrom_vmdk_f##*/}
|
@ -3,6 +3,15 @@
|
||||
#
|
||||
# Generate VMDK files
|
||||
|
||||
case ${SUBPROJECT:-} in
|
||||
minimized)
|
||||
echo "Skipping minimized $0 build as images won't boot with linux-kvm"
|
||||
exit 0
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
|
||||
extension="disk1.vmdk"
|
||||
|
||||
case $ARCH in
|
||||
@ -11,7 +20,7 @@ case $ARCH in
|
||||
exit 0;;
|
||||
esac
|
||||
|
||||
. /build/config/functions
|
||||
. config/functions
|
||||
|
||||
if [ -e binary/boot/disk.ext4 ]; then
|
||||
create_vmdk binary/boot/disk.ext4 livecd.ubuntu-cpc.disk1.vmdk
|
@ -7,13 +7,34 @@
|
||||
# and checksums. This step produces an OVA that is suitable for use with
|
||||
# Cloud's that support the OVF specification.
|
||||
#
|
||||
# For this step, we re-use the VMDK's made in 040-vmdk-image.binary
|
||||
# For this step, we re-use the VMDK's made in vmdk-image.binary
|
||||
|
||||
case $ARCH in
|
||||
amd64|i386) ;;
|
||||
*) echo "OVA images are not supported for $ARCH yet.";
|
||||
exit 0;;
|
||||
case ${SUBPROJECT:-} in
|
||||
minimized)
|
||||
echo "Skipping minimized $0 build as images won't boot with linux-kvm"
|
||||
exit 0
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
|
||||
# Switch on $ARCH to determine which ID and description to use in the produced
|
||||
# OVF. We have fancy Ubuntu-specific IDs in the OVF specification, we might as
|
||||
# well use them.
|
||||
case $ARCH in
|
||||
amd64)
|
||||
ovf_id=94
|
||||
ovf_os_type="ubuntu64Guest"
|
||||
ovf_desc_bits=64 ;;
|
||||
i386)
|
||||
ovf_id=93
|
||||
ovf_os_type="ubuntu32Guest"
|
||||
ovf_desc_bits=32 ;;
|
||||
*)
|
||||
echo "OVA images are not supported for $ARCH yet.";
|
||||
exit 0;;
|
||||
esac
|
||||
|
||||
cur_d=${PWD}
|
||||
my_d=$(dirname $(readlink -f ${0}))
|
||||
|
||||
@ -24,7 +45,7 @@ fi
|
||||
|
||||
if [ ! -e ${base_vmdk} ]; then
|
||||
find . | grep vmdk
|
||||
exit 0
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Lets be safe about this
|
||||
@ -57,7 +78,10 @@ sed -i "${ovf}" \
|
||||
-e "s/@@NUM_CPUS@@/2/g" \
|
||||
-e "s/@@VERSION@@/${version}/g" \
|
||||
-e "s/@@DATE@@/${serial_stamp}/g" \
|
||||
-e "s/@@MEM_SIZE@@/1024/g"
|
||||
-e "s/@@MEM_SIZE@@/1024/g" \
|
||||
-e "s/@@OVF_ID@@/${ovf_id}/g" \
|
||||
-e "s/@@OVF_OS_TYPE@@/${ovf_os_type}/g" \
|
||||
-e "s/@@OVF_DESC_BITS@@/${ovf_desc_bits}/g"
|
||||
|
||||
# Get the checksums
|
||||
vmdk_sha256=$(sha256sum ${vmdk_f} | cut -d' ' -f1)
|
||||
@ -67,7 +91,7 @@ ovf_sha256=$(sha256sum ${ovf} | cut -d' ' -f1)
|
||||
manifest="${scratch_d}/${prefix}.mf"
|
||||
cat > "${manifest}" <<EOF
|
||||
SHA256(${vmdk_f##*/})= ${vmdk_sha256}
|
||||
SHA256(${ovf##*/}.ovf)= ${ovf_sha256}
|
||||
SHA256(${ovf##*/})= ${ovf_sha256}
|
||||
EOF
|
||||
|
||||
# Now create the OVA
|
56
live-build/ubuntu-cpc/hooks.d/base/wsl-gz.binary
Executable file
56
live-build/ubuntu-cpc/hooks.d/base/wsl-gz.binary
Executable file
@ -0,0 +1,56 @@
|
||||
#!/bin/bash -eux
|
||||
# vi: ts=4 expandtab
|
||||
#
|
||||
# Generate the compressed root directory for WSL
|
||||
|
||||
case ${SUBPROJECT:-} in
|
||||
minimized)
|
||||
echo "Skipping minimized $0 build as WSL systems are designed to be interactive"
|
||||
exit 0
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
|
||||
case $ARCH in
|
||||
amd64|arm64)
|
||||
;;
|
||||
*)
|
||||
echo "WSL root tarballs are not generated for $ARCH."
|
||||
exit 0;;
|
||||
esac
|
||||
|
||||
if [ -n "${SUBARCH:-}" ]; then
|
||||
echo "Skipping rootfs build for subarch flavor build"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
. config/functions
|
||||
|
||||
rootfs_dir=wslroot.dir
|
||||
|
||||
# This is the directory created by create-root-dir.binary
|
||||
cp -a binary/boot/filesystem.dir/ $rootfs_dir
|
||||
|
||||
setup_mountpoint $rootfs_dir
|
||||
|
||||
env DEBIAN_FRONTEND=noninteractive chroot $rootfs_dir apt-get -y -qq install ubuntu-wsl
|
||||
|
||||
create_manifest $rootfs_dir livecd.ubuntu-cpc.wsl.rootfs.manifest
|
||||
teardown_mountpoint $rootfs_dir
|
||||
|
||||
# remove attributes not supported by WSL's tar
|
||||
if [ -d $rootfs_dir/var/log/journal ]; then
|
||||
setfattr -x system.posix_acl_access $rootfs_dir/var/log/journal
|
||||
setfattr -x system.posix_acl_default $rootfs_dir/var/log/journal
|
||||
fi
|
||||
|
||||
# The reason for not using just tar .. -C $rootfs_dir . is that using '.' was
|
||||
# found not working once and checking if using the simpler command is safe
|
||||
# needs verification of the app installation on all Windows 10 builds
|
||||
# we support with WSL.
|
||||
cd $rootfs_dir
|
||||
tar --xattrs --sort=name -czf ../livecd.ubuntu-cpc.wsl.rootfs.tar.gz *
|
||||
cd ..
|
||||
|
||||
rm -rf $rootfs_dir
|
9
live-build/ubuntu-cpc/hooks.d/chroot/061-open-iscsi.chroot
Executable file
9
live-build/ubuntu-cpc/hooks.d/chroot/061-open-iscsi.chroot
Executable file
@ -0,0 +1,9 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Set InitiatorName to be runtime generated when iscsid first starts, so
|
||||
# that each cloud image gets a unique value
|
||||
#
|
||||
|
||||
if [ -f /etc/iscsi/initiatorname.iscsi ]; then
|
||||
echo "GenerateName=yes" > /etc/iscsi/initiatorname.iscsi
|
||||
fi
|
@ -3,6 +3,8 @@ rootd="${1:-/}"
|
||||
root_fs_label=cloudimg-rootfs
|
||||
set -ex
|
||||
|
||||
. /root/config/chroot
|
||||
|
||||
CLOUD_IMG_STR="# CLOUD_IMG: This file was created/modified by the Cloud Image build process"
|
||||
|
||||
LANG=C
|
||||
@ -45,7 +47,7 @@ respawn
|
||||
script
|
||||
exec /sbin/getty -L CONDEV 115200 vt102
|
||||
end script
|
||||
${CLOUD_IMG_STR}
|
||||
${IMAGE_STR}
|
||||
EOF
|
||||
sed -i "s/CONDEV/${condev}/g" "$idir/${condev}.conf"
|
||||
}
|
||||
@ -87,7 +89,9 @@ _xchroot "${rootd}" sh -c 'sed -i "/^127.0.1.1/d" /etc/hosts'
|
||||
_xchroot "${rootd}" sh -c 'rm -f /etc/ssh/ssh_host_[rd]sa_key*'
|
||||
|
||||
## --------------
|
||||
_xchroot "${rootd}" locale-gen en_US.utf8
|
||||
if [ "${SUBPROJECT:-}" != minimized ]; then
|
||||
_xchroot "${rootd}" locale-gen en_US.utf8
|
||||
fi
|
||||
|
||||
## --------------
|
||||
# set cloud-init to be on
|
||||
@ -122,24 +126,6 @@ cat > /etc/fstab << EOM
|
||||
LABEL=cloudimg-rootfs / ext4 defaults 0 0
|
||||
EOM
|
||||
|
||||
## Make sure that the update-motd.d directory exists
|
||||
[ ! -e "${rootd}/etc/update-motd.d" ] &&
|
||||
mkdir -p "${rootd}/etc/update-motd.d"
|
||||
|
||||
## write a MOTD file advertising support for images
|
||||
cat > "${rootd}/etc/update-motd.d/51-cloudguest" << EOF
|
||||
#!/bin/sh
|
||||
#
|
||||
${CLOUD_IMG_STR}
|
||||
# This file is not managed by a package. If you no longer want to
|
||||
# see this message you can safely remove the file.
|
||||
echo ""
|
||||
echo " Get cloud support with Ubuntu Advantage Cloud Guest:"
|
||||
echo " http://www.ubuntu.com/business/services/cloud"
|
||||
EOF
|
||||
|
||||
chmod +x "${rootd}/etc/update-motd.d/51-cloudguest"
|
||||
|
||||
# for quantal and newer, add /etc/overlayroot.local.conf
|
||||
# but do not overwrite anything that somehow got there
|
||||
if [ -f "${rootd}/etc/overlayroot.conf" ] &&
|
||||
@ -150,6 +136,35 @@ if [ -f "${rootd}/etc/overlayroot.conf" ] &&
|
||||
} > "${rootd}/etc/overlayroot.local.conf"
|
||||
fi
|
||||
|
||||
# previous steps may have left a dangling symlink here with
|
||||
# SUBPROJECT=minimized and that breaks lb_chroot_hacks step
|
||||
if [ -L "${rootd}/boot/initrd.img" ] && [ ! -e "${rootd}/boot/initrd.img" ]; then
|
||||
rm "${rootd}/boot/initrd.img"
|
||||
fi
|
||||
|
||||
if [ "${SUBPROJECT:-}" = minimized ]; then
|
||||
# Remove various packages that we don't want in the minimized images.
|
||||
# Some of these are tools that don't make sense by default
|
||||
# non-interactively; some are libraries whose reverse-dependencies
|
||||
# will have already been removed; open-vm-tools, it's a bug that this
|
||||
# is in the common cloud seed because this should only be included
|
||||
# in VMWare guest images, and we know none of the minimized images
|
||||
# are targeted at VMWare.
|
||||
_xchroot "${rootd}" env DEBIAN_FRONTEND=noninteractive \
|
||||
apt-mark auto '^lib.*' '^python*' vim-runtime 2>/dev/null
|
||||
# FIXME: iso-codes is a dep of software-properties and shouldn't be
|
||||
_xchroot "${rootd}" env DEBIAN_FRONTEND=noninteractive \
|
||||
apt-get -y autoremove --purge iso-codes xauth pastebinit \
|
||||
plymouth open-vm-tools git shared-mime-info vim vim-common \
|
||||
console-setup ncurses-term tmux screen policykit-1 \
|
||||
xdg-user-dirs less run-one apport-symptoms \
|
||||
ubuntu-cloudimage-keyring file
|
||||
# Add back pollinate, which gets removed due to a dependency on vim-common
|
||||
_xchroot "${rootd}" env DEBIAN_FRONTEND=noninteractive \
|
||||
apt-get -y install --no-install-recommends pollinate
|
||||
|
||||
_xchroot "${rootd}" apt clean
|
||||
fi
|
||||
|
||||
#### END COMMON ARCH FUNCTIONS
|
||||
|
280
live-build/ubuntu-cpc/hooks.d/make-hooks
Executable file
280
live-build/ubuntu-cpc/hooks.d/make-hooks
Executable file
@ -0,0 +1,280 @@
|
||||
#!/usr/bin/env python3
|
||||
#-*- encoding: utf-8 -*-
|
||||
"""
|
||||
This script parses a series file and its dependencies and generates a hooks
|
||||
folder containing symbolic links to the scripts that need to be invoked for
|
||||
a given image target set.
|
||||
|
||||
For example, if you wish to build the image target sets "vmdk" and "vagrant",
|
||||
you would call this script as
|
||||
|
||||
./make-hooks --hooks-dir hooks vmdk vagrant
|
||||
|
||||
Scripts live in subfolders below the "hooks.d" folder. Currently the folders
|
||||
"chroot" and "base" exist. The folder with the name "extra" is reserved for
|
||||
private scripts, which are not included in the source of livecd-rootfs. The
|
||||
scripts are not numbered, instead the order of their execution depends on the
|
||||
order in which they are listed in a series file.
|
||||
|
||||
Series files are placed into the subfolders "base/series" or "extra/series".
|
||||
Each series file contains a list of scripts to be executed. Empty lines and
|
||||
lines starting with a '#' are ignored. Series files in "extra/series" override
|
||||
files in "base/series" with the same name. For example, if a series file
|
||||
"base/series/cloudA" exists and a series file "extra/series/cloudA", then the
|
||||
latter will be preferred.
|
||||
|
||||
A series file in "extra/series" may also list scripts that are located in the
|
||||
"chroot" and "base" folders. In addition, series files can depend on other
|
||||
series files. For example, the series files for most custom images look similar
|
||||
to this:
|
||||
|
||||
depends disk-image
|
||||
depends extra-settings
|
||||
extra/cloudB.binary
|
||||
provides livecd.ubuntu-cpc.disk-kvm.img
|
||||
provides livecd.ubuntu-cpc.disk-kvm.manifest
|
||||
|
||||
Where "disk-image" and "extra-settings" may list scripts and dependencies which
|
||||
are to be processed before the script "extra/cloudB.binary" is called.
|
||||
|
||||
The "provides" directive defines a file that the hook creates; it can be
|
||||
specified multiple times. The field is used by this script to generate a list
|
||||
of output files created explicitly by the named image targets. The list is
|
||||
saved to the "explicit_provides" file in the hooks output directory. In
|
||||
the case of the "all" target this list would be empty. This list is
|
||||
consumed by the "remove-implicit-artifacts" which is run at the end of the build.
|
||||
|
||||
ACHTUNG: live build runs scripts with the suffix ".chroot" in a batch separate
|
||||
from scripts ending in ".binary". Even if you arrange them interleaved in your
|
||||
series files, the chroot scripts will be run before the binary scripts.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import sys
|
||||
import yaml
|
||||
|
||||
SCRIPT_DIR = os.path.normpath(os.path.dirname(os.path.realpath(sys.argv[0])))
|
||||
HOOKS_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, "..", "hooks"))
|
||||
|
||||
EXIT_OK = 0
|
||||
EXIT_ERR = 1
|
||||
|
||||
class MakeHooksError(Exception):
|
||||
pass
|
||||
|
||||
class MakeHooks:
|
||||
"""This class provides series file parsing and symlink generator
|
||||
functionality."""
|
||||
|
||||
def __init__(self, hooks_dir=None, quiet=False):
|
||||
"""The hooks_dir parameter can be used to specify the path to the
|
||||
directory, into which the hook symlinks to the actual script files
|
||||
should be placed.
|
||||
|
||||
If quiet is set to True, info messages during symlink creation will
|
||||
be suppressed. Use this if your build is not private, but you would
|
||||
like to hide which scripts are being run.
|
||||
"""
|
||||
self._script_dir = SCRIPT_DIR
|
||||
self._hooks_dir = hooks_dir or HOOKS_DIR
|
||||
self._quiet = quiet
|
||||
self._hooks_list = []
|
||||
self._included = set()
|
||||
self._provides = []
|
||||
|
||||
def reset(self):
|
||||
"""Reset the internal state allowing instance to be reused for
|
||||
another run."""
|
||||
self._hooks_list.clear()
|
||||
self._included.clear()
|
||||
|
||||
def print_usage(self):
|
||||
print(
|
||||
"CPC live build hook generator script \n"
|
||||
" \n"
|
||||
"Usage: ./make-hooks.sh [OPTIONS] <image_set> \n"
|
||||
" \n"
|
||||
"Options: \n"
|
||||
" \n"
|
||||
" --help, -h Show this message and exit. \n"
|
||||
" --quiet, -q Only show warnings and error messages. \n"
|
||||
" --hooks-dir, -d <dir> The directory where to write the symlinks.\n"
|
||||
)
|
||||
|
||||
def find_series_file(self, image_set):
|
||||
"""Search for the series file requested in the image_set parameter.
|
||||
|
||||
The image_set parameter should be a string containing the name of an
|
||||
image target set represented by a series file. First the "extra/series"
|
||||
folder is searched followed by the "base/series" folder.
|
||||
|
||||
When a file with the given name is found, the search stops and the
|
||||
full path to the file is returned.
|
||||
"""
|
||||
for subdir in ["extra", "base"]:
|
||||
series_file = os.path.join(self._script_dir, subdir, "series",
|
||||
image_set)
|
||||
if os.path.isfile(series_file):
|
||||
return series_file
|
||||
return None
|
||||
|
||||
def make_hooks(self, image_sets):
|
||||
"""Entry point for parsing series files and their dependencies and
|
||||
for generating the symlinks in the hooks folder.
|
||||
|
||||
The image_sets parameter must be an iterable containing the names of
|
||||
the series files representing the corresponding image target sets,
|
||||
e.g. "vmdk" or "vagrant".
|
||||
"""
|
||||
self.collect_chroot_hooks()
|
||||
self.collect_binary_hooks(image_sets, explicit_sets=True)
|
||||
self.create_symlinks()
|
||||
self.create_explicit_provides()
|
||||
|
||||
def collect_chroot_hooks(self):
|
||||
"""Chroot hooks are numbered and not explicitly mentioned in series
|
||||
files. Collect them, sort them and add them to the internal list of
|
||||
paths to hook sripts.
|
||||
"""
|
||||
chroot_hooks_dir = os.path.join(self._script_dir, "chroot")
|
||||
|
||||
chroot_entries = os.listdir(chroot_hooks_dir)
|
||||
chroot_entries.sort()
|
||||
|
||||
for entry in chroot_entries:
|
||||
if not (entry.endswith(".chroot_early") or
|
||||
entry.endswith(".chroot")):
|
||||
continue
|
||||
self._hooks_list.append(os.path.join("chroot", entry))
|
||||
|
||||
def collect_binary_hooks(self, image_sets, explicit_sets=False):
|
||||
"""Search the series files for the given image_sets and parse them
|
||||
and their dependencies to generate a list of hook scripts to be run
|
||||
during image build.
|
||||
|
||||
The image_sets parameter must be an iterable containing the names of
|
||||
the series files representing the corresponding image target sets,
|
||||
e.g. "vmdk" or "vagrant".
|
||||
|
||||
Populates the internal list of paths to hook scripts in the order in
|
||||
which the scripts are to be run.
|
||||
|
||||
If "explicit_sets" is True, the files specified on lines starting
|
||||
with "provides" will be added to self._provides to track explicit
|
||||
output artifacts. This is only True for the initial images_sets
|
||||
list, dependent image sets should set this to False.
|
||||
"""
|
||||
for image_set in image_sets:
|
||||
series_file = self.find_series_file(image_set)
|
||||
|
||||
if not series_file:
|
||||
raise MakeHooksError(
|
||||
"Series file for image set '%s' not found." % image_set)
|
||||
|
||||
with open(series_file, "r", encoding="utf-8") as fp:
|
||||
for line in fp:
|
||||
line = line.strip()
|
||||
if not line or line.startswith("#"):
|
||||
continue
|
||||
|
||||
m = re.match(r"^\s*depends\s+(\S+.*)$", line)
|
||||
if m:
|
||||
include_set = m.group(1)
|
||||
if include_set in self._included:
|
||||
continue
|
||||
self._included.add(include_set)
|
||||
self.collect_binary_hooks([include_set,])
|
||||
continue
|
||||
|
||||
m = re.match(r"^\s*provides\s+(\S+.*)$", line)
|
||||
if m:
|
||||
if explicit_sets:
|
||||
self._provides.append(m.group(1))
|
||||
continue
|
||||
|
||||
if not line in self._hooks_list:
|
||||
self._hooks_list.append(line)
|
||||
|
||||
def create_symlinks(self):
    """Once the internal list of hooks scripts has been populated by a
    call to collect_?_hooks, this method is used to populate the hooks
    folder with enumerated symbolic links to the hooks scripts. If the
    folder does not exist, it will be created. If it exists, it must be
    empty or a MakeHooksError will be thrown.
    """
    if os.path.isdir(self._hooks_dir) and os.listdir(self._hooks_dir):
        # Only print a warning, because directory might have been created
        # by auto/config voodoo.
        sys.stderr.write("WARNING: Hooks directory exists and is not empty.\n")
    os.makedirs(self._hooks_dir, exist_ok=True)

    # Enumerate from 1 so link names sort in intended execution order.
    for counter, hook in enumerate(self._hooks_list, start=1):
        hook_basename = os.path.basename(hook)

        # Strip an existing numeric prefix ("NN-" or "NN-NN-") from the
        # hook's file name so it can be re-numbered cleanly below.
        m = re.match(r"^\d+-(?:\d+-)?(?P<basename>.*)$", hook_basename)
        if m:
            hook_basename = m.group("basename")

        # Zero-padded prefix keeps lexical order == numeric order.
        linkname = ("%03d-" % counter) + hook_basename
        linkdest = os.path.join(self._hooks_dir, linkname)
        # Use a relative link target so the tree stays valid if the
        # whole build directory is moved.
        linksrc = os.path.relpath(os.path.join(self._script_dir, hook),
                                  self._hooks_dir)

        if not self._quiet:
            print("[HOOK] %s => %s" % (linkname, hook))
        os.symlink(linksrc, linkdest)
def create_explicit_provides(self):
    """Write a file named "explicit_provides" in self._script_dir
    listing every artifact named on a "provides" line in the series
    files of targets the user explicitly requested.

    The file is always created; it stays empty when no explicit
    "provides" keywords exist in the targets (the 'all' case).
    """
    listing_path = os.path.join(self._script_dir, "explicit_provides")

    with open(listing_path, "w", encoding="utf-8") as outfile:
        for artifact in self._provides:
            if not self._quiet:
                print("[PROVIDES] %s" % artifact)
            outfile.write("%s\n" % artifact)

        # Whenever any explicit artifact was recorded, the magic-proxy
        # log is kept alongside it as well.
        if self._provides:
            outfile.write('livecd.magic-proxy.log\n')
def cli(self, args):
    """Command line interface to the hooks generator.

    args is the argv-style list of command line arguments (without the
    program name). Parsed options are copied onto matching private
    object attributes before the hooks are generated.
    """
    parser = argparse.ArgumentParser()

    # BUG FIX: this previously used type=bool, which argparse does not
    # interpret — any non-empty argument (including "False") parsed as
    # True, and a bare "-q" was rejected. store_true makes it a proper
    # flag while remaining backward-compatible for "-q"-style usage.
    parser.add_argument("-q", "--quiet", dest="quiet", action="store_true",
        help="Only show warnings and error messages.")
    parser.add_argument("-d", "--hooks-dir", dest="hooks_dir", type=str,
        help="The directory where to create the symlinks.")
    parser.add_argument("image_target", nargs="+", type=str,
        help="")

    self.reset()
    options = parser.parse_args(args)

    # Copy options to object attributes (only truthy values, and only
    # where a matching private attribute already exists).
    for key, value in vars(options).items():
        if value and hasattr(self, "_" + key):
            setattr(self, "_" + key, value)

    # Take remaining command line arguments, sanitize and turn into list.
    image_sets = re.sub(r";|,", " ", " ".join(options.image_target))\
        .split()

    self.make_hooks(image_sets)
if __name__ == "__main__":
    # Script entry point: run the CLI and report tool errors on stderr
    # with the program's basename, exiting non-zero on failure.
    try:
        MakeHooks().cli(sys.argv[1:])
    except MakeHooksError as err:
        prog = os.path.basename(sys.argv[0])
        sys.stderr.write("%s: %s\n" % (prog, str(err)))
        sys.exit(EXIT_ERR)
41
live-build/ubuntu-cpc/hooks.d/remove-implicit-artifacts
Executable file
41
live-build/ubuntu-cpc/hooks.d/remove-implicit-artifacts
Executable file
@ -0,0 +1,41 @@
|
||||
#!/usr/bin/env python3
#-*- encoding: utf-8 -*-
"""
Remove output files not created by explicitly specified image targets

This uses the 'explicit_provides' file generated by the 'make-hooks'
script. If the file is empty, all output will be saved.
"""
import glob
import os
import sys

if __name__ == "__main__":
    print('Running {}'.format(__file__))
    scriptname = os.path.basename(__file__)

    # Artifacts explicitly requested by the user, one name per line.
    explicit = set()
    with open('./config/hooks.d/explicit_provides', 'r',
              encoding='utf-8') as fp:
        for filename in fp:
            explicit.add(filename.rstrip())

    if not explicit:
        print('{}: explicit_provides is empty. '
              'All binary output will be included'.format(scriptname))
        sys.exit(0)

    # Renamed from "all"/"file" to stop shadowing the Python builtins.
    artifacts = set(glob.glob('livecd.ubuntu-cpc.*'))
    implicit = artifacts - explicit

    print('{}: all artifacts considered: {}'.format(scriptname, artifacts))
    # Message typo fixed: "explict" -> "explicit".
    print('{}: explicit artifacts to keep: {}'.format(scriptname, explicit))
    print('{}: implicit artifacts to remove: {}'.format(scriptname, implicit))

    for artifact in implicit:
        # Symlinks are removed without stat'ing their (possibly dangling)
        # target; regular files are removed with a size report.
        if os.path.islink(artifact):
            print('{}: unlinking {}'.format(scriptname, artifact))
            os.unlink(artifact)
        elif os.path.isfile(artifact):
            print('{}: removing {} '
                  '{} bytes'.format(scriptname, artifact,
                                    os.stat(artifact).st_size))
            os.remove(artifact)
@ -1,15 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Execute extra binary hooks.
|
||||
|
||||
my_dir=$(dirname $(readlink -f ${0}))
|
||||
extra_d=${my_dir}/extra
|
||||
|
||||
if [ ! -d ${my_dir}/extra ]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Export the common functions to the extras
|
||||
. /build/config/functions
|
||||
|
||||
# Cleaner execution
|
||||
/bin/run-parts --regex ".*\.binary" "${extra_d}"
|
@ -53,6 +53,7 @@ systemd-network:x:112:117:systemd Network Management,,,:/run/systemd/netif:/bin/
|
||||
systemd-resolve:x:113:118:systemd Resolver,,,:/run/systemd/resolve:/bin/false
|
||||
systemd-bus-proxy:x:114:119:systemd Bus Proxy,,,:/run/systemd:/bin/false
|
||||
nm-openvpn:x:115:120:NetworkManager OpenVPN,,,:/var/lib/openvpn/chroot:/bin/false
|
||||
dhcpd:x:116:122::/var/run:/bin/false
|
||||
EOF
|
||||
else
|
||||
echo "/etc/passwd post-debootstrap hash doesn't match record" >&2
|
||||
@ -101,6 +102,7 @@ systemd-network:*:16372:0:99999:7:::
|
||||
systemd-resolve:*:16372:0:99999:7:::
|
||||
systemd-bus-proxy:*:16372:0:99999:7:::
|
||||
nm-openvpn:*:16909:0:99999:7:::
|
||||
dhcpd:*:16925:0:99999:7:::
|
||||
EOF
|
||||
else
|
||||
echo "/etc/shadow post-debootstrap hash doesn't match record" >&2
|
||||
@ -186,6 +188,7 @@ systemd-resolve:x:118:
|
||||
systemd-bus-proxy:x:119:
|
||||
input:x:121:
|
||||
nm-openvpn:x:120:
|
||||
dhcpd:x:122:
|
||||
EOF
|
||||
else
|
||||
echo "/etc/group post-debootstrap hash doesn't match record" >&2
|
||||
@ -271,6 +274,7 @@ systemd-resolve:!::
|
||||
systemd-bus-proxy:!::
|
||||
input:!::
|
||||
nm-openvpn:!::
|
||||
dhcpd:!::
|
||||
EOF
|
||||
else
|
||||
echo "/etc/gshadow post-debootstrap hash doesn't match record" >&2
|
||||
|
@ -4,8 +4,19 @@ set -e
|
||||
|
||||
echo "Setting up click packages"
|
||||
|
||||
CLICKARCH=$(dpkg --print-architecture)
|
||||
|
||||
click_uri=http://archive-team.internal/click_packages
|
||||
click_list=$click_uri/click_list
|
||||
if [ "$CLICKARCH" = "arm64" ]; then
|
||||
# FIXME: this is temporary. Since right now we can't have arm64 clicks in the store
|
||||
# (before implementing fat-packages), we need to fetch the arm64 click list from a
|
||||
# different place
|
||||
click_list=$click_uri/click_list.arm64
|
||||
click_install_flags="--allow-unauthenticated"
|
||||
else
|
||||
click_list=$click_uri/click_list
|
||||
click_install_flags=""
|
||||
fi
|
||||
click_db=/usr/share/click/preinstalled
|
||||
click_db_custom=/custom/click
|
||||
|
||||
@ -20,8 +31,6 @@ tmpdir="$(mktemp -d)"
|
||||
cleanup () { rm -rf "$tmpdir"; }
|
||||
trap cleanup EXIT
|
||||
|
||||
CLICKARCH=$(dpkg --print-architecture)
|
||||
|
||||
wget --no-verbose -O "$tmpdir/click_list" "$click_list"
|
||||
for package in $(cat "$tmpdir/click_list")
|
||||
do
|
||||
@ -61,7 +70,7 @@ do
|
||||
mv /etc/click/databases/10_core.conf \
|
||||
/etc/click/databases/10_core.conf.tmp
|
||||
fi
|
||||
click install --force-missing-framework --root="$root" --all-users \
|
||||
click install --force-missing-framework --root="$root" --all-users $click_install_flags \
|
||||
"$tmpdir/$package"
|
||||
if [ "$root" = "$click_db_custom" ]; then
|
||||
mv /etc/click/databases/10_core.conf.tmp \
|
||||
|
1
lp-in-release
Symbolic link
1
lp-in-release
Symbolic link
@ -0,0 +1 @@
|
||||
magic-proxy
|
971
magic-proxy
Executable file
971
magic-proxy
Executable file
@ -0,0 +1,971 @@
|
||||
#!/usr/bin/python3 -u
|
||||
#-*- encoding: utf-8 -*-
|
||||
"""
|
||||
This script can be called as "lp-in-release" or as "magic-proxy". When called
|
||||
under the former name, it acts as a CLI tool, when called under the latter name
|
||||
it will act as a transparent HTTP proxy.
|
||||
|
||||
The CLI tool parses the directory listing of
|
||||
|
||||
http://<mirror>/dists/suite/by-hash/SHA256
|
||||
|
||||
and figures out which hashes belong to an InRelease file. For example, to list
|
||||
all available hashes for "cosmic" run
|
||||
|
||||
./lp-in-release list --suite cosmic
|
||||
|
||||
Per default the script scans archive.ubuntu.com, but you can tell it to use a
|
||||
different mirror with the --mirror-url command line parameter. Analogously, you
|
||||
can list the hashes for "cosmic-updates" or "cosmic-security". The script can
|
||||
also find the hash that was valid at a given timestamp via
|
||||
|
||||
./lp-in-release select --suite cosmic --cutoff-time <timestamp>
|
||||
|
||||
Finally, you can use the script to inject inrelease-path settings into a
|
||||
sources.list file via
|
||||
|
||||
./lp-in-release inject --cutoff-time <timestamp> /etc/apt/sources.list
|
||||
|
||||
The proxy is just an extension to this functionality. Whenever a URL points at
|
||||
an InRelease file or a path listed in an InRelease file, the proxy will
|
||||
automatically inject the by hash URL for the resource according to the timestamp
|
||||
it was configured for. The proxy works in transparent and non-transparent mode.
|
||||
"""
|
||||
from datetime import datetime, timedelta, tzinfo
|
||||
|
||||
import argparse
|
||||
import copy
|
||||
import fcntl
|
||||
import getopt
|
||||
import hashlib
|
||||
import http.client
|
||||
import http.server
|
||||
import json
|
||||
import os
|
||||
import pwd
|
||||
import re
|
||||
import shutil
|
||||
import socketserver
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
import urllib.error
|
||||
import urllib.parse
|
||||
import urllib.request
|
||||
|
||||
# Process exit codes used by the CLI entry points.
EXIT_OK = 0
EXIT_ERR = 1

class LPInReleaseBaseError(Exception):
    """Root of the lp-in-release / magic-proxy exception hierarchy."""
    pass

class LPInReleaseIndexError(LPInReleaseBaseError):
    """Raised for errors while retrieving or scanning by-hash listings."""
    pass

class LPInReleaseCacheError(LPInReleaseBaseError):
    """Raised when the on-disk InRelease cache cannot be read or written."""
    pass

class LPInReleaseProxyError(LPInReleaseBaseError):
    """Raised for errors inside the transparent proxy component."""
    pass
class InRelease:
    """This class represents an InRelease file."""

    def __init__(self, mirror, suite, data, hash_=None, last_modified=None):
        """mirror must contain the proper URL of the package repository up to
        the "dists" folder, e.g.

        http://archive.ubuntu.com/ubuntu

        suite is the name of the suite this InRelease file belongs to, e.g.
        <release>, <release>-updates or <release>-security.

        data must contain the full contents of the InRelease file as a unicode
        string.

        If supplied, then hash_ will be used as the sha256 hexdigest of the
        binary encoding of the InRelease file. If not supplied, the hash will
        be calculated. This is just used as a time-saver, when cache contents
        are read back in.

        last_modified must be a string of format

        Thu, 26 Apr 2018 23:37:48 UTC

        representing the publication time of the InRelease file. If not given,
        the generation time stored in the InRelease file will be used. Below,
        this is set explicitly to correspond to the Last-Modified header spat
        out by the Web server.
        """
        self.mirror = mirror
        self.suite = suite
        self.data = data
        # Lazily filled by _parse_contents: resource path -> SHA256 hexdigest.
        self.dict = {}

        if hash_:
            self.hash = hash_
        else:
            # Hash the UTF-8 encoding of the full file contents.
            h = hashlib.sha256()
            h.update(data.encode("utf-8"))
            self.hash = h.hexdigest()

        if last_modified:
            self.published = self._parse_datetime(last_modified)
        else:
            self.published = self._extract_timestamp(data)

    @property
    def datetime(self):
        """Return the publication time of this InRelease file as a string in
        YYYY-MM-DD HH:MM:SS ISO format. The result is always in GMT."""
        return datetime \
            .utcfromtimestamp(self.published) \
            .strftime('%Y-%m-%d %H:%M:%S')

    @property
    def normalized_address(self):
        """Return the "normalized" address of the mirror URL, consisting of
        only the hostname and the path. This may be used as an index into an
        InReleaseCache."""
        result = urllib.parse.urlparse(self.mirror)
        address = result.hostname + result.path.rstrip("/")
        return address

    @property
    def contents(self):
        """Return the pure contents of the InRelease file with the signature
        stripped off."""
        return self._split_release_and_sig(self.data)[0]

    @property
    def signature(self):
        """Return the ASCII-armored PGP signature of the InRelease file."""
        return self._split_release_and_sig(self.data)[1]

    def serialize(self):
        """Serializes the InRelease object into Python structures to be stored
        in an InReleaseCache."""
        # Padding entry at index 0 so month numbers (1-12) index directly.
        month_names = [ "_ignore_",
            "Jan", "Feb", "Mar", "Apr", "May", "Jun",
            "Jul", "Aug", "Sep", "Oct", "Nov", "Dec",
        ]

        # Ordered to match datetime.weekday() (Monday == 0).
        wkday_names = [
            "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun",
        ]

        dt = datetime.utcfromtimestamp(self.published)

        # Render an RFC-1123-style timestamp without relying on the
        # locale-dependent strftime month/weekday names.
        published = "{}, {:02} {} {} {:02}:{:02}:{:02} GMT".format(
            wkday_names[dt.weekday()],
            dt.day,
            month_names[dt.month],
            dt.year,
            dt.hour,
            dt.minute,
            dt.second
        )

        return {
            "mirror": self.mirror,
            "suite": self.suite,
            "hash": self.hash,
            "published": published,
            "data": self.data,
        }

    def get_hash_for(self, path):
        """Check if the given path is listed in this InRelease file and if so
        return the corresponding hash in hexdigest format. If the path is not
        listed, None is returned."""
        # Parse on first use; subsequent calls hit the cached dict.
        if not self.dict:
            self._parse_contents()
        return self.dict.get(path)

    def _parse_contents(self):
        """This method parses out all lines containing SHA256 hashes and creates
        an internal dict, mapping resources to hashes."""
        regex = re.compile(
            r" (?P<hash>[0-9a-f]{64})\s+(?P<size>\d+)\s+(?P<path>\S+)")

        for line in self.contents.splitlines():
            m = regex.match(line)
            if not m:
                continue
            self.dict[m.group("path")] = m.group("hash")

    def _parse_datetime(self, datetime_string):
        """Because the behavior of Python's strptime's would be
        locale-dependent, we parse datetime strings of the format found in
        Last-Modified HTTP headers ourselves. This returns an integer
        representing a posix timestamp or None, if the parsing failed."""
        # Minimal fixed-offset tzinfo; only utcoffset is needed for the
        # subtraction below (tzname/dst are never called here).
        class UTC(tzinfo):
            def utcoffset(self, dt):
                return timedelta(0)

        # we need a map, because strptime would be locale-dependent
        month_name_to_number = {
            "Jan": 1, "Feb": 2, "Mar": 3, "Apr": 4, "May": 5, "Jun": 6,
            "Jul": 7, "Aug": 8, "Sep": 9, "Oct": 10, "Nov": 11, "Dec": 12
        }

        rexpr = r"""^\s*\w+,\s+
            (?P<day>\d+) \s+
            (?P<month>\w+) \s+
            (?P<year>\d+) \s+
            (?P<hour>\d+) :
            (?P<min>\d+) :
            (?P<sec>\d+) .*$"""

        m = re.match(rexpr, datetime_string, flags=re.VERBOSE)
        if not m:
            return None

        parts = list(m.group("year", "month", "day", "hour", "min", "sec"))
        # Replace the textual month with its number before int conversion.
        parts[1] = month_name_to_number[m.group("month")]
        parts = [int(s) for s in parts]
        dt = datetime(*parts, tzinfo=UTC())
        epoch = datetime(1970, 1, 1, tzinfo=UTC())
        posix = (dt - epoch).total_seconds()

        return int(posix)

    def _extract_timestamp(self, data):
        """Parse the contents of the InRelease file to find the time it was
        generated. Returns a POSIX timestamp if found or None otherwise."""
        for line in data.splitlines():
            if line.startswith("Date:"):
                return self._parse_datetime(line.split(":", 1)[1])

        return None

    def _split_release_and_sig(self, data):
        """Split the InRelease file into content and signature parts and return
        a tuple of unicode strings (content, signature)."""
        rexpr = re.escape("-----BEGIN PGP SIGNED MESSAGE-----") + r"\r?\n|" + \
                re.escape("-----BEGIN PGP SIGNATURE-----" ) + r"\r?\n|" + \
                re.escape("-----END PGP SIGNATURE-----"   )

        # re.split on the three armor markers yields
        # [prefix, content, signature, suffix]; keep the middle two.
        return re.split(rexpr, data)[1:3]
class LPInReleaseCache:
    """A cache for InRelease files that can optionally be saved to and
    loaded from disk."""

    def __init__(self, filename=None):
        """If filename is given, it is the name of the file that cache contents
        will be saved to or loaded from when the save and load methods are
        called, respectively."""
        self._filename = filename
        # Nested dict: normalized mirror address -> suite -> hash -> entry.
        self._data = {}
        # Guards _data against concurrent access from proxy worker threads.
        self._lock = threading.Lock()

        self.load()

    def load(self):
        """Load the cache contents from disk performing some rudimentary file
        locking to prevent corruption."""
        if not self._filename:
            return

        buf = []
        fd = None
        try:
            # O_CREAT so a first run with no cache file succeeds.
            fd = os.open(self._filename, os.O_CREAT | os.O_RDWR)

            fcntl.flock(fd, fcntl.LOCK_EX)

            while True:
                tmp = os.read(fd, 4096)
                if not tmp:
                    break
                buf.append(tmp)

            fcntl.flock(fd, fcntl.LOCK_UN)
        except OSError as e:
            raise LPInReleaseCacheError("Failed to load cache file: {}"
                .format(str(e)))
        finally:
            # NOTE(review): "if fd:" would skip close() for fd == 0; in
            # practice descriptor 0 is taken by stdin, but "fd is not None"
            # would be the safer test — confirm before relying on this.
            if fd:
                os.close(fd)

        # An empty/absent file deserializes to an empty cache.
        cache_data = {} if not buf else json.loads(
            b"".join(buf).decode("utf-8"))

        with self._lock:
            self._data = cache_data

    def save(self):
        """Save the cache contents to disk performing some rudimentary file
        locking to prevent corruption."""
        if not self._filename:
            return

        with self._lock:
            buf = json \
                .dumps(self._data, ensure_ascii=False, indent=4,
                    sort_keys=True) \
                .encode("utf-8")

        fd = None
        try:
            fd = os.open(self._filename, os.O_CREAT | os.O_RDWR)

            fcntl.flock(fd, fcntl.LOCK_EX)

            # Truncate first so a shorter serialization leaves no stale tail.
            os.ftruncate(fd, 0)
            os.write(fd, buf)

            fcntl.flock(fd, fcntl.LOCK_UN)
        except OSError as e:
            raise LPInReleaseCacheError("Failed to store cache file: {}"
                .format(str(e)))
        finally:
            # NOTE(review): same fd-zero caveat as in load().
            if fd:
                os.close(fd)

    def add(self, inrelease):
        """Add the given InRelease object to the cache."""
        with self._lock:
            # setdefault keeps an existing entry for the same hash intact.
            self._data \
                .setdefault(inrelease.normalized_address, {}) \
                .setdefault(inrelease.suite, {}) \
                .setdefault(inrelease.hash, inrelease.serialize())

    def get_one(self, mirror, suite, hash_):
        """Return a single InRelease object for the given mirror and suite,
        corresponding to the hash or None if such an entry does not exist."""
        with self._lock:
            # Normalize the mirror URL to hostname + path, matching the
            # key format produced by InRelease.normalized_address.
            url_obj = urllib.parse.urlparse(mirror)
            address = url_obj.hostname + url_obj.path.rstrip("/")

            inrel = self._data\
                .get(address, {})\
                .get(suite, {})\
                .get(hash_)

            if not inrel:
                return None

            return InRelease(
                inrel["mirror"],
                inrel["suite"],
                inrel["data"],
                hash_=inrel["hash"],
                last_modified=inrel["published"]
            )

    def get_all(self, mirror, suite):
        """Retrieve a list of InRelease objects for the given mirror and suite.
        Return a list of all known InRelease objects for the given mirror and
        suite."""
        with self._lock:
            url_obj = urllib.parse.urlparse(mirror)
            address = url_obj.hostname + url_obj.path.rstrip("/")

            inrel_by_hash = self._data\
                .get(address, {})\
                .get(suite, {})

            inrelease_list = []

            # Rehydrate every serialized entry into an InRelease object.
            for hash_, inrel in inrel_by_hash.items():
                inrelease_list.append(
                    InRelease(
                        inrel["mirror"],
                        inrel["suite"],
                        inrel["data"],
                        hash_=inrel["hash"],
                        last_modified=inrel["published"]
                    )
                )

            return inrelease_list
class LPInReleaseIndex:
    """Abstraction to the build system's view of the "by hash" database.
    Currently, that interface is the by-hash directory listing of the Web
    server."""

    def __init__(self, mirror, suite, cache=None):
        """The mirror is the base URL of the repository up to the "dists"
        folder, e.g.

        http://archive.ubuntu.com/ubuntu

        suite is the name of the suite this InReleaseIndex object operates on,
        e.g. <release>, <release>-updates or <release>-security.

        Optionally, cache can be initialized to a LPInReleaseCache object, in
        which case all look-ups will first go to the cache and only cache
        misses will result in requests to the Web server.
        """
        self._mirror = mirror
        self._suite = suite
        self._cache = cache

        # Directory listing URL that enumerates all by-hash blobs.
        self._base_url = "/".join([self._mirror, "dists", self._suite,
            "by-hash/SHA256"])

    def inrelease_files(self):
        """Iterate over all InRelease files found in the archive for the mirror
        and suite this index has been configured to operate on."""
        hashes = self._retrieve_hashes()

        for h in hashes:
            inrelease = None

            # Try the cache first, fall back to fetching from the server.
            if self._cache:
                inrelease = self._cache.get_one(self._mirror,
                    self._suite, hash_=h)
            if not inrelease:
                inrelease = self._retrieve_inrelease(h)
            # Non-InRelease blobs (Packages, Sources, ...) yield None.
            if not inrelease:
                continue

            yield inrelease

    def get_inrelease_for_timestamp(self, time_gmt):
        """Find and return the InRelease file that was valid at the given Posix
        timestamp."""
        # Newest file published at or before the cutoff wins.
        candidate = None

        for inrelease in self.inrelease_files():
            if inrelease.published > time_gmt:
                continue
            if not candidate or inrelease.published > candidate.published:
                candidate = inrelease

        return candidate

    def _retrieve_inrelease(self, hash_):
        """Retrieve the contents of the file identified by hash_. Check if the
        file is an InRelease file and return a corresponding InRelease object.
        If the hash_ does not belong to an InRelease file, None is returned."""
        _500KB = 500 * 1024

        buf = b""
        inrelease = None
        url = self._base_url + "/" + hash_

        try:
            with urllib.request.urlopen(url) as response:

                # InRelease files seem to be around 200-300KB

                content_length = response.headers.get("Content-Length")
                last_modified = response.headers.get("Last-Modified")

                # Without a Content-Length header, read just past the
                # size limit to determine whether the file is too big.
                if not content_length:
                    buf = response.read(_500KB + 1)
                    content_length = len(buf)
                else:
                    content_length = int(content_length)

                # Slightly silly heuristic, but does the job

                if content_length > _500KB or content_length < 1024:
                    return None

                buf += response.read()

                content_encoding = self \
                    ._guess_content_encoding_for_response(response)

                # few additional checks to see if this is an InRelease file

                try:
                    buf = buf.decode(content_encoding)
                except UnicodeError:
                    return None

                if not buf.startswith("-----BEGIN PGP SIGNED MESSAGE-----"):
                    return None

                for kw in ["Origin:", "Label:", "Suite:", "Acquire-By-Hash:"]:
                    if not kw in buf:
                        return None

                inrelease = InRelease(self._mirror, self._suite, buf,
                    hash_=hash_, last_modified=last_modified)

                if self._cache:
                    self._cache.add(inrelease)
        except urllib.error.HTTPError as e:
            # A 404 simply means "not an InRelease file"; anything else
            # is a real retrieval failure.
            if not e.code in [404,]:
                raise LPInReleaseIndexError("Error retrieving {}: {}"
                    .format(url, str(e)))

        return inrelease

    def _guess_content_encoding_for_response(self, response):
        """Guess the content encoding of the given HTTPResponse object."""
        content_encoding = response.headers.get("Content-Encoding")
        content_type = response.headers.get("Content-Type",
            "text/html;charset=UTF-8")

        # Fall back to the charset= parameter of Content-Type, then UTF-8.
        if not content_encoding:
            m = re.match(r"^.*charset=(\S+)$", content_type)

            if m:
                content_encoding = m.group(1)
            else:
                content_encoding = "UTF-8"

        return content_encoding

    def _retrieve_hashes(self):
        """Retrieve all available by-hashes for the mirror and suite that this
        index is configured to operate on."""
        hashes = []

        # A populated cache short-circuits the network round-trip.
        if self._cache:
            cache_entry = self._cache.get_all(self._mirror, self._suite)
            if cache_entry:
                return [inrel.hash for inrel in cache_entry]

        try:
            with urllib.request.urlopen(self._base_url) as response:
                content_encoding = self._guess_content_encoding_for_response(
                    response)

                # Scrape anything that looks like a SHA256 hexdigest out
                # of the directory listing; dedupe via set().
                body = response.read().decode(content_encoding)
                hashes = list(set(re.findall(r"[a-z0-9]{64}", body)))
        except urllib.error.URLError as e:
            raise LPInReleaseIndexError("Could not retrieve hash listing: {}"
                .format(str(e)))

        return hashes
class LPInReleaseIndexCli:
|
||||
"""A CLI interface for LPInReleaseIndex."""
|
||||
|
||||
def __init__(self, name):
|
||||
self._name = name
|
||||
self._mirror = None
|
||||
self._suite = None
|
||||
self._timestamp = None
|
||||
self._cachefile = None
|
||||
self._cache = None
|
||||
self._infile = None
|
||||
self._outfile = None
|
||||
|
||||
def __call__(self, args):
|
||||
options = vars(self._parse_opts(args))
|
||||
|
||||
# Copy settings to object attributes
|
||||
for key, value in options.items():
|
||||
if hasattr(self, "_" + key):
|
||||
setattr(self, "_" + key, value)
|
||||
|
||||
if self._cachefile:
|
||||
self._cache = LPInReleaseCache(self._cachefile)
|
||||
|
||||
try:
|
||||
options["func"]()
|
||||
except LPInReleaseIndexError as e:
|
||||
sys.stderr.write("{}: {}\n".format(self._name, str(e)))
|
||||
sys.exit(EXIT_ERR)
|
||||
|
||||
if self._cache:
|
||||
self._cache.save()
|
||||
|
||||
def list(self):
|
||||
"""List all InRelease hashes for a given mirror and suite."""
|
||||
for inrelease in self._list(self._mirror, self._suite):
|
||||
if self._timestamp and inrelease.published > self._timestamp:
|
||||
continue
|
||||
|
||||
print("{} {} ({})".format(
|
||||
inrelease.hash,
|
||||
inrelease.datetime,
|
||||
inrelease.published,
|
||||
))
|
||||
|
||||
def select(self):
|
||||
"""Find the hash of the InRelease file valid at a given timestamp."""
|
||||
candidate = self._select(self._mirror, self._suite)
|
||||
|
||||
if candidate:
|
||||
print("{} {} ({})".format(
|
||||
candidate.hash,
|
||||
candidate.datetime,
|
||||
candidate.published,
|
||||
))
|
||||
|
||||
def inject(self):
|
||||
"""Inject by-hash and inrelease-path settings into a sources.list."""
|
||||
sources_list = self._infile
|
||||
|
||||
if not os.path.exists(sources_list):
|
||||
sys.stderr.write("{}: No such file: {}.\n"
|
||||
.format(self._name, sources_list))
|
||||
sys.exit(EXIT_ERR)
|
||||
|
||||
with open(sources_list, "r", encoding="utf-8") as fp:
|
||||
buf = fp.read()
|
||||
|
||||
rexpr = re.compile(r"""^
|
||||
(?P<type>deb(?:-src)?)\s+
|
||||
(?P<opts>\[[^\]]+\]\s+)?
|
||||
(?P<mirror>(?P<scheme>\S+):\S+)\s+
|
||||
(?P<suite>\S+)\s+
|
||||
(?P<comps>.*)$""", flags=re.VERBOSE)
|
||||
|
||||
lines = buf.splitlines(True)
|
||||
|
||||
for i, line in enumerate(lines):
|
||||
line = lines[i]
|
||||
m = rexpr.match(line)
|
||||
|
||||
if not m:
|
||||
continue
|
||||
if m.group("scheme") not in ["http", "https", "ftp"]:
|
||||
continue
|
||||
|
||||
opts = {}
|
||||
if m.group("opts"):
|
||||
for entry in m.group("opts").strip().strip("[]").split():
|
||||
k, v = entry.split("=")
|
||||
opts[k] = v
|
||||
|
||||
inrelease = self._select(m.group("mirror"), m.group("suite"))
|
||||
if inrelease:
|
||||
opts["by-hash"] = "yes"
|
||||
opts["inrelease-path"] = "by-hash/SHA256/" + inrelease.hash
|
||||
|
||||
groupdict = m.groupdict()
|
||||
groupdict["opts"] = " ".join(["{0}={1}".format(*o) for o in
|
||||
opts.items()])
|
||||
|
||||
lines[i] = "{type} [{opts}] {mirror} {suite} {comps}\n"\
|
||||
.format(**groupdict)
|
||||
|
||||
outfile = None
|
||||
try:
|
||||
if not self._outfile or self._outfile == "-":
|
||||
outfile = sys.stdout
|
||||
else:
|
||||
outfile = open(self._outfile, "w+", encoding="utf-8")
|
||||
outfile.write("".join(lines))
|
||||
finally:
|
||||
if outfile and outfile != sys.stdout:
|
||||
outfile.close()
|
||||
|
||||
def _parse_opts(self, args):
|
||||
"""Parse command line arguments and initialize the CLI object."""
|
||||
main_parser = argparse.ArgumentParser()
|
||||
subparsers = main_parser.add_subparsers(dest="command")
|
||||
|
||||
parser_inject = subparsers.add_parser("inject",
|
||||
help="Rewrite a sources.list file injecting appropriate hashes.")
|
||||
parser_list = subparsers.add_parser("list",
|
||||
help="List InRelease hashes for a given release and suite.")
|
||||
parser_select = subparsers.add_parser("select",
|
||||
help="Select hash to use for a given timestamp, release, suite.")
|
||||
|
||||
parser_inject.set_defaults(func=self.inject)
|
||||
parser_list.set_defaults(func=self.list)
|
||||
parser_select.set_defaults(func=self.select)
|
||||
|
||||
# Options common to all commands
|
||||
for parser in [parser_inject, parser_list, parser_select]:
|
||||
cutoff_time_required = True if parser != parser_list else False
|
||||
|
||||
parser.add_argument("-t", "--cutoff-time", dest="timestamp",
|
||||
type=int, required=cutoff_time_required,
|
||||
help="A POSIX timestamp to pin the repo to.")
|
||||
parser.add_argument("--cache-file", dest="cachefile", type=str,
|
||||
help="A file where to cache intermediate results (optional).")
|
||||
|
||||
mirror = "http://archive.ubuntu.com/ubuntu"
|
||||
|
||||
# Options common to list, select commands
|
||||
for parser in [parser_list, parser_select]:
|
||||
parser.add_argument("-m", "--mirror", dest="mirror", type=str,
|
||||
default=mirror, help="The URL of the mirror to use.")
|
||||
parser.add_argument("-s", "--suite",
|
||||
dest="suite", type=str, required=True,
|
||||
help="The suite to scan (e.g. 'bionic', 'bionic-updates').")
|
||||
|
||||
# Extra option for inject command
|
||||
parser_inject.add_argument("-o", "--output-file", dest="outfile",
|
||||
type=str, help="")
|
||||
parser_inject.add_argument("infile", type=str,
|
||||
help="The sources.list file to modify.")
|
||||
|
||||
if not args:
|
||||
main_parser.print_help()
|
||||
sys.exit(EXIT_ERR)
|
||||
|
||||
return main_parser.parse_args(args)
|
||||
|
||||
def _list(self, mirror, suite):
|
||||
"""Internal helper for the list command. This is also used
|
||||
implicitly by the _select method."""
|
||||
index = LPInReleaseIndex(mirror, suite, cache=self._cache)
|
||||
|
||||
inrelease_files = \
|
||||
reversed(
|
||||
sorted(
|
||||
list(index.inrelease_files()),
|
||||
key=lambda x: x.published
|
||||
)
|
||||
)
|
||||
|
||||
return inrelease_files
|
||||
|
||||
def _select(self, mirror, suite):
    """Internal helper for the select command.

    Returns the most recently published InRelease file whose publication
    time does not exceed the configured cutoff timestamp, or None when no
    entry qualifies.
    """
    best = None

    for entry in self._list(mirror, suite):
        # Anything published after the cutoff is out of scope.
        if entry.published > self._timestamp:
            continue
        # Keep the newest of the remaining candidates.
        if not best or entry.published > best.published:
            best = entry

    return best
|
||||
|
||||
|
||||
class ProxyingHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
    """Request handler providing a virtual snapshot of the package
    repositories.

    Requests for `dists/<suite>/InRelease` are answered from a pinned
    InRelease file; requests for resources listed inside that InRelease
    file are transparently rewritten to the corresponding by-hash URL.
    Everything else is proxied to the upstream host unchanged.
    """

    def do_HEAD(self):
        """Process a HEAD request."""
        self.__get_request(verb="HEAD")

    def do_GET(self):
        """Process a GET request."""
        self.__get_request()

    def __get_request(self, verb="GET"):
        """Pass all requests on to the destination server 1:1 except when the
        target is an InRelease file or a resource listed in an InRelease file.

        In that case we silently download the resource via the by-hash URL
        which was most recent at the cutoff (or repo snapshot) time and inject
        it into the response.

        It is important to understand that there is no status 3xx HTTP
        redirect happening here, the client does not know that what it
        receives is not exactly what it requested.

        :param verb: HTTP method to use upstream ("GET" or "HEAD"); for
            "HEAD" only headers are relayed, never a body.
        """
        host, path = self.__get_host_path()

        # Split the request path into <base>/dists/<suite>/<target>; paths
        # that do not look like a dists/ access fall through untouched and
        # are proxied verbatim below.
        m = re.match(
            r"^(?P<base>.*?)/dists/(?P<suite>[^/]+)/(?P<target>.*)$",
            path
        )

        if m:
            mirror = "http://" + host + m.group("base")
            base = m.group("base")
            suite = m.group("suite")
            target = m.group("target")

            # Snapshot state (cache, cutoff timestamp) lives on the server
            # object, shared by all handler threads.
            index = LPInReleaseIndex(mirror, suite,
                cache=self.server.inrelease_cache)
            inrelease = index.get_inrelease_for_timestamp(
                self.server.snapshot_stamp)

            if inrelease is None:
                self.__send_error(404, "No InRelease file found for given "
                    "mirror, suite and timestamp.")
                return

            if target == "InRelease":
                # If target is InRelease, send back contents directly.
                data = inrelease.data.encode("utf-8")

                self.log_message(
                    "Inject InRelease '{}'".format(inrelease.hash))

                self.send_response(200)
                self.send_header("Content-Length", len(data))
                self.end_headers()

                # A HEAD request gets headers only, no body.
                if verb == "GET":
                    self.wfile.write(data)

                return
            else:
                # If target hash is listed, then redirect to by-hash URL.
                # When the target is not listed in the InRelease file,
                # hash_ is falsy and the original path is proxied as-is.
                hash_ = inrelease.get_hash_for(target)

                if hash_:
                    self.log_message(
                        "Inject {} for {}".format(hash_, target))

                    # Strip the final path component to get the directory.
                    # NOTE(review): for a target without any "/" this yields
                    # the whole target, not "" — presumably all hashed
                    # targets contain a slash; confirm against InRelease
                    # contents.
                    target_path = target.rsplit("/", 1)[0]

                    path = "{}/dists/{}/{}/by-hash/SHA256/{}"\
                        .format(base, suite, target_path, hash_)

        # Forward the (possibly rewritten) request upstream with the same
        # HTTP verb the client used.
        try:
            client = http.client.HTTPConnection(host)
            client.request(verb, path)
        except Exception as e:
            self.log_error("Failed to retrieve http://{}{}: {}"
                .format(host, path, str(e)))
            return

        try:
            self.__send_response(client.getresponse())
        except Exception as e:
            self.log_error("Error delivering response: {}".format(str(e)))

    def __get_host_path(self):
        """Figure out the host to contact and the path of the resource that is
        being requested.

        Returns a (host, path) tuple; the host comes from the Host header,
        the path from the request line.
        """
        host = self.headers.get("host")
        url = urllib.parse.urlparse(self.path)
        path = url.path

        return host, path

    def __send_response(self, response):
        """Pass on upstream response headers and body to the client.

        :param response: an http.client.HTTPResponse from the upstream
            server; its status, headers and body are relayed verbatim.
        """
        self.send_response(response.status)

        # Headers are copied 1:1, including hop-by-hop ones.
        for name, value in response.getheaders():
            self.send_header(name, value)

        self.end_headers()
        # Stream the body without buffering it in memory.
        shutil.copyfileobj(response, self.wfile)

    def __send_error(self, status, message):
        """Return an HTTP error status and a message in the response body.

        :param status: numeric HTTP status code to send.
        :param message: plain-text body, encoded as UTF-8.
        """
        self.send_response(status)
        self.send_header("Content-Type", "text/plain; charset=utf-8")
        self.end_headers()
        self.wfile.write(message.encode("utf-8"))
|
||||
|
||||
|
||||
class MagicHTTPProxy(socketserver.ThreadingMixIn, http.server.HTTPServer):
    """Tiny HTTP server using ProxyingHTTPRequestHandler instances to provide
    a snapshot view of the package repositories."""

    def __init__(self, server_address, server_port, cache_file=None,
            repo_snapshot_stamp=None, run_as=None):
        """Bind the proxy server and set up the snapshot state.

        :param server_address: Interface address to bind to.
        :param server_port: TCP port to listen on.
        :param cache_file: Optional backing file for the InRelease cache.
        :param repo_snapshot_stamp: POSIX timestamp to pin the repo view to;
            defaults to the time of construction.
        :param run_as: Accepted for interface compatibility; unused here
            (privilege dropping is performed by the CLI wrapper).
        :raises LPInReleaseProxyError: if the listening socket cannot be
            created (e.g. address already in use).
        """
        try:
            super(http.server.HTTPServer, self).__init__(
                (server_address, server_port), ProxyingHTTPRequestHandler)
        except OSError as e:
            raise LPInReleaseProxyError(
                "Could not initialize proxy: {}".format(str(e)))

        self.inrelease_cache = LPInReleaseCache(filename=cache_file)
        # Bug fix: the previous default `repo_snapshot_stamp=time.time()`
        # was evaluated once at class-definition (import) time, silently
        # pinning every default-constructed proxy to the moment the module
        # was loaded. Resolve the default at construction time instead.
        self.snapshot_stamp = (time.time() if repo_snapshot_stamp is None
                               else repo_snapshot_stamp)
|
||||
|
||||
|
||||
class MagicHTTPProxyCli:
    """A CLI interface for the MagicHTTPProxy."""

    def __init__(self, name):
        """Store the program *name* (used in messages) and option defaults.

        NOTE(review): these instance attributes are never read back —
        __call__ uses the parsed argparse options directly.
        """
        self._name = name
        self._address = "127.0.0.1"
        self._port = 8080
        self._timestamp = time.time()
        self._run_as = None
        self._pid_file = None
        self._log_file = None
        self._background = False
        self._setsid = False

    def __call__(self, args):
        """Parse *args*, daemonize as requested and serve forever.

        The order of operations matters: the proxy socket is bound first
        (so bind errors surface in the foreground), then fork, stream
        redirection, setsid, PID file, and finally setuid — the PID file
        must record the daemon's PID and may need root to be writable.
        """
        options = self._parse_opts(args)

        proxy = MagicHTTPProxy(
            options.address,
            options.port,
            cache_file=None,
            repo_snapshot_stamp=options.timestamp
        )

        # Detach, but keep all streams open.
        if options.background:
            pid = os.fork()
            if pid:
                # Parent exits immediately; os._exit avoids running any
                # cleanup handlers twice.
                os._exit(EXIT_OK)

        if options.log_file:
            # The file object is deliberately kept open for the process
            # lifetime; dup2 points stdout/stderr at it.
            fd = open(options.log_file, "wb+")
            os.dup2(fd.fileno(), sys.stdout.fileno())
            os.dup2(fd.fileno(), sys.stderr.fileno())

        # Become session leader and give up controlling terminal.
        if options.setsid:
            if not options.log_file:
                # Without a log file, detach the streams to /dev/null so
                # the daemon holds no reference to the old terminal.
                fd = open(os.devnull, "wb+")
                os.dup2(fd.fileno(), sys.stdout.fileno())
                os.dup2(fd.fileno(), sys.stderr.fileno())
            os.setsid()

        if options.pid_file:
            # Written after fork/setsid so it records the daemon's PID.
            with open(options.pid_file, "w+", encoding="utf-8") as fp:
                fp.write(str(os.getpid()))

        if options.run_as is not None:
            try:
                uid = pwd.getpwnam(options.run_as).pw_uid
                os.setuid(uid)
            except KeyError as e:
                # Unknown user name.
                sys.stderr.write("Failed to lookup {}: {}\n"
                    .format(options.run_as, str(e)))
                sys.exit(EXIT_ERR)
            except PermissionError as e:
                # Not running with sufficient privileges to change UID.
                sys.stderr.write("Cannot setuid: {}\n".format(str(e)))
                sys.exit(EXIT_ERR)

        proxy.serve_forever()

    def _parse_opts(self, args):
        """Parse command line arguments and initialize the CLI object.

        :param args: argument list (without the program name).
        :return: the argparse namespace with address, port, timestamp,
            run_as, pid_file, log_file, background and setsid attributes.
        """
        parser = argparse.ArgumentParser()

        parser.add_argument("--address", dest="address", type=str,
            default="127.0.0.1", help="The address of the interface to "
            "bind to (default: 127.0.0.1)")
        parser.add_argument("--port", dest="port", type=int, default=8080,
            help="The port to listen on (default: 8080)")
        parser.add_argument("-t", "--cutoff-time", dest="timestamp", type=int,
            required=True, help="A POSIX timestamp to pin the repo to.")
        parser.add_argument("--run-as", dest="run_as", type=str,
            help="Drop privileges and run as this user.")
        parser.add_argument("--pid-file", dest="pid_file", type=str,
            help="Store the PID to this file.")
        parser.add_argument("--log-file", dest="log_file", type=str,
            help="Re-direct all streams to this file.")
        parser.add_argument("--background", dest="background",
            action="store_true",
            help="Whether to go into the background.")
        parser.add_argument("--setsid", dest="setsid",
            action="store_true",
            help="Become session leader and drop controlling TTY.")

        return parser.parse_args(args)
|
||||
|
||||
if __name__ == "__main__":
    # Dispatch on the name the script was invoked under: the same file
    # serves as both the "lp-in-release" index tool and the magic proxy.
    name = os.path.basename(sys.argv[0])

    cli_class = LPInReleaseIndexCli if name == "lp-in-release" \
        else MagicHTTPProxyCli

    try:
        cli_class(name)(sys.argv[1:])
    except LPInReleaseBaseError as e:
        sys.stderr.write("{}: {}\n".format(name, str(e)))
        sys.exit(EXIT_ERR)
    except KeyboardInterrupt:
        sys.stderr.write("{}: Caught keyboard interrupt, exiting...\n"
            .format(name))
        sys.exit(EXIT_ERR)
|
Loading…
x
Reference in New Issue
Block a user