mirror of https://git.launchpad.net/livecd-rootfs
synced 2025-08-17 19:54:05 +00:00

Compare commits: ubuntu/mas...2.525.61 (258 commits)
debian/changelog (571 lines changed)
@@ -1,3 +1,574 @@
livecd-rootfs (2.525.61) bionic; urgency=medium

  [ Samir Akarioh ]
  * feat: Add metadata on ubuntu-oci image. (LP: #1998229)

 -- Utkarsh Gupta <utkarsh@ubuntu.com>  Mon, 12 Dec 2022 15:36:15 +0530

livecd-rootfs (2.525.60) bionic; urgency=medium

  [ Michał Sawicz ]
  * ubuntu-buildd: Add arm64 buildd bootable image. (LP: #1966636)

 -- jchittum <john.chittum@canonical.com>  Fri, 29 Jul 2022 09:55:35 +1200

livecd-rootfs (2.525.59) bionic; urgency=medium

  * ubuntu-cpc: Install `shim-signed` and `grub-efi-arm64-signed` to enable
    secureboot on ARM64 images (LP: #1980358)

 -- Ivan Kapelyukhin <ivan.kapelyukhin@canonical.com>  Thu, 30 Jun 2022 14:10:50 +0200

livecd-rootfs (2.525.58) bionic; urgency=medium

  * Split UEFI image out of `disk-image` series file into
    `disk-image-uefi`. (LP: #1961760)

 -- Ivan Kapelyukhin <ivan.kapelyukhin@canonical.com>  Thu, 03 Mar 2022 15:59:26 +0100

livecd-rootfs (2.525.57) bionic; urgency=medium

  * Add sleep due to e2fsck error in umount_partition. LP: #1960537

 -- Brian Murray <brian@ubuntu.com>  Thu, 10 Feb 2022 15:55:55 -0800

livecd-rootfs (2.525.56) bionic; urgency=medium

  [ Jason C. McDonald ]
  * Generate manifest for HyperV desktop image. (LP: #1940136)

 -- Michael Hudson-Doyle <michael.hudson@ubuntu.com>  Fri, 08 Oct 2021 14:42:28 +1300

livecd-rootfs (2.525.55) bionic; urgency=medium

  [ Thomas Bechtold ]
  * Remove device node files in /dev from rootfs tarball for oci project
    builds (LP: #1930686)

 -- Michael Hudson-Doyle <michael.hudson@ubuntu.com>  Tue, 08 Jun 2021 08:02:19 +1200

livecd-rootfs (2.525.54) bionic; urgency=medium

  * Add grub config to produce console output to allow buildd vm images to
    produce console output in lxd vms (LP: #1915571)

 -- Cody Shepherd <cody.shepherd@canonical.com>  Fri, 21 May 2021 17:30:56 -0700

livecd-rootfs (2.525.53) bionic; urgency=medium

  [ Thomas Bechtold ]
  * Add a new ubuntu-oci project that contains the customizations currently
    performed downstream for the official Ubuntu images on dockerhub.
    (LP: #1926732)

 -- Michael Hudson-Doyle <michael.hudson@ubuntu.com>  Tue, 11 May 2021 11:39:46 +1200

livecd-rootfs (2.525.52) bionic; urgency=medium

  [ Gauthier Jolly ]
  * ubuntu-cpc: secure esp mountpoint (LP: #1881006)
    Change mount option for ubuntu-cpc images from "defaults" to "umask=0077".
    ESP partitions might contain sensitive data and non-root users shouldn't
    have read access to it.

 -- Robert C Jennings <robert.jennings@canonical.com>  Sat, 10 Apr 2021 05:24:43 -0500
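A rough illustration of the mount change described above; the partition label and
mount point below are placeholders, not necessarily what the images use:

    # /etc/fstab: mount the ESP with a restrictive umask so only root can read it
    LABEL=UEFI   /boot/efi   vfat   umask=0077   0   1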
livecd-rootfs (2.525.51) bionic; urgency=medium

  [ David Krauser ]
  * buildd: produce kernel and initrd as separate artifacts LP: #1910557
  * buildd: call update-initramfs for all installed kernels.
    We only have one kernel installed, so we don't need to
    specify an explicit version. LP: #1910557

  [ Dimitri John Ledkov ]
  * esp: install grub in ubuntu bootloader id path, instead of removable.
    (LP: #1912830)
  * esp: perform fsck. (LP: #1912835)
  * Perform fsck on all rootfs. (LP: #1912835)
  * functions: stop removing systemd-detect-virt unconditionally in
    undivert_grub (LP: #1902260)

 -- Dimitri John Ledkov <xnox@ubuntu.com>  Tue, 09 Feb 2021 00:59:37 +0000
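For context, the "removable" versus "ubuntu bootloader id" distinction corresponds
roughly to the following two grub-install invocations (amd64 shown; the exact flags
used by livecd-rootfs are not part of this diff):

    # Removable/fallback path: EFI/BOOT/BOOTX64.EFI
    grub-install --target=x86_64-efi --efi-directory=/boot/efi --removable

    # Vendor path, registered as the "ubuntu" boot entry: EFI/ubuntu/
    grub-install --target=x86_64-efi --efi-directory=/boot/efi --bootloader-id=ubuntu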
livecd-rootfs (2.525.50) bionic; urgency=medium

  [ Cody Shepherd ]
  * Create manifests for buildd tarball images (LP: #1914445)

 -- Robert C Jennings <robert.jennings@canonical.com>  Fri, 05 Feb 2021 15:00:35 -0600

livecd-rootfs (2.525.49) bionic; urgency=medium

  * Create additional qcow2 images for ubuntu-appliances (LP: #1906805).

 -- Łukasz 'sil2100' Zemczak <lukasz.zemczak@ubuntu.com>  Mon, 11 Jan 2021 15:44:20 +0100

livecd-rootfs (2.525.48) bionic; urgency=medium

  [ John Chittum ]
  * Ensure the vmtools version is entered into the vmdk header (LP: #1893898)

  [ Dimitri John Ledkov & Joshua Powers ]
  * amd64: always install grub-pc with shim-signed (LP: #1901906), and
    ensure to autoremove packages

  [ Patrick Viafore ]
  * Increase Vagrant disk size from 10G to 40G (LP: #1580596)

 -- Robert C Jennings <robert.jennings@canonical.com>  Fri, 20 Nov 2020 15:08:54 -0600

livecd-rootfs (2.525.47) bionic; urgency=medium

  * Apparently the lxd appliance needs to use a custom track (4.0) and since
    model assertions do not accept track names in required-snaps, we need to
    do this by hand during build. (LP: #1891505)

 -- Łukasz 'sil2100' Zemczak <lukasz.zemczak@ubuntu.com>  Wed, 12 Aug 2020 15:58:19 +0200

livecd-rootfs (2.525.46) bionic; urgency=medium

  [ Cody Shepherd ]
  * Add dist-upgrade to the bootable-buildd hook to ensure the built image
    doesn't contain vulnerable kernels or other packages. LP: #1891061.

 -- Steve Langasek <steve.langasek@ubuntu.com>  Mon, 10 Aug 2020 14:17:15 -0700

livecd-rootfs (2.525.45) bionic; urgency=medium

  [ Francis Ginther ]
  * Use the linux-raspi-hwe-18.04 kernel for raspi3 classic image builds.
    (LP: #1889188)

  [ Michael Hudson-Doyle ]
  * Backport snap seeding and other fixes for live-server ISOs. (LP: #1883156)
  * Stop building and mounting the maas-rack and maas-region squashfses for
    the server-live build as subiquity does not use them any more.

  [ Dimitri John Ledkov ]
  * Run snap info on the downloaded snap, rather than against the store.
    First of all, snap info doesn't have a --channel argument, thus it
    queries the wrong channel, and depending on the cohort, a different
    snap might be visible too. Thus seed the base of the snap revision we
    downloaded, rather than some random one from the store.
  * Use snap-tool to seed the subiquity snap.
  * Subiquity: install linux-firmware in the installer layer, to make
    firmware blobs available in the live session. LP: #1847835

  [ Robert C Jennings ]
  * Fix logic to ensure snapd is seeded in core18-only images (LP: #1871919)

 -- Michael Hudson-Doyle <michael.hudson@ubuntu.com>  Wed, 22 Jul 2020 14:44:38 +1200
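A sketch of the difference described in the snap seeding fix above, using the stock
snapd CLI (the snap name and channel are placeholders):

    # Querying the store cannot be pinned to a channel, so the reported base
    # may not match the revision that was actually downloaded.
    snap info subiquity

    # Inspecting the downloaded file reports the base of that exact revision.
    snap download subiquity --channel=stable --basename=subiquity
    snap info ./subiquity.snap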
livecd-rootfs (2.525.44) bionic; urgency=medium

  * Add support for building uc18 appliance images (LP: #1887673).

 -- Łukasz 'sil2100' Zemczak <lukasz.zemczak@ubuntu.com>  Thu, 07 May 2020 18:38:54 +0200

livecd-rootfs (2.525.43) bionic; urgency=medium

  [ David Krauser ]
  * Add bootable buildd image w/ fixes (LP: #1863024)

 -- Robert C Jennings <robert.jennings@canonical.com>  Mon, 30 Mar 2020 09:39:16 -0500

livecd-rootfs (2.525.42) bionic; urgency=medium

  [ Cody Shepherd / David Krauser ]
  * Add bootable buildd image (LP: #1863024)

 -- Robert C Jennings <robert.jennings@canonical.com>  Fri, 27 Mar 2020 14:32:27 -0500

livecd-rootfs (2.525.41) bionic; urgency=medium

  * Use the snap CLI rather than the custom snap-tool (LP: #1864252)
  * Address snap base regression after snap-tool removal

 -- Robert C Jennings <robert.jennings@canonical.com>  Fri, 28 Feb 2020 08:29:57 -0600

livecd-rootfs (2.525.40) bionic; urgency=medium

  * Stop building per-pi-flavor images and only build a pi generic image for
    core18. (LP: #1861520)

 -- Łukasz 'sil2100' Zemczak <lukasz.zemczak@ubuntu.com>  Fri, 31 Jan 2020 16:57:01 +0100

livecd-rootfs (2.525.39) bionic; urgency=medium

  * Ensure the seed partition is mounted on no-cloud images which use
    system-boot as their seed (LP: #1860046)
  * Have getty wait for cloud-init to complete to ensure that the default
    user exists before presenting a login prompt

 -- Dave Jones <dave.jones@canonical.com>  Fri, 24 Jan 2020 15:17:56 +0000

livecd-rootfs (2.525.38) bionic; urgency=medium

  * Support generating a .disk/info file via ubuntu-image from the passed-in
    datestamp parameter (using the $NOW environment variable). (LP: #1856684)
  * Switch raspi2-hwe-18.04-edge to raspi2-hwe-18.04. (LP: #1859478)

 -- Łukasz 'sil2100' Zemczak <lukasz.zemczak@ubuntu.com>  Wed, 22 Jan 2020 09:18:06 +0100

livecd-rootfs (2.525.37) bionic; urgency=medium

  * Use the raspi2-hwe-18.04-edge kernel flavour for the raspi3 images. This
    kernel is required for proper pi4 support. (LP: #1859478)

 -- Łukasz 'sil2100' Zemczak <lukasz.zemczak@ubuntu.com>  Mon, 13 Jan 2020 15:33:20 +0100

livecd-rootfs (2.525.36) bionic; urgency=medium

  * Preserve apt preferences created by any package we install (i.e.
    ubuntu-advantage-tools) against live-build's attempt to delete them.
    (LP: #1855354)

 -- Michael Hudson-Doyle <michael.hudson@ubuntu.com>  Thu, 19 Dec 2019 22:43:33 +1300

livecd-rootfs (2.525.35) bionic; urgency=medium

  * Did not expect the bionic branch to be so outdated. Backport support
    for fetching core18 model assertions on bionic hosts. This enables build
    support for ubuntu-core 18 images. (LP: #1852332)

 -- Łukasz 'sil2100' Zemczak <lukasz.zemczak@ubuntu.com>  Tue, 12 Nov 2019 21:05:29 +0100

livecd-rootfs (2.525.34) bionic; urgency=medium

  * Add support for HyperV Gallery Images (LP: #1837088)

 -- David Krauser <david.krauser@canonical.com>  Wed, 05 Nov 2019 15:19:29 -0400

livecd-rootfs (2.525.33) bionic; urgency=medium

  * Add support for the 'kassel' subflavor of UC18 images. (LP: #1850674)

 -- Łukasz 'sil2100' Zemczak <lukasz.zemczak@ubuntu.com>  Wed, 30 Oct 2019 18:09:02 +0100

livecd-rootfs (2.525.32) bionic; urgency=medium

  * Fix a terrible typo in auto/build that causes all core18 builds to fail on
    this series (LP: #1849343).
  * Backport 999-ubuntu-image-customization.chroot to have a common place for
    our cloud-init datasource. Add some modifications on top to make sure the
    legacy raspi2 configuration stays the same (LP: #1849472).
  * Backport enabling of adding extra snaps to ubuntu-core images via the
    EXTRA_SNAPS environment variable (LP: #1849491).

 -- Łukasz 'sil2100' Zemczak <lukasz.zemczak@ubuntu.com>  Tue, 22 Oct 2019 17:34:31 +0200

livecd-rootfs (2.525.31) bionic; urgency=medium

  * magic-proxy: dump proxy log to stdout on failure (LP: #1847300)

 -- Robert C Jennings <robert.jennings@canonical.com>  Tue, 08 Oct 2019 11:00:07 -0500

livecd-rootfs (2.525.30) bionic; urgency=medium

  [ Robert C Jennings ]
  * ubuntu-cpc: Only produce explicitly specified artifacts (LP: #1837254)

  [ Ethan Hsieh ]
  * Add support for the Nitrogen6x board (LP: #1838064).

 -- Łukasz 'sil2100' Zemczak <lukasz.zemczak@ubuntu.com>  Tue, 10 Sep 2019 16:56:26 +0200

livecd-rootfs (2.525.29) bionic; urgency=medium

  * Add retry logic to snap-tool to make downloads more resilient.
    (LP: #1837871)

 -- Tobias Koch <tobias.koch@canonical.com>  Mon, 26 Aug 2019 13:41:50 +0200

livecd-rootfs (2.525.28) bionic; urgency=medium

  [ Tobias Koch ]
  * Do proper error checking when calling snap-tool info to determine
    the base of a snap. (LP: #1828500)

  [ Michael Vogt ]
  * Run "snap debug validate-seed" during preseeding to do basic validation of
    the generated seed.yaml

  [ Iain Lane ]
  * Seed core for non-minimized builds, as it is still required
    (LP: #1836594).

 -- Iain Lane <iain.lane@canonical.com>  Tue, 16 Jul 2019 13:20:18 +0100

livecd-rootfs (2.525.27) bionic; urgency=medium

  * Backport improvements to snap seeding from trunk. LP: #1831675.

  [ Tobias Koch ]
  * Replace "snap download" with a tool that uses the snap store's coherence
    feature.
  * Detect which core snaps are required and install them on the fly.
  * If an image has core18 snaps only, automatically preseed snapd.

 -- Dimitri John Ledkov <xnox@ubuntu.com>  Mon, 10 Jun 2019 18:46:16 +0100

livecd-rootfs (2.525.26) bionic; urgency=medium

  * Strip translation files out of the minimal images, another thing that
    goes unused when there is no human console user (and we already don't
    have the locales themselves present on a minimal image). LP: #1829333.

 -- Steve Langasek <steve.langasek@ubuntu.com>  Tue, 28 May 2019 08:54:04 -0700

livecd-rootfs (2.525.25) bionic; urgency=medium

  [ Robert C Jennings ]
  * ubuntu-cpc: parallel builds (LP: #1829938)
    - Inject a proxy into the build providing a snapshot view of the
      package repo.
    - Use series files with dependency handling to generate hook symlinks
      dynamically.

 -- Steve Langasek <steve.langasek@ubuntu.com>  Tue, 21 May 2019 15:35:35 -0700

livecd-rootfs (2.525.24) bionic; urgency=medium

  * Build WSL rootfs tarball (LP: #1827930)

 -- Balint Reczey <rbalint@ubuntu.com>  Fri, 17 May 2019 15:11:42 +0200

livecd-rootfs (2.525.23) bionic; urgency=medium

  * Backport two minimizations for the docker images: remove apt lists that
    are removed downstream anyway, and remove device nodes from the image.
    (LP: #1828118)

 -- Michael Hudson-Doyle <michael.hudson@ubuntu.com>  Wed, 08 May 2019 10:48:39 +1200

livecd-rootfs (2.525.22) bionic; urgency=medium

  * Subiquity-specific changes SRU LP: #1827357
    - subiquity: make subiquity_config.mount optional
    - Make serial-subiquity@ use the same codepath as tty1 subiquity.
    - Fix ubuntu-server-live images to generate initrd with casper UUID.

 -- Dimitri John Ledkov <xnox@ubuntu.com>  Thu, 02 May 2019 11:34:34 +0100

livecd-rootfs (2.525.21) bionic; urgency=medium

  * Remove crufty files after minimize-manual (LP: #1826377)

 -- Julian Andres Klode <juliank@ubuntu.com>  Fri, 26 Apr 2019 10:40:08 +0200

livecd-rootfs (2.525.20) bionic; urgency=medium

  [ Steve Langasek ]
  * Drop /etc/update-motd.d/51-cloudguest from cloud images; this is not
    consistent with current Ubuntu Advantage product language. Any future
    customizations to update-motd for cloud images should be done via a
    package instead.

  [ Julian Andres Klode ]
  * Minimize the number of manually installed packages in images by marking
    dependencies of metapackages as automatically installed (LP: #1800610),
    but do not mark direct dependencies of ubiquity as auto-installed. This
    caused cryptsetup to remain auto on the installed system in bionic (see
    LP: #1801629).

 -- Steve Langasek <steve.langasek@ubuntu.com>  Thu, 14 Mar 2019 11:04:49 -0700
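A minimal sketch of the marking step described above, assuming a shell inside the
image chroot and using ubuntu-server as a placeholder metapackage (the actual hook
in livecd-rootfs is more involved):

    # Keep the metapackage itself manual; mark its direct dependencies auto so
    # they become eligible for autoremoval when nothing else needs them.
    apt-mark manual ubuntu-server
    apt-mark auto $(apt-cache depends ubuntu-server \
        | awk '/Depends:/ {print $2}' | grep -v '^<')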
livecd-rootfs (2.525.19) bionic; urgency=medium

  * Make sure buildd images have a /usr/sbin/policy-rc.d symlink
    (LP: #1815251).

 -- Colin Watson <cjwatson@ubuntu.com>  Mon, 18 Feb 2019 16:03:46 +0000

livecd-rootfs (2.525.18) bionic; urgency=medium

  [ Michael Hudson-Doyle ]
  * Changes to kernel handling for live-server: do not include kernel/initrd
    in filesystem.squashfs but rather install it in a throwaway layer on top
    of installer.squashfs and fish the kernel, initrd and modules out of that
    with an initrd hook that records the kernel metapackage name in /run and
    mounts /lib/modules from a squashfs on the ISO.
  * Do not install openssh-server in the base filesystem for the live server
    installer.
  * A few simple tweaks to reduce the size of the live server's
    installer.squashfs:
    - Do not run apt-get update (which can bring in package lists if we are
      unlucky wrt publisher schedules).
    - Run apt-get clean to clear out downloaded debs of curtin/casper and
      dependencies.
    - Do not install user-setup.
    - Use the core snap from the base filesystem if present.
  * Do not include curtin in the live-server installer.squashfs as the
    version of subiquity that includes it in the snap has now been released
    to stable.

 -- Adam Conrad <adconrad@ubuntu.com>  Tue, 12 Feb 2019 11:35:48 -0700

livecd-rootfs (2.525.17) bionic; urgency=medium

  * Add a buildd subproject (LP: #1815251).
  * Add a LXD image to builds for the buildd subproject.
  * Move buildd image building to binary hooks.

 -- Colin Watson <cjwatson@ubuntu.com>  Fri, 08 Feb 2019 22:57:59 +0000

livecd-rootfs (2.525.16) bionic; urgency=medium

  * lubuntu: Select the lubuntu-gtk-core task as well, so we get the HWE
    filter applied to the explicit dependencies it has on Xorg-recommended
    packages.
  * lubuntu: Manually install some packages that fall out due to the above.

 -- Adam Conrad <adconrad@ubuntu.com>  Mon, 04 Feb 2019 12:48:51 -0700

livecd-rootfs (2.525.15) bionic; urgency=medium

  * Forward-port HWE support from xenial, make it more generic, and enable
    it for the current LTS flavours.

 -- Adam Conrad <adconrad@ubuntu.com>  Fri, 01 Feb 2019 12:41:39 -0700

livecd-rootfs (2.525.14) bionic; urgency=medium

  * More changes for raspi3 build support (LP: #1805668):
    - Fix 100-purge-grub-legacy-ec2-arm.chroot to not gate on ARCH as that's
      not defined in .chroot hooks.
    - Use the new linux-firmware-raspi2 for boot binary blobs as
      raspi3-firmware includes upgrade hooks that conflict with our image
      configuration.

 -- Łukasz 'sil2100' Zemczak <lukasz.zemczak@ubuntu.com>  Fri, 18 Jan 2019 16:56:58 +0100

livecd-rootfs (2.525.13) bionic; urgency=medium

  [ Balint Reczey ]
  * Update Vcs-* fields in debian/control to point to git.

  [ Łukasz 'sil2100' Zemczak ]
  * Another batch of cherry-picks for raspi3 support (LP: #1805668):
    - Add wpasupplicant to the additional packages installed for the raspi2
      and raspi3 targets.
    - Default to IMAGEFORMAT=ubuntu-image for raspi3 ubuntu-cpc builds.
    - Link the resulting raspi3 image to a filename that cdimage expects from
      a preinstalled image build.
  * Add the 100-purge-grub-legacy-ec2-arm.chroot hook to remove
    grub-legacy-ec2 from any ARM-based ubuntu-cpc images we create. The
    package is no longer in the server seed of newer series anyway.

 -- Łukasz 'sil2100' Zemczak <lukasz.zemczak@ubuntu.com>  Fri, 11 Jan 2019 14:53:10 +0100

livecd-rootfs (2.525.12) bionic; urgency=medium

  * Key netplan delegation to NetworkManager on the presence of
    /usr/sbin/NetworkManager, not /usr/lib/NetworkManager, which may contain
    hooks from other packages (i.e. wpasupplicant).

  [ Cody Shepherd ]
  * Include snaps in image manifests (LP: #1805497)
  * Change the call that adds grub EFI packages to use the new
    create_manifests() function.

 -- Steve Langasek <steve.langasek@ubuntu.com>  Mon, 10 Dec 2018 12:46:46 -0800

livecd-rootfs (2.525.11) bionic; urgency=medium

  * Backport all the required changes to enable Raspberry Pi 3 armhf and
    arm64 preinstalled image builds. (LP: #1805668)
    - Add support for raspi3 rootfs builds (based on Ryan Finnie's changes).
    - For ubuntu-image consumption, export the kernel and initrd to
      image/boot/uboot for raspi*.
    - Avoid issues of hard-linking to a symbolic vmlinuz as this can lead to
      a dangling symlink.
    - Add raspi3 arm64 rootfs build support.
    - Add git to the build dependencies for the gadget tree pull.
    - Minor fixes to raspi3 builds: add arm64+raspi3 to the supported model
      list, pass SUITE on to the build stage and use the git:// url for the
      gadget tree.

 -- Łukasz 'sil2100' Zemczak <lukasz.zemczak@ubuntu.com>  Thu, 29 Nov 2018 16:24:23 +0100

livecd-rootfs (2.525.10) bionic; urgency=medium

  [ Cody Shepherd ]
  * Include grub EFI packages in manifests for UEFI images.
    (LP: #1805190)

  [ Robert C Jennings ]
  * Disable checksum generation. (LP: #1799773)

  [ Steve Langasek ]
  * Fix Vcs-Bzr link.

 -- Steve Langasek <steve.langasek@ubuntu.com>  Mon, 26 Nov 2018 12:55:11 -0800

livecd-rootfs (2.525.9) bionic; urgency=medium

  * Ensure /lib/modules exists in root tarballs and squashfs.
    (LP: #1792905)

 -- Tobias Koch <tobias.koch@canonical.com>  Thu, 20 Sep 2018 09:30:34 +0200

livecd-rootfs (2.525.8) bionic; urgency=medium

  * Disentangle enabling universe in the final image a little from having
    PREINSTALLED=true set, and enable it for a live-server build.
    (LP: #1783129)
  * Fix the live-server journald config snippet to actually disable journald
    rate limiting.

 -- Michael Hudson-Doyle <michael.hudson@ubuntu.com>  Tue, 28 Aug 2018 11:03:37 +1200

livecd-rootfs (2.525.7) bionic; urgency=medium

  * Disable journald rate limiting in the live-server live session.
    (LP: #1776891)

 -- Michael Hudson-Doyle <michael.hudson@ubuntu.com>  Tue, 14 Aug 2018 21:41:53 -0700

livecd-rootfs (2.525.6) bionic; urgency=medium

  [ Steve Langasek ]
  * Generate all tar files with --xattrs. LP: #1302192.

  [ Daniel Watkins ]
  * ubuntu-cpc: Reintroduce the -root.tar.xz artifact (LP: #1585233).
  * ubuntu-cpc: Generate the root image contents once, and use it for both
    the -root.tar.xz and the .squashfs.
  * ubuntu-cpc: Generate -root.tar.xz with --xattrs.

 -- Steve Langasek <steve.langasek@ubuntu.com>  Mon, 06 Aug 2018 14:16:04 -0700
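For reference, --xattrs simply tells GNU tar to store extended attributes (such as
file capabilities) in the archive; without it they are silently dropped. A minimal
example with placeholder paths:

    tar --xattrs -cpf rootfs.tar -C chroot .
    tar --xattrs -xpf rootfs.tar -C /target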
livecd-rootfs (2.525.5) bionic; urgency=medium

  * live-build/auto/config: Improve the linux-tools filter to work for i386 too.

 -- Adam Conrad <adconrad@ubuntu.com>  Tue, 24 Jul 2018 22:21:27 -0600

livecd-rootfs (2.525.4) bionic; urgency=medium

  * live-build/auto/config: Dirty hack to filter linux-tools-aws from budgie.

 -- Adam Conrad <adconrad@ubuntu.com>  Tue, 24 Jul 2018 21:55:49 -0600

livecd-rootfs (2.525.3) bionic; urgency=medium

  * live-build/auto/config: Add a nasty hack to order the gtk-common-themes
    snap first in snapd's seed.yaml to work around the snapd bug in
    LP: #1772844.

 -- Adam Conrad <adconrad@ubuntu.com>  Mon, 23 Jul 2018 10:02:04 -0600

livecd-rootfs (2.525.2) bionic; urgency=medium

  * Update the unminimize script text and install ubuntu-standard when
    unminimizing a minimal image (LP: #1778777)

 -- Francis Ginther <francis.ginther@canonical.com>  Mon, 02 Jul 2018 13:27:15 -0500

livecd-rootfs (2.525.1) bionic; urgency=medium

  * Symlink systemd-networkd-wait-online to /bin/true in the live installer
    live session to avoid depending on buggy systemd behaviour. (LP: #1773719)
  * Remove "optional: true" from the installer netplan config.

 -- Michael Hudson-Doyle <michael.hudson@ubuntu.com>  Wed, 23 May 2018 14:27:01 +1200

livecd-rootfs (2.525) bionic; urgency=medium

  * Don't fail to build CPC images if /lib/modules already exists in the
debian/control (9 lines changed)
@@ -4,12 +4,13 @@ Priority: optional
Build-Depends: debhelper (>= 7)
Maintainer: Ubuntu Developers <ubuntu-devel-discuss@lists.ubuntu.com>
Standards-Version: 3.9.6
Vcs-Bzr: http://bazaar.launchpad.net/~ubuntu-core-dev/livecd-rootfs/trunk
Vcs-Git: https://git.launchpad.net/livecd-rootfs -b ubuntu/bionic

Package: livecd-rootfs
Architecture: any
Depends: ${misc:Depends},
 apt-utils,
 attr,
 debootstrap,
 distro-info,
 dosfstools,
@@ -17,21 +18,25 @@ Depends: ${misc:Depends},
 gdisk,
 genisoimage,
 germinate (>= 1.25.1),
 git,
 gnupg,
 grep-dctrl,
 kpartx,
 live-build (>= 3.0~a57-1ubuntu31~),
 lsb-release,
 lzma,
 make,
 parted,
 procps,
 python-minimal | python,
 python3-apt,
 python3-software-properties,
 qemu-utils,
 rsync,
 snapd,
 snapd (>= 2.39),
 squashfs-tools (>= 1:3.3-1),
 sudo,
 u-boot-tools [armhf arm64],
 ubuntu-image,
 vmdk-stream-converter [amd64 i386],
 xz-utils,
debian/install (3 lines changed)
@@ -1,2 +1,5 @@
live-build usr/share/livecd-rootfs
get-ppa-fingerprint usr/share/livecd-rootfs
minimize-manual usr/share/livecd-rootfs
magic-proxy usr/share/livecd-rootfs
lp-in-release usr/share/livecd-rootfs
debian/tests/default-bootstraps (1 line changed)
@@ -25,6 +25,7 @@ ALL_TRIPLETS="
mythbuntu::
ubuntu::
ubuntu-base::
ubuntu-base:buildd:
ubuntu-budgie::
ubuntu-budgie-desktop::
ubuntu-budgie-live::
@@ -17,18 +17,68 @@ fi

. config/functions

# Link output files somewhere BuildLiveCD will be able to find them.
if [ -n "$REPO_SNAPSHOT_STAMP" ]; then
    if [ "`whoami`" != "root" ]; then
        echo "Magic repo snapshots only work when running as root." >&2
        exit 1
    fi

    apt-get -qyy install iptables

    # Redirect all outgoing traffic to port 80 to proxy instead.
    iptables -t nat -A OUTPUT -p tcp --dport 80 -m owner ! --uid-owner daemon \
        -j REDIRECT --to 8080

    # Run proxy as "daemon" to avoid infinite loop.
    /usr/share/livecd-rootfs/magic-proxy \
        --address="127.0.0.1" \
        --port=8080 \
        --run-as=daemon \
        --cutoff-time="$REPO_SNAPSHOT_STAMP" \
        --log-file=/build/livecd.magic-proxy.log \
        --pid-file=config/magic-proxy.pid \
        --background \
        --setsid
fi
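When debugging a build host that went through this hook, the redirect rule and the
proxy can be checked with standard tools (run as root; a quick sketch, not part of
the hook itself):

    # Show the REDIRECT rule inserted into the nat OUTPUT chain
    iptables -t nat -L OUTPUT -n -v

    # Confirm something is listening on the proxy port
    ss -ltnp | grep 8080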
# Link output files somewhere launchpad-buildd will be able to find them.
PREFIX="livecd.$PROJECT${SUBARCH:+-$SUBARCH}"

if [ "${IMAGEFORMAT:-}" = "ubuntu-image" ]; then
    # Use ubuntu-image instead of live-build

    CHANNEL="${CHANNEL:-edge}"
    env SNAPPY_STORE_NO_CDN=1 \
        ubuntu-image -c "$CHANNEL" $UBUNTU_IMAGE_ARGS \
        -o "$PREFIX".img "$PREFIX".model-assertion
    xz -0 -T4 "$PREFIX".img
    mv seed.manifest "$PREFIX".manifest
    if [ "$PROJECT" = "ubuntu-core" ]; then
        CHANNEL="${CHANNEL:-edge}"
        env SNAPPY_STORE_NO_CDN=1 \
            ubuntu-image snap -c "$CHANNEL" $UBUNTU_IMAGE_ARGS \
            -O output "$PREFIX".model-assertion
        # XXX: currently we only have one image generated, but really
        # we should be supporting more than one for models that
        # define those.
        mv output/*.img "$PREFIX".img
        # For amd64 appliance images we also need to generate qcow2 images.
        case "${SUBARCH:-}" in appliance-*-amd64)
            convert_to_qcow2 "$PREFIX".img "$PREFIX".qcow2 ;;
        esac
        # Now regular, compressed images.
        xz -0 -T4 "$PREFIX".img
        mv output/seed.manifest "$PREFIX".manifest
    else
        # First we need to build the gadget tree
        make -C "config/$PREFIX-gadget" ARCH=$ARCH SERIES=$SUITE
        ubuntu-image classic $UBUNTU_IMAGE_ARGS \
            -s $SUITE -p $PROJECT -a $ARCH --subarch $SUBARCH \
            -O output config/$PREFIX-gadget/install
        # XXX: currently we only have one image generated, but really
        # we should be supporting more than one for models that
        # define those.
        mv output/*.img "$PREFIX".img
        xz -0 -T4 "$PREFIX".img
        # Also link the output image to a filename that cdimage expects
        ln "$PREFIX".img.xz livecd.ubuntu-cpc.disk1.img.xz
        mv output/filesystem.manifest "$PREFIX".manifest
    fi

    exit 0
fi
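The convert_to_qcow2 helper called above is defined in config/functions and is not
shown in this diff; assuming it is a thin wrapper around qemu-img, the conversion it
performs is roughly:

    # Hypothetical equivalent of: convert_to_qcow2 "$PREFIX".img "$PREFIX".qcow2
    qemu-img convert -f raw -O qcow2 "$PREFIX".img "$PREFIX".qcow2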
@@ -63,8 +113,8 @@ Expire-Date: 0

lb bootstrap "$@"

case $PROJECT in
    ubuntu-server|ubuntu-cpc)
    case $PROJECT:${SUBPROJECT:-} in
        ubuntu-server:*|ubuntu-cpc:*|ubuntu:desktop-preinstalled)
            # Set locale to C.UTF-8 by default. We should
            # probably do this for all images early in the
            # 18.10 cycle but for now just do it for

@@ -86,6 +136,9 @@ Expire-Date: 0
# Drop all man pages
path-exclude=/usr/share/man/*

# Drop all translations
path-exclude=/usr/share/locale/*/LC_MESSAGES/*.mo

# Drop all documentation ...
path-exclude=/usr/share/doc/*

@@ -109,12 +162,16 @@ set -e
echo "This system has been minimized by removing packages and content that are"
echo "not required on a system that users do not log into."
echo ""
echo "This script restores the content and packages that are found on a default"
echo "Ubuntu server system."
echo "This script restores content and packages that are found on a default"
echo "Ubuntu server system in order to make this system more suitable for"
echo "interactive use."
echo ""
echo "Reinstallation of packages may fail due to changes to the system"
echo "configuration, the presence of third-party packages, or for other"
echo "reasons."
echo ""
echo "This operation may take some time."
echo ""
read -p "Would you like to continue? [y/N]" REPLY
echo # (optional) move to a new line
if [ "$REPLY" != "y" ] && [ "$REPLY" != "Y" ]

@@ -142,6 +199,10 @@ if [ -f /etc/dpkg/dpkg.cfg.d/excludes ] || [ -f /etc/dpkg/dpkg.cfg.d/excludes.dp
# This step processes the packages which still have missing documentation
dpkg --verify --verify-format rpm | awk '/..5...... \/usr\/share\/doc/ {print $2}' | sed 's|/[^/]*$||' | sort |uniq \
    | xargs dpkg -S | sed 's|, |\n|g;s|: [^:]*$||' | uniq | DEBIAN_FRONTEND=noninteractive xargs apt-get install --reinstall -y
echo "Restoring system translations..."
# This step processes the packages which still have missing translations
dpkg --verify --verify-format rpm | awk '/..5...... \/usr\/share\/locale/ {print $2}' | sed 's|/[^/]*$||' | sort |uniq \
    | xargs dpkg -S | sed 's|, |\n|g;s|: [^:]*$||' | uniq | DEBIAN_FRONTEND=noninteractive xargs apt-get install --reinstall -y
if dpkg --verify --verify-format rpm | awk '/..5...... \/usr\/share\/doc/ {exit 1}'; then
    echo "Documentation has been restored successfully."
    rm /etc/dpkg/dpkg.cfg.d/excludes.dpkg-tmp

@@ -155,7 +216,7 @@ fi

if ! dpkg-query --show --showformat='${db:Status-Status}\n' ubuntu-minimal 2> /dev/null | grep -q '^installed$'; then
    echo "Installing ubuntu-minimal package to provide the familiar Ubuntu minimal system..."
    DEBIAN_FRONTEND=noninteractive apt-get install -y ubuntu-minimal
    DEBIAN_FRONTEND=noninteractive apt-get install -y ubuntu-minimal ubuntu-standard
fi

if dpkg-query --show --showformat='${db:Status-Status}\n' ubuntu-server 2> /dev/null | grep -q '^installed$' \

@@ -205,6 +266,12 @@ EOF

lb chroot "$@"

if [ -d chroot/etc/apt/preferences.d.save ]; then
    # https://twitter.com/infinite_scream
    mv chroot/etc/apt/preferences.d.save/* chroot/etc/apt/preferences.d/
    rmdir chroot/etc/apt/preferences.d.save
fi

if [ "${SUBPROJECT:-}" = minimized ]; then
    # force removal of initramfs-tools, which we assert is not
    # required for any minimized images but is still pulled in by

@@ -229,11 +296,7 @@ EOF
        apt-get -y --purge autoremove"
fi

# remove crufty files that shouldn't be left in an image
rm -f chroot/var/cache/debconf/*-old chroot/var/lib/dpkg/*-old
Chroot chroot apt clean

if [ -f config/oem-config-preinstalled ]; then
if [ -f config/universe-enabled ]; then

    # This is cargo-culted almost verbatim (with some syntax changes for
    # preinstalled being slightly different in what it doesn't ask) from

@@ -376,6 +439,14 @@ deb file:/var/lib/preinstalled-pool/ $LB_DISTRIBUTION $LB_PARENT_ARCHIVE_AREAS
        Chroot chroot "ln -s /etc/media-info /var/log/installer/media-info"
    fi
fi
if [ "$PROJECT" = "ubuntu-oci" ]; then
    if [ -n "$BUILDSTAMP" ]; then
        configure_oci chroot "$BUILDSTAMP"
    else
        echo "The \$BUILDSTAMP variable is empty"
        exit 1
    fi
fi
if [ "$PROJECT" = "ubuntu-cpc" ]; then
    if [ "${SUBPROJECT:-}" = minimized ]; then
        BUILD_NAME=minimal

@@ -392,7 +463,7 @@ EOF
# default. Installing NM on an existing system only manages wifi and wwan via
# /usr/lib/NetworkManager/conf.d/10-globally-managed-devices.conf. When setting
# the global backend to NM, netplan overrides that file.
if [ -d chroot/usr/lib/NetworkManager ]; then
if [ -e chroot/usr/sbin/NetworkManager ]; then
    echo "===== Enabling all devices in NetworkManager ===="
    mkdir -p chroot/etc/netplan
    cat <<EOF > chroot/etc/netplan/01-network-manager-all.yaml
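The heredoc body written to 01-network-manager-all.yaml is cut off by the hunk
boundary above; the conventional content of that netplan file is shown below as an
assumption, since the diff does not include it:

    network:
      version: 2
      renderer: NetworkManager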
@@ -410,6 +481,23 @@ EOF
    (cd chroot && find usr/share/doc -maxdepth 1 -type d | xargs du -s | sort -nr)
    echo END docdirs

    /usr/share/livecd-rootfs/minimize-manual chroot

    # remove crufty files that shouldn't be left in an image
    rm -f chroot/var/cache/debconf/*-old chroot/var/lib/dpkg/*-old
    Chroot chroot apt clean
    # For the docker images we remove even more stuff.
    if [ "${PROJECT}:${SUBPROJECT:-}" = "ubuntu-base:minimized" ] || [ "${PROJECT}:${SUBPROJECT:-}" = "ubuntu-oci:minimized" ]; then
        # Remove apt lists (that are currently removed downstream
        # anyway)
        rm -rf chroot/var/lib/apt/lists/*
        # Having device nodes in the docker image can cause problems
        # (https://github.com/tianon/docker-brew-ubuntu-core/issues/62)
        # so remove them. We only do this for docker out of an
        # abundance of caution.
        rm -rf chroot/dev/*
    fi

    lb binary "$@"
    touch binary.success
) 2>&1 | tee binary.log

@@ -420,6 +508,12 @@ EOF
if [ -e binary.success ]; then
    rm -f binary.success
else
    # Dump the magic-proxy log to stdout on failure to aid debugging
    if [ -f /build/livecd.magic-proxy.log ] ; then
        echo "================= Magic proxy log (start) ================="
        cat /build/livecd.magic-proxy.log
        echo "================== Magic proxy log (end) =================="
    fi
    exit 1
fi

@@ -455,7 +549,7 @@ for ISO in binary.iso binary.hybrid.iso; do
done

if [ -e "binary/$INITFS/filesystem.dir" ]; then
    (cd "binary/$INITFS/filesystem.dir/" && tar -c *) | \
    (cd "binary/$INITFS/filesystem.dir/" && tar -c --xattrs *) | \
        gzip -9 --rsyncable > "$PREFIX.rootfs.tar.gz"
    chmod 644 "$PREFIX.rootfs.tar.gz"
elif [ -e binary-tar.tar.gz ]; then

@@ -492,13 +586,14 @@ EOF
fi

if [ "$PROJECT" = "ubuntu-touch" ] || [ "$PROJECT" = "ubuntu-touch-custom" ]; then
    (cd "binary/$INITFS/custom.dir/" && tar -c *) | \
    (cd "binary/$INITFS/custom.dir/" && tar -c --xattrs *) | \
        gzip -9 --rsyncable > "$PREFIX.custom.tar.gz"
    chmod 644 "$PREFIX.custom.tar.gz"
fi

# '--initramfs none' produces different manifest names.
if [ -e "binary/$INITFS/filesystem.packages" ]; then
    ./config/snap-seed-parse "chroot/" "binary/${INITFS}/filesystem.packages"
    ln "binary/$INITFS/filesystem.packages" "$PREFIX.manifest"
    chmod 644 "$PREFIX.manifest"
fi

@@ -763,6 +858,8 @@ for FLAVOUR in $LB_LINUX_FLAVOURS; do
    if [ -z "$LB_LINUX_FLAVOURS" ] || [ "$LB_LINUX_FLAVOURS" = "none" ]; then
        continue
    fi
    # hwe-* kernels don't use the hwe suffix on the filesystem:
    FLAVOUR=${FLAVOUR%%-hwe-*}
    if [ "$FLAVOUR" = "virtual" ]; then
        # The virtual kernel is named generic in /boot
        FLAVOUR="generic"

@@ -796,50 +893,67 @@ done
NUMFLAVOURS="$(set -- $LB_LINUX_FLAVOURS; echo $#)"
if [ "$NUMFLAVOURS" = 1 ] && [ "$LB_LINUX_FLAVOURS" != "none" ]; then
    # only one kernel flavour
    if [ -e "binary/$INITFS/vmlinuz" ]; then
    FLAVOUR=${LB_LINUX_FLAVOURS%%-hwe-*}
    if [ -f "binary/$INITFS/vmlinuz" ] && ! [ -h "binary/$INITFS/vmlinuz" ]; then
        ln "binary/$INITFS/vmlinuz" "$PREFIX.kernel"
        chmod 644 "$PREFIX.kernel"
    else
        ln -sf "$PREFIX.kernel-$LB_LINUX_FLAVOURS" "$PREFIX.kernel"
        ln -sf "$PREFIX.kernel-$FLAVOUR" "$PREFIX.kernel"
    fi
    if [ -e "binary/$INITFS/initrd.lz" ]; then
    if [ -f "binary/$INITFS/initrd.lz" ] && ! [ -h "binary/$INITFS/initrd.lz" ]; then
        ln "binary/$INITFS/initrd.lz" "$PREFIX.initrd"
        chmod 644 "$PREFIX.initrd"
    else
        ln -sf "$PREFIX.initrd-$LB_LINUX_FLAVOURS" "$PREFIX.initrd"
        ln -sf "$PREFIX.initrd-$FLAVOUR" "$PREFIX.initrd"
    fi
fi

if [ "$SUBARCH" = "ac100" ]; then
    # create the md5sum and size files for which we are actually doing all this
    md5sum $PREFIX.rootfs.tar.gz >chroot/installer.md5
    wc -c $PREFIX.rootfs.tar.gz >chroot/installer.size
case $SUBARCH in
    ac100)
        # create the md5sum and size files for which we are actually doing all this
        md5sum $PREFIX.rootfs.tar.gz >chroot/installer.md5
        wc -c $PREFIX.rootfs.tar.gz >chroot/installer.size

        INFO_DESC="$(lsb_release -d -s)"
        INFO_STAMP=$(date +20%y%m%d-%H:%M)

        echo "$INFO_DESC - $ARCH ($INFO_STAMP)" >chroot/media-info

        # make sure update-initramfs feels cosy and warm in the environment
        lb chroot_proc install "$@"
        lb chroot_sysfs install "$@"
        lb chroot_devpts install "$@"

        # re-create initrd to contain the installer.md5 file
        Chroot chroot "env FLASH_KERNEL_SKIP=1 update-initramfs -k all -t -u -v"

        # create boot.img
        Chroot chroot "abootimg --create /boot/installer-${KVERS}.img -f /boot/bootimg.cfg-$SUBARCH -r /boot/initrd.img-${KVERS} -k /boot/vmlinuz-${KVERS}"

        # clean up
        lb chroot_devpts remove "$@"
        lb chroot_sysfs remove "$@"
        lb chroot_proc remove "$@"

        cp "chroot/boot/installer-${KVERS}.img" "$PREFIX.bootimg-$FLAVOUR"
        ln -sf "$PREFIX.bootimg-$FLAVOUR" "$PREFIX.bootimg"
        ;;

    INFO_DESC="$(lsb_release -d -s)"
    INFO_STAMP=$(date +20%y%m%d-%H:%M)
    raspi2|raspi3)
        # copy the kernel and initrd to a predictable directory for
        # ubuntu-image consumption. In some cases, like in pi2/3
        # u-boot, the bootloader needs to contain the kernel and initrd,
        # so during rootfs build we copy it over to a directory that
        # ubuntu-image looks for and shoves into the bootloader
        # partition.
        UBOOT_BOOT="image/boot/uboot"

    echo "$INFO_DESC - $ARCH ($INFO_STAMP)" >chroot/media-info
        mkdir -p $UBOOT_BOOT

    # make sure update-initramfs feels cosy and warm in the environment
    lb chroot_proc install "$@"
    lb chroot_sysfs install "$@"
    lb chroot_devpts install "$@"

    # re-create initrd to contain the installer.md5 file
    Chroot chroot "env FLASH_KERNEL_SKIP=1 update-initramfs -k all -t -u -v"

    # create boot.img
    Chroot chroot "abootimg --create /boot/installer-${KVERS}.img -f /boot/bootimg.cfg-$SUBARCH -r /boot/initrd.img-${KVERS} -k /boot/vmlinuz-${KVERS}"

    # clean up
    lb chroot_devpts remove "$@"
    lb chroot_sysfs remove "$@"
    lb chroot_proc remove "$@"

    cp "chroot/boot/installer-${KVERS}.img" "$PREFIX.bootimg-$FLAVOUR"
    ln -sf "$PREFIX.bootimg-$FLAVOUR" "$PREFIX.bootimg"

fi
        cp $PREFIX.initrd $UBOOT_BOOT/initrd.img || true
        cp $PREFIX.kernel $UBOOT_BOOT/vmlinuz || true
        ;;
esac

if [ "$PROJECT" = "ubuntu-touch" ] || [ "$PROJECT" = "ubuntu-touch-custom" ]; then
    sourceslist="chroot/etc/apt/sources.list"

@@ -920,3 +1034,17 @@ case $PROJECT in
    fi
    ;;
esac

if [ -f "config/magic-proxy.pid" ]; then
    kill -TERM $(cat config/magic-proxy.pid)
    rm -f config/magic-proxy.pid

    # Remove previously-inserted iptables rule.
    iptables -t nat -D OUTPUT -p tcp --dport 80 -m owner ! --uid-owner daemon \
        -j REDIRECT --to 8080
fi

case $PROJECT in
    ubuntu-cpc)
        config/hooks.d/remove-implicit-artifacts
esac
@@ -4,6 +4,8 @@ set -e
rm -rf config

echo "Building on $(hostname --fqdn)"
SUITENUM=$(distro-info --series="$SUITE" -r | awk '{ print $1 }')
HWE_SUFFIX="hwe-${SUITENUM}"

SEEDMIRROR=http://people.canonical.com/~ubuntu-archive/seeds/
if [ -z "$MIRROR" ]; then

@@ -33,6 +35,7 @@ fi

mkdir -p config
cp -af /usr/share/livecd-rootfs/live-build/functions config/functions
cp -af /usr/share/livecd-rootfs/live-build/snap-seed-parse.py config/snap-seed-parse

mkdir -p config/package-lists

@@ -59,9 +62,19 @@ add_task ()
    # failure.

    for task; do
        if [ "$task" = "ubuntu-budgie-desktop" ]; then
            filter="| grep -v '^linux.*tools'"
        else
            filter=""
        fi
        if [ -n "$HWE" ]; then
            # If HWE is set, we strip out drivers not available in the
            # HWE stack, then sed the rest with their HWE suffixes:
            filter="$filter | sed -e 's/xserver-xorg.*/&-${HWE_SUFFIX}/'"
        fi
        # We need a ridiculous number of backslashes to protect
        # parentheses from eval.
        echo "!chroot chroot apt-cache dumpavail | grep-dctrl -nsPackage \\\\\\( -XFArchitecture $ARCH -o -XFArchitecture all \\\\\\) -a -wFTask $task" >> "config/package-lists/livecd-rootfs.list.chroot_$pass"
        echo "!chroot chroot apt-cache dumpavail | grep-dctrl -nsPackage \\\\\\( -XFArchitecture $ARCH -o -XFArchitecture all \\\\\\) -a -wFTask $task $filter" >> "config/package-lists/livecd-rootfs.list.chroot_$pass"
    done
}
@@ -107,8 +120,13 @@ add_binary_hook ()

if [ -z "${IMAGEFORMAT:-}" ]; then
    case $PROJECT:${SUBPROJECT:-} in
        ubuntu-cpc:*)
            IMAGEFORMAT=ext4
        ubuntu-cpc:*|ubuntu:desktop-preinstalled)
            if [ "$SUBARCH" = "raspi3" ]; then
                # For now only raspi3, but others are soon to follow
                IMAGEFORMAT=ubuntu-image
            else
                IMAGEFORMAT=ext4
            fi
            ;;
        ubuntu-server:live)
            IMAGEFORMAT=plain

@@ -132,8 +150,10 @@ case $IMAGEFORMAT in
        COMPONENTS='main restricted universe multiverse'
        ;;
    *)
        case $PROJECT in
            ubuntu-cpc)
        case $PROJECT:${SUBPROJECT:-} in
            ubuntu-base:buildd)
                ;;
            ubuntu-cpc:*)
                ;;
            *)
                add_package live jasper

@@ -144,54 +164,115 @@
        ;;

    plain)
        INITRAMFS_TYPE=none
        case $PROJECT:${SUBPROJECT:-} in
            ubuntu-server:live)
                # Stop lb installing casper into filesystem.squashfs
                # by skipping lb_chroot_live-packages.
                skip_lb_stage chroot_live-packages
                INITRAMFS_TYPE=auto
                touch config/universe-enabled
                ;;
            *)
                PREINSTALLED=true
                ;;
        esac
        OPTS="${OPTS:+$OPTS }--initramfs $INITRAMFS_TYPE --chroot-filesystem $IMAGEFORMAT"
        OPTS="${OPTS:+$OPTS }--initramfs none --chroot-filesystem $IMAGEFORMAT"
        ;;

    ubuntu-image)
        UBUNTU_IMAGE_ARGS=""
        BRAND_ID="canonical"

        case "$ARCH+${SUBARCH:-}" in
            # Handle appliance images
            *+appliance-*)
                BRAND_ID="ubuntu"
                MODEL="${SUBARCH#appliance-}"
                APPLIANCE=1
                ;;
            amd64+*)
                MODEL=pc-amd64 ;;
            i386+*)
                MODEL=pc-i386 ;;
            arm64+snapdragon)
                MODEL=dragonboard ;;
            armhf+raspi2)
                MODEL=pi2 ;;
            armhf+raspi3)
                MODEL=pi3 ;;
            armhf+cm3)
                MODEL=cm3 ;;
                MODEL=pi ;;
            arm64+raspi3)
                MODEL=pi-arm64 ;;
            armhf+imx6)
                MODEL=nitrogen6x ;;
            *)
                echo "Model $ARCH+${SUBARCH:-} unknown to livecd-rootfs" >&2
                exit 1
                ;;
        esac
        case $MODEL in
            pc-amd64|pc-i386)
                UBUNTU_IMAGE_ARGS="--image-size 3700M" ;;
            *)
                UBUNTU_IMAGE_ARGS="" ;;
        esac

        echo "IMAGEFORMAT=$IMAGEFORMAT" >> config/common
        echo "UBUNTU_IMAGE_ARGS=\"$UBUNTU_IMAGE_ARGS\"" >> config/common
        # Store model assertion in top dir to get it picked up later as a build artifact
        env SNAPPY_STORE_NO_CDN=1 snap known --remote model series=16 model="$MODEL" brand-id=canonical > "$PREFIX".model-assertion
        echo "Configured ubuntu-image for the following model assertion:"
        cat "$PREFIX".model-assertion
        echo "----------------------------------------------------------"
        # If we have a datestamp coming from cdimage, use that to populate
        # .disk/info on the target image
        if [ -n "$NOW" ]; then
            echo "$NOW" > config/disk-info
            UBUNTU_IMAGE_ARGS="$UBUNTU_IMAGE_ARGS --disk-info config/disk-info"
        fi

        if [ $PROJECT = "ubuntu-core" ]; then
            # snap-based core images

            case $MODEL in
                pc-amd64|pc-i386)
                    [ -z "${SUBARCH:-}" ] \
                        && UBUNTU_IMAGE_ARGS="$UBUNTU_IMAGE_ARGS --image-size 3700M"
                    ;;
                # Some appliances need different snap tracks
                lxd-core18*)
                    EXTRA_SNAPS="lxd=4.0"
                    ;;
            esac
            case $SUITE in
                xenial)
                    # Ubuntu Core 16
                    ;;
                *)
                    # Ubuntu Core 18
                    [ -z "${APPLIANCE:-}" ] \
                        && MODEL="ubuntu-core-18-${MODEL#pc-}"
                    ;;
            esac

            case "$ARCH+${SUBARCH:-}" in
                amd64+kassel)
                    EXTRA_SNAPS="core bluez alsa-utils"
                    ;;
                *) ;;
            esac
            for snap in $EXTRA_SNAPS; do
                UBUNTU_IMAGE_ARGS="$UBUNTU_IMAGE_ARGS --extra-snaps $snap"
            done
            echo "IMAGEFORMAT=$IMAGEFORMAT" >> config/common
            echo "UBUNTU_IMAGE_ARGS=\"$UBUNTU_IMAGE_ARGS\"" >> config/common
            # Store model assertion in top dir to get it picked up later as a build artifact
            env SNAPPY_STORE_NO_CDN=1 snap known --remote model series=16 model="$MODEL" brand-id="$BRAND_ID" > "$PREFIX".model-assertion
            echo "Configured ubuntu-image for the following model assertion:"
            cat "$PREFIX".model-assertion
            echo "----------------------------------------------------------"
        else
            # classic images

            # Certain models have different names but are built from the same source gadget tree
            case $MODEL in
                pi-arm64|pi)
                    # XXX: Temporarily this is still built from the pi3 tree,
                    # but eventually we'll use the pi one.
                    MODEL=pi3 ;;
            esac

            UBUNTU_IMAGE_ARGS="$UBUNTU_IMAGE_ARGS${PROPOSED:+ --with-proposed}"
            UBUNTU_IMAGE_ARGS="$UBUNTU_IMAGE_ARGS${EXTRA_PPAS:+ --extra-ppas \"$EXTRA_PPAS\"}"

            git clone git://git.launchpad.net/~canonical-foundations/snap-$MODEL/+git/github-mirror -b classic config/$PREFIX-gadget

            echo "IMAGEFORMAT=$IMAGEFORMAT" >> config/common
            echo "SUITE=$SUITE" >> config/common
            echo "UBUNTU_IMAGE_ARGS=\"$UBUNTU_IMAGE_ARGS\"" >> config/common

            echo "Configured ubuntu-image for the following gadget model: $MODEL"
        fi
        # Fake finished configuration for lb build
        mkdir -p .build
        touch .build/config

@@ -213,9 +294,9 @@ case $IMAGEFORMAT in
esac

if [ "$PREINSTALLED" = "true" ]; then
    # This is an oem-config preinstalled image, touch a random file that
    # we can refer back to during build, cause that's wildly hackish
    touch config/oem-config-preinstalled
    # Touch a random file that we can refer back to during build,
    # cause that's wildly hackish
    touch config/universe-enabled
    case $PROJECT in
        kubuntu*)
            add_package live oem-config-kde ubiquity-frontend-kde

@@ -236,7 +317,14 @@ if [ "$PREINSTALLED" = "true" ]; then
        ubuntu-server)
            add_package live oem-config-debconf ubiquity-frontend-debconf
            ;;
        ubuntu-core|ubuntu-base|base|ubuntu-touch|ubuntu-touch-custom|ubuntu-cpc|ubuntu-desktop-next)
        ubuntu-core|ubuntu-base|base|ubuntu-touch|ubuntu-touch-custom|ubuntu-cpc|ubuntu-desktop-next|ubuntu-oci)
            ;;
        ubuntu)
            add_package live oem-config-gtk ubiquity-frontend-gtk
            add_package live ubiquity-slideshow-ubuntu
            if [ "$SUBPROJECT" = "desktop-preinstalled" ]; then
                add_package live language-pack-en-base
            fi
            ;;
        *)
            add_package live oem-config-gtk ubiquity-frontend-gtk

@@ -254,8 +342,6 @@ case $BINARYFORMAT in
        ;;
esac

SIGNED_KERNEL_PACKAGE="linux-signed-generic"

if [ "${SUBPROJECT:-}" = minimized ]; then
    OPTS="${OPTS:+$OPTS }--bootstrap-flavour=minimal --linux-packages=linux-image"
fi

@@ -330,10 +416,12 @@ esac

case $PROJECT in
    ubuntu|ubuntu-dvd)
        HWE="yes"
        KERNEL_FLAVOURS="generic${HWE:+-$HWE_SUFFIX}"
        add_task install minimal standard ubuntu-desktop
        LIVE_TASK='ubuntu-live'
        case $ARCH in
            amd64) add_package live $SIGNED_KERNEL_PACKAGE ;;
            amd64) add_package live linux-signed-$KERNEL_FLAVOURS ;;
        esac
        ;;

@@ -341,9 +429,9 @@ case $PROJECT in
        add_task install minimal standard ubuntu-desktop-next ubuntu-sdk-libs
        COMPONENTS='main restricted universe'
        LIVE_TASK='ubuntu-touch-live'
        KERNEL_FLAVOURS=generic
        KERNEL_FLAVOURS="generic${HWE:+-$HWE_SUFFIX}"
        case $ARCH in
            amd64) add_package live linux-signed-generic ;;
            amd64) add_package live linux-signed-$KERNEL_FLAVOURS ;;
        esac

        # system image snappy desktop next image

@@ -357,11 +445,13 @@ case $PROJECT in
        ;;

    kubuntu|kubuntu-dvd)
        HWE="yes"
        KERNEL_FLAVOURS="generic${HWE:+-$HWE_SUFFIX}"
        add_task install minimal standard
        add_task install kubuntu-desktop
        LIVE_TASK='kubuntu-live'
        case $ARCH in
            amd64) add_package live linux-signed-generic ;;
            amd64) add_package live linux-signed-$KERNEL_FLAVOURS ;;
        esac
        COMPONENTS='main restricted universe'
        add_chroot_hook remove-gnome-icon-cache

@@ -387,25 +477,25 @@ case $PROJECT in
        ;;

    edubuntu|edubuntu-dvd)
        KERNEL_FLAVOURS="generic${HWE:+-$HWE_SUFFIX}"
        add_task install minimal standard ubuntu-desktop edubuntu-desktop-gnome
        LIVE_TASK='edubuntu-live'
        case $ARCH in
            amd64) add_package live linux-signed-generic ;;
            amd64) add_package live linux-signed-$KERNEL_FLAVOURS ;;
        esac
        COMPONENTS='main restricted universe'
        ;;

    xubuntu)
        HWE="yes"
        KERNEL_FLAVOURS="generic${HWE:+-$HWE_SUFFIX}"
        add_task install minimal standard xubuntu-desktop
        add_package install xterm
        LIVE_TASK='xubuntu-live'
        case $ARCH in
            amd64) add_package live linux-signed-generic ;;
            amd64) add_package live linux-signed-$KERNEL_FLAVOURS ;;
        esac
        COMPONENTS='main restricted universe multiverse'
        case $ARCH in
            amd64|i386) KERNEL_FLAVOURS=generic ;;
        esac
        ;;

    ubuntu-netbook)

@@ -414,24 +504,29 @@ case $PROJECT in
        ;;

    mythbuntu)
        KERNEL_FLAVOURS="generic${HWE:+-$HWE_SUFFIX}"
        add_task install minimal standard mythbuntu-desktop
        LIVE_TASK='mythbuntu-live'
        case $ARCH in
            amd64) add_package live linux-signed-generic ;;
            amd64) add_package live linux-signed-$KERNEL_FLAVOURS ;;
        esac
        COMPONENTS='main restricted universe multiverse'
        ;;

    lubuntu)
        add_task install minimal standard lubuntu-desktop
        HWE="yes"
        KERNEL_FLAVOURS="generic${HWE:+-$HWE_SUFFIX}"
        add_task install minimal standard lubuntu-gtk-core lubuntu-desktop
        # Installing the lubuntu-gtk-core task explicitly appears
        # to make dbus-user-session not get installed anymore:
        add_package install dbus-user-session
        # These themes also get dropped, maybe worth looking closer:
        add_package install humanity-icon-theme gnome-icon-theme adwaita-icon-theme
        LIVE_TASK='lubuntu-live'
        case $ARCH in
            amd64) add_package live linux-signed-generic ;;
            amd64) add_package live linux-signed-$KERNEL_FLAVOURS ;;
        esac
        COMPONENTS='main restricted universe multiverse'
        case $ARCH in
            amd64|i386) KERNEL_FLAVOURS=generic ;;
        esac

        # The Lubuntu STRUCTURE file has "feature
        # no-follow-recommends". Mirror this.

@@ -439,15 +534,13 @@ case $PROJECT in
        ;;

    lubuntu-next)
        KERNEL_FLAVOURS="generic${HWE:+-$HWE_SUFFIX}"
        add_task install minimal standard lubuntu-qt-desktop
        LIVE_TASK='lubuntu-live-qt'
        case $ARCH in
            amd64) add_package live linux-signed-generic ;;
            amd64) add_package live linux-signed-$KERNEL_FLAVOURS ;;
        esac
        COMPONENTS='main restricted universe multiverse'
        case $ARCH in
            amd64|i386) KERNEL_FLAVOURS=generic ;;
        esac

        # The Lubuntu STRUCTURE file has "feature
        # no-follow-recommends". Mirror this.

@@ -455,46 +548,51 @@ case $PROJECT in
        ;;

    ubuntu-gnome)
        KERNEL_FLAVOURS="generic${HWE:+-$HWE_SUFFIX}"
        add_task install minimal standard ubuntu-gnome-desktop
|
||||
LIVE_TASK='ubuntu-gnome-live'
|
||||
case $ARCH in
|
||||
amd64) add_package live linux-signed-generic ;;
|
||||
amd64) add_package live linux-signed-$KERNEL_FLAVOURS ;;
|
||||
esac
|
||||
COMPONENTS='main restricted universe'
|
||||
;;
|
||||
|
||||
ubuntu-budgie)
|
||||
HWE="yes"
|
||||
KERNEL_FLAVOURS="generic${HWE:+-$HWE_SUFFIX}"
|
||||
add_task install minimal standard ubuntu-budgie-desktop
|
||||
LIVE_TASK='ubuntu-budgie-live'
|
||||
case $ARCH in
|
||||
amd64) add_package live linux-signed-generic ;;
|
||||
amd64) add_package live linux-signed-$KERNEL_FLAVOURS ;;
|
||||
esac
|
||||
COMPONENTS='main restricted universe'
|
||||
;;
|
||||
|
||||
ubuntu-mate)
|
||||
HWE="yes"
|
||||
KERNEL_FLAVOURS="generic${HWE:+-$HWE_SUFFIX}"
|
||||
add_task install minimal standard ubuntu-mate-core ubuntu-mate-desktop
|
||||
LIVE_TASK='ubuntu-mate-live'
|
||||
case $ARCH in
|
||||
amd64) add_package live linux-signed-generic ;;
|
||||
amd64) add_package live linux-signed-$KERNEL_FLAVOURS ;;
|
||||
esac
|
||||
COMPONENTS='main restricted universe multiverse'
|
||||
;;
|
||||
|
||||
ubuntustudio-dvd)
|
||||
KERNEL_FLAVOURS="lowlatency${HWE:+-$HWE_SUFFIX}"
|
||||
add_task install minimal standard ubuntustudio-desktop ubuntustudio-audio ubuntustudio-fonts ubuntustudio-graphics ubuntustudio-video ubuntustudio-publishing ubuntustudio-photography
|
||||
COMPONENTS='main restricted universe multiverse'
|
||||
case $ARCH in
|
||||
amd64|i386) KERNEL_FLAVOURS=lowlatency ;;
|
||||
esac
|
||||
;;
|
||||
|
||||
ubuntukylin)
|
||||
HWE="yes"
|
||||
KERNEL_FLAVOURS="generic${HWE:+-$HWE_SUFFIX}"
|
||||
add_task install minimal standard ubuntukylin-desktop
|
||||
add_package install ubuntukylin-default-settings
|
||||
LIVE_TASK='ubuntukylin-live'
|
||||
case $ARCH in
|
||||
amd64) add_package live linux-signed-generic ;;
|
||||
amd64) add_package live linux-signed-$KERNEL_FLAVOURS ;;
|
||||
esac
|
||||
COMPONENTS='main restricted universe'
|
||||
;;
|
||||
@ -509,10 +607,7 @@ case $PROJECT in
|
||||
live)
|
||||
add_task install standard
|
||||
add_task install server
|
||||
LIVE_TASK='cloud-image'
|
||||
case $ARCH in
|
||||
amd64) add_package live linux-signed-generic ;;
|
||||
esac
|
||||
add_package install cloud-init
|
||||
;;
|
||||
esac
|
||||
COMPONENTS='main'
|
||||
@ -559,8 +654,7 @@ case $PROJECT in
|
||||
add_package install grub-pc
|
||||
;;
|
||||
amd64)
|
||||
add_package install grub-pc-bin
|
||||
add_package install grub-efi-amd64-signed
|
||||
add_package install grub-pc
|
||||
add_package install shim-signed
|
||||
;;
|
||||
esac
|
||||
@ -578,6 +672,10 @@ case $PROJECT in
|
||||
OPTS="${OPTS:+$OPTS }--bootstrap-flavour=minimal"
|
||||
;;
|
||||
|
||||
ubuntu-oci)
|
||||
OPTS="${OPTS:+$OPTS }--bootstrap-flavour=minimal"
|
||||
;;
|
||||
|
||||
ubuntu-touch|ubuntu-touch-custom)
|
||||
HINTS="packagekit ubuntu-system-settings"
|
||||
case $ARCH in
|
||||
@ -668,6 +766,36 @@ case $PROJECT in
|
||||
;;
|
||||
esac
|
||||
|
||||
case $SUBPROJECT in
|
||||
buildd)
|
||||
OPTS="${OPTS:+$OPTS }--archive-areas main"
|
||||
COMPONENTS='main restricted universe multiverse'
|
||||
OPTS="${OPTS:+$OPTS }--apt-recommends false"
|
||||
OPTS="${OPTS:+$OPTS }--apt-secure false"
|
||||
OPTS="${OPTS:+$OPTS }--parent-mirror-binary ${MIRROR}"
|
||||
# XXX cjwatson 2018-04-27: We need to work out how to make
|
||||
# this conditional so that we can do things like building
|
||||
# buildd chroots with -updates. This probably involves
|
||||
# either extending the PROPOSED hack or fixing the strange
|
||||
# way that SUITE is in fact a series; in either case it's
|
||||
# likely to involve work both here and in launchpad-buildd.
|
||||
OPTS="${OPTS:+$OPTS }--security false --volatile false"
|
||||
|
||||
add_package install adduser
|
||||
add_package install pkgbinarymangler
|
||||
add_package install ca-certificates
|
||||
add_package install gpg
|
||||
add_package install gpg-agent
|
||||
add_package install tzdata
|
||||
add_package install fakeroot
|
||||
add_package install build-essential
|
||||
# Needed for LXD-based builds.
|
||||
add_package install init
|
||||
|
||||
cp -af /usr/share/livecd-rootfs/live-build/make-lxd-metadata.py config/make-lxd-metadata
|
||||
;;
|
||||
esac
|
||||
|
||||
# we'll expand the base seed given here according to the STRUCTURE file, and
|
||||
# then look in all of the seeds found to see which snaps are seeded
|
||||
case $PROJECT:${SUBPROJECT:-} in
|
||||
@ -700,7 +828,12 @@ if [ -n "${BASE_SEED}" ]; then
|
||||
seed_snaps=$(sed -rn '1,/-----/d;/-----/,$d; s/(.*) \|.*/\1/; s, \(classic\),/classic,; p' "${file}")
|
||||
for snap in ${seed_snaps}; do
|
||||
echo "snap: found ${snap}"
|
||||
ALL_SNAPS="${ALL_SNAPS:+${ALL_SNAPS} }${snap}"
|
||||
# Reorder gtk-common-themes first due to LP: #1772844
|
||||
if [ "${snap}" = "gtk-common-themes" ]; then
|
||||
ALL_SNAPS="${snap}${ALL_SNAPS:+ ${ALL_SNAPS}}"
|
||||
else
|
||||
ALL_SNAPS="${ALL_SNAPS:+${ALL_SNAPS} }${snap}"
|
||||
fi
|
||||
done
|
||||
done
|
||||
if [ -n "${ALL_SNAPS}" ] || [ -n "${HOOK_SNAPS}" ]; then
|
||||
@ -740,7 +873,7 @@ case $PROJECT in
|
||||
esac
|
||||
|
||||
case $ARCH in
|
||||
armel|armhf)
|
||||
armel|armhf|arm64)
|
||||
KERNEL_FLAVOURS="${SUBARCH:-$KERNEL_FLAVOURS}"
|
||||
case $SUBARCH in
|
||||
dove)
|
||||
@ -775,17 +908,28 @@ case $ARCH in
|
||||
;;
|
||||
raspi2)
|
||||
COMPONENTS='main restricted universe multiverse'
|
||||
add_package install linux-firmware-raspi2 u-boot-rpi flash-kernel u-boot-tools
|
||||
add_package install linux-firmware-raspi2 u-boot-rpi flash-kernel u-boot-tools wpasupplicant
|
||||
BINARY_REMOVE_LINUX=false
|
||||
;;
|
||||
raspi3)
|
||||
COMPONENTS='main restricted universe multiverse'
|
||||
KERNEL_FLAVOURS=raspi-hwe-18.04
|
||||
add_package install linux-firmware-raspi2 u-boot-rpi flash-kernel u-boot-tools wpasupplicant
|
||||
BINARY_REMOVE_LINUX=false
|
||||
;;
|
||||
imx6)
|
||||
COMPONENTS='main restricted universe multiverse'
|
||||
KERNEL_FLAVOURS=generic
|
||||
add_package install flash-kernel u-boot-tools wpasupplicant
|
||||
BINARY_REMOVE_LINUX=false
|
||||
;;
|
||||
|
||||
esac
|
||||
;;
|
||||
esac
|
||||
|
||||
case $PROJECT:${SUBPROJECT:-} in
|
||||
ubuntu-server:live)
|
||||
;;
|
||||
ubuntu-server:*|ubuntu-base:*|ubuntu-touch:*|ubuntu-touch-custom:*)
|
||||
ubuntu-server:*|ubuntu-base:*|ubuntu-touch:*|ubuntu-touch-custom:*|ubuntu-oci:*)
|
||||
OPTS="${OPTS:+$OPTS }--linux-packages=none --initramfs=none"
|
||||
KERNEL_FLAVOURS=none
|
||||
BINARY_REMOVE_LINUX=false
|
||||
@ -828,6 +972,7 @@ lb config noauto \
|
||||
--initsystem none \
|
||||
--bootloader "$BOOTLOADER" \
|
||||
${INITRAMFS_COMPRESSION:+--initramfs-compression "$INITRAMFS_COMPRESSION"} \
|
||||
--checksums none \
|
||||
--cache false \
|
||||
${BOOTAPPEND_LIVE:+--bootappend-live "$BOOTAPPEND_LIVE"} \
|
||||
$OPTS \
|
||||
@ -840,9 +985,11 @@ echo "LB_BINARY_HOOKS=\"$BINARY_HOOKS\"" >> config/binary
|
||||
echo "BUILDSTAMP=\"$NOW\"" >> config/binary
|
||||
echo "SUBPROJECT=\"${SUBPROJECT:-}\"" >> config/binary
|
||||
echo "LB_DISTRIBUTION=\"$SUITE\"" >> config/binary
|
||||
echo "IMAGEFORMAT=\"$IMAGEFORMAT\"" >> config/chroot
|
||||
echo "SUBARCH=\"$SUBARCH\"" >> config/chroot
|
||||
|
||||
case $ARCH+$SUBARCH in
|
||||
armhf+raspi2)
|
||||
armhf+raspi2|armhf+raspi3|arm64+raspi3)
|
||||
cat > config/hooks/01-firmware-directory.chroot_early <<EOF
|
||||
#!/bin/sh -ex
|
||||
mkdir -p /boot/firmware
|
||||
@ -923,6 +1070,19 @@ rm -f /etc/fstab
|
||||
EOF
|
||||
fi
|
||||
|
||||
if [ $PROJECT != ubuntu-cpc ]; then
|
||||
cat > config/hooks/100-preserve-apt-prefs.chroot <<\EOF
|
||||
#! /bin/sh -ex
|
||||
|
||||
# live-build "helpfully" removes /etc/apt/preferences.d/* so we put a
|
||||
# copy somewhere it won't touch it.
|
||||
|
||||
if [ -n "$(ls -A /etc/apt/preferences.d)" ]; then
|
||||
cp -a /etc/apt/preferences.d /etc/apt/preferences.d.save
|
||||
fi
|
||||
EOF
|
||||
fi
|
||||
|
||||
if [ $PROJECT = ubuntukylin ]; then
|
||||
cat > config/hooks/100-ubuntukylin.chroot <<EOF
|
||||
#! /bin/sh
|
||||
@ -968,15 +1128,46 @@ EOF
|
||||
fi
|
||||
;;
|
||||
|
||||
ubuntu-touch:*|ubuntu-touch-custom:*|ubuntu-core:system-image|ubuntu-desktop-next:system-image|ubuntu-cpc:*|ubuntu-server:live)
|
||||
cp -af /usr/share/livecd-rootfs/live-build/${PROJECT}/* \
|
||||
config/
|
||||
ubuntu-touch:*|ubuntu-touch-custom:*|ubuntu-core:system-image|ubuntu-desktop-next:system-image|ubuntu-cpc:*|ubuntu-server:live|ubuntu:desktop-preinstalled)
|
||||
# Ensure that most things e.g. includes.chroot are copied as is
|
||||
for entry in /usr/share/livecd-rootfs/live-build/${PROJECT}/*; do
|
||||
case $entry in
|
||||
*hooks*)
|
||||
# But hooks are shared across the projects with symlinks,
# so dereference them
|
||||
cp -afL $entry config/
|
||||
;;
|
||||
*)
|
||||
# Most places want to preserve symlinks as is
|
||||
cp -af $entry config/
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [ "$PROJECT" = "ubuntu-cpc" ]; then
|
||||
case ${IMAGE_TARGETS:-} in
|
||||
"")
|
||||
config/hooks.d/make-hooks --hooks-dir config/hooks all
|
||||
;;
|
||||
*)
|
||||
config/hooks.d/make-hooks --hooks-dir config/hooks \
|
||||
"$IMAGE_TARGETS"
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
if [ "$IMAGEFORMAT" = none ]; then
|
||||
rm -f config/hooks/*.binary*
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
|
||||
case $SUBPROJECT in
|
||||
buildd)
|
||||
cp -af /usr/share/livecd-rootfs/live-build/buildd/* config/
|
||||
;;
|
||||
esac
|
||||
|
||||
if [ "$EXTRA_PPAS" ]; then
|
||||
rm -f config/archives/extra-ppas.list.chroot \
|
||||
config/archives/extra-ppas.pref.chroot \
|
||||
|
live-build/buildd/hooks/00-kernel-img.chroot (executable file, 5 lines)
@ -0,0 +1,5 @@
|
||||
#! /bin/sh
|
||||
set -e
|
||||
|
||||
# At one point, kernel builds needed this.
|
||||
echo do_initrd = Yes >>/etc/kernel-img.conf
|
live-build/buildd/hooks/00-mirror.binary (executable file, 12 lines)
@ -0,0 +1,12 @@
|
||||
#! /bin/sh
|
||||
set -e
|
||||
|
||||
. config/bootstrap
|
||||
|
||||
# Use a public-facing mirror URL, for the benefit of
|
||||
# sbuild-launchpad-chroot. We deliberately do this only after live-build
|
||||
# has run "apt-get update" for the last time, in order that
|
||||
# /var/lib/apt/lists/ has suitable cached Packages files; this speeds up
|
||||
# builds on buildds.
|
||||
sed -i "s,${LB_PARENT_MIRROR_BINARY},${LB_MIRROR_BINARY},g" \
|
||||
chroot/etc/apt/sources.list
|
live-build/buildd/hooks/01-pkgbinarymangler.chroot (executable file, 10 lines)
@ -0,0 +1,10 @@
|
||||
#! /bin/sh
|
||||
set -e
|
||||
|
||||
# Configure pkgbinarymangler.
|
||||
sed -i /^enable/s/false/true/ \
|
||||
/etc/pkgbinarymangler/maintainermangler.conf \
|
||||
/etc/pkgbinarymangler/striptranslations.conf || true
|
||||
sed -i /^invalid_current/s/ignore/fail/ \
|
||||
/etc/pkgbinarymangler/maintainermangler.conf \
|
||||
/etc/pkgbinarymangler/striptranslations.conf || true
|
@ -1,7 +1,7 @@
|
||||
#!/bin/bash -eux
|
||||
|
||||
case $ARCH in
|
||||
amd64|arm64|armhf)
|
||||
amd64|arm64)
|
||||
;;
|
||||
*)
|
||||
echo "We don't create EFI images for $ARCH."
|
||||
@ -9,8 +9,8 @@ case $ARCH in
|
||||
;;
|
||||
esac
|
||||
|
||||
IMAGE_STR="# CLOUD_IMG: This file was created/modified by the Cloud Image build process"
|
||||
FS_LABEL="cloudimg-rootfs"
|
||||
IMAGE_STR="# BUILDD_IMG: This file was created/modified by the Buildd Image build process"
|
||||
FS_LABEL="buildd-rootfs"
|
||||
|
||||
. config/binary
|
||||
|
||||
@ -49,7 +49,7 @@ create_and_mount_uefi_partition() {
|
||||
mount "${uefi_dev}" "$mountpoint"/boot/efi
|
||||
|
||||
cat << EOF >> "mountpoint/etc/fstab"
|
||||
LABEL=UEFI /boot/efi vfat defaults 0 0
|
||||
LABEL=UEFI /boot/efi vfat defaults 0 1
|
||||
EOF
|
||||
}
|
||||
|
||||
@ -64,11 +64,7 @@ install_grub() {
|
||||
efi_boot_dir="/boot/efi/EFI/BOOT"
|
||||
chroot mountpoint mkdir -p "${efi_boot_dir}"
|
||||
|
||||
if [ "${SUBPROJECT:-}" = minimized ] && [ -n "$partuuid" ]; then
|
||||
# FIXME: code duplicated between 032-disk-image.binary
|
||||
# and 033-disk-image-uefi.binary. We want to fix this to not
|
||||
# have initramfs-tools installed at all on these images.
|
||||
echo "partuuid found for root device; omitting initrd"
|
||||
if [ -n "$partuuid" ]; then
|
||||
echo "GRUB_FORCE_PARTUUID=$partuuid" >> mountpoint/etc/default/grub.d/40-force-partuuid.cfg
|
||||
fi
|
||||
|
||||
@ -88,11 +84,20 @@ install_grub() {
|
||||
efi_target=arm-efi
|
||||
;;
|
||||
amd64)
|
||||
chroot mountpoint apt-get install -qqy grub-efi-amd64-signed grub-efi-amd64 shim-signed
|
||||
chroot mountpoint apt-get install -qqy grub-pc shim-signed
|
||||
efi_target=x86_64-efi
|
||||
;;
|
||||
esac
|
||||
|
||||
# This call to rewrite the debian package manifest is added here to capture
|
||||
# grub-efi packages that otherwise would not make it into the base
|
||||
# manifest. filesystem.packages is moved into place via symlinking to
|
||||
# livecd.ubuntu-cpc.manifest by live-build/auto/build after lb_binary runs
|
||||
# and at that time snaps are added to the manifest (create-manifest is
|
||||
# not called here as it calls snap-seed-parse, resulting in duplicate
|
||||
# snap listings)
|
||||
chroot mountpoint dpkg-query -W > binary/boot/filesystem.packages
|
||||
|
||||
chroot mountpoint grub-install "${loop_device}" \
|
||||
--boot-directory=/boot \
|
||||
--efi-directory=/boot/efi \
|
||||
@ -116,6 +121,16 @@ install_grub() {
|
||||
chroot mountpoint grub-install --target=i386-pc "${loop_device}"
|
||||
fi
|
||||
|
||||
cat > mountpoint/etc/default/grub.d/50-builddimg-settings.cfg << EOF
|
||||
GRUB_DEFAULT=0
|
||||
GRUB_HIDDEN_TIMEOUT=0.1
|
||||
GRUB_HIDDEN_TIMEOUT_QUIET=true
|
||||
GRUB_TIMEOUT=0.1
|
||||
GRUB_CMDLINE_LINUX_DEFAULT="console=ttyS0"
|
||||
GRUB_RECORDFAIL_TIMEOUT=0
|
||||
GRUB_TERMINAL=console
|
||||
EOF
|
||||
|
||||
divert_grub mountpoint
|
||||
chroot mountpoint update-grub
|
||||
replace_grub_root_with_label mountpoint
|
live-build/buildd/hooks/02-user.chroot (executable file, 9 lines)
@ -0,0 +1,9 @@
|
||||
#! /bin/sh
|
||||
set -e
|
||||
|
||||
# Create the buildd user and group.
|
||||
addgroup --gid 2501 buildd
|
||||
adduser --system --disabled-password --gecos 'Build Daemon user' \
|
||||
--ingroup buildd --uid 2001 --shell /bin/bash buildd
|
||||
mkdir -p /build/buildd
|
||||
chown buildd:buildd /build/buildd
|
live-build/buildd/hooks/48-policy-rc-d.binary (executable file, 18 lines)
@ -0,0 +1,18 @@
|
||||
#!/bin/sh
|
||||
|
||||
set -e
|
||||
|
||||
# This is run after 02-disk-image-uefi.binary because
|
||||
# we don't want policyrcd-script-zg2 installed in
|
||||
# bootable images.
|
||||
|
||||
. config/functions
|
||||
|
||||
cleanup() {
|
||||
teardown_mountpoint chroot
|
||||
}
|
||||
trap cleanup EXIT
|
||||
|
||||
setup_mountpoint chroot
|
||||
|
||||
chroot chroot apt-get install -y policyrcd-script-zg2
|
live-build/buildd/hooks/49-empty-resolv-conf.binary (executable file, 3 lines)
@ -0,0 +1,3 @@
|
||||
#!/bin/sh -e
|
||||
chroot chroot rm /etc/resolv.conf
|
||||
chroot chroot touch /etc/resolv.conf
|
live-build/buildd/hooks/50-buildd-tar.binary (executable file, 14 lines)
@ -0,0 +1,14 @@
|
||||
#! /bin/sh
|
||||
# A few things (launchpad-buildd, sbuild-launchpad-chroot) rely on the
|
||||
# top-level directory being "chroot-autobuild", so we have to do this
|
||||
# ourselves.
|
||||
set -e
|
||||
|
||||
. config/functions
|
||||
|
||||
create_manifest chroot "livecd.$PROJECT.rootfs.manifest"
|
||||
|
||||
# gzip was chosen for fastest decompression speed: it decompresses buildd
|
||||
# chroots about twice as fast as xz and about five times as fast as bzip2.
|
||||
tar --transform='s,^chroot,chroot-autobuild,' --sort=name --numeric-owner \
|
||||
-czf "livecd.$PROJECT.rootfs.tar.gz" chroot
|
live-build/buildd/hooks/51-buildd-lxd.binary (executable file, 20 lines)
@ -0,0 +1,20 @@
|
||||
#! /bin/sh
|
||||
# Some build types prefer a LXD image over a traditional chroot tarball.
|
||||
set -e
|
||||
|
||||
. config/bootstrap
|
||||
. config/functions
|
||||
|
||||
TMPDIR="$(mktemp -d)"
|
||||
config/make-lxd-metadata "${LB_DISTRIBUTION%-*}" "$ARCH" \
|
||||
>"$TMPDIR/metadata.yaml"
|
||||
tar --numeric-owner -cf "livecd.$PROJECT.lxd.tar" -C "$TMPDIR" metadata.yaml
|
||||
rm -rf "$TMPDIR"
|
||||
|
||||
create_manifest chroot "livecd.$PROJECT.lxd.manifest"
|
||||
|
||||
# When using the combined metadata/rootfs form, the rootfs must be under
|
||||
# rootfs/ rather than under chroot-autobuild/.
|
||||
tar --transform='s,^chroot,rootfs,' --sort=name --numeric-owner \
|
||||
-rf "livecd.$PROJECT.lxd.tar" chroot
|
||||
gzip -9 "livecd.$PROJECT.lxd.tar"
|
live-build/buildd/hooks/52-linux-virtual-image.binary (executable file, 76 lines)
@ -0,0 +1,76 @@
|
||||
#!/bin/bash -eux
|
||||
# vi: ts=4 expandtab
|
||||
#
|
||||
# Generate linux-virtual image
|
||||
#
|
||||
|
||||
case $ARCH in
|
||||
amd64|arm64)
|
||||
;;
|
||||
*)
|
||||
echo "We don't build bootable Buildd images for $ARCH."
|
||||
exit 0
|
||||
;;
|
||||
esac
|
||||
|
||||
echo "Building bootable Buildd image"
|
||||
|
||||
IMAGE_STR="# BUILDD_IMG: This file was created/modified by the Buildd Image build process"
|
||||
|
||||
. config/functions
|
||||
|
||||
mount_d=$(mktemp -d)
|
||||
|
||||
create_derivative uefi linux-virtual #sets $derivative_img
|
||||
mount_disk_image $derivative_img $mount_d
|
||||
|
||||
# unmount disk image and remove created folders on exit
|
||||
# even though we unmount manually before we convert to
|
||||
# qcow2, we have this here just in case we error out before
|
||||
# that step
|
||||
cleanup_linux_virtual() {
|
||||
if [ -d "$mount_d" ]; then
|
||||
umount_disk_image "$mount_d"
|
||||
fi
|
||||
rm -rf $mount_d $derivative_img
|
||||
}
|
||||
trap cleanup_linux_virtual EXIT
|
||||
|
||||
# Install dependencies
|
||||
env DEBIAN_FRONTEND=noninteractive chroot "$mount_d" apt-get \
|
||||
update --assume-yes
|
||||
# Perform a dist-upgrade to pull in package updates
|
||||
env DEBIAN_FRONTEND=noninteractive chroot "$mount_d" apt-get \
|
||||
dist-upgrade --assume-yes
|
||||
env DEBIAN_FRONTEND=noninteractive chroot "$mount_d" apt-get \
|
||||
install -y lsb-release locales initramfs-tools busybox-initramfs \
|
||||
udev dbus netplan.io cloud-init openssh-server sudo snapd
|
||||
|
||||
# Install a kernel
|
||||
divert_grub "$mount_d"
|
||||
env DEBIAN_FRONTEND=noninteractive chroot "$mount_d" apt-get \
|
||||
install --assume-yes linux-image-virtual
|
||||
env DEBIAN_FRONTEND=noninteractive chroot "$mount_d" apt-get \
|
||||
autoremove --purge --assume-yes
|
||||
chroot "$mount_d" update-grub
|
||||
undivert_grub "$mount_d"
|
||||
|
||||
# Update initramfs image
|
||||
chroot "$mount_d" update-initramfs -c -v -k all
|
||||
|
||||
# extract kernel and initrd
|
||||
cp $mount_d/boot/initrd.img-* livecd.$PROJECT.initrd-generic
|
||||
cp $mount_d/boot/vmlinuz-* livecd.$PROJECT.vmlinuz-generic
|
||||
|
||||
# Cleanup
|
||||
env DEBIAN_FRONTEND=noninteractive chroot "$mount_d" apt-get \
|
||||
clean
|
||||
|
||||
create_manifest $mount_d "livecd.$PROJECT.disk-linux-virtual.manifest"
|
||||
|
||||
# unmount disk image to prevent corruption
|
||||
# and remove it so the trap doesn't try to unmount it again
|
||||
umount_disk_image $mount_d
|
||||
rm -rf $mount_d
|
||||
|
||||
convert_to_qcow2 $derivative_img "livecd.$PROJECT.disk-linux-virtual.img"
|
@ -0,0 +1,2 @@
|
||||
DPkg::Options {"--force-unsafe-io";};
|
||||
DPkg::Use-Pty "false";
|
@ -0,0 +1,3 @@
|
||||
Package: *
|
||||
Pin: release a=*-backports
|
||||
Pin-Priority: 500
|
live-build/buildd/includes.chroot/etc/fstab (normal file, 1 line)
@ -0,0 +1 @@
|
||||
/dev/root / ext2 noatime,errors=remount-ro 0 1
|
live-build/buildd/includes.chroot/etc/hostname (normal file, 1 line)
@ -0,0 +1 @@
|
||||
ubuntu
|
live-build/buildd/includes.chroot/etc/hosts (normal file, 9 lines)
@ -0,0 +1,9 @@
|
||||
127.0.0.1 localhost
|
||||
|
||||
# The following lines are desirable for IPv6 capable hosts
|
||||
::1 ip6-localhost ip6-loopback
|
||||
fe00::0 ip6-localnet
|
||||
ff00::0 ip6-mcastprefix
|
||||
ff02::1 ip6-allnodes
|
||||
ff02::2 ip6-allrouters
|
||||
ff02::3 ip6-allhosts
|
@ -0,0 +1 @@
|
||||
/lib/systemd/system/systemd-networkd.service
|
@ -0,0 +1 @@
|
||||
/lib/systemd/system/systemd-networkd-wait-online.service
|
@ -0,0 +1 @@
|
||||
/lib/systemd/system/systemd-networkd.socket
|
live-build/buildd/includes.chroot/usr/local/sbin/policy-rc.d (executable file, 13 lines)
@ -0,0 +1,13 @@
|
||||
#!/bin/sh
|
||||
|
||||
# policy-rc.d script for chroots.
|
||||
# Copyright (c) 2007 Peter Palfrader <peter@palfrader.org>
|
||||
# License: <weasel> MIT, if you want one.
|
||||
|
||||
while true; do
|
||||
case "$1" in
|
||||
-*) shift ;;
|
||||
makedev) exit 0;;
|
||||
*) echo "Not running services in chroot."; exit 101 ;;
|
||||
esac
|
||||
done
|
live-build/buildd/preseed/debconf.preseed (normal file, 2 lines)
@ -0,0 +1,2 @@
|
||||
# We never want debconf interaction.
|
||||
debconf debconf/frontend select Noninteractive
|
live-build/buildd/preseed/man-db.preseed (normal file, 3 lines)
@ -0,0 +1,3 @@
|
||||
# Avoid unnecessary manual page database builds (see
|
||||
# https://bugs.debian.org/554914).
|
||||
man-db man-db/auto-update boolean false
|
live-build/buildd/preseed/sun-java6.preseed (normal file, 3 lines)
@ -0,0 +1,3 @@
|
||||
# Pre-accept interactive EULA prompts.
|
||||
sun-java6-bin shared/accepted-sun-dlj-v1-1 boolean true
|
||||
sun-java6-jre shared/accepted-sun-dlj-v1-1 boolean true
|
@ -1,7 +1,7 @@
|
||||
# vi: ts=4 expandtab syntax=sh
|
||||
|
||||
#imagesize=${IMAGE_SIZE:-$((2252*1024**2))} # 2.2G (the current size we ship)
|
||||
imagesize=${IMAGE_SIZE:-2361393152} # 2.2G (the current size we ship)
|
||||
# default imagesize = 2252*1024**2 = 2.2G (the current size we ship)
|
||||
imagesize=${IMAGE_SIZE:-2361393152}
|
||||
fs_label="${FS_LABEL:-rootfs}"
|
||||
|
||||
rootfs_dev_mapper=
|
||||
@ -43,6 +43,17 @@ create_empty_disk_image() {
|
||||
dd if=/dev/zero of="$1" bs=1 count=0 seek="${imagesize}"
|
||||
}
|
||||
|
||||
create_manifest() {
|
||||
local chroot_root=${1}
|
||||
local target_file=${2}
|
||||
echo "create_manifest chroot_root: ${chroot_root}"
|
||||
dpkg-query --show --admindir="${chroot_root}/var/lib/dpkg" > ${target_file}
|
||||
echo "create_manifest call to dpkg-query finished."
|
||||
./config/snap-seed-parse "${chroot_root}" "${target_file}"
|
||||
echo "create_manifest call to snap_seed_parse finished."
|
||||
echo "create_manifest finished"
|
||||
}
|
||||
|
||||
make_ext4_partition() {
|
||||
device="$1"
|
||||
label=${fs_label:+-L "${fs_label}"}
|
||||
@ -154,6 +165,8 @@ umount_partition() {
|
||||
teardown_mountpoint $mountpoint
|
||||
umount -R $mountpoint
|
||||
udevadm settle
|
||||
# workaround for LP: 1960537
|
||||
sleep 30
|
||||
|
||||
if [ -n "${rootfs_dev_mapper}" -a -b "${rootfs_dev_mapper}" ]; then
|
||||
# buildds don't have /etc/mtab symlinked
|
||||
@ -195,28 +208,46 @@ modify_vmdk_header() {
|
||||
|
||||
# Extract the vmdk header for manipulation
|
||||
dd if="${vmdk_name}" of="${descriptor}" bs=1 skip=512 count=1024
|
||||
echo "Cat'ing original vmdk disk descriptor to console for debugging."
|
||||
# cat header so we are aware of the original descriptor for debugging
|
||||
cat $descriptor
|
||||
|
||||
# trim null bytes to treat as standard text file
|
||||
tr -d '\000' < $descriptor > $newdescriptor
|
||||
|
||||
# The sed lines below are where the magic is. Specifically:
|
||||
# ddb.toolsVersion: sets the open-vm-tools so that VMware shows
|
||||
# the tooling as current
|
||||
# ddb.virtualHWVersion: set the version to 7, which covers most
|
||||
# current versions of VMware
|
||||
# createType: make sure it's set to stream Optimized
|
||||
# remove the vmdk-stream-converter comment and replace with
|
||||
# # Disk DescriptorFile. This is needed for Virtualbox
|
||||
# remove the comments from vmdk-stream-converter which causes
|
||||
# VirtualBox and others to fail VMDK validation
|
||||
|
||||
sed -e 's|# Description file.*|# Disk DescriptorFile|' \
|
||||
sed -i -e 's|# Description file.*|# Disk DescriptorFile|' \
|
||||
-e '/# Believe this is random*/d' \
|
||||
-e '/# Indicates no parent/d' \
|
||||
-e '/# The Disk Data Base/d' \
|
||||
-e 's|ddb.comment.*|ddb.toolsVersion = "2147483647"|' \
|
||||
"${descriptor}" > "${newdescriptor}"
|
||||
${newdescriptor}
|
||||
|
||||
# The header cannot be bigger than 1024
|
||||
expr $(stat --format=%s ${newdescriptor}) \< 1024 > /dev/null 2>&1 || {
|
||||
echo "descriptor is too large, VMDK will be invalid!"; exit 1; }
|
||||
# add newline to newdescriptor
|
||||
echo "" >> $newdescriptor
|
||||
|
||||
# add required tools version
|
||||
echo -n 'ddb.toolsVersion = "2147483647"' >> $newdescriptor
|
||||
|
||||
echo "Cat'ing modified descriptor for debugging."
|
||||
cat $newdescriptor
|
||||
|
||||
# diff original descriptor and new descriptor for debugging
|
||||
# diff exits 1 if difference. pipefail not set so piping diff
|
||||
# to cat prints diff and swallows exit 1
|
||||
echo "Printing diff of original and new descriptors."
|
||||
diff --text $descriptor $newdescriptor | cat
|
||||
|
||||
# The header must be 1024 or less before padding
|
||||
if ! expr $(stat --format=%s ${newdescriptor}) \< 1025 > /dev/null 2>&1; then
|
||||
echo "descriptor is too large, VMDK will be invalid!";
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# reset newdescriptor to be 1024
|
||||
truncate --no-create --size=1K $newdescriptor
|
||||
|
||||
# Overwrite the vmdk header with our new, modified one
|
||||
dd conv=notrunc,nocreat \
|
||||
@ -326,7 +357,9 @@ undivert_grub() {
|
||||
--divert /etc/grub.d/30_os-prober.dpkg-divert \
|
||||
--rename /etc/grub.d/30_os-prober
|
||||
|
||||
rm "$CHROOT_ROOT"/usr/bin/systemd-detect-virt
|
||||
if grep -q "^exit 1$" "$CHROOT_ROOT"/usr/bin/systemd-detect-virt; then
|
||||
rm "$CHROOT_ROOT"/usr/bin/systemd-detect-virt
|
||||
fi
|
||||
chroot "$CHROOT_ROOT" dpkg-divert --remove --local \
|
||||
--rename /usr/bin/systemd-detect-virt
|
||||
}
|
||||
@ -367,6 +400,47 @@ release_ver() {
|
||||
distro-info --series="$LB_DISTRIBUTION" -r | awk '{ print $1 }'
|
||||
}
|
||||
|
||||
_snap_post_process() {
|
||||
# Look for the 'core' snap. If it is not present, assume that the image
|
||||
# contains only snaps with bases >= core18. In that case snapd is
|
||||
# preseeded. However, when 'core' is being installed and snapd has not
|
||||
# been installed by a call to 'snap_preseed' (see below) then it is
|
||||
# removed again.
|
||||
local CHROOT_ROOT=$1
|
||||
local SNAP_NAME=$2
|
||||
|
||||
local seed_dir="$CHROOT_ROOT/var/lib/snapd/seed"
|
||||
local snaps_dir="$seed_dir/snaps"
|
||||
local seed_yaml="$seed_dir/seed.yaml"
|
||||
local assertions_dir="$seed_dir/assertions"
|
||||
local snapd_install_stamp="$seed_dir/.snapd-explicit-install-stamp"
|
||||
|
||||
case $SNAP_NAME in
|
||||
core[0-9]*)
|
||||
# If the 'core' snap is not present, assume we are coreXX-only and
|
||||
# install the snapd snap.
|
||||
if [ ! -f ${snaps_dir}/core_[0-9]*.snap ]; then
|
||||
_snap_preseed $CHROOT_ROOT snapd stable
|
||||
fi
|
||||
;;
|
||||
core)
|
||||
# If the snapd snap has been seeded, but not marked as explicitly
|
||||
# installed (see snap_preseed below), then remove it.
|
||||
if [ -f ${snaps_dir}/snapd_[0-9]*.snap ] && \
|
||||
[ ! -f "$snapd_install_stamp" ]
|
||||
then
|
||||
# Remove snap, assertions and entry in seed.yaml
|
||||
rm -f ${snaps_dir}/snapd_[0-9]*.snap
|
||||
rm -f ${assertions_dir}/snapd_[0-9]*.assert
|
||||
sed -i -e'N;/name: snapd/,+2d' $seed_yaml
|
||||
fi
|
||||
;;
|
||||
*)
|
||||
# ignore
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
_snap_preseed() {
|
||||
# Download the snap/assertion and add to the preseed
|
||||
local CHROOT_ROOT=$1
|
||||
@ -381,11 +455,18 @@ _snap_preseed() {
|
||||
|
||||
# Download the snap & assertion
|
||||
local snap_download_failed=0
|
||||
chroot $CHROOT_ROOT sh -c "
|
||||
|
||||
# Preseed a snap only once
|
||||
if [ -f ${snaps_dir}/${SNAP_NAME}_[0-9]*.snap ]; then
|
||||
return
|
||||
fi
|
||||
|
||||
sh -c "
|
||||
set -x;
|
||||
cd /var/lib/snapd/seed;
|
||||
cd \"$CHROOT_ROOT/var/lib/snapd/seed\";
|
||||
SNAPPY_STORE_NO_CDN=1 snap download \
|
||||
--channel=$CHANNEL \"$SNAP_NAME\"" || snap_download_failed=1
|
||||
--cohort="${COHORT_KEY:-}" \
|
||||
--channel=\"$CHANNEL\" \"$SNAP_NAME\"" || snap_download_failed=1
|
||||
if [ $snap_download_failed = 1 ] ; then
|
||||
echo "If the channel ($CHANNEL) includes '*/ubuntu-##.##' track per "
|
||||
echo "Ubuntu policy (ex. stable/ubuntu-18.04) the publisher will need "
|
||||
@ -398,6 +479,36 @@ _snap_preseed() {
|
||||
mv -v $seed_dir/*.assert $assertions_dir
|
||||
mv -v $seed_dir/*.snap $snaps_dir
|
||||
|
||||
# Pre-seed snap's base
|
||||
local core_snap=""
|
||||
case $SNAP_NAME in
|
||||
snapd)
|
||||
# snapd is self-contained, ignore base
|
||||
;;
|
||||
core|core[0-9][0-9])
|
||||
# core and core## are self-contained, ignore base
|
||||
;;
|
||||
*)
|
||||
# Determine which core snap is needed
|
||||
local snap_info
|
||||
|
||||
# snap info doesn't have --channel, so must run against the downloaded snap
|
||||
snap_info=$(snap info --verbose ${snaps_dir}/${SNAP_NAME}_[0-9]*.snap)
|
||||
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Failed to retrieve base of $SNAP_NAME!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
core_snap=$(echo "$snap_info" | grep '^base:' | awk '{print $2}')
|
||||
|
||||
# If snap info does not list a base use 'core'
|
||||
core_snap=${core_snap:-core}
|
||||
|
||||
_snap_preseed $CHROOT_ROOT $core_snap stable
|
||||
;;
|
||||
esac
|
||||
|
||||
# Add the snap to the seed.yaml
|
||||
! [ -e $seed_yaml ] && echo "snaps:" > $seed_yaml
|
||||
cat <<EOF >> $seed_yaml
|
||||
@ -410,6 +521,8 @@ EOF
|
||||
|
||||
echo -n " file: " >> $seed_yaml
|
||||
(cd $snaps_dir; ls -1 ${SNAP_NAME}_*.snap) >> $seed_yaml
|
||||
|
||||
_snap_post_process $CHROOT_ROOT $SNAP_NAME
|
||||
}
|
||||
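For orientation, the seed.yaml that the snippets above build up ends up looking roughly like this (names, channels and revisions are illustrative only; the exact keys written by the heredoc are elided from this hunk):

    snaps:
      - name: core
        channel: stable
        file: core_6350.snap
      - name: gtk-common-themes
        channel: stable/ubuntu-18.04
        file: gtk-common-themes_1122.snap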
|
||||
snap_prepare_assertions() {
|
||||
@ -465,21 +578,114 @@ snap_prepare() {
|
||||
local snaps_dir="$seed_dir/snaps"
|
||||
|
||||
snap_prepare_assertions "$CHROOT_ROOT" "$CUSTOM_BRAND_MODEL"
|
||||
|
||||
# Download the core snap
|
||||
if ! [ -f $snaps_dir/core_[0-9]*.snap ] ; then
|
||||
_snap_preseed $CHROOT_ROOT core stable
|
||||
fi
|
||||
}
|
||||
|
||||
snap_preseed() {
|
||||
# Preseed a snap in the image
|
||||
local CHROOT_ROOT=$1
|
||||
local SNAP=$2
|
||||
local SNAP_NAME=${SNAP%/*}
|
||||
# Per Ubuntu policy, all seeded snaps (with the exception of the core
|
||||
# snap) must pull from stable/ubuntu-$(release_ver) as their channel.
|
||||
local CHANNEL=${3:-"stable/ubuntu-$(release_ver)"}
|
||||
|
||||
snap_prepare $CHROOT_ROOT
|
||||
|
||||
_snap_preseed $CHROOT_ROOT $SNAP $CHANNEL
|
||||
|
||||
# Mark this image as having snapd installed explicitly.
|
||||
case $SNAP_NAME in
|
||||
snapd)
|
||||
touch "$CHROOT_ROOT/var/lib/snapd/seed/.snapd-explicit-install-stamp"
|
||||
;;
|
||||
esac
|
||||
|
||||
|
||||
# Do basic validation of generated snapd seed.yaml, doing it here
|
||||
# means we catch all the places(tm) that snaps are added but the
|
||||
# downside is that each time a snap is added the seed must be valid,
|
||||
# i.e. snaps with bases need to add bases first etc.
|
||||
if [ -e chroot/var/lib/snapd/seed/seed.yaml ]; then
|
||||
snap debug validate-seed "$CHROOT_ROOT/var/lib/snapd/seed/seed.yaml"
|
||||
fi
|
||||
}
|
||||
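As a usage sketch (the snap name below is only an example, not something this changeset seeds), image hooks call the helper as:

    snap_preseed chroot lxd            # default channel: stable/ubuntu-$(release_ver)
    snap_preseed chroot core stable    # channel overridden explicitly

A trailing /classic suffix, as produced by the seed parsing in auto/config, is stripped into SNAP_NAME by the function itself via ${SNAP%/*}.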
|
||||
configure_oci() {
|
||||
# configure a chroot to be an OCI/docker container
# these changes are taken from the current Dockerfile modifications done
|
||||
# at https://github.com/tianon/docker-brew-ubuntu-core/blob/master/update.sh
|
||||
|
||||
local chroot=$1
|
||||
local serial=$2
|
||||
|
||||
if [ ! -d "${chroot}" ]; then
|
||||
echo "The chroot does not exist"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "==== Configuring OCI ===="
|
||||
|
||||
# https://github.com/docker/docker/blob/9a9fc01af8fb5d98b8eec0740716226fadb3735c/contrib/mkimage/debootstrap#L40-L48
|
||||
echo '#!/bin/sh' > ${chroot}/usr/sbin/policy-rc.d
|
||||
echo 'exit 101' >> ${chroot}/usr/sbin/policy-rc.d
|
||||
Chroot ${chroot} "chmod +x /usr/sbin/policy-rc.d"
|
||||
|
||||
|
||||
# Inject a build stamp into the image
|
||||
|
||||
mkdir -p ${chroot}/etc/cloud
|
||||
cat > ${chroot}/etc/cloud/build.info << EOF
|
||||
serial: $serial
|
||||
EOF
|
||||
|
||||
|
||||
# https://github.com/docker/docker/blob/9a9fc01af8fb5d98b8eec0740716226fadb3735c/contrib/mkimage/debootstrap#L54-L56
|
||||
Chroot ${chroot} "dpkg-divert --local --rename --add /sbin/initctl"
|
||||
cp -a ${chroot}/usr/sbin/policy-rc.d ${chroot}/sbin/initctl
|
||||
sed -i 's/^exit.*/exit 0/' ${chroot}/sbin/initctl
|
||||
|
||||
# https://github.com/docker/docker/blob/9a9fc01af8fb5d98b8eec0740716226fadb3735c/contrib/mkimage/debootstrap#L71-L78
|
||||
echo 'force-unsafe-io' > ${chroot}/etc/dpkg/dpkg.cfg.d/docker-apt-speedup
|
||||
|
||||
# https://github.com/docker/docker/blob/9a9fc01af8fb5d98b8eec0740716226fadb3735c/contrib/mkimage/debootstrap#L85-L105
|
||||
echo 'DPkg::Post-Invoke { "rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true"; };' > ${chroot}/etc/apt/apt.conf.d/docker-clean
|
||||
|
||||
echo 'APT::Update::Post-Invoke { "rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true"; };' >> ${chroot}/etc/apt/apt.conf.d/docker-clean
|
||||
|
||||
echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";' >> ${chroot}/etc/apt/apt.conf.d/docker-clean
|
||||
|
||||
# https://github.com/docker/docker/blob/9a9fc01af8fb5d98b8eec0740716226fadb3735c/contrib/mkimage/debootstrap#L109-L115
|
||||
echo 'Acquire::Languages "none";' > ${chroot}/etc/apt/apt.conf.d/docker-no-languages
|
||||
|
||||
# https://github.com/docker/docker/blob/9a9fc01af8fb5d98b8eec0740716226fadb3735c/contrib/mkimage/debootstrap#L118-L130
|
||||
echo 'Acquire::GzipIndexes "true"; Acquire::CompressionTypes::Order:: "gz";' > ${chroot}/etc/apt/apt.conf.d/docker-gzip-indexes
|
||||
|
||||
# https://github.com/docker/docker/blob/9a9fc01af8fb5d98b8eec0740716226fadb3735c/contrib/mkimage/debootstrap#L134-L151
|
||||
echo 'Apt::AutoRemove::SuggestsImportant "false";' > ${chroot}/etc/apt/apt.conf.d/docker-autoremove-suggests
|
||||
|
||||
# delete all the apt list files since they're big and get stale quickly
|
||||
rm -rf ${chroot}/var/lib/apt/lists/*
|
||||
|
||||
# verify that the APT lists files do not exist
|
||||
Chroot chroot "apt-get indextargets" > indextargets.out
|
||||
[ ! -s indextargets.out ]
|
||||
rm indextargets.out
|
||||
# (see https://bugs.launchpad.net/cloud-images/+bug/1699913)
|
||||
|
||||
# make systemd-detect-virt return "docker"
|
||||
# See: https://github.com/systemd/systemd/blob/aa0c34279ee40bce2f9681b496922dedbadfca19/src/basic/virt.c#L434
|
||||
mkdir -p ${chroot}/run/systemd
|
||||
echo 'docker' > ${chroot}/run/systemd/container
|
||||
|
||||
rm -rf ${chroot}/var/cache/apt/*.bin
|
||||
echo "==== Configuring OCI done ===="
|
||||
}
|
||||
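A hypothetical call site (the serial value here is made up for illustration) passes the chroot path and the build serial that ends up in /etc/cloud/build.info:

    configure_oci chroot 20190620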
|
||||
is_live_layer () {
|
||||
local pass=$1
|
||||
for livepass in $LIVE_PASSES; do
|
||||
[ "$livepass" != "$pass" ] && continue
|
||||
return 0
|
||||
done
|
||||
return 1
|
||||
}
|
||||
|
live-build/make-lxd-metadata.py (executable file, 49 lines)
@ -0,0 +1,49 @@
|
||||
#! /usr/bin/python3
|
||||
|
||||
"""Make a metadata.yaml file for a LXD image."""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import sys
|
||||
import time
|
||||
|
||||
|
||||
# Map dpkg architecture names to LXD architecture names.
|
||||
lxd_arches = {
|
||||
"amd64": "x86_64",
|
||||
"arm64": "aarch64",
|
||||
"armhf": "armv7l",
|
||||
"i386": "i686",
|
||||
"powerpc": "ppc",
|
||||
"ppc64el": "ppc64le",
|
||||
"s390x": "s390x",
|
||||
}
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("series", help="Ubuntu series name")
|
||||
parser.add_argument("architecture", help="Ubuntu architecture name")
|
||||
args = parser.parse_args()
|
||||
|
||||
metadata = {
|
||||
"architecture": lxd_arches[args.architecture],
|
||||
"creation_date": int(time.time()),
|
||||
"properties": {
|
||||
"os": "Ubuntu",
|
||||
"series": args.series,
|
||||
"architecture": args.architecture,
|
||||
"description": "Ubuntu buildd %s %s" % (
|
||||
args.series, args.architecture),
|
||||
},
|
||||
}
|
||||
|
||||
# Encoding this as JSON is good enough, and saves pulling in a YAML
|
||||
# library dependency.
|
||||
json.dump(
|
||||
metadata, sys.stdout, sort_keys=True, indent=4, separators=(",", ": "),
|
||||
ensure_ascii=False)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
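Mirroring the call in 51-buildd-lxd.binary above, the script can also be exercised standalone; the series and architecture below are just examples:

    ./live-build/make-lxd-metadata.py bionic amd64 > metadata.yaml

The output is JSON, which LXD accepts as metadata.yaml since JSON is a subset of YAML.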
live-build/snap-seed-parse.py (executable file, 68 lines)
@ -0,0 +1,68 @@
|
||||
#!/usr/bin/python3
|
||||
|
||||
"""
|
||||
Usage: snap-seed-parse [${chroot_dir}] <output file>
|
||||
|
||||
This script looks for a seed.yaml path in the given root directory, parsing
|
||||
it and appending the parsed lines to the given output file.
|
||||
|
||||
The $chroot_dir argument is optional and will default to the empty string.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import os.path
|
||||
import re
|
||||
import yaml
|
||||
|
||||
|
||||
def log(msg):
|
||||
print("snap-seed-parse: {}".format(msg))
|
||||
|
||||
|
||||
log("Parsing seed.yaml")
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('chroot', nargs='?', default='',
|
||||
help='root dir for the chroot from which to generate the '
|
||||
'manifest')
|
||||
parser.add_argument('file', help='Output manifest to this file')
|
||||
|
||||
ARGS = parser.parse_args()
|
||||
CHROOT_ROOT = ARGS.chroot
|
||||
FNAME = ARGS.file
|
||||
|
||||
# Trim any trailing slashes for correct appending
|
||||
log("CHROOT_ROOT: {}".format(CHROOT_ROOT))
|
||||
if len(CHROOT_ROOT) > 0 and CHROOT_ROOT[-1] == '/':
|
||||
CHROOT_ROOT = CHROOT_ROOT[:-1]
|
||||
|
||||
# This is where we expect to find the seed.yaml file
|
||||
YAML_PATH = CHROOT_ROOT + '/var/lib/snapd/seed/seed.yaml'
|
||||
|
||||
# Snaps are prepended with this string in the manifest
|
||||
LINE_PREFIX = 'snap:'
|
||||
|
||||
log("yaml path: {}".format(YAML_PATH))
|
||||
if not os.path.isfile(YAML_PATH):
|
||||
log("WARNING: yaml path not found; no seeded snaps found.")
|
||||
exit(0)
|
||||
else:
|
||||
log("yaml path found.")
|
||||
|
||||
with open(YAML_PATH, 'r') as fh:
|
||||
yaml_lines = yaml.safe_load(fh)['snaps']
|
||||
|
||||
log('Writing manifest to {}'.format(FNAME))
|
||||
|
||||
with open(FNAME, 'a+') as fh:
|
||||
for item in yaml_lines:
|
||||
filestring = item['file']
|
||||
# Pull the revision number off the file name
|
||||
revision = filestring[filestring.rindex('_')+1:]
|
||||
revision = re.sub(r'[^0-9]', '', revision)
|
||||
fh.write("{}{}\t{}\t{}\n".format(LINE_PREFIX,
|
||||
item['name'],
|
||||
item['channel'],
|
||||
revision,
|
||||
))
|
||||
log('Manifest output finished.')
|
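For example (paths are illustrative), create_manifest in config/functions calls the copied script as:

    ./config/snap-seed-parse chroot livecd.ubuntu-cpc.manifest

which appends tab-separated lines of the form snap:<name> <channel> <revision> to the manifest.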
@ -2,5 +2,5 @@
|
||||
|
||||
cat >>/etc/fstab<<EOT
|
||||
# Minimal setup required for systemd to provide a r/w FS
|
||||
/dev/root / rootfs defaults 0 0
|
||||
/dev/root / rootfs defaults 0 1
|
||||
EOT
|
||||
|
live-build/ubuntu-cpc/README.cpc.md (normal file, 68 lines)
@ -0,0 +1,68 @@
|
||||
# TL;DR
|
||||
|
||||
In order to generate the hooks for a specific image target set, call the
|
||||
`make-hooks` script, located in `hooks.d` as
|
||||
|
||||
./make-hooks --hooks-dir ../hooks <image_set>
|
||||
|
||||
where `image_set` is the name of a series file (e.g. "vagrant") without leading
|
||||
path components. Do *not* check in the `hooks` folder, it is automatically
|
||||
generated by `auto/config` during Live Build runs.
|
||||
|
||||
|
||||
# Hook placement and ordering
|
||||
|
||||
Scripts live in subfolders below the `hooks.d` folder. Currently the folders
|
||||
`chroot` and `base` exist. The folder with the name `extra` is reserved for
|
||||
private scripts, which are not included in the source of livecd-rootfs. The
|
||||
scripts are not numbered, instead the order of their execution depends on the
|
||||
order in which they are listed in a *series* file.
|
||||
|
||||
Series files are placed in subfolders `hooks.d/base/series` or
|
||||
`hooks.d/extra/series`. Each series file contains a list of scripts to be
|
||||
executed. Empty lines and lines starting with a `#` are ignored.
|
||||
|
||||
Series files in `extra/series` override files in `base/series` with the same
|
||||
name. For example, if a series file `base/series/cloudA` exists and a series
|
||||
file `extra/series/cloudA`, then the latter will be preferred.
|
||||
|
||||
A series file in `extra/series` may also list scripts that are located in the
|
||||
`chroot` and `base` folders. In addition, series files can *depend* on other
|
||||
series files. For example, the series files for most custom images look similar
|
||||
to this:
|
||||
|
||||
depends disk-image
|
||||
depends extra-settings
|
||||
extra/cloudB.binary
|
||||
|
||||
Where `disk-image` and `extra-settings` may list scripts and dependencies which
|
||||
are to be processed before the script `extra/cloudB.binary` is called.
|
||||
|
||||
ATTENTION: Live Build runs scripts with the suffix ".chroot" in a batch separate
|
||||
from scripts ending in ".binary". Even if you arrange them interleaved in your
|
||||
series files, the chroot scripts will be run before the binary scripts.
|
||||
|
||||
# Image set selection for Live Build
|
||||
|
||||
During a Live Build, enumerated symbolic links are generated based on the
|
||||
contents of one or more series files. The series files are selected according
|
||||
to the contents of the `IMAGE_TARGETS` environment variable. For example, in
|
||||
order to trigger the build of `squashfs` and `vagrant`, list them in the
|
||||
`IMAGE_TARGETS` variable as `squashfs,vagrant`. The separator can be a comma,
|
||||
a semi-colon or whitespace.
|
||||
|
||||
The generation of the symbolic links is triggered from the `auto/config` script,
|
||||
from where the contents of the `IMAGE_TARGETS` environment variable are passed
|
||||
on to the `make-hooks` script.
|
||||
|
||||
|
||||
# Symlink generation
|
||||
|
||||
Since Live Build itself does not know about series files, a traditional `hooks`
|
||||
folder is generated using the `make-hooks` script. The script takes as arguments
|
||||
the names of the series files to be processed.
|
||||
|
||||
The script parses the series files and generates enumerated symbolic links for
|
||||
all entries. Per default, these are placed into a directory named `hooks` next
|
||||
to the `hooks.d` directory. This can be changed using the `--hooks-dir`
|
||||
parameter.
|
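Putting the previous two sections together, the selection step in `auto/config` boils down to roughly the following (a sketch of the hunk shown earlier in this diff, not additional behaviour):

    if [ "$PROJECT" = "ubuntu-cpc" ]; then
        case ${IMAGE_TARGETS:-} in
            "")
                config/hooks.d/make-hooks --hooks-dir config/hooks all
                ;;
            *)
                config/hooks.d/make-hooks --hooks-dir config/hooks "$IMAGE_TARGETS"
                ;;
        esac
    fi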
live-build/ubuntu-cpc/hooks.d/base/create-root-dir.binary (executable file, 29 lines)
@ -0,0 +1,29 @@
|
||||
#!/bin/bash -ex
|
||||
# vi: ts=4 expandtab
|
||||
#
|
||||
# Generate the root directory/manifest for rootfs.tar.xz and squashfs
|
||||
|
||||
if [ -n "$SUBARCH" ]; then
|
||||
echo "Skipping rootfs build for subarch flavor build"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
. config/functions
|
||||
|
||||
rootfs_dir=rootfs.dir
|
||||
mkdir $rootfs_dir
|
||||
cp -a chroot/* $rootfs_dir
|
||||
|
||||
setup_mountpoint $rootfs_dir
|
||||
|
||||
env DEBIAN_FRONTEND=noninteractive chroot $rootfs_dir apt-get --purge remove --assume-yes '^linux-.*' 'linux-base+'
|
||||
env DEBIAN_FRONTEND=noninteractive chroot $rootfs_dir apt-get --purge remove --assume-yes '^grub-.*'
|
||||
env DEBIAN_FRONTEND=noninteractive chroot $rootfs_dir apt-get autoremove --purge --assume-yes
|
||||
rm -rf $rootfs_dir/boot/grub
|
||||
|
||||
# Keep this as some derivatives mount a tempfs here
|
||||
mkdir -p $rootfs_dir/lib/modules
|
||||
|
||||
teardown_mountpoint $rootfs_dir
|
||||
|
||||
create_manifest "${rootfs_dir}" "${rootfs_dir}.manifest"
|
@ -33,6 +33,7 @@ install_grub() {
|
||||
chroot mountpoint apt-get -qqy update
|
||||
chroot mountpoint apt-get -qqy install grub-ieee1275
|
||||
chroot mountpoint apt-get -qqy remove --purge grub-legacy-ec2
|
||||
chroot mountpoint apt-get autoremove --purge --assume-yes
|
||||
|
||||
# set the kernel commandline to use hvc0
|
||||
mkdir -p mountpoint/etc/default/grub.d
|
live-build/ubuntu-cpc/hooks.d/base/disk-image-uefi.binary (executable file, 159 lines)
@ -0,0 +1,159 @@
|
||||
#!/bin/bash -eux
|
||||
|
||||
case $ARCH in
|
||||
amd64|arm64|armhf)
|
||||
;;
|
||||
*)
|
||||
echo "We don't create EFI images for $ARCH."
|
||||
exit 0
|
||||
;;
|
||||
esac
|
||||
|
||||
case ${PROJECT:-} in
|
||||
ubuntu)
|
||||
IMAGE_STR="# DESKTOP_IMG: This file was created/modified by the Desktop Image build process"
|
||||
FS_LABEL="desktop-rootfs"
|
||||
IMAGE_SIZE=12884901888 # 12G
|
||||
;;
|
||||
*)
|
||||
IMAGE_STR="# CLOUD_IMG: This file was created/modified by the Cloud Image build process"
|
||||
FS_LABEL="cloudimg-rootfs"
|
||||
;;
|
||||
esac
|
||||
|
||||
. config/binary
|
||||
|
||||
. config/functions
|
||||
|
||||
create_partitions() {
|
||||
disk_image="$1"
|
||||
sgdisk "${disk_image}" --zap-all
|
||||
case $ARCH in
|
||||
arm64|armhf)
|
||||
sgdisk "${disk_image}" \
|
||||
--new=15:0:204800 \
|
||||
--typecode=15:ef00 \
|
||||
--new=1:
|
||||
;;
|
||||
amd64)
|
||||
sgdisk "${disk_image}" \
|
||||
--new=14::+4M \
|
||||
--new=15::+106M \
|
||||
--new=1::
|
||||
sgdisk "${disk_image}" \
|
||||
-t 14:ef02 \
|
||||
-t 15:ef00
|
||||
;;
|
||||
esac
|
||||
sgdisk "${disk_image}" \
|
||||
--print
|
||||
}
|
||||
|
||||
create_and_mount_uefi_partition() {
|
||||
uefi_dev="/dev/mapper${loop_device///dev/}p15"
|
||||
mountpoint="$1"
|
||||
mkfs.vfat -F 32 -n UEFI "${uefi_dev}"
|
||||
|
||||
mkdir -p "${mountpoint}"/boot/efi
|
||||
mount "${uefi_dev}" "$mountpoint"/boot/efi
|
||||
|
||||
cat << EOF >> "mountpoint/etc/fstab"
|
||||
LABEL=UEFI /boot/efi vfat umask=0077 0 1
|
||||
EOF
|
||||
}
|
||||
|
||||
install_grub() {
|
||||
mkdir mountpoint
|
||||
mount_partition "${rootfs_dev_mapper}" mountpoint
|
||||
|
||||
create_and_mount_uefi_partition mountpoint
|
||||
|
||||
echo "(hd0) ${loop_device}" > mountpoint/tmp/device.map
|
||||
mkdir -p mountpoint/etc/default/grub.d
|
||||
efi_boot_dir="/boot/efi/EFI/BOOT"
|
||||
chroot mountpoint mkdir -p "${efi_boot_dir}"
|
||||
|
||||
if [ "${SUBPROJECT:-}" = minimized ] && [ -n "$partuuid" ]; then
|
||||
# FIXME: code duplicated between disk-image.binary
|
||||
# and disk-image-uefi.binary. We want to fix this to not
|
||||
# have initramfs-tools installed at all on these images.
|
||||
echo "partuuid found for root device; omitting initrd"
|
||||
echo "GRUB_FORCE_PARTUUID=$partuuid" >> mountpoint/etc/default/grub.d/40-force-partuuid.cfg
|
||||
fi
|
||||
|
||||
chroot mountpoint apt-get -y update
|
||||
|
||||
# UEFI GRUB modules are meant to be used equally by Secure Boot and
|
||||
# non-Secure Boot systems. If you need an extra module not already
|
||||
# provided or run into "Secure Boot policy forbids loading X" problems,
|
||||
# please file a bug against grub2 to include the affected module.
|
||||
case $ARCH in
|
||||
arm64)
|
||||
chroot mountpoint apt-get -qqy install --no-install-recommends shim-signed grub-efi-arm64-signed
|
||||
efi_target=arm64-efi
|
||||
;;
|
||||
armhf)
|
||||
chroot mountpoint apt-get -qqy install --no-install-recommends grub-efi-arm grub-efi-arm-bin
|
||||
efi_target=arm-efi
|
||||
;;
|
||||
amd64)
|
||||
chroot mountpoint apt-get install -qqy grub-pc shim-signed
|
||||
efi_target=x86_64-efi
|
||||
;;
|
||||
esac
|
||||
|
||||
chroot mountpoint apt-get autoremove --purge --assume-yes
|
||||
|
||||
# This call to populate the package manifest is added here to capture
|
||||
# grub-efi packages that otherwise would not make it into the base
|
||||
# manifest. filesystem.packages is moved into place via symlinking to
|
||||
# livecd.ubuntu-cpc.manifest by live-build/auto/build after lb_binary runs
|
||||
create_manifest "mountpoint" "binary/boot/filesystem.packages"
|
||||
|
||||
chroot mountpoint grub-install "${loop_device}" \
|
||||
--boot-directory=/boot \
|
||||
--efi-directory=/boot/efi \
|
||||
--target=${efi_target} \
|
||||
--uefi-secure-boot \
|
||||
--no-nvram
|
||||
|
||||
if [ "$ARCH" = "amd64" ]; then
|
||||
# Install the BIOS/GPT bits. Since GPT boots from the ESP partition,
|
||||
# it means that we just run this simple command and we're done
|
||||
chroot mountpoint grub-install --target=i386-pc "${loop_device}"
|
||||
fi
|
||||
|
||||
divert_grub mountpoint
|
||||
chroot mountpoint update-grub
|
||||
replace_grub_root_with_label mountpoint
|
||||
undivert_grub mountpoint
|
||||
|
||||
chroot mountpoint apt-get -y clean
|
||||
|
||||
rm mountpoint/tmp/device.map
|
||||
umount mountpoint/boot/efi
|
||||
mount
|
||||
umount_partition mountpoint
|
||||
rmdir mountpoint
|
||||
}
|
||||
|
||||
disk_image=binary/boot/disk-uefi.ext4
|
||||
|
||||
create_empty_disk_image "${disk_image}"
|
||||
create_partitions "${disk_image}"
|
||||
mount_image "${disk_image}" 1
|
||||
|
||||
partuuid=$(blkid -s PARTUUID -o value "$rootfs_dev_mapper")
|
||||
|
||||
# Copy the chroot in to the disk
|
||||
make_ext4_partition "${rootfs_dev_mapper}"
|
||||
mkdir mountpoint
|
||||
mount "${rootfs_dev_mapper}" mountpoint
|
||||
cp -a chroot/* mountpoint/
|
||||
umount mountpoint
|
||||
rmdir mountpoint
|
||||
|
||||
install_grub
|
||||
|
||||
clean_loops
|
||||
trap - EXIT
|
@ -12,6 +12,8 @@ BOOTPART_END=
|
||||
BOOT_MOUNTPOINT=
|
||||
ROOTPART_START=1
|
||||
|
||||
my_d=$(dirname $(readlink -f ${0}))
|
||||
|
||||
case $ARCH:$SUBARCH in
|
||||
ppc64el:*|powerpc:*)
|
||||
echo "POWER disk images are handled separately"
|
||||
@ -97,7 +99,7 @@ case $ARCH:$SUBARCH in
|
||||
# not the best place for this, but neither flash-kernel nor
|
||||
# u-boot have provisions for installing u-boot via maintainer
|
||||
# script
|
||||
config/hooks/raspi2/mkknlimg --dtok \
|
||||
${my_d}/raspi2/mkknlimg --dtok \
|
||||
mountpoint/usr/lib/u-boot/rpi_2/u-boot.bin \
|
||||
mountpoint/boot/firmware/uboot.bin
|
||||
;;
|
||||
@ -134,6 +136,7 @@ fi
|
||||
if [ "$ARCH" = "s390x" ]; then
|
||||
# Do ZIPL install bits
|
||||
chroot mountpoint apt-get -qqy install s390-tools sysconfig-hardware
|
||||
chroot mountpoint apt-get autoremove --purge --assume-yes
|
||||
|
||||
# Write out cloudy zipl.conf for future kernel updates
|
||||
cat << EOF > mountpoint/etc/zipl.conf
|
@ -1,14 +1,5 @@
|
||||
#!/bin/bash -ex
|
||||
|
||||
case $IMAGE_TARGETS in
|
||||
""|*qcow2*)
|
||||
;;
|
||||
*)
|
||||
echo "Skipping qcow2 image build"
|
||||
exit 0
|
||||
;;
|
||||
esac
|
||||
|
||||
case $ARCH:$SUBARCH in
|
||||
# Not sure if any other cloud images use subarch for something that
|
||||
# should take qcow2 format, so only skipping this on raspi2 for now.
|
live-build/ubuntu-cpc/hooks.d/base/root-squashfs.binary (executable file, 20 lines)
@ -0,0 +1,20 @@
|
||||
#!/bin/bash -ex
|
||||
# vi: ts=4 noexpandtab
|
||||
#
|
||||
# Generate a squashfs root and manifest
|
||||
|
||||
if [ -n "$SUBARCH" ]; then
|
||||
echo "Skipping rootfs build for subarch flavor build"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# This is the directory created by create-root-dir.binary
|
||||
rootfs_dir=rootfs.dir
|
||||
|
||||
squashfs_f="$PWD/livecd.ubuntu-cpc.squashfs"
|
||||
|
||||
cp $rootfs_dir.manifest $squashfs_f.manifest
|
||||
|
||||
(cd $rootfs_dir &&
|
||||
mksquashfs . $squashfs_f \
|
||||
-no-progress -xattrs -comp xz )
|
live-build/ubuntu-cpc/hooks.d/base/root-xz.binary (executable file, 15 lines)
@ -0,0 +1,15 @@
|
||||
#!/bin/bash -ex
|
||||
# vi: ts=4 expandtab
|
||||
#
|
||||
# Generate the rootfs.tar.xz and manifest
|
||||
|
||||
if [ -n "$SUBARCH" ]; then
|
||||
echo "Skipping rootfs build for subarch flavor build"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# This is the directory created by create-root-dir.binary
|
||||
rootfs_dir=rootfs.dir
|
||||
|
||||
cp $rootfs_dir.manifest livecd.ubuntu-cpc.rootfs.manifest
|
||||
(cd $rootfs_dir/ && tar -c --xattrs *) | xz > livecd.ubuntu-cpc.rootfs.tar.xz
|
1
live-build/ubuntu-cpc/hooks.d/base/series/all
Symbolic link
@ -0,0 +1 @@
|
||||
base
|
8
live-build/ubuntu-cpc/hooks.d/base/series/base
Normal file
@ -0,0 +1,8 @@
|
||||
depends root-dir
|
||||
depends tarball
|
||||
depends squashfs
|
||||
depends disk-image
|
||||
depends qcow2
|
||||
depends vmdk
|
||||
depends vagrant
|
||||
depends wsl
|
10
live-build/ubuntu-cpc/hooks.d/base/series/disk-image
Normal file
@ -0,0 +1,10 @@
|
||||
depends disk-image-uefi
|
||||
base/disk-image.binary
|
||||
base/disk-image-ppc64el.binary
|
||||
provides livecd.ubuntu-cpc.ext4
|
||||
provides livecd.ubuntu-cpc.initrd-generic
|
||||
provides livecd.ubuntu-cpc.initrd-generic-lpae
|
||||
provides livecd.ubuntu-cpc.kernel-generic
|
||||
provides livecd.ubuntu-cpc.kernel-generic-lpae
|
||||
provides livecd.ubuntu-cpc.kernel-kvm
|
||||
provides livecd.ubuntu-cpc.manifest
|
@ -0,0 +1,8 @@
|
||||
base/disk-image-uefi.binary
|
||||
provides livecd.ubuntu-cpc.ext4
|
||||
provides livecd.ubuntu-cpc.initrd-generic
|
||||
provides livecd.ubuntu-cpc.initrd-generic-lpae
|
||||
provides livecd.ubuntu-cpc.kernel-generic
|
||||
provides livecd.ubuntu-cpc.kernel-generic-lpae
|
||||
provides livecd.ubuntu-cpc.manifest
|
||||
provides livecd.ubuntu-cpc.filelist
|
3
live-build/ubuntu-cpc/hooks.d/base/series/qcow2
Normal file
@ -0,0 +1,3 @@
|
||||
depends disk-image
|
||||
base/qcow2-image.binary
|
||||
provides livecd.ubuntu-cpc.img
|
1
live-build/ubuntu-cpc/hooks.d/base/series/root-dir
Normal file
@ -0,0 +1 @@
|
||||
base/create-root-dir.binary
|
4
live-build/ubuntu-cpc/hooks.d/base/series/squashfs
Normal file
@ -0,0 +1,4 @@
|
||||
depends root-dir
|
||||
base/root-squashfs.binary
|
||||
provides livecd.ubuntu-cpc.squashfs
|
||||
provides livecd.ubuntu-cpc.squashfs.manifest
|
4
live-build/ubuntu-cpc/hooks.d/base/series/tarball
Normal file
@ -0,0 +1,4 @@
|
||||
depends root-dir
|
||||
base/root-xz.binary
|
||||
provides livecd.ubuntu-cpc.rootfs.tar.xz
|
||||
provides livecd.ubuntu-cpc.rootfs.manifest
|
3
live-build/ubuntu-cpc/hooks.d/base/series/vagrant
Normal file
@ -0,0 +1,3 @@
|
||||
depends disk-image
|
||||
base/vagrant.binary
|
||||
provides livecd.ubuntu-cpc.vagrant.box
|
5
live-build/ubuntu-cpc/hooks.d/base/series/vmdk
Normal file
@ -0,0 +1,5 @@
|
||||
depends disk-image
|
||||
base/vmdk-image.binary
|
||||
base/vmdk-ova-image.binary
|
||||
provides livecd.ubuntu-cpc.vmdk
|
||||
provides livecd.ubuntu-cpc.ova
|
4
live-build/ubuntu-cpc/hooks.d/base/series/wsl
Normal file
@ -0,0 +1,4 @@
|
||||
depends root-dir
|
||||
base/wsl-gz.binary
|
||||
provides livecd.ubuntu-cpc.wsl.rootfs.tar.gz
|
||||
provides livecd.ubuntu-cpc.wsl.rootfs.manifest
|
@ -24,15 +24,6 @@ case ${SUBPROJECT:-} in
|
||||
;;
|
||||
esac
|
||||
|
||||
case $IMAGE_TARGETS in
|
||||
""|*vagrant*)
|
||||
;;
|
||||
*)
|
||||
echo "Skipping Vagrant image build"
|
||||
exit 0
|
||||
;;
|
||||
esac
|
||||
|
||||
cur_d=${PWD}
|
||||
my_d=$(dirname $(readlink -f ${0}))
|
||||
|
||||
@ -106,7 +97,7 @@ distro=$(chroot chroot lsb_release --id --short | tr [:upper:] [:lower:])
|
||||
# Get the VMDK in place
|
||||
prefix="${distro}-${suite}-${version}-cloudimg"
|
||||
vmdk_f="${box_d}/${prefix}.vmdk"
|
||||
create_vmdk ${derivative_img} ${vmdk_f}
|
||||
create_vmdk ${derivative_img} ${vmdk_f} 40960
|
||||
|
||||
####################################
|
||||
# Create the ConfigDrive
|
@ -18,15 +18,6 @@ case $ARCH in
|
||||
exit 0;;
|
||||
esac
|
||||
|
||||
case ${IMAGE_TARGETS:-} in
|
||||
""|*vmdk*)
|
||||
;;
|
||||
*)
|
||||
echo "Skipping VMDK image build"
|
||||
exit 0
|
||||
;;
|
||||
esac
|
||||
|
||||
. config/functions
|
||||
|
||||
if [ -e binary/boot/disk-uefi.ext4 ]; then
|
@ -7,7 +7,7 @@
|
||||
# and checksums. This step produces an OVA that is suitable for use with
|
||||
# Clouds that support the OVF specification.
|
||||
#
|
||||
# For this step, we re-use the VMDK's made in 040-vmdk-image.binary
|
||||
# For this step, we re-use the VMDK's made in vmdk-image.binary
|
||||
|
||||
case ${SUBPROJECT:-} in
|
||||
minimized)
|
||||
@ -32,15 +32,6 @@ case $ARCH in
|
||||
exit 0;;
|
||||
esac
|
||||
|
||||
case ${IMAGE_TARGETS:-} in
|
||||
""|*vmdk*)
|
||||
;;
|
||||
*)
|
||||
echo "Skipping OVA image build"
|
||||
exit 0
|
||||
;;
|
||||
esac
|
||||
|
||||
cur_d=${PWD}
|
||||
my_d=$(dirname $(readlink -f ${0}))
|
||||
|
57
live-build/ubuntu-cpc/hooks.d/base/wsl-gz.binary
Executable file
@ -0,0 +1,57 @@
|
||||
#!/bin/bash -eux
|
||||
# vi: ts=4 expandtab
|
||||
#
|
||||
# Generate the compressed root directory for WSL
|
||||
|
||||
case ${SUBPROJECT:-} in
|
||||
minimized)
|
||||
echo "Skipping minimized $0 build as WSL systems are designed to be interactive"
|
||||
exit 0
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
|
||||
case $ARCH in
|
||||
amd64|arm64)
|
||||
;;
|
||||
*)
|
||||
echo "WSL root tarballs are not generated for $ARCH."
|
||||
exit 0;;
|
||||
esac
|
||||
|
||||
if [ -n "${SUBARCH:-}" ]; then
|
||||
echo "Skipping rootfs build for subarch flavor build"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
. config/functions
|
||||
|
||||
rootfs_dir=wslroot.dir
|
||||
|
||||
# This is the directory created by create-root-dir.binary
|
||||
cp -a rootfs.dir $rootfs_dir
|
||||
|
||||
setup_mountpoint $rootfs_dir
|
||||
|
||||
env DEBIAN_FRONTEND=noninteractive chroot $rootfs_dir apt-get -y -qq install ubuntu-wsl
|
||||
env DEBIAN_FRONTEND=noninteractive chroot $rootfs_dir apt-get autoremove --purge --assume-yes
|
||||
|
||||
create_manifest $rootfs_dir livecd.ubuntu-cpc.wsl.rootfs.manifest
|
||||
teardown_mountpoint $rootfs_dir
|
||||
|
||||
# remove attributes not supported by WSL's tar
|
||||
if [ -d $rootfs_dir/var/log/journal ]; then
|
||||
setfattr -x system.posix_acl_access $rootfs_dir/var/log/journal
|
||||
setfattr -x system.posix_acl_default $rootfs_dir/var/log/journal
|
||||
fi
|
||||
|
||||
# The reason for not using just 'tar ... -C $rootfs_dir .' is that using '.'
# was once found not to work, and checking whether the simpler command is safe
# would require verifying the app installation on all Windows 10 builds we
# support with WSL.
|
||||
cd $rootfs_dir
|
||||
tar --xattrs --sort=name -czf ../livecd.ubuntu-cpc.wsl.rootfs.tar.gz *
|
||||
cd ..
|
||||
|
||||
rm -rf $rootfs_dir
|
8
live-build/ubuntu-cpc/hooks.d/chroot/100-preserve-apt-prefs.chroot
Executable file
@ -0,0 +1,8 @@
|
||||
#! /bin/sh -ex
|
||||
|
||||
# live-build "helpfully" removes /etc/apt/preferences.d/* so we put a
|
||||
# copy somewhere it won't touch.
|
||||
|
||||
if [ -n "$(ls -A /etc/apt/preferences.d)" ]; then
|
||||
cp -a /etc/apt/preferences.d /etc/apt/preferences.d.save
|
||||
fi
|
13
live-build/ubuntu-cpc/hooks.d/chroot/100-purge-grub-legacy-ec2-arm.chroot
Executable file
@ -0,0 +1,13 @@
|
||||
#!/bin/sh -eux
|
||||
|
||||
# Only execute the hack for ARM images
|
||||
# ARCH is not available in .chroot hooks so we need to get the architecture
|
||||
# manually.
|
||||
arch=$(dpkg --print-architecture)
|
||||
if [ "$arch" != "armhf" ] && [ "$arch" != "arm64" ]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
# Why is grub-legacy-ec2 even on the image?
|
||||
apt-get remove --yes --purge grub-legacy-ec2 || true
|
@ -99,27 +99,9 @@ if [ -n "${root_fs_label}" ]; then
|
||||
sed -i "s,^[^#${bl}]*\([${bl}]*/[${bl}].*\),${lstr}\1," "${rootd}/etc/fstab"
|
||||
fi
|
||||
cat > /etc/fstab << EOM
|
||||
LABEL=cloudimg-rootfs / ext4 defaults 0 0
|
||||
LABEL=cloudimg-rootfs / ext4 defaults 0 1
|
||||
EOM
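# Note on the change above: the sixth fstab field is fs_passno; setting it to
# 1 instead of 0 makes fsck check the root filesystem at boot, which is the
# recommended value for /.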
|
||||
|
||||
## Make sure that the update-motd.d directory exists
|
||||
[ ! -e "${rootd}/etc/update-motd.d" ] &&
|
||||
mkdir -p "${rootd}/etc/update-motd.d"
|
||||
|
||||
## write a MOTD file advertising support for images
|
||||
cat > "${rootd}/etc/update-motd.d/51-cloudguest" << EOF
|
||||
#!/bin/sh
|
||||
#
|
||||
${CLOUD_IMG_STR}
|
||||
# This file is not managed by a package. If you no longer want to
|
||||
# see this message you can safely remove the file.
|
||||
echo ""
|
||||
echo " Get cloud support with Ubuntu Advantage Cloud Guest:"
|
||||
echo " http://www.ubuntu.com/business/services/cloud"
|
||||
EOF
|
||||
|
||||
chmod +x "${rootd}/etc/update-motd.d/51-cloudguest"
|
||||
|
||||
# for quantal and newer, add /etc/overlayroot.local.conf
|
||||
# but do not overwrite anything that somehow got there
|
||||
if [ -f "${rootd}/etc/overlayroot.conf" ] &&
|
||||
@ -160,22 +142,17 @@ fi
|
||||
#### END COMMON ARCH FUNCTIONS
|
||||
|
||||
|
||||
case $arch in
|
||||
# ARM images are special
|
||||
armhf|arm64)
|
||||
# Dirty hack because SUBARCH doesn't exist when running chroot hooks,
|
||||
# and we don't want raspi2 images to depend on a cloud data source:
|
||||
if _xchroot "${rootd}" dpkg -l linux-image-raspi2 2>/dev/null | grep -q '^.i'; then
|
||||
fake_cloud_init
|
||||
fi
|
||||
case $arch+${SUBARCH:-} in
|
||||
# raspi2 images are special
|
||||
armhf+raspi2)
|
||||
fake_cloud_init
|
||||
echo "Image architecture is ARM. Existing vmbuilder-fixups"
|
||||
exit 0
|
||||
;;
|
||||
|
||||
echo "Image architecture is ARM. Existing vmbuilder-fixups"
|
||||
|
||||
exit 0
|
||||
;;
|
||||
powerpc|ppc64el|s390x)
|
||||
exit 0
|
||||
;;
|
||||
armhf+*|arm64+*|powerpc+*|ppc64el+*|s390x+*)
|
||||
exit 0
|
||||
;;
|
||||
esac
|
||||
|
||||
psuedo_grub_probe() {
|
37
live-build/ubuntu-cpc/hooks.d/chroot/999-ubuntu-image-customization.chroot
Executable file
@ -0,0 +1,37 @@
|
||||
#!/bin/bash -ex
|
||||
|
||||
. /root/config/chroot
|
||||
|
||||
# Specific ubuntu-image chroot configuration goes here.
|
||||
# Currently, IMAGEFORMAT=none is equivalent to building a rootfs for an image
|
||||
# for a physical device, not a cloud instance. For such images we want some
|
||||
# special cloud-init configuration that should not be happening for cloud
|
||||
# cases. This check should be changed to a per-MODEL/per-SUBARCH check once
|
||||
# we support building cloud images via ubuntu-image.
|
||||
if [ "$IMAGEFORMAT" == "none" ]; then
|
||||
# If the cloud does not provide a meta-data service, this should be run.
# This will set up a NoCloud datasource on the boot partition.
# This is the default behavior for our ubuntu-image-built preinstalled
# images.
|
||||
cat << EOF > /etc/cloud/cloud.cfg.d/99-fake_cloud.cfg
|
||||
# configure cloud-init for NoCloud
|
||||
datasource_list: [ NoCloud, None ]
|
||||
datasource:
|
||||
NoCloud:
|
||||
fs_label: system-boot
|
||||
EOF
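# Descriptive note: with fs_label set to "system-boot", cloud-init's NoCloud
# datasource looks for its user-data and meta-data seed files on the boot
# partition rather than on a volume with the default "cidata" label.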
|
||||
mkdir -p /etc/systemd/system/cloud-init-local.service.d
|
||||
cat << EOF > /etc/systemd/system/cloud-init-local.service.d/mount-seed.conf
|
||||
# Ensure our customized seed location is mounted prior to execution
|
||||
|
||||
[Unit]
|
||||
RequiresMountsFor=/boot/firmware
|
||||
EOF
|
||||
mkdir -p /etc/systemd/system/cloud-config.service.d
|
||||
cat << EOF > /etc/systemd/system/cloud-config.service.d/getty-wait.conf
|
||||
# Wait for cloud-init to finish (creating users, etc.) before running getty
|
||||
|
||||
[Unit]
|
||||
Before=getty.target
|
||||
EOF
|
||||
fi
|
280
live-build/ubuntu-cpc/hooks.d/make-hooks
Executable file
@ -0,0 +1,280 @@
|
||||
#!/usr/bin/env python3
|
||||
#-*- encoding: utf-8 -*-
|
||||
"""
|
||||
This script parses a series file and its dependencies and generates a hooks
|
||||
folder containing symbolic links to the scripts that need to be invoked for
|
||||
a given image target set.
|
||||
|
||||
For example, if you wish to build the image target sets "vmdk" and "vagrant",
|
||||
you would call this script as
|
||||
|
||||
./make-hooks --hooks-dir hooks vmdk vagrant
|
||||
|
||||
Scripts live in subfolders below the "hooks.d" folder. Currently the folders
|
||||
"chroot" and "base" exist. The folder with the name "extra" is reserved for
|
||||
private scripts, which are not included in the source of livecd-rootfs. The
|
||||
scripts are not numbered, instead the order of their execution depends on the
|
||||
order in which they are listed in a series file.
|
||||
|
||||
Series files are placed into the subfolders "base/series" or "extra/series".
|
||||
Each series file contains a list of scripts to be executed. Empty lines and
|
||||
lines starting with a '#' are ignored. Series files in "extra/series" override
|
||||
files in "base/series" with the same name. For example, if a series file
|
||||
"base/series/cloudA" exists and a series file "extra/series/cloudA", then the
|
||||
latter will be preferred.
|
||||
|
||||
A series file in "extra/series" may also list scripts that are located in the
|
||||
"chroot" and "base" folders. In addition, series files can depend on other
|
||||
series files. For example, the series files for most custom images look similar
|
||||
to this:
|
||||
|
||||
depends disk-image
|
||||
depends extra-settings
|
||||
extra/cloudB.binary
|
||||
provides livecd.ubuntu-cpc.disk-kvm.img
|
||||
provides livecd.ubuntu-cpc.disk-kvm.manifest
|
||||
|
||||
Where "disk-image" and "extra-settings" may list scripts and dependencies which
|
||||
are to be processed before the script "extra/cloudB.binary" is called.
|
||||
|
||||
The "provides" directive defines a file that the hook creates; it can be
|
||||
specified multiple times. The field is used by this script to generate a list
|
||||
of output files created explicitly by the named image targets. The list is
|
||||
saved to the "explicit_provides" file in the hooks output directory. In
|
||||
the case of the "all" target this list would be empty. This list is
|
||||
consumed by the "remove-implicit-artifacts" script, which is run at the end of the build.
|
||||
|
||||
ACHTUNG: live build runs scripts with the suffix ".chroot" in a batch separate
|
||||
from scripts ending in ".binary". Even if you arrange them interleaved in your
|
||||
series files, the chroot scripts will be run before the binary scripts.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import sys
|
||||
import yaml
|
||||
|
||||
SCRIPT_DIR = os.path.normpath(os.path.dirname(os.path.realpath(sys.argv[0])))
|
||||
HOOKS_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, "..", "hooks"))
|
||||
|
||||
EXIT_OK = 0
|
||||
EXIT_ERR = 1
|
||||
|
||||
class MakeHooksError(Exception):
|
||||
pass
|
||||
|
||||
class MakeHooks:
|
||||
"""This class provides series file parsing and symlink generator
|
||||
functionality."""
|
||||
|
||||
def __init__(self, hooks_dir=None, quiet=False):
|
||||
"""The hooks_dir parameter can be used to specify the path to the
|
||||
directory, into which the hook symlinks to the actual script files
|
||||
should be placed.
|
||||
|
||||
If quiet is set to True, info messages during symlink creation will
|
||||
be suppressed. Use this if your build is not private, but you would
|
||||
like to hide which scripts are being run.
|
||||
"""
|
||||
self._script_dir = SCRIPT_DIR
|
||||
self._hooks_dir = hooks_dir or HOOKS_DIR
|
||||
self._quiet = quiet
|
||||
self._hooks_list = []
|
||||
self._included = set()
|
||||
self._provides = []
|
||||
|
||||
def reset(self):
|
||||
"""Reset the internal state allowing instance to be reused for
|
||||
another run."""
|
||||
self._hooks_list.clear()
|
||||
self._included.clear()
|
||||
|
||||
def print_usage(self):
|
||||
print(
|
||||
"CPC live build hook generator script \n"
|
||||
" \n"
|
||||
"Usage: ./make-hooks.sh [OPTIONS] <image_set> \n"
|
||||
" \n"
|
||||
"Options: \n"
|
||||
" \n"
|
||||
" --help, -h Show this message and exit. \n"
|
||||
" --quiet, -q Only show warnings and error messages. \n"
|
||||
" --hooks-dir, -d <dir> The directory where to write the symlinks.\n"
|
||||
)
|
||||
|
||||
def find_series_file(self, image_set):
|
||||
"""Search for the series file requested in the image_set parameter.
|
||||
|
||||
The image_set parameter should be a string containing the name of an
|
||||
image target set represented by a series file. First the "extra/series"
|
||||
folder is searched followed by the "base/series" folder.
|
||||
|
||||
When a file with the given name is found, the search stops and the
|
||||
full path to the file is returned.
|
||||
"""
|
||||
for subdir in ["extra", "base"]:
|
||||
series_file = os.path.join(self._script_dir, subdir, "series",
|
||||
image_set)
|
||||
if os.path.isfile(series_file):
|
||||
return series_file
|
||||
return None
|
||||
|
||||
def make_hooks(self, image_sets):
|
||||
"""Entry point for parsing series files and their dependencies and
|
||||
for generating the symlinks in the hooks folder.
|
||||
|
||||
The image_sets parameter must be an iterable containing the names of
|
||||
the series files representing the corresponding image target sets,
|
||||
e.g. "vmdk" or "vagrant".
|
||||
"""
|
||||
self.collect_chroot_hooks()
|
||||
self.collect_binary_hooks(image_sets, explicit_sets=True)
|
||||
self.create_symlinks()
|
||||
self.create_explicit_provides()
|
||||
|
||||
def collect_chroot_hooks(self):
|
||||
"""Chroot hooks are numbered and not explicitly mentioned in series
|
||||
files. Collect them, sort them and add them to the internal list of
|
||||
paths to hook scripts.
|
||||
"""
|
||||
chroot_hooks_dir = os.path.join(self._script_dir, "chroot")
|
||||
|
||||
chroot_entries = os.listdir(chroot_hooks_dir)
|
||||
chroot_entries.sort()
|
||||
|
||||
for entry in chroot_entries:
|
||||
if not (entry.endswith(".chroot_early") or
|
||||
entry.endswith(".chroot")):
|
||||
continue
|
||||
self._hooks_list.append(os.path.join("chroot", entry))
|
||||
|
||||
def collect_binary_hooks(self, image_sets, explicit_sets=False):
|
||||
"""Search the series files for the given image_sets and parse them
|
||||
and their dependencies to generate a list of hook scripts to be run
|
||||
during image build.
|
||||
|
||||
The image_sets parameter must be an iterable containing the names of
|
||||
the series files representing the corresponding image target sets,
|
||||
e.g. "vmdk" or "vagrant".
|
||||
|
||||
Populates the internal list of paths to hook scripts in the order in
|
||||
which the scripts are to be run.
|
||||
|
||||
If "explicit_sets" is True, the files specified on lines starting
|
||||
with "provides" will be added to self._provides to track explicit
|
||||
output artifacts. This is only True for the initial images_sets
|
||||
list, dependent image sets should set this to False.
|
||||
"""
|
||||
for image_set in image_sets:
|
||||
series_file = self.find_series_file(image_set)
|
||||
|
||||
if not series_file:
|
||||
raise MakeHooksError(
|
||||
"Series file for image set '%s' not found." % image_set)
|
||||
|
||||
with open(series_file, "r", encoding="utf-8") as fp:
|
||||
for line in fp:
|
||||
line = line.strip()
|
||||
if not line or line.startswith("#"):
|
||||
continue
|
||||
|
||||
m = re.match(r"^\s*depends\s+(\S+.*)$", line)
|
||||
if m:
|
||||
include_set = m.group(1)
|
||||
if include_set in self._included:
|
||||
continue
|
||||
self._included.add(include_set)
|
||||
self.collect_binary_hooks([include_set,])
|
||||
continue
|
||||
|
||||
m = re.match(r"^\s*provides\s+(\S+.*)$", line)
|
||||
if m:
|
||||
if explicit_sets:
|
||||
self._provides.append(m.group(1))
|
||||
continue
|
||||
|
||||
if not line in self._hooks_list:
|
||||
self._hooks_list.append(line)
|
||||
|
||||
def create_symlinks(self):
|
||||
"""Once the internal list of hooks scripts has been populated by a
|
||||
call to collect_?_hooks, this method is used to populate the hooks
|
||||
folder with enumerated symbolic links to the hooks scripts. If the
|
||||
folder does not exist, it will be created. If it exists, it must be
|
||||
empty or a MakeHooksError will be thrown.
|
||||
"""
|
||||
if os.path.isdir(self._hooks_dir) and os.listdir(self._hooks_dir):
|
||||
# Only print a warning, because directory might have been created
|
||||
# by auto/config voodoo.
|
||||
sys.stderr.write("WARNING: Hooks directory exists and is not empty.\n")
|
||||
os.makedirs(self._hooks_dir, exist_ok=True)
|
||||
|
||||
for counter, hook in enumerate(self._hooks_list, start=1):
|
||||
hook_basename = os.path.basename(hook)
|
||||
|
||||
m = re.match(r"^\d+-(?:\d+-)?(?P<basename>.*)$", hook_basename)
|
||||
if m:
|
||||
hook_basename = m.group("basename")
|
||||
|
||||
linkname = ("%03d-" % counter) + hook_basename
|
||||
linkdest = os.path.join(self._hooks_dir, linkname)
|
||||
linksrc = os.path.relpath(os.path.join(self._script_dir, hook),
|
||||
self._hooks_dir)
|
||||
|
||||
if not self._quiet:
|
||||
print("[HOOK] %s => %s" % (linkname, hook))
|
||||
os.symlink(linksrc, linkdest)
|
||||
|
||||
def create_explicit_provides(self):
|
||||
"""
|
||||
Create a file named "explicit_provides" in self._script_dir
|
||||
listing all files named on "provides" in the series files of
|
||||
targets explicitly named by the user. The file is created but
|
||||
left empty if there are no explicit "provides" keywords in the
targets (this is the case for 'all').
|
||||
"""
|
||||
with open(os.path.join(self._script_dir, "explicit_provides"), "w",
|
||||
encoding="utf-8") as fp:
|
||||
empty = True
|
||||
for provides in self._provides:
|
||||
if not self._quiet:
|
||||
print("[PROVIDES] %s" % provides)
|
||||
fp.write("%s\n" % provides)
|
||||
empty = False
|
||||
if not empty:
|
||||
fp.write('livecd.magic-proxy.log\n')
|
||||
|
||||
def cli(self, args):
|
||||
"""Command line interface to the hooks generator."""
|
||||
parser = argparse.ArgumentParser()
|
||||
|
||||
parser.add_argument("-q", "--quiet", dest="quiet", type=bool,
|
||||
help="Only show warnings and error messages.")
|
||||
parser.add_argument("-d", "--hooks-dir", dest="hooks_dir", type=str,
|
||||
help="The directory where to create the symlinks.")
|
||||
parser.add_argument("image_target", nargs="+", type=str,
|
||||
help="")
|
||||
|
||||
self.reset()
|
||||
options = parser.parse_args(args)
|
||||
|
||||
# Copy options to object attributes.
|
||||
for key, value in vars(options).items():
|
||||
if value and hasattr(self, "_" + key):
|
||||
setattr(self, "_" + key, value)
|
||||
|
||||
# Take remaining command line arguments, sanitize and turn into list.
|
||||
image_sets = re.sub(r";|,", " ", " ".join(options.image_target))\
|
||||
.split()
|
||||
|
||||
self.make_hooks(image_sets)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
try:
|
||||
MakeHooks().cli(sys.argv[1:])
|
||||
except MakeHooksError as e:
|
||||
sys.stderr.write("%s: %s\n" % (os.path.basename(sys.argv[0]), str(e)))
|
||||
sys.exit(EXIT_ERR)
|
41
live-build/ubuntu-cpc/hooks.d/remove-implicit-artifacts
Executable file
@ -0,0 +1,41 @@
|
||||
#!/usr/bin/env python3
|
||||
#-*- encoding: utf-8 -*-
|
||||
"""
|
||||
Remove output files not created by explicitly specified image targets
|
||||
|
||||
This uses the 'explicit_provides' file generated by the 'make-hooks'
|
||||
script. If the file is empty, all output will be saved.
|
||||
"""
|
||||
import glob
|
||||
import os
|
||||
import sys
|
||||
|
||||
if __name__ == "__main__":
|
||||
print('Running {}'.format(__file__))
|
||||
scriptname = os.path.basename(__file__)
|
||||
explicit = set()
|
||||
with open('./config/hooks.d/explicit_provides', 'r',
|
||||
encoding='utf-8') as fp:
|
||||
for filename in fp:
|
||||
explicit.add(filename.rstrip())
|
||||
|
||||
if not explicit:
|
||||
print('{}: explicit_provides is empty. '
|
||||
'All binary output will be included'.format(scriptname))
|
||||
sys.exit(0)
|
||||
|
||||
all = set(glob.glob('livecd.ubuntu-cpc.*'))
|
||||
implicit = all - explicit
|
||||
|
||||
print('{}: all artifacts considered: {}'.format(scriptname, all))
|
||||
print('{}: explicit artifacts to keep: {}'.format(scriptname, explicit))
|
||||
print('{}: implicit artifacts to remove: {}'.format(scriptname, implicit))
|
||||
|
||||
for file in implicit:
|
||||
if os.path.islink(file):
|
||||
print('{}: unlinking {}'.format(scriptname, file))
|
||||
os.unlink(file)
|
||||
elif os.path.isfile(file):
|
||||
print('{}: removing {} '
|
||||
'{} bytes'.format(scriptname, file, os.stat(file).st_size))
|
||||
os.remove(file)
|
@ -1,49 +0,0 @@
|
||||
#!/bin/bash -ex
|
||||
# vi: ts=4 noexpandtab
|
||||
#
|
||||
# Generate a squashfs root and manifest
|
||||
|
||||
case $IMAGE_TARGETS in
|
||||
""|*squashfs*)
|
||||
;;
|
||||
*)
|
||||
echo "Skipping squashfs build"
|
||||
exit 0
|
||||
;;
|
||||
esac
|
||||
|
||||
if [ -n "$SUBARCH" ]; then
|
||||
echo "Skipping rootfs build for subarch flavor build"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
. config/functions
|
||||
|
||||
mkdir binary/boot/squashfs.dir
|
||||
cp -a chroot/* binary/boot/squashfs.dir
|
||||
|
||||
setup_mountpoint binary/boot/squashfs.dir
|
||||
|
||||
chroot binary/boot/squashfs.dir dpkg-divert --local --rename /usr/sbin/grub-probe
|
||||
chroot binary/boot/squashfs.dir touch /usr/sbin/grub-probe
|
||||
chroot binary/boot/squashfs.dir chmod +x /usr/sbin/grub-probe
|
||||
|
||||
env DEBIAN_FRONTEND=noninteractive chroot binary/boot/squashfs.dir apt-get --purge remove --assume-yes '^linux-.*' 'linux-base+'
|
||||
env DEBIAN_FRONTEND=noninteractive chroot binary/boot/squashfs.dir apt-get --purge remove --assume-yes '^grub-.*'
|
||||
env DEBIAN_FRONTEND=noninteractive chroot binary/boot/squashfs.dir apt-get autoremove --purge --assume-yes
|
||||
rm -rf binary/boot/squashfs.dir/boot/grub
|
||||
chroot binary/boot/squashfs.dir mkdir -p /lib/modules
|
||||
|
||||
chroot binary/boot/squashfs.dir rm /usr/sbin/grub-probe
|
||||
chroot binary/boot/squashfs.dir dpkg-divert --remove --local --rename /usr/sbin/grub-probe
|
||||
|
||||
teardown_mountpoint binary/boot/squashfs.dir
|
||||
|
||||
squashfs_f="${PWD}/livecd.ubuntu-cpc.squashfs"
|
||||
squashfs_f_manifest="${squashfs_f}.manifest"
|
||||
|
||||
dpkg-query --admindir=binary/boot/squashfs.dir/var/lib/dpkg -W > ${squashfs_f_manifest}
|
||||
|
||||
(cd "binary/boot/squashfs.dir/" &&
|
||||
mksquashfs . ${squashfs_f} \
|
||||
-no-progress -xattrs -comp xz )
|
@ -1,16 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Execute extra binary hooks.
|
||||
|
||||
my_dir=$(dirname $(readlink -f ${0}))
|
||||
extra_d=${my_dir}/extra
|
||||
|
||||
if [ ! -d ${my_dir}/extra ]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
export IMAGE_STR="# CLOUD_IMG: This file was created/modified by the Cloud Image build process"
|
||||
export CLOUD_IMG_STR="$IMAGE_STR"
|
||||
export FS_LABEL="cloudimg-rootfs"
|
||||
|
||||
# Cleaner execution
|
||||
/bin/run-parts --exit-on-error --regex ".*\.binary" "${extra_d}"
|
@ -28,7 +28,7 @@ cp -a chroot/* binary/boot/squashfs.dir
|
||||
squashfs_f="${PWD}/livecd.${PROJECT}.squashfs"
|
||||
squashfs_f_manifest="${squashfs_f}.manifest"
|
||||
|
||||
dpkg-query --admindir=binary/boot/squashfs.dir/var/lib/dpkg -W > ${squashfs_f_manifest}
|
||||
create_manifest "binary/boot/squashfs.dir" "${squashfs_f_manifest}"
|
||||
|
||||
(cd "binary/boot/squashfs.dir/" &&
|
||||
mksquashfs . ${squashfs_f} \
|
||||
|
@ -1,91 +0,0 @@
|
||||
#!/bin/bash -ex
|
||||
# vi: ts=4 noexpandtab
|
||||
#
|
||||
# Generate a squashfs root and manifest
|
||||
|
||||
set -x
|
||||
|
||||
echo "031-maas-squashfs.binary"
|
||||
|
||||
case $IMAGE_TARGETS in
|
||||
""|*squashfs*)
|
||||
;;
|
||||
*)
|
||||
echo "Skipping squashfs build"
|
||||
exit 0
|
||||
;;
|
||||
esac
|
||||
|
||||
if [ -n "$SUBARCH" ]; then
|
||||
echo "Skipping rootfs build for subarch flavor build"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
. config/functions
|
||||
. config/common
|
||||
|
||||
SQUASH_ROOT=binary/boot/squashfs.dir
|
||||
|
||||
RACK_ROOT=binary/boot/squashfs-rack.dir
|
||||
REGION_ROOT=binary/boot/squashfs-region.dir
|
||||
|
||||
OVERLAY_RACK_ROOT=binary/overlay-maas-rack
|
||||
OVERLAY_REGION_ROOT=binary/overlay-maas-region
|
||||
|
||||
mkdir -p "$RACK_ROOT"
|
||||
mkdir -p "$REGION_ROOT"
|
||||
|
||||
mkdir -p "$OVERLAY_RACK_ROOT"
|
||||
mkdir -p "$OVERLAY_REGION_ROOT"
|
||||
|
||||
# RACK is on top of the regular squashfs
|
||||
|
||||
mount_overlay "$SQUASH_ROOT/" "$OVERLAY_RACK_ROOT/" "$RACK_ROOT/" "RACK"
|
||||
|
||||
setup_mountpoint $RACK_ROOT
|
||||
env DEBIAN_FRONTEND=noninteractive chroot $RACK_ROOT apt-get -y install maas-rack-controller
|
||||
rm -f $RACK_ROOT/etc/maas/rackd.conf
|
||||
chroot $RACK_ROOT sh -c 'echo RESET maas/default-maas-url | debconf-communicate maas-common'
|
||||
chroot $RACK_ROOT sh -c 'echo RESET maas-rack-controller/maas-url | debconf-communicate maas-common'
|
||||
chroot $RACK_ROOT sh -c 'echo RESET maas-rack-controller/maas-url | debconf-communicate maas-rack-controller'
|
||||
teardown_mountpoint $RACK_ROOT
|
||||
|
||||
umount "$RACK_ROOT"
|
||||
|
||||
# Full MAAS (Region) is on top of the rack
|
||||
# in overlayfs lowerdir are mounted right to left, colon separated
|
||||
mount_overlay "$OVERLAY_RACK_ROOT/:$SQUASH_ROOT/" "$OVERLAY_REGION_ROOT/" "$REGION_ROOT/" "REGION"
|
||||
|
||||
setup_mountpoint $REGION_ROOT
|
||||
|
||||
env DEBIAN_FRONTEND=noninteractive chroot $REGION_ROOT apt-get -y install maas
|
||||
|
||||
# Forget how things had to be configured
|
||||
chroot $REGION_ROOT debconf -fnoninteractive -omaas-region-controller /var/lib/dpkg/info/maas-region-controller.prerm remove
|
||||
chroot $REGION_ROOT debconf -fnoninteractive -omaas-region-controller /var/lib/dpkg/info/maas-region-controller.postrm remove
|
||||
chroot $REGION_ROOT debconf -fnoninteractive -omaas-region-controller /var/lib/dpkg/info/maas-region-controller.postrm purge
|
||||
chroot $REGION_ROOT sh -c 'echo RESET maas/default-maas-url | debconf-communicate maas-common'
|
||||
chroot $REGION_ROOT sudo -u postgres psql -c 'alter role maas password null;'
|
||||
rm -f $REGION_ROOT/etc/maas/rackd.conf
|
||||
rm -f $REGION_ROOT/etc/maas/regiond.conf
|
||||
chroot $REGION_ROOT pg_ctlcluster $(/bin/ls $REGION_ROOT/var/lib/postgresql/) main stop || :
|
||||
|
||||
# After the install, one may call the below to "reconfigure" maas-region-controller
|
||||
## debconf -fnoninteractive -omaas-region-controller /var/lib/dpkg/info/maas-region-controller.config configure
|
||||
## debconf -fnoninteractive -omaas-region-controller /var/lib/dpkg/info/maas-region-controller.postinst configure
|
||||
|
||||
teardown_mountpoint "$REGION_ROOT"
|
||||
|
||||
umount "$REGION_ROOT"
|
||||
|
||||
squashfs_rack_f="${PWD}/livecd.${PROJECT}.maas-rack.squashfs"
|
||||
|
||||
(cd "$OVERLAY_RACK_ROOT/" &&
|
||||
mksquashfs . ${squashfs_rack_f} \
|
||||
-no-progress -xattrs -comp xz )
|
||||
|
||||
squashfs_region_f="${PWD}/livecd.${PROJECT}.maas-region.squashfs"
|
||||
|
||||
(cd "$OVERLAY_REGION_ROOT/" &&
|
||||
mksquashfs . ${squashfs_region_f} \
|
||||
-no-progress -xattrs -comp xz )
|
@ -23,14 +23,17 @@ fi
|
||||
|
||||
. config/functions
|
||||
. config/common
|
||||
# somehow I don't have LB_DISTRIBUTION set?!
|
||||
. config/bootstrap
|
||||
|
||||
SQUASH_ROOT=binary/boot/squashfs.dir
|
||||
FILESYSTEM_ROOT=binary/boot/squashfs.dir
|
||||
INSTALLER_ROOT=binary/boot/installer.squashfs.dir
|
||||
OVERLAY_ROOT=binary/overlay
|
||||
|
||||
mkdir -p "$OVERLAY_ROOT"
|
||||
mkdir -p "$INSTALLER_ROOT" "$OVERLAY_ROOT"
|
||||
|
||||
# Create an installer squashfs layer
|
||||
mount_overlay "$SQUASH_ROOT/" "$OVERLAY_ROOT/" "$SQUASH_ROOT/"
|
||||
mount_overlay "$FILESYSTEM_ROOT/" "$OVERLAY_ROOT/" "$INSTALLER_ROOT/"
|
||||
|
||||
setup_mountpoint binary/boot/squashfs.dir
|
||||
|
||||
@ -40,59 +43,35 @@ setup_mountpoint binary/boot/squashfs.dir
|
||||
# It would be better to have this in ../includes.binary/overlay but
|
||||
# you can't have backslashes in filenames in bzr branches!
|
||||
DEVICE_UNIT_NAME='dev-disk-by\x2duuid-00c629d6\x2d06ab\x2d4dfd\x2db21e\x2dc3186f34105d.device'
|
||||
mkdir -p "$SQUASH_ROOT/etc/systemd/system/$DEVICE_UNIT_NAME.d"
|
||||
cat > "$SQUASH_ROOT/etc/systemd/system/$DEVICE_UNIT_NAME.d/override.conf" <<EOF
|
||||
mkdir -p "$INSTALLER_ROOT/etc/systemd/system/$DEVICE_UNIT_NAME.d"
|
||||
cat > "$INSTALLER_ROOT/etc/systemd/system/$DEVICE_UNIT_NAME.d/override.conf" <<EOF
|
||||
[Unit]
|
||||
JobRunningTimeoutSec=0s
|
||||
Wants=subiquity_config.mount
|
||||
EOF
|
||||
|
||||
# Prepare installer layer.
|
||||
|
||||
# Install any requirements for the installer, for things we don't want
|
||||
# to see on the installed system
|
||||
chroot $SQUASH_ROOT apt-get update
|
||||
chroot $SQUASH_ROOT apt-get -y install user-setup curtin lupin-casper
|
||||
# Install casper for live session magic.
|
||||
chroot $INSTALLER_ROOT apt-get -y install lupin-casper
|
||||
# Install linux-firmware for kernel to upload into hardware.
|
||||
chroot $INSTALLER_ROOT apt-get -y install linux-firmware
|
||||
chroot $INSTALLER_ROOT apt-get clean
|
||||
|
||||
# For bug #1743643 "Install to dirty disk with swap fails" remove the
|
||||
# "helpful" casper script that mounts any swap partitions it finds.
|
||||
rm -f $SQUASH_ROOT/usr/share/initramfs-tools/scripts/casper-bottom/*swap
|
||||
|
||||
# Installing casper means we need a new initramfs
|
||||
UPDATE_INITRAMFS_OPTIONS=CASPER_GENERATE_UUID=1 recreate_initramfs $SQUASH_ROOT
|
||||
rm -f $INSTALLER_ROOT/usr/share/initramfs-tools/scripts/casper-bottom/*swap
|
||||
|
||||
# Don't let cloud-init run in the live session.
|
||||
touch $SQUASH_ROOT/etc/cloud/cloud-init.disabled
|
||||
touch $INSTALLER_ROOT/etc/cloud/cloud-init.disabled
|
||||
|
||||
# Do the snap seeding dance.
|
||||
chroot $SQUASH_ROOT mkdir -p /var/lib/snapd/seed/snaps /var/lib/snapd/seed/assertions
|
||||
chroot $SQUASH_ROOT sh -c '
|
||||
set -x;
|
||||
cd /var/lib/snapd/seed;
|
||||
sudo SNAPPY_STORE_NO_CDN=1 snap download core;
|
||||
sudo SNAPPY_STORE_NO_CDN=1 snap download subiquity;
|
||||
# Preseed subiquity into installer layer
|
||||
snap_prepare $INSTALLER_ROOT
|
||||
snap_preseed $INSTALLER_ROOT subiquity/classic
|
||||
# Drop lxd from the installer layer preseed
|
||||
sed -i -e'N;/name: lxd/,+2d' $INSTALLER_ROOT/var/lib/snapd/seed/seed.yaml
|
||||
|
||||
CORE_SNAP=$(ls -1 core*.snap);
|
||||
SUBIQUITY_SNAP=$(ls -1 subiquity*.snap);
|
||||
|
||||
mv *.assert /var/lib/snapd/seed/assertions/;
|
||||
mv *.snap /var/lib/snapd/seed/snaps/;
|
||||
|
||||
cat <<EOF > /var/lib/snapd/seed/seed.yaml
|
||||
snaps:
|
||||
- name: core
|
||||
channel: stable
|
||||
file: ${CORE_SNAP}
|
||||
- name: subiquity
|
||||
channel: stable
|
||||
classic: true
|
||||
file: ${SUBIQUITY_SNAP}
|
||||
EOF
|
||||
'
|
||||
|
||||
teardown_mountpoint "$SQUASH_ROOT"
|
||||
|
||||
# Then unmount the overlay
|
||||
umount "$SQUASH_ROOT"
|
||||
teardown_mountpoint "$INSTALLER_ROOT"
|
||||
|
||||
squashfs_f="${PWD}/livecd.${PROJECT}.installer.squashfs"
|
||||
|
||||
|
90
live-build/ubuntu-server/hooks/033-kernel-bits.binary
Normal file
@ -0,0 +1,90 @@
|
||||
#!/bin/bash -eux
|
||||
# vi: ts=4 noexpandtab
|
||||
#
|
||||
# Generate a squashfs root and manifest
|
||||
|
||||
echo "033-kernel-bits.binary"
|
||||
|
||||
case ${IMAGE_TARGETS-} in
|
||||
""|*squashfs*)
|
||||
;;
|
||||
*)
|
||||
echo "Skipping squashfs build"
|
||||
exit 0
|
||||
;;
|
||||
esac
|
||||
|
||||
if [ -n "${SUBARCH-}" ]; then
|
||||
echo "Skipping rootfs build for subarch flavor build"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
. config/functions
|
||||
. config/common
|
||||
|
||||
INSTALLER_ROOT=binary/boot/installer.squashfs.dir
|
||||
|
||||
KERNEL_BITS_ROOT=binary/boot/kernel-bits.dir
|
||||
KERNEL_BITS_OVERLAY=binary/boot/overlay-kernel-bits
|
||||
|
||||
variants='ga hwe'
|
||||
|
||||
for variant in $variants; do
|
||||
if [ "$variant" = "ga" ]; then
|
||||
kernel_metapkg=linux-generic
|
||||
flavor=generic
|
||||
elif [ "$variant" = "hwe" ]; then
|
||||
kernel_metapkg=linux-generic-hwe-$(lsb_release -sr)
|
||||
flavor=generic-hwe
|
||||
else
|
||||
echo "bogus variant: $variant"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Make preparations
|
||||
mkdir -p $KERNEL_BITS_ROOT $KERNEL_BITS_OVERLAY
|
||||
mount_overlay "$INSTALLER_ROOT/" "$KERNEL_BITS_OVERLAY/" "$KERNEL_BITS_ROOT/"
|
||||
setup_mountpoint $KERNEL_BITS_ROOT
|
||||
|
||||
# Our initramfs hook implements a kind of extreme version of
|
||||
# cloud-initramfs-copymods, so remove that and prevent duelling hooks
|
||||
env DEBIAN_FRONTEND=noninteractive chroot $KERNEL_BITS_ROOT apt-get -y remove cloud-initramfs-copymods ubuntu-server || true
|
||||
|
||||
# Configure initramfs creation
|
||||
mkdir -p "$KERNEL_BITS_ROOT"/etc/initramfs-tools/conf.d/
|
||||
if [ -n "$LB_INITRAMFS_COMPRESSION" ]; then
|
||||
echo "COMPRESS=$LB_INITRAMFS_COMPRESSION" > "$KERNEL_BITS_ROOT"/etc/initramfs-tools/conf.d/livecd-rootfs.conf
|
||||
fi
|
||||
echo "export CASPER_GENERATE_UUID=1" > "$KERNEL_BITS_ROOT"/etc/initramfs-tools/conf.d/casper.conf
|
||||
|
||||
# Add a hook to record which kernel was booted and mount the
|
||||
# modules.squashfs created below.
|
||||
cat <<EOF > "$KERNEL_BITS_ROOT"/etc/initramfs-tools/scripts/init-bottom/live-server
|
||||
#!/bin/sh
|
||||
case \$1 in
|
||||
prereqs) exit 0;;
|
||||
esac
|
||||
|
||||
echo ${kernel_metapkg} > /run/kernel-meta-package
|
||||
mkdir -p \$rootmnt/lib/modules
|
||||
mount \$rootmnt/cdrom/casper/extras/modules.squashfs-$flavor \$rootmnt/lib/modules
|
||||
EOF
|
||||
chmod +x "$KERNEL_BITS_ROOT"/etc/initramfs-tools/scripts/init-bottom/live-server
|
||||
|
||||
# Install the kernel!
|
||||
env DEBIAN_FRONTEND=noninteractive chroot $KERNEL_BITS_ROOT apt-get -y install ${kernel_metapkg}
|
||||
|
||||
# Fish out generated kernel image and initrd
|
||||
mv "$KERNEL_BITS_ROOT"/boot/initrd.img-* ${PWD}/livecd.${PROJECT}.initrd-$flavor
|
||||
mv "$KERNEL_BITS_ROOT"/boot/vmlinu?-* ${PWD}/livecd.${PROJECT}.kernel-$flavor
|
||||
|
||||
# Create squashfs containing all the modules
|
||||
modules_squashfs_path="${PWD}/livecd.${PROJECT}.modules.squashfs-$flavor"
|
||||
(cd "$KERNEL_BITS_ROOT/lib/modules" &&
|
||||
mksquashfs . $modules_squashfs_path -no-progress -xattrs -comp xz)
|
||||
|
||||
# And clean up
|
||||
teardown_mountpoint $KERNEL_BITS_ROOT
|
||||
umount $KERNEL_BITS_ROOT
|
||||
rm -rf $KERNEL_BITS_ROOT $KERNEL_BITS_OVERLAY
|
||||
done
|
0
live-build/ubuntu-server/includes.binary/boot/.keep
Normal file
@ -7,9 +7,7 @@ network:
|
||||
match:
|
||||
name: "en*"
|
||||
dhcp4: true
|
||||
optional: true
|
||||
all-eth:
|
||||
match:
|
||||
name: "eth*"
|
||||
dhcp4: true
|
||||
optional: true
|
||||
|
@ -0,0 +1,2 @@
|
||||
[Journal]
|
||||
RateLimitIntervalSec=0
|
@ -1 +0,0 @@
|
||||
../subiquity_config.mount
|
@ -1,5 +0,0 @@
|
||||
[Mount]
|
||||
What=/cdrom/casper/maas/maas-rack.squashfs
|
||||
Where=/media/rack.lower
|
||||
Type=squashfs
|
||||
Options=ro
|
@ -1,5 +0,0 @@
|
||||
[Mount]
|
||||
What=/cdrom/casper/maas/maas-region.squashfs
|
||||
Where=/media/region.lower
|
||||
Type=squashfs
|
||||
Options=ro
|
@ -7,9 +7,9 @@ ConditionPathExists=!/run/subiquity/complete
|
||||
StartLimitInterval=0
|
||||
|
||||
[Service]
|
||||
Environment=PYTHONPATH=/usr/share/subiquity
|
||||
Environment=SNAP_REEXEC=0
|
||||
ExecStartPre=/bin/systemctl stop serial-getty@%I
|
||||
ExecStart=/sbin/agetty -n --keep-baud -l /snap/bin/subiquity --login-options "--serial" 115200,38400,9600 %I $TERM
|
||||
ExecStart=/usr/bin/snap run subiquity.subiquity-service %I
|
||||
ExecStopPost=/bin/systemctl start serial-getty@%I
|
||||
Type=idle
|
||||
Restart=always
|
||||
|
@ -0,0 +1 @@
|
||||
/bin/true
|
1
live-build/ubuntu/hooks/033-disk-image-uefi.binary
Symbolic link
@ -0,0 +1 @@
|
||||
../../ubuntu-cpc/hooks.d/base/disk-image-uefi.binary
|
131
live-build/ubuntu/hooks/040-hyperv-desktop-images.binary
Normal file
@ -0,0 +1,131 @@
|
||||
#!/bin/bash -eux
|
||||
|
||||
echo "Creating Hyper-V image with Desktop..."
|
||||
|
||||
case ${SUBPROJECT:-} in
|
||||
minimized)
|
||||
echo "We don't create minimized images for $0."
|
||||
exit 0
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
|
||||
case "${ARCH}" in
|
||||
amd64)
|
||||
;;
|
||||
*)
|
||||
echo "Hyper-V only supports amd64";
|
||||
exit 0
|
||||
;;
|
||||
esac
|
||||
|
||||
IMAGE_STR="# DESKTOP_IMG: This file was created/modified by the Desktop Image build process"
|
||||
FS_LABEL="desktop-rootfs"
|
||||
|
||||
. config/functions
|
||||
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
create_derivative uefi hyperv
|
||||
scratch_d=$(mktemp -d)
|
||||
mount_disk_image "${derivative_img}" "${scratch_d}"
|
||||
|
||||
cleanup_hyperv() {
|
||||
umount_disk_image ${scratch_d}
|
||||
rm -rf ${scratch_d}
|
||||
}
|
||||
trap cleanup_hyperv EXIT
|
||||
|
||||
# Perform customisations
|
||||
|
||||
chroot "${scratch_d}" apt-get update -y
|
||||
chroot "${scratch_d}" apt-get -y install xrdp linux-tools-virtual linux-cloud-tools-virtual
|
||||
|
||||
cat > ${scratch_d}/etc/modules-load.d/hyperv.conf << EOF
|
||||
${IMAGE_STR}
|
||||
hv_sock
|
||||
EOF
|
||||
|
||||
cat << EOF >> "${scratch_d}/etc/fstab"
|
||||
LABEL=$FS_LABEL / ext4 defaults 0 1
|
||||
EOF
|
||||
|
||||
# Customise xrdp
|
||||
|
||||
CHANGED_FILE_SUFFIX=.replaced-by-desktop-img-build
|
||||
|
||||
# use vsock transport.
|
||||
sed -i${CHANGED_FILE_SUFFIX} -e 's/use_vsock=false/use_vsock=true/g' "${scratch_d}/etc/xrdp/xrdp.ini"
|
||||
# use rdp security.
|
||||
sed -i${CHANGED_FILE_SUFFIX} -e 's/security_layer=negotiate/security_layer=rdp/g' "${scratch_d}/etc/xrdp/xrdp.ini"
|
||||
# remove encryption validation.
|
||||
sed -i${CHANGED_FILE_SUFFIX} -e 's/crypt_level=high/crypt_level=none/g' "${scratch_d}/etc/xrdp/xrdp.ini"
|
||||
# disable bitmap compression; since it's local it's much faster
|
||||
sed -i${CHANGED_FILE_SUFFIX} -e 's/bitmap_compression=true/bitmap_compression=false/g' "${scratch_d}/etc/xrdp/xrdp.ini"
|
||||
|
||||
# Add script to setup the ubuntu session properly
|
||||
cat > "${scratch_d}/etc/xrdp/startubuntu.sh" << EOF
|
||||
#!/bin/sh
|
||||
${IMAGE_STR}
|
||||
export GNOME_SHELL_SESSION_MODE=ubuntu
|
||||
export XDG_CURRENT_DESKTOP=ubuntu:GNOME
|
||||
exec /etc/xrdp/startwm.sh
|
||||
EOF
|
||||
chmod a+x "${scratch_d}/etc/xrdp/startubuntu.sh"
|
||||
|
||||
# use the script to setup the ubuntu session
|
||||
sed -i${CHANGED_FILE_SUFFIX} -e 's/startwm/startubuntu/g' "${scratch_d}/etc/xrdp/sesman.ini"
|
||||
|
||||
# rename the redirected drives to 'shared-drives'
|
||||
sed -i${CHANGED_FILE_SUFFIX} -e 's/FuseMountName=thinclient_drives/FuseMountName=shared-drives/g' "${scratch_d}/etc/xrdp/sesman.ini"
|
||||
|
||||
# Changed the allowed_users
|
||||
sed -i${CHANGED_FILE_SUFFIX} -e 's/allowed_users=console/allowed_users=anybody/g' "${scratch_d}/etc/X11/Xwrapper.config"
|
||||
|
||||
# Blacklist the vmw module
|
||||
cat > "${scratch_d}/etc/modprobe.d/blacklist_vmw_vsock_vmci_transport.conf" << EOF
|
||||
${IMAGE_STR}
|
||||
blacklist vmw_vsock_vmci_transport
|
||||
EOF
|
||||
|
||||
# Configure the policy xrdp session
|
||||
cat > ${scratch_d}/etc/polkit-1/localauthority/50-local.d/45-allow-colord.pkla << EOF
|
||||
${IMAGE_STR}
|
||||
[Allow Colord all Users]
|
||||
Identity=unix-user:*
|
||||
Action=org.freedesktop.color-manager.create-device;org.freedesktop.color-manager.create-profile;org.freedesktop.color-manager.delete-device;org.freedesktop.color-manager.delete-profile;org.freedesktop.color-manager.modify-device;org.freedesktop.color-manager.modify-profile
|
||||
ResultAny=no
|
||||
ResultInactive=no
|
||||
ResultActive=yes
|
||||
EOF
|
||||
|
||||
sed -i${CHANGED_FILE_SUFFIX} -e 's|After=|ConditionPathExists=!/var/lib/oem-config/run\nAfter=|g' "${scratch_d}/lib/systemd/system/xrdp.service"
|
||||
|
||||
# End xrdp customisation
|
||||
|
||||
# Don't run gnome-initial-setup from gdm
|
||||
sed -i${CHANGED_FILE_SUFFIX} "s|#WaylandEnable=false|#WaylandEnable=false\nInitialSetupEnable=false|" "${scratch_d}/etc/gdm3/custom.conf"
|
||||
chroot "${scratch_d}" /usr/sbin/useradd -d /home/oem -m -N -u 29999 oem
|
||||
chroot "${scratch_d}" /usr/sbin/oem-config-prepare --quiet
|
||||
touch "${scratch_d}/var/lib/oem-config/run"
|
||||
|
||||
chroot "${scratch_d}" apt-get clean
|
||||
|
||||
# End customisations
|
||||
|
||||
cleanup_hyperv
|
||||
trap - EXIT
|
||||
|
||||
raw_img=binary/boot/disk-hyperv-uefi.ext4
|
||||
vhd_img=livecd.ubuntu-desktop-hyperv.vhdx
|
||||
|
||||
qemu-img convert -O vhdx "$raw_img" "$vhd_img"
|
||||
rm "$raw_img"
|
||||
|
||||
apt-get install -y zip
|
||||
zip "$vhd_img.zip" "$vhd_img"
|
||||
|
||||
create_manifest chroot "$vhd_img.zip.manifest"
|
||||
|
||||
rm "$vhd_img"
|
9
live-build/ubuntu/includes.chroot/etc/hosts
Normal file
@ -0,0 +1,9 @@
|
||||
127.0.0.1 localhost.localdomain localhost
|
||||
::1 localhost6.localdomain6 localhost6
|
||||
|
||||
# The following lines are desirable for IPv6 capable hosts
|
||||
::1 localhost ip6-localhost ip6-loopback
|
||||
fe00::0 ip6-localnet
|
||||
ff02::1 ip6-allnodes
|
||||
ff02::2 ip6-allrouters
|
||||
ff02::3 ip6-allhosts
|
1
lp-in-release
Symbolic link
@ -0,0 +1 @@
|
||||
magic-proxy
|
971
magic-proxy
Executable file
@ -0,0 +1,971 @@
|
||||
#!/usr/bin/python3 -u
|
||||
#-*- encoding: utf-8 -*-
|
||||
"""
|
||||
This script can be called as "lp-in-release" or as "magic-proxy". When called
|
||||
under the former name, it acts as a CLI tool, when called under the latter name
|
||||
it will act as a transparent HTTP proxy.
|
||||
|
||||
The CLI tool parses the directory listing of
|
||||
|
||||
http://<mirror>/dists/suite/by-hash/SHA256
|
||||
|
||||
and figures out which hashes belong to an InRelease file. For example, to list
|
||||
all available hashes for "cosmic" run
|
||||
|
||||
./lp-in-release list --suite cosmic
|
||||
|
||||
Per default the script scans archive.ubuntu.com, but you can tell it to use a
|
||||
different mirror with the --mirror-url command line parameter. Analogously, you
|
||||
can list the hashes for "cosmic-updates" or "cosmic-security". The script can
|
||||
also find the hash that was valid at a given timestamp via
|
||||
|
||||
./lp-in-release select --suite cosmic --cutoff-time <timestamp>
|
||||
|
||||
Finally, you can use the script to inject inrelease-path settings into a
|
||||
sources.list file via
|
||||
|
||||
./lp-in-release inject --cutoff-time <timestamp> /etc/apt/sources.list
|
||||
|
||||
The proxy is just an extension to this functionality. Whenever a URL points at
|
||||
an InRelease file or a path listed in an InRelease file, the proxy will
|
||||
automatically inject the by hash URL for the resource according to the timestamp
|
||||
it was configured for. The proxy works in transparent and non-transparent mode.
|
||||
"""
|
||||
from datetime import datetime, timedelta, tzinfo
|
||||
|
||||
import argparse
|
||||
import copy
|
||||
import fcntl
|
||||
import getopt
|
||||
import hashlib
|
||||
import http.client
|
||||
import http.server
|
||||
import json
|
||||
import os
|
||||
import pwd
|
||||
import re
|
||||
import shutil
|
||||
import socketserver
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
import urllib.error
|
||||
import urllib.parse
|
||||
import urllib.request
|
||||
|
||||
EXIT_OK = 0
|
||||
EXIT_ERR = 1
|
||||
|
||||
class LPInReleaseBaseError(Exception):
|
||||
pass
|
||||
|
||||
class LPInReleaseIndexError(LPInReleaseBaseError):
|
||||
pass
|
||||
|
||||
class LPInReleaseCacheError(LPInReleaseBaseError):
|
||||
pass
|
||||
|
||||
class LPInReleaseProxyError(LPInReleaseBaseError):
|
||||
pass
|
||||
|
||||
class InRelease:
|
||||
"""This class represents an InRelease file."""
|
||||
|
||||
def __init__(self, mirror, suite, data, hash_=None, last_modified=None):
|
||||
"""mirror must contain the proper URL of the package repository up to
|
||||
the "dists" folder, e.g.
|
||||
|
||||
http://archive.ubuntu.com/ubuntu
|
||||
|
||||
suite is the name of the suite this InRelease file belongs to, e.g.
|
||||
<release>, <release>-updates or <release>-security.
|
||||
|
||||
data must contain the full contents of the InRelease file as a unicode
|
||||
string.
|
||||
|
||||
If supplied, then hash_ will be used as the sha256 hexdigest of the
|
||||
binary encoding of the InRelease file. If not supplied, the hash will
|
||||
be calculated. This is just used as a time-saver, when cache contents
|
||||
are read back in.
|
||||
|
||||
last_modified must be a string of format
|
||||
|
||||
Thu, 26 Apr 2018 23:37:48 UTC
|
||||
|
||||
representing the publication time of the InRelease file. If not given,
|
||||
the generation time stored in the InRelease file will be used. Below,
|
||||
this is set explicitly to correspond to the Last-Modified header spat
|
||||
out by the Web server.
|
||||
"""
|
||||
self.mirror = mirror
|
||||
self.suite = suite
|
||||
self.data = data
|
||||
self.dict = {}
|
||||
|
||||
if hash_:
|
||||
self.hash = hash_
|
||||
else:
|
||||
h = hashlib.sha256()
|
||||
h.update(data.encode("utf-8"))
|
||||
self.hash = h.hexdigest()
|
||||
|
||||
if last_modified:
|
||||
self.published = self._parse_datetime(last_modified)
|
||||
else:
|
||||
self.published = self._extract_timestamp(data)
|
||||
|
||||
@property
|
||||
def datetime(self):
|
||||
"""Return the publication time of this InRelease file as a string in
|
||||
YYYY-MM-DD HH:MM:SS ISO format. The result is always in GMT."""
|
||||
return datetime \
|
||||
.utcfromtimestamp(self.published) \
|
||||
.strftime('%Y-%m-%d %H:%M:%S')
|
||||
|
||||
@property
|
||||
def normalized_address(self):
|
||||
"""Return the "normalized" address of the mirror URL, consisting of
|
||||
only the hostname and the path. This may be used as an index into an
|
||||
InReleaseCache."""
|
||||
result = urllib.parse.urlparse(self.mirror)
|
||||
address = result.hostname + result.path.rstrip("/")
|
||||
return address
|
||||
|
||||
@property
|
||||
def contents(self):
|
||||
"""Return the pure contents of the InRelease file with the signature
|
||||
stripped off."""
|
||||
return self._split_release_and_sig(self.data)[0]
|
||||
|
||||
@property
|
||||
def signature(self):
|
||||
"""Return the ASCII-armored PGP signature of the InRelease file."""
|
||||
return self._split_release_and_sig(self.data)[1]
|
||||
|
||||
def serialize(self):
|
||||
"""Serializes the InRelease object into Python structures to be stored
|
||||
in an InReleaseCache."""
|
||||
month_names = [ "_ignore_",
|
||||
"Jan", "Feb", "Mar", "Apr", "May", "Jun",
|
||||
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec",
|
||||
]
|
||||
|
||||
wkday_names = [
|
||||
"Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun",
|
||||
]
|
||||
|
||||
dt = datetime.utcfromtimestamp(self.published)
|
||||
|
||||
published = "{}, {:02} {} {} {:02}:{:02}:{:02} GMT".format(
|
||||
wkday_names[dt.weekday()],
|
||||
dt.day,
|
||||
month_names[dt.month],
|
||||
dt.year,
|
||||
dt.hour,
|
||||
dt.minute,
|
||||
dt.second
|
||||
)
|
||||
|
||||
return {
|
||||
"mirror": self.mirror,
|
||||
"suite": self.suite,
|
||||
"hash": self.hash,
|
||||
"published": published,
|
||||
"data": self.data,
|
||||
}
|
||||
|
||||
def get_hash_for(self, path):
|
||||
"""Check if the given path is listed in this InRelease file and if so
|
||||
return the corresponding hash in hexdigest format. If the path is not
|
||||
listed, None is returned."""
|
||||
if not self.dict:
|
||||
self._parse_contents()
|
||||
return self.dict.get(path)
|
||||
|
||||
def _parse_contents(self):
|
||||
"""This method parses out all lines containing SHA256 hashes and creates
|
||||
an internal dict, mapping resources to hashes."""
|
||||
regex = re.compile(
|
||||
r" (?P<hash>[0-9a-f]{64})\s+(?P<size>\d+)\s+(?P<path>\S+)")
|
||||
|
||||
for line in self.contents.splitlines():
|
||||
m = regex.match(line)
|
||||
if not m:
|
||||
continue
|
||||
self.dict[m.group("path")] = m.group("hash")
|
||||
|
||||
def _parse_datetime(self, datetime_string):
|
||||
"""Because the behavior of Python's strptime's would be
|
||||
locale-dependent, we parse datetime strings of the format found in
|
||||
Last-Modified HTTP headers ourselves. This returns an integer
|
||||
representing a posix timestamp or None, if the parsing failed."""
|
||||
class UTC(tzinfo):
|
||||
def utcoffset(self, dt):
|
||||
return timedelta(0)
|
||||
|
||||
# we need a map, because strptime would be locale-dependent
|
||||
month_name_to_number = {
|
||||
"Jan": 1, "Feb": 2, "Mar": 3, "Apr": 4, "May": 5, "Jun": 6,
|
||||
"Jul": 7, "Aug": 8, "Sep": 9, "Oct": 10, "Nov": 11, "Dec": 12
|
||||
}
|
||||
|
||||
rexpr = r"""^\s*\w+,\s+
|
||||
(?P<day>\d+) \s+
|
||||
(?P<month>\w+) \s+
|
||||
(?P<year>\d+) \s+
|
||||
(?P<hour>\d+) :
|
||||
(?P<min>\d+) :
|
||||
(?P<sec>\d+) .*$"""
|
||||
|
||||
m = re.match(rexpr, datetime_string, flags=re.VERBOSE)
|
||||
if not m:
|
||||
return None
|
||||
|
||||
parts = list(m.group("year", "month", "day", "hour", "min", "sec"))
|
||||
parts[1] = month_name_to_number[m.group("month")]
|
||||
parts = [int(s) for s in parts]
|
||||
dt = datetime(*parts, tzinfo=UTC())
|
||||
epoch = datetime(1970, 1, 1, tzinfo=UTC())
|
||||
posix = (dt - epoch).total_seconds()
|
||||
|
||||
return int(posix)
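# Illustrative check: _parse_datetime("Thu, 26 Apr 2018 23:37:48 GMT")
# returns 1524785868, the corresponding POSIX timestamp.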
|
||||
|
||||
def _extract_timestamp(self, data):
|
||||
"""Parse the contents of the InRelease file to find the time it was
|
||||
generated. Returns a POSIX timestamp if found or None otherwise."""
|
||||
for line in data.splitlines():
|
||||
if line.startswith("Date:"):
|
||||
return self._parse_datetime(line.split(":", 1)[1])
|
||||
|
||||
return None
|
||||
|
||||
def _split_release_and_sig(self, data):
|
||||
"""Split the InRelease file into content and signature parts and return
|
||||
a tuple of unicode strings (content, signature)."""
|
||||
rexpr = re.escape("-----BEGIN PGP SIGNED MESSAGE-----") + r"\r?\n|" + \
|
||||
re.escape("-----BEGIN PGP SIGNATURE-----" ) + r"\r?\n|" + \
|
||||
re.escape("-----END PGP SIGNATURE-----" )
|
||||
|
||||
# returns content and signature
|
||||
return re.split(rexpr, data)[1:3]
|
||||
|
||||
|
||||
class LPInReleaseCache:
    """A cache for InRelease files that can optionally be saved to and
    loaded from disk."""

    def __init__(self, filename=None):
        """If filename is given, it is the name of the file that cache contents
        will be saved to or loaded from when the save and load methods are
        called, respectively."""
        self._filename = filename
        self._data = {}
        self._lock = threading.Lock()

        self.load()

    def load(self):
        """Load the cache contents from disk, performing some rudimentary file
        locking to prevent corruption."""
        if not self._filename:
            return

        buf = []
        fd = None
        try:
            fd = os.open(self._filename, os.O_CREAT | os.O_RDWR)

            fcntl.flock(fd, fcntl.LOCK_EX)

            while True:
                tmp = os.read(fd, 4096)
                if not tmp:
                    break
                buf.append(tmp)

            fcntl.flock(fd, fcntl.LOCK_UN)
        except OSError as e:
            raise LPInReleaseCacheError("Failed to load cache file: {}"
                                        .format(str(e)))
        finally:
            if fd is not None:
                os.close(fd)

        cache_data = {} if not buf else json.loads(
            b"".join(buf).decode("utf-8"))

        with self._lock:
            self._data = cache_data

    def save(self):
        """Save the cache contents to disk, performing some rudimentary file
        locking to prevent corruption."""
        if not self._filename:
            return

        with self._lock:
            buf = json \
                .dumps(self._data, ensure_ascii=False, indent=4,
                       sort_keys=True) \
                .encode("utf-8")

        fd = None
        try:
            fd = os.open(self._filename, os.O_CREAT | os.O_RDWR)

            fcntl.flock(fd, fcntl.LOCK_EX)

            os.ftruncate(fd, 0)
            os.write(fd, buf)

            fcntl.flock(fd, fcntl.LOCK_UN)
        except OSError as e:
            raise LPInReleaseCacheError("Failed to store cache file: {}"
                                        .format(str(e)))
        finally:
            if fd is not None:
                os.close(fd)

    def add(self, inrelease):
        """Add the given InRelease object to the cache."""
        with self._lock:
            self._data \
                .setdefault(inrelease.normalized_address, {}) \
                .setdefault(inrelease.suite, {}) \
                .setdefault(inrelease.hash, inrelease.serialize())

    def get_one(self, mirror, suite, hash_):
        """Return a single InRelease object for the given mirror and suite,
        corresponding to the given hash, or None if no such entry exists."""
        with self._lock:
            url_obj = urllib.parse.urlparse(mirror)
            address = url_obj.hostname + url_obj.path.rstrip("/")

            inrel = self._data\
                .get(address, {})\
                .get(suite, {})\
                .get(hash_)

            if not inrel:
                return None

            return InRelease(
                inrel["mirror"],
                inrel["suite"],
                inrel["data"],
                hash_=inrel["hash"],
                last_modified=inrel["published"]
            )

    def get_all(self, mirror, suite):
        """Return a list of all known InRelease objects for the given mirror
        and suite."""
        with self._lock:
            url_obj = urllib.parse.urlparse(mirror)
            address = url_obj.hostname + url_obj.path.rstrip("/")

            inrel_by_hash = self._data\
                .get(address, {})\
                .get(suite, {})

            inrelease_list = []

            for hash_, inrel in inrel_by_hash.items():
                inrelease_list.append(
                    InRelease(
                        inrel["mirror"],
                        inrel["suite"],
                        inrel["data"],
                        hash_=inrel["hash"],
                        last_modified=inrel["published"]
                    )
                )

            return inrelease_list

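# Editorial usage sketch (not part of the original script); the file name is
# an example:
#
#   cache = LPInReleaseCache("/tmp/inrelease-cache.json")
#   cache.add(inrelease)                        # an InRelease object
#   again = cache.get_one(mirror, suite, hash_) # None if not cached
#   cache.save()
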
class LPInReleaseIndex:
    """Abstraction to the build system's view of the "by hash" database.
    Currently, that interface is the by-hash directory listing of the Web
    server."""

    def __init__(self, mirror, suite, cache=None):
        """The mirror is the base URL of the repository up to the "dists"
        folder, e.g.

            http://archive.ubuntu.com/ubuntu

        suite is the name of the suite this InReleaseIndex object operates on,
        e.g. <release>, <release>-updates or <release>-security.

        Optionally, cache can be initialized to a LPInReleaseCache object, in
        which case all look-ups will first go to the cache and only cache
        misses will result in requests to the Web server.
        """
        self._mirror = mirror
        self._suite = suite
        self._cache = cache

        self._base_url = "/".join([self._mirror, "dists", self._suite,
                                   "by-hash/SHA256"])

    def inrelease_files(self):
        """Iterate over all InRelease files found in the archive for the mirror
        and suite this index has been configured to operate on."""
        hashes = self._retrieve_hashes()

        for h in hashes:
            inrelease = None

            if self._cache:
                inrelease = self._cache.get_one(self._mirror,
                                                self._suite, hash_=h)
            if not inrelease:
                inrelease = self._retrieve_inrelease(h)
            if not inrelease:
                continue

            yield inrelease

    def get_inrelease_for_timestamp(self, time_gmt):
        """Find and return the InRelease file that was valid at the given
        POSIX timestamp."""
        candidate = None

        for inrelease in self.inrelease_files():
            if inrelease.published > time_gmt:
                continue
            if not candidate or inrelease.published > candidate.published:
                candidate = inrelease

        return candidate

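    # Editorial note (not part of the original script): of all InRelease files
    # published at or before time_gmt, the most recently published one wins;
    # if none qualifies, None is returned.
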
    def _retrieve_inrelease(self, hash_):
        """Retrieve the contents of the file identified by hash_. Check if the
        file is an InRelease file and return a corresponding InRelease object.
        If the hash_ does not belong to an InRelease file, None is returned."""
        _500KB = 500 * 1024

        buf = b""
        inrelease = None
        url = self._base_url + "/" + hash_

        try:
            with urllib.request.urlopen(url) as response:

                # InRelease files seem to be around 200-300KB

                content_length = response.headers.get("Content-Length")
                last_modified = response.headers.get("Last-Modified")

                if not content_length:
                    buf = response.read(_500KB + 1)
                    content_length = len(buf)
                else:
                    content_length = int(content_length)

                # Slightly silly heuristic, but it does the job

                if content_length > _500KB or content_length < 1024:
                    return None

                buf += response.read()

                content_encoding = self \
                    ._guess_content_encoding_for_response(response)

                # A few additional checks to see if this is an InRelease file

                try:
                    buf = buf.decode(content_encoding)
                except UnicodeError:
                    return None

                if not buf.startswith("-----BEGIN PGP SIGNED MESSAGE-----"):
                    return None

                for kw in ["Origin:", "Label:", "Suite:", "Acquire-By-Hash:"]:
                    if kw not in buf:
                        return None

                inrelease = InRelease(self._mirror, self._suite, buf,
                                      hash_=hash_, last_modified=last_modified)

                if self._cache:
                    self._cache.add(inrelease)
        except urllib.error.HTTPError as e:
            if e.code not in [404]:
                raise LPInReleaseIndexError("Error retrieving {}: {}"
                                            .format(url, str(e)))

        return inrelease

    def _guess_content_encoding_for_response(self, response):
        """Guess the content encoding of the given HTTPResponse object."""
        content_encoding = response.headers.get("Content-Encoding")
        content_type = response.headers.get("Content-Type",
                                            "text/html;charset=UTF-8")

        if not content_encoding:
            m = re.match(r"^.*charset=(\S+)$", content_type)

            if m:
                content_encoding = m.group(1)
            else:
                content_encoding = "UTF-8"

        return content_encoding

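    # Editorial sketch (not part of the original script): for a response with
    # no Content-Encoding header and a Content-Type of
    # "text/html;charset=ISO-8859-1", this would be expected to return
    # "ISO-8859-1"; without a charset parameter it falls back to "UTF-8".
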
    def _retrieve_hashes(self):
        """Retrieve all available by-hashes for the mirror and suite that this
        index is configured to operate on."""
        hashes = []

        if self._cache:
            cache_entry = self._cache.get_all(self._mirror, self._suite)
            if cache_entry:
                return [inrel.hash for inrel in cache_entry]

        try:
            with urllib.request.urlopen(self._base_url) as response:
                content_encoding = self._guess_content_encoding_for_response(
                    response)

                body = response.read().decode(content_encoding)
                hashes = list(set(re.findall(r"[a-z0-9]{64}", body)))
        except urllib.error.URLError as e:
            raise LPInReleaseIndexError("Could not retrieve hash listing: {}"
                                        .format(str(e)))

        return hashes

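# Editorial usage sketch (not part of the original script); mirror, suite and
# timestamp are examples:
#
#   index = LPInReleaseIndex("http://archive.ubuntu.com/ubuntu", "bionic")
#   inrelease = index.get_inrelease_for_timestamp(1546336800)
#   if inrelease is not None:
#       print(inrelease.hash)
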
class LPInReleaseIndexCli:
    """A CLI interface for LPInReleaseIndex."""

    def __init__(self, name):
        self._name = name
        self._mirror = None
        self._suite = None
        self._timestamp = None
        self._cachefile = None
        self._cache = None
        self._infile = None
        self._outfile = None

    def __call__(self, args):
        options = vars(self._parse_opts(args))

        # Copy settings to object attributes
        for key, value in options.items():
            if hasattr(self, "_" + key):
                setattr(self, "_" + key, value)

        if self._cachefile:
            self._cache = LPInReleaseCache(self._cachefile)

        try:
            options["func"]()
        except LPInReleaseIndexError as e:
            sys.stderr.write("{}: {}\n".format(self._name, str(e)))
            sys.exit(EXIT_ERR)

        if self._cache:
            self._cache.save()

    def list(self):
        """List all InRelease hashes for a given mirror and suite."""
        for inrelease in self._list(self._mirror, self._suite):
            if self._timestamp and inrelease.published > self._timestamp:
                continue

            print("{} {} ({})".format(
                inrelease.hash,
                inrelease.datetime,
                inrelease.published,
            ))

    def select(self):
        """Find the hash of the InRelease file valid at a given timestamp."""
        candidate = self._select(self._mirror, self._suite)

        if candidate:
            print("{} {} ({})".format(
                candidate.hash,
                candidate.datetime,
                candidate.published,
            ))

    def inject(self):
        """Inject by-hash and inrelease-path settings into a sources.list."""
        sources_list = self._infile

        if not os.path.exists(sources_list):
            sys.stderr.write("{}: No such file: {}.\n"
                             .format(self._name, sources_list))
            sys.exit(EXIT_ERR)

        with open(sources_list, "r", encoding="utf-8") as fp:
            buf = fp.read()

        rexpr = re.compile(r"""^
            (?P<type>deb(?:-src)?)\s+
            (?P<opts>\[[^\]]+\]\s+)?
            (?P<mirror>(?P<scheme>\S+):\S+)\s+
            (?P<suite>\S+)\s+
            (?P<comps>.*)$""", flags=re.VERBOSE)

        lines = buf.splitlines(True)

        for i, line in enumerate(lines):
            m = rexpr.match(line)

            if not m:
                continue
            if m.group("scheme") not in ["http", "https", "ftp"]:
                continue

            opts = {}
            if m.group("opts"):
                for entry in m.group("opts").strip().strip("[]").split():
                    k, v = entry.split("=")
                    opts[k] = v

            inrelease = self._select(m.group("mirror"), m.group("suite"))
            if inrelease:
                opts["by-hash"] = "yes"
                opts["inrelease-path"] = "by-hash/SHA256/" + inrelease.hash

            groupdict = m.groupdict()
            groupdict["opts"] = " ".join(["{0}={1}".format(*o) for o in
                                          opts.items()])

            lines[i] = "{type} [{opts}] {mirror} {suite} {comps}\n"\
                .format(**groupdict)

        outfile = None
        try:
            if not self._outfile or self._outfile == "-":
                outfile = sys.stdout
            else:
                outfile = open(self._outfile, "w+", encoding="utf-8")
            outfile.write("".join(lines))
        finally:
            if outfile and outfile != sys.stdout:
                outfile.close()

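    # Editorial sketch (not part of the original script): inject() rewrites a
    # line such as
    #
    #   deb http://archive.ubuntu.com/ubuntu bionic main restricted
    #
    # into something along the lines of
    #
    #   deb [by-hash=yes inrelease-path=by-hash/SHA256/<hash>] http://archive.ubuntu.com/ubuntu bionic main restricted
    #
    # where <hash> is the SHA256 name of the InRelease file selected for the
    # cutoff time.
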
    def _parse_opts(self, args):
        """Parse command line arguments and initialize the CLI object."""
        main_parser = argparse.ArgumentParser()
        subparsers = main_parser.add_subparsers(dest="command")

        parser_inject = subparsers.add_parser("inject",
            help="Rewrite a sources.list file injecting appropriate hashes.")
        parser_list = subparsers.add_parser("list",
            help="List InRelease hashes for a given release and suite.")
        parser_select = subparsers.add_parser("select",
            help="Select hash to use for a given timestamp, release, suite.")

        parser_inject.set_defaults(func=self.inject)
        parser_list.set_defaults(func=self.list)
        parser_select.set_defaults(func=self.select)

        # Options common to all commands
        for parser in [parser_inject, parser_list, parser_select]:
            cutoff_time_required = parser != parser_list

            parser.add_argument("-t", "--cutoff-time", dest="timestamp",
                type=int, required=cutoff_time_required,
                help="A POSIX timestamp to pin the repo to.")
            parser.add_argument("--cache-file", dest="cachefile", type=str,
                help="A file in which to cache intermediate results "
                     "(optional).")

        mirror = "http://archive.ubuntu.com/ubuntu"

        # Options common to list, select commands
        for parser in [parser_list, parser_select]:
            parser.add_argument("-m", "--mirror", dest="mirror", type=str,
                default=mirror, help="The URL of the mirror to use.")
            parser.add_argument("-s", "--suite",
                dest="suite", type=str, required=True,
                help="The suite to scan (e.g. 'bionic', 'bionic-updates').")

        # Extra options for the inject command
        parser_inject.add_argument("-o", "--output-file", dest="outfile",
            type=str,
            help="The file to write the result to (default: stdout).")
        parser_inject.add_argument("infile", type=str,
            help="The sources.list file to modify.")

        if not args:
            main_parser.print_help()
            sys.exit(EXIT_ERR)

        return main_parser.parse_args(args)

    def _list(self, mirror, suite):
        """Internal helper for the list command. This is also used
        implicitly by the _select method."""
        index = LPInReleaseIndex(mirror, suite, cache=self._cache)

        inrelease_files = \
            reversed(
                sorted(
                    list(index.inrelease_files()),
                    key=lambda x: x.published
                )
            )

        return inrelease_files

    def _select(self, mirror, suite):
        """Internal helper for the select command."""
        candidate = None

        for inrelease in self._list(mirror, suite):
            if inrelease.published > self._timestamp:
                continue
            if not candidate or inrelease.published > candidate.published:
                candidate = inrelease

        return candidate

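# Editorial invocation sketch (not part of the original script): installed as
# "lp-in-release", the tool would typically be driven like this (values are
# examples):
#
#   lp-in-release list -s bionic -m http://archive.ubuntu.com/ubuntu
#   lp-in-release select -s bionic -t 1546336800
#   lp-in-release inject -t 1546336800 -o sources.list.new sources.list
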
class ProxyingHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
    """Request handler providing a virtual snapshot of the package
    repositories."""

    def do_HEAD(self):
        """Process a HEAD request."""
        self.__get_request(verb="HEAD")

    def do_GET(self):
        """Process a GET request."""
        self.__get_request()

    def __get_request(self, verb="GET"):
        """Pass all requests on to the destination server 1:1 except when the
        target is an InRelease file or a resource listed in an InRelease file.

        In that case we silently download the resource via the by-hash URL
        which was most recent at the cutoff (or repo snapshot) time and inject
        it into the response.

        It is important to understand that there is no status 3xx HTTP
        redirect happening here; the client does not know that what it
        receives is not exactly what it requested."""

        host, path = self.__get_host_path()

        m = re.match(
            r"^(?P<base>.*?)/dists/(?P<suite>[^/]+)/(?P<target>.*)$",
            path
        )

        if m:
            mirror = "http://" + host + m.group("base")
            base = m.group("base")
            suite = m.group("suite")
            target = m.group("target")

            index = LPInReleaseIndex(mirror, suite,
                                     cache=self.server.inrelease_cache)
            inrelease = index.get_inrelease_for_timestamp(
                self.server.snapshot_stamp)

            if inrelease is None:
                self.__send_error(404, "No InRelease file found for given "
                                       "mirror, suite and timestamp.")
                return

            if target == "InRelease":
                # If target is InRelease, send back contents directly.
                data = inrelease.data.encode("utf-8")

                self.log_message(
                    "Inject InRelease '{}'".format(inrelease.hash))

                self.send_response(200)
                self.send_header("Content-Length", len(data))
                self.end_headers()

                if verb == "GET":
                    self.wfile.write(data)

                return
            else:
                # If the target's hash is listed, rewrite the path to the
                # corresponding by-hash URL.
                hash_ = inrelease.get_hash_for(target)

                if hash_:
                    self.log_message(
                        "Inject {} for {}".format(hash_, target))

                    target_path = target.rsplit("/", 1)[0]

                    path = "{}/dists/{}/{}/by-hash/SHA256/{}"\
                        .format(base, suite, target_path, hash_)

        try:
            client = http.client.HTTPConnection(host)
            client.request(verb, path)
        except Exception as e:
            self.log_error("Failed to retrieve http://{}{}: {}"
                           .format(host, path, str(e)))
            return

        try:
            self.__send_response(client.getresponse())
        except Exception as e:
            self.log_error("Error delivering response: {}".format(str(e)))

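    # Editorial sketch (not part of the original script): a GET for
    #   /ubuntu/dists/bionic/main/binary-amd64/Packages.gz
    # would be answered with the content of
    #   /ubuntu/dists/bionic/main/binary-amd64/by-hash/SHA256/<hash>
    # where <hash> comes from the InRelease file selected for the configured
    # snapshot timestamp.
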
    def __get_host_path(self):
        """Figure out the host to contact and the path of the resource that is
        being requested."""
        host = self.headers.get("host")
        url = urllib.parse.urlparse(self.path)
        path = url.path

        return host, path

    def __send_response(self, response):
        """Pass on upstream response headers and body to the client."""
        self.send_response(response.status)

        for name, value in response.getheaders():
            self.send_header(name, value)

        self.end_headers()
        shutil.copyfileobj(response, self.wfile)

    def __send_error(self, status, message):
        """Return an HTTP error status and a message in the response body."""
        self.send_response(status)
        self.send_header("Content-Type", "text/plain; charset=utf-8")
        self.end_headers()
        self.wfile.write(message.encode("utf-8"))


class MagicHTTPProxy(socketserver.ThreadingMixIn, http.server.HTTPServer):
    """Tiny HTTP server using ProxyingHTTPRequestHandler instances to provide
    a snapshot view of the package repositories."""

    def __init__(self, server_address, server_port, cache_file=None,
                 repo_snapshot_stamp=time.time(), run_as=None):
        try:
            super(http.server.HTTPServer, self).__init__(
                (server_address, server_port), ProxyingHTTPRequestHandler)
        except OSError as e:
            raise LPInReleaseProxyError(
                "Could not initialize proxy: {}".format(str(e)))

        self.inrelease_cache = LPInReleaseCache(filename=cache_file)
        self.snapshot_stamp = repo_snapshot_stamp

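# Editorial usage sketch (not part of the original script); address, port and
# timestamp are examples:
#
#   proxy = MagicHTTPProxy("127.0.0.1", 8080, repo_snapshot_stamp=1546336800)
#   proxy.serve_forever()
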
class MagicHTTPProxyCli:
    """A CLI interface for the MagicHTTPProxy."""

    def __init__(self, name):
        self._name = name
        self._address = "127.0.0.1"
        self._port = 8080
        self._timestamp = time.time()
        self._run_as = None
        self._pid_file = None
        self._log_file = None
        self._background = False
        self._setsid = False

    def __call__(self, args):
        options = self._parse_opts(args)

        proxy = MagicHTTPProxy(
            options.address,
            options.port,
            cache_file=None,
            repo_snapshot_stamp=options.timestamp
        )

        # Detach, but keep all streams open.
        if options.background:
            pid = os.fork()
            if pid:
                os._exit(EXIT_OK)

        if options.log_file:
            fd = open(options.log_file, "wb+")
            os.dup2(fd.fileno(), sys.stdout.fileno())
            os.dup2(fd.fileno(), sys.stderr.fileno())

        # Become session leader and give up the controlling terminal.
        if options.setsid:
            if not options.log_file:
                fd = open(os.devnull, "wb+")
                os.dup2(fd.fileno(), sys.stdout.fileno())
                os.dup2(fd.fileno(), sys.stderr.fileno())
            os.setsid()

        if options.pid_file:
            with open(options.pid_file, "w+", encoding="utf-8") as fp:
                fp.write(str(os.getpid()))

        if options.run_as is not None:
            try:
                uid = pwd.getpwnam(options.run_as).pw_uid
                os.setuid(uid)
            except KeyError as e:
                sys.stderr.write("Failed to look up {}: {}\n"
                                 .format(options.run_as, str(e)))
                sys.exit(EXIT_ERR)
            except PermissionError as e:
                sys.stderr.write("Cannot setuid: {}\n".format(str(e)))
                sys.exit(EXIT_ERR)

        proxy.serve_forever()

    def _parse_opts(self, args):
        """Parse command line arguments and initialize the CLI object."""
        parser = argparse.ArgumentParser()

        parser.add_argument("--address", dest="address", type=str,
            default="127.0.0.1", help="The address of the interface to "
            "bind to (default: 127.0.0.1)")
        parser.add_argument("--port", dest="port", type=int, default=8080,
            help="The port to listen on (default: 8080)")
        parser.add_argument("-t", "--cutoff-time", dest="timestamp", type=int,
            required=True, help="A POSIX timestamp to pin the repo to.")
        parser.add_argument("--run-as", dest="run_as", type=str,
            help="Drop privileges and run as this user.")
        parser.add_argument("--pid-file", dest="pid_file", type=str,
            help="Store the PID in this file.")
        parser.add_argument("--log-file", dest="log_file", type=str,
            help="Redirect all streams to this file.")
        parser.add_argument("--background", dest="background",
            action="store_true",
            help="Whether to go into the background.")
        parser.add_argument("--setsid", dest="setsid",
            action="store_true",
            help="Become session leader and drop the controlling TTY.")

        return parser.parse_args(args)

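# Editorial invocation sketch (not part of the original script): installed
# under any name other than "lp-in-release" (say, "magic-proxy"), the proxy
# would typically be started like this (values are examples):
#
#   magic-proxy -t 1546336800 --address 127.0.0.1 --port 8080 --background
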
if __name__ == "__main__":
    name = os.path.basename(sys.argv[0])

    try:
        if name == "lp-in-release":
            cli = LPInReleaseIndexCli(name)
        else:
            cli = MagicHTTPProxyCli(name)

        cli(sys.argv[1:])
    except LPInReleaseBaseError as e:
        sys.stderr.write("{}: {}\n".format(name, str(e)))
        sys.exit(EXIT_ERR)
    except KeyboardInterrupt:
        sys.stderr.write("{}: Caught keyboard interrupt, exiting...\n"
                         .format(name))
        sys.exit(EXIT_ERR)
63
minimize-manual
Executable file
@ -0,0 +1,63 @@
#!/usr/bin/python3
"""Minimize the number of manually installed packages in the image.

Finds all manually installed meta packages and marks their dependencies
as automatically installed.
"""
import sys

import apt


def is_root(pkg):
    """Check if the package is a root package (a manually installed
    metapackage)."""
    return (pkg.is_installed and
            not pkg.is_auto_installed and
            (pkg.section == "metapackages" or
             pkg.section.endswith("/metapackages")))

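# Editorial note (not part of the original script): a manually installed
# package from the "metapackages" section, e.g. ubuntu-desktop, would count as
# a root here; everything it pulls in becomes a candidate for mark_auto().
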
def main():
    """Main function"""
    cache = apt.Cache(rootdir=sys.argv[1] if len(sys.argv) > 1 else None)
    roots = set(pkg for pkg in cache if is_root(pkg))
    workset = set(roots)
    seen = set()
    ubiquity_depends = set()

    with cache.actiongroup():
        while True:
            print("Iteration", file=sys.stderr)
            to_proc = workset - seen
            if not to_proc:
                break
            for pkg in sorted(to_proc):
                print(" Visiting", pkg, file=sys.stderr)

                if pkg not in roots and pkg not in ubiquity_depends:
                    pkg.mark_auto()

                for dep in (pkg.installed.dependencies +
                            pkg.installed.recommends):
                    for bdep in dep.or_dependencies:
                        for ver in bdep.target_versions:
                            if ver.package.is_installed:
                                if pkg.name == "ubiquity":
                                    ubiquity_depends.add(ver.package)
                                if pkg.name != "ubiquity":
                                    # Reprocess this package, as we did not
                                    # mark it when we visited it from ubiquity.
                                    try:
                                        ubiquity_depends.remove(ver.package)
                                        # Raises KeyError if ubiquity did not
                                        # depend on it.
                                        seen.remove(ver.package)
                                    except KeyError:
                                        pass
                                workset.add(ver.package)

                seen.add(pkg)

    cache.commit()

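# Editorial invocation sketch (not part of the original script): the optional
# argument is passed straight to apt.Cache(rootdir=...), e.g.
#
#   ./minimize-manual /path/to/image/chroot
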
if __name__ == '__main__':
    main()