Mirror of https://git.launchpad.net/livecd-rootfs (synced 2025-08-21 13:44:08 +00:00)

Comparing ubuntu/mas… to 2.578.10 (33 commits)
Commits (SHA1):
ee700c1f44, 68e56a233e, 7ca1312c09, ba29aa2120, 86eb0e9916, 626bc6dd8d,
23fa3735c8, bb5e5126d2, be0b42c329, 4a7866254e, 740c6ab42b, 0d56c46d24,
e00b7fa1dc, 5a088d522f, 0d54091641, e81e8e55b8, b7e6aee81b, 2dbda09b6a,
a1302a4a91, bddc32b01f, e528a8eabd, ba8797dc16, 4931523fc1, 50ca382be5,
367e9fbfeb, 24e39d6844, 2fecd44890, 00b995c7e2, f493132f90, edbbc23970,
51bd9d5b4a, 523a964b64, a3dac25d86
debian/changelog (vendored, 80 changed lines)

@@ -1,8 +1,84 @@
-livecd-rootfs (2.579) UNRELEASED; urgency=medium
+livecd-rootfs (2.578.10) disco; urgency=medium
+
+  * Add support for HyperV Gallery Images (LP: #1837088)
+
+ -- David Krauser <david.krauser@canonical.com>  Wed, 09 Oct 2019 15:13:28 -0400
+
+livecd-rootfs (2.578.9) disco; urgency=medium
+
+  * magic-proxy: dump proxy log to stdout on failure (LP: #1847300)
+
+ -- Robert C Jennings <robert.jennings@canonical.com>  Tue, 08 Oct 2019 10:57:31 -0500
+
+livecd-rootfs (2.578.8) disco; urgency=medium
+
+  * ubuntu-cpc: Only produce explicitly specified artifacts (LP: #1837254)
+
+ -- Robert C Jennings <robert.jennings@canonical.com>  Mon, 26 Aug 2019 16:25:24 -0500
+
+livecd-rootfs (2.578.7) disco; urgency=medium
+
+  [ Tobias Koch ]
+  * Add retry logic to snap-tool to make downloads more resilient.
+    (LP: #1837871)
+
+ -- Tobias Koch <tobias.koch@canonical.com>  Mon, 26 Aug 2019 13:36:36 +0200
+
+livecd-rootfs (2.578.6) disco; urgency=medium
+
+  [ Tobias Koch ]
+  * Do proper error checking when calling snap-tool info to determine
+    the base of a snap. (LP: #1828500)
+
+  [ Michael Vogt ]
+  * Run "snap debug validate-seed" during preseeding to do basic validation of
+    the generated seed.yaml
+
+  [ Iain Lane ]
+  * Seed core for non minimized builds, as it is still required (LP:
+    #1836594).
+
+ -- Iain Lane <iain.lane@canonical.com>  Tue, 16 Jul 2019 13:20:52 +0100
+
+livecd-rootfs (2.578.5) disco; urgency=medium
+
+  [ Balint Reczey ]
+  * Build WSL rootfs tarball (LP: #1827930)
+
+  [ Steve Langasek ]
+  * Strip translation files out of the minimal images, another thing that
+    goes unused when there is no human console user (and we already don't
+    have the locales themselves present on a minimal image). LP: #1829333.
+
+ -- Steve Langasek <steve.langasek@ubuntu.com>  Thu, 30 May 2019 12:13:55 -0700
+
+livecd-rootfs (2.578.4) disco; urgency=medium
+
+  [ Robert C Jennings ]
+  * magic-proxy: Send headers on error (LP: #1829944)
+
+ -- Steve Langasek <steve.langasek@ubuntu.com>  Thu, 23 May 2019 13:33:40 -0700
+
+livecd-rootfs (2.578.3) disco; urgency=medium
+
+  [ Robert C Jennings ]
+  * ubuntu-cpc: Ensure base disk image is the same between all build targets
+    (LP: #1827426)
+
+ -- Steve Langasek <steve.langasek@ubuntu.com>  Tue, 21 May 2019 15:19:34 -0700
+
+livecd-rootfs (2.578.2) disco; urgency=medium
+
+  * Remove device nodes later for ubuntu-base:minimized (i.e. docker) builds.
+    (LP: #1828118)
+
+ -- Michael Hudson-Doyle <michael.hudson@ubuntu.com>  Wed, 08 May 2019 09:59:35 +1200
+
+livecd-rootfs (2.578.1) disco; urgency=medium
 
   * Run clean_debian_chroot after minimize_manual (LP: #1826377)
 
- -- Julian Andres Klode <juliank@ubuntu.com>  Thu, 25 Apr 2019 11:43:11 +0200
+ -- Julian Andres Klode <juliank@ubuntu.com>  Fri, 26 Apr 2019 10:34:05 +0200
 
 livecd-rootfs (2.578) disco; urgency=medium
debian/control (vendored, 3 changed lines)

@@ -10,6 +10,7 @@ Package: livecd-rootfs
 Architecture: any
 Depends: ${misc:Depends},
          apt-utils,
+         attr,
          debootstrap,
          distro-info,
          dosfstools,
@@ -32,7 +33,7 @@ Depends: ${misc:Depends},
          python3-software-properties,
          qemu-utils,
          rsync,
-         snapd,
+         snapd (>= 2.39),
          squashfs-tools (>= 1:3.3-1),
          sudo,
          u-boot-tools [armhf arm64],
@@ -108,8 +108,8 @@ Expire-Date: 0
 lb bootstrap "$@"

-case $PROJECT in
-	ubuntu-server|ubuntu-cpc)
+case $PROJECT:${SUBPROJECT:-} in
+	ubuntu-server:*|ubuntu-cpc:*|ubuntu:desktop-preinstalled)
 		# Set locale to C.UTF-8 by default. We should
 		# probably do this for all images early in the
 		# 18.10 cycle but for now just do it for
@@ -131,6 +131,9 @@ Expire-Date: 0
 # Drop all man pages
 path-exclude=/usr/share/man/*

+# Drop all translations
+path-exclude=/usr/share/locale/*/LC_MESSAGES/*.mo
+
 # Drop all documentation ...
 path-exclude=/usr/share/doc/*

@@ -191,6 +194,10 @@ if [ -f /etc/dpkg/dpkg.cfg.d/excludes ] || [ -f /etc/dpkg/dpkg.cfg.d/excludes.dp
 	# This step processes the packages which still have missing documentation
 	dpkg --verify --verify-format rpm | awk '/..5...... \/usr\/share\/doc/ {print $2}' | sed 's|/[^/]*$||' | sort |uniq \
 		| xargs dpkg -S | sed 's|, |\n|g;s|: [^:]*$||' | uniq | DEBIAN_FRONTEND=noninteractive xargs apt-get install --reinstall -y
+	echo "Restoring system translations..."
+	# This step processes the packages which still have missing translations
+	dpkg --verify --verify-format rpm | awk '/..5...... \/usr\/share\/locale/ {print $2}' | sed 's|/[^/]*$||' | sort |uniq \
+		| xargs dpkg -S | sed 's|, |\n|g;s|: [^:]*$||' | uniq | DEBIAN_FRONTEND=noninteractive xargs apt-get install --reinstall -y
 	if dpkg --verify --verify-format rpm | awk '/..5...... \/usr\/share\/doc/ {exit 1}'; then
 		echo "Documentation has been restored successfully."
 		rm /etc/dpkg/dpkg.cfg.d/excludes.dpkg-tmp
@@ -308,17 +315,6 @@ EOF
 		apt-get -y --purge autoremove"
 fi

-if [ "${PROJECT}:${SUBPROJECT:-}" = "ubuntu-base:minimized" ]; then
-	# Save even more size by removing apt lists (that are currently removed
-	# downstream anyway)
-	rm -rf chroot/var/lib/apt/lists/*
-	# Having device nodes in the docker image can cause problems
-	# (https://github.com/tianon/docker-brew-ubuntu-core/issues/62)
-	# so remove them. We only do this for docker out of an
-	# abundance of caution.
-	rm -rf chroot/dev/*
-fi
-
 configure_universe

 if [ -d chroot/var/lib/preinstalled-pool ]; then
@@ -444,6 +440,12 @@ EOF
 if [ -e binary.success ]; then
 	rm -f binary.success
 else
+	# Dump the magic-proxy log to stdout on failure to aid debugging
+	if [ -f /build/livecd.magic-proxy.log ] ; then
+		echo "================= Magic proxy log (start) ================="
+		cat /build/livecd.magic-proxy.log
+		echo "================== Magic proxy log (end) =================="
+	fi
 	exit 1
 fi

@@ -976,3 +978,8 @@ if [ -f "config/magic-proxy.pid" ]; then
 	iptables -t nat -D OUTPUT -p tcp --dport 80 -m owner ! --uid-owner daemon \
 		-j REDIRECT --to 8080
 fi
+
+case $PROJECT in
+	ubuntu-cpc)
+		config/hooks.d/remove-implicit-artifacts
+esac
@@ -278,7 +278,7 @@ _get_live_passes ()
 if [ -z "${IMAGEFORMAT:-}" ]; then
 	case $PROJECT:${SUBPROJECT:-} in
-		ubuntu-cpc:*)
+		ubuntu-cpc:*|ubuntu:desktop-preinstalled)
 			if [ "$SUBARCH" = "raspi3" ]; then
 				# For now only raspi3, but others are soon to follow
 				IMAGEFORMAT=ubuntu-image
@@ -450,6 +450,13 @@ if [ "$PREINSTALLED" = "true" ]; then
 			;;
 		ubuntu-core|ubuntu-base|base|ubuntu-touch|ubuntu-touch-custom|ubuntu-cpc|ubuntu-desktop-next)
 			;;
+		ubuntu)
+			add_package live oem-config-gtk ubiquity-frontend-gtk
+			add_package live ubiquity-slideshow-ubuntu
+			if [ "$SUBPROJECT" = "desktop-preinstalled" ]; then
+				add_package live language-pack-en-base
+			fi
+			;;
 		*)
 			add_package live oem-config-gtk ubiquity-frontend-gtk
 			add_package live ubiquity-slideshow-ubuntu
@@ -1196,9 +1203,21 @@ EOF
 		fi
 		;;

-	ubuntu-touch:*|ubuntu-touch-custom:*|ubuntu-core:system-image|ubuntu-desktop-next:system-image|ubuntu-cpc:*|ubuntu-server:live)
-		cp -af /usr/share/livecd-rootfs/live-build/${PROJECT}/* \
-			config/
+	ubuntu-touch:*|ubuntu-touch-custom:*|ubuntu-core:system-image|ubuntu-desktop-next:system-image|ubuntu-cpc:*|ubuntu-server:live|ubuntu:desktop-preinstalled)
+		# Ensure that most things e.g. includes.chroot are copied as is
+		for entry in /usr/share/livecd-rootfs/live-build/${PROJECT}/*; do
+			case $entry in
+				*hooks*)
+					# But hooks are shared across the projects with symlinks
+					# dereference them
+					cp -afL $entry config/
+					;;
+				*)
+					# Most places want to preserve symlinks as is
+					cp -af $entry config/
+					;;
+			esac
+		done

 		if [ "$PROJECT" = "ubuntu-cpc" ]; then
 			case ${IMAGE_TARGETS:-} in
@@ -1,7 +1,7 @@
 # vi: ts=4 expandtab syntax=sh

-#imagesize=${IMAGE_SIZE:-$((2252*1024**2))} # 2.2G (the current size we ship)
-imagesize=${IMAGE_SIZE:-2361393152} # 2.2G (the current size we ship)
+# default imagesize = 2252*1024**2 = 2.2G (the current size we ship)
+imagesize=${IMAGE_SIZE:-2361393152}
 fs_label="${FS_LABEL:-rootfs}"

 rootfs_dev_mapper=
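A quick arithmetic check of the rewritten comment above (an illustration, not part of the repo) confirms that the comment and the unchanged default agree:

    # Python: 2252 MiB expressed in bytes matches the hard-coded default
    assert 2252 * 1024**2 == 2361393152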
@@ -483,12 +483,20 @@ _snap_preseed() {
 			;;
 		*)
 			# Determine if and what core snap is needed
-			local core_snap=$(/usr/share/livecd-rootfs/snap-tool info \
+			local snap_info
+
+			snap_info=$(/usr/share/livecd-rootfs/snap-tool info \
 				--cohort-key="${COHORT_KEY:-}" \
-				--channel="$CHANNEL" "$SNAP_NAME" | \
-				grep '^base:' | awk '{print $2}'
+				--channel="$CHANNEL" "${SNAP_NAME}" \
 			)
+
+			if [ $? -ne 0 ]; then
+				echo "Failed to retrieve base of $SNAP_NAME!"
+				exit 1
+			fi
+
+			local core_snap=$(echo "$snap_info" | grep '^base:' | awk '{print $2}')

 			# If $core_snap is not the empty string then SNAP itself is not a core
 			# snap and we must additionally seed the core snap.
 			if [ -n "$core_snap" ]; then
@@ -611,6 +619,12 @@ snap_prepare() {
 	local CUSTOM_BRAND_MODEL=${2:-generic:generic-classic}

 	snap_prepare_assertions "$CHROOT_ROOT" "$CUSTOM_BRAND_MODEL"

+	# ubuntu-cpc:minimized has its own special snap handling
+	if [ "$PROJECT:${SUBPROJECT:-}" != ubuntu-cpc:minimized ]; then
+		# Download the core snap
+		_snap_preseed "$CHROOT_ROOT" core stable
+	fi
 }

 snap_preseed() {
@@ -635,6 +649,15 @@ snap_preseed() {
 			touch "$CHROOT_ROOT/var/lib/snapd/seed/.snapd-explicit-install-stamp"
 			;;
 	esac
+
+	# Do basic validation of generated snapd seed.yaml, doing it here
+	# means we catch all the places(tm) that snaps are added but the
+	# downside is that each time a snap is added the seed must be valid,
+	# i.e. snaps with bases need to add bases first etc.
+	if [ -e chroot/var/lib/snapd/seed/seed.yaml ]; then
+		snap debug validate-seed "$CHROOT_ROOT/var/lib/snapd/seed/seed.yaml"
+	fi
 }

 snap_from_seed() {
@@ -717,7 +740,18 @@ subtract_package_lists() {
 clean_debian_chroot() {
 	# remove crufty files that shouldn't be left in an image
 	rm -f chroot/var/cache/debconf/*-old chroot/var/lib/dpkg/*-old
 	Chroot chroot apt clean
+	# For the docker images we remove even more stuff.
+	if [ "${PROJECT}:${SUBPROJECT:-}" = "ubuntu-base:minimized" ]; then
+		# Remove apt lists (that are currently removed downstream
+		# anyway)
+		rm -rf chroot/var/lib/apt/lists/*
+		# Having device nodes in the docker image can cause problems
+		# (https://github.com/tianon/docker-brew-ubuntu-core/issues/62)
+		# so remove them. We only do this for docker out of an
+		# abundance of caution.
+		rm -rf chroot/dev/*
+	fi
 }

 configure_universe() {
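Background on the snap_info change above (the 2.578.6 fix): in shell, `local v=$(cmd)` always reports the exit status of `local` itself, so a failing command substitution goes unnoticed. A minimal sketch with hypothetical function names, not repo code:

    #!/bin/bash
    broken() { local v=$(false); echo "broken sees: $?"; }   # prints 0, the status of 'local'
    fixed()  { local v; v=$(false); echo "fixed sees: $?"; } # prints 1, the status of 'false'
    broken
    fixed

Declaring the variable first and assigning in a separate statement, as the new code does, lets the following `[ $? -ne 0 ]` test observe the real exit status of snap-tool.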
@@ -9,8 +9,17 @@ case $ARCH in
 		;;
 esac

-IMAGE_STR="# CLOUD_IMG: This file was created/modified by the Cloud Image build process"
-FS_LABEL="cloudimg-rootfs"
+case ${PROJECT:-} in
+	ubuntu)
+		IMAGE_STR="# DESKTOP_IMG: This file was created/modified by the Desktop Image build process"
+		FS_LABEL="desktop-rootfs"
+		IMAGE_SIZE=12884901888 # 12G
+		;;
+	*)
+		IMAGE_STR="# CLOUD_IMG: This file was created/modified by the Cloud Image build process"
+		FS_LABEL="cloudimg-rootfs"
+		;;
+esac

 . config/binary
@@ -65,8 +74,8 @@ install_grub() {
 	chroot mountpoint mkdir -p "${efi_boot_dir}"

 	if [ -n "$partuuid" ]; then
-		# FIXME: code duplicated between 032-disk-image.binary
-		# and 033-disk-image-uefi.binary. We want to fix this to not
+		# FIXME: code duplicated between disk-image.binary
+		# and disk-image-uefi.binary. We want to fix this to not
 		# have initramfs-tools installed at all on these images.
 		echo "partuuid found for root device; omitting initrd"
 		echo "GRUB_FORCE_PARTUUID=$partuuid" >> mountpoint/etc/default/grub.d/40-force-partuuid.cfg
@ -8,7 +8,7 @@ if [ -n "$SUBARCH" ]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# This is the directory created by 031-0-create-root-dir.binary
|
||||
# This is the directory created by create-root-dir.binary
|
||||
rootfs_dir=rootfs.dir
|
||||
|
||||
squashfs_f="$PWD/livecd.ubuntu-cpc.squashfs"
|
||||
|
@ -8,7 +8,7 @@ if [ -n "$SUBARCH" ]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# This is the directory created by 031-0-create-root-dir.binary
|
||||
# This is the directory created by create-root-dir.binary
|
||||
rootfs_dir=rootfs.dir
|
||||
|
||||
cp $rootfs_dir.manifest livecd.ubuntu-cpc.rootfs.manifest
|
||||
|
@@ -5,3 +5,4 @@ depends disk-image
 depends qcow2
 depends vmdk
 depends vagrant
+depends wsl
@@ -1,3 +1,10 @@
 base/disk-image.binary
 base/disk-image-uefi.binary
 base/disk-image-ppc64el.binary
+provides livecd.ubuntu-cpc.ext4
+provides livecd.ubuntu-cpc.initrd-generic
+provides livecd.ubuntu-cpc.initrd-generic-lpae
+provides livecd.ubuntu-cpc.kernel-generic
+provides livecd.ubuntu-cpc.kernel-generic-lpae
+provides livecd.ubuntu-cpc.kernel-kvm
+provides livecd.ubuntu-cpc.manifest
@@ -1,2 +1,3 @@
 depends disk-image
 base/qcow2-image.binary
+provides livecd.ubuntu-cpc.img
@@ -1,2 +1,4 @@
 depends root-dir
 base/root-squashfs.binary
+provides livecd.ubuntu-cpc.squashfs
+provides livecd.ubuntu-cpc.squashfs.manifest
@@ -1,2 +1,4 @@
 depends root-dir
 base/root-xz.binary
+provides livecd.ubuntu-cpc.rootfs.tar.xz
+provides livecd.ubuntu-cpc.rootfs.manifest
@@ -1,2 +1,3 @@
 depends disk-image
 base/vagrant.binary
+provides livecd.ubuntu-cpc.vagrant.box
@@ -1,3 +1,5 @@
 depends disk-image
 base/vmdk-image.binary
 base/vmdk-ova-image.binary
+provides livecd.ubuntu-cpc.vmdk
+provides livecd.ubuntu-cpc.ova
live-build/ubuntu-cpc/hooks.d/base/series/wsl (new file, 4 lines)

@@ -0,0 +1,4 @@
+depends root-dir
+base/wsl.binary
+provides livecd.ubuntu-cpc.wsl.rootfs.tar.gz
+provides livecd.ubuntu-cpc.wsl.rootfs.manifest
@@ -7,7 +7,7 @@
 # and checksums. This step produces an OVA that is suitable for use with
 # Cloud's that support the OVF specification.
 #
-# For this step, we re-use the VMDK's made in 040-vmdk-image.binary
+# For this step, we re-use the VMDK's made in vmdk-image.binary

 case ${SUBPROJECT:-} in
 	minimized)
live-build/ubuntu-cpc/hooks.d/base/wsl.binary (new executable file, 56 lines)

@@ -0,0 +1,56 @@
#!/bin/bash -eux
# vi: ts=4 expandtab
#
# Generate the compressed root directory for WSL

case ${SUBPROJECT:-} in
    minimized)
        echo "Skipping minimized $0 build as WSL systems are designed to be interactive"
        exit 0
        ;;
    *)
        ;;
esac

case $ARCH in
    amd64|arm64)
        ;;
    *)
        echo "WSL root tarballs are not generated for $ARCH."
        exit 0;;
esac

if [ -n "${SUBARCH:-}" ]; then
    echo "Skipping rootfs build for subarch flavor build"
    exit 0
fi

. config/functions

rootfs_dir=wslroot.dir

# This is the directory created by create-root-dir.binary
cp -a rootfs.dir $rootfs_dir

setup_mountpoint $rootfs_dir

env DEBIAN_FRONTEND=noninteractive chroot $rootfs_dir apt-get -y -qq install ubuntu-wsl

create_manifest $rootfs_dir livecd.ubuntu-cpc.wsl.rootfs.manifest
teardown_mountpoint $rootfs_dir

# remove attributes not supported by WSL's tar
if [ -d $rootfs_dir/var/log/journal ]; then
    setfattr -x system.posix_acl_access $rootfs_dir/var/log/journal
    setfattr -x system.posix_acl_default $rootfs_dir/var/log/journal
fi

# The reason not using just tar .. -C $rootfs_dir . is that using '.' was found
# not working once and checking if using the simpler command is safe needs
# verification of the app installation on all Windows 10 builds we support
# with WSL.
cd $rootfs_dir
tar --xattrs --sort=name -czf ../livecd.ubuntu-cpc.wsl.rootfs.tar.gz *
cd ..

rm -rf $rootfs_dir
@@ -31,10 +31,19 @@ to this:
 depends disk-image
 depends extra-settings
 extra/cloudB.binary
+provides livecd.ubuntu-cpc.disk-kvm.img
+provides livecd.ubuntu-cpc.disk-kvm.manifest

 Where "disk-image" and "extra-settings" may list scripts and dependencies which
 are to be processed before the script "extra/cloudB.binary" is called.

+The "provides" directive defines a file that the hook creates; it can be
+specified multiple times. The field is used by this script to generate a list
+of output files created explicitly by the named image targets. The list is
+saved to the "explicit_provides" file in the hooks output directory. In
+the case of the "all" target this list would be empty. This list is
+consumed by the "remove-implicit-artifacts" which is run at the end of the build.
+
 ACHTUNG: live build runs scripts with the suffix ".chroot" in a batch separate
 from scripts ending in ".binary". Even if you arrange them interleaved in your
 series files, the chroot scripts will be run before the binary scripts.
@@ -74,6 +83,7 @@ class MakeHooks:
         self._quiet = quiet
         self._hooks_list = []
         self._included = set()
+        self._provides = []

     def reset(self):
         """Reset the internal state allowing instance to be reused for
@@ -120,8 +130,9 @@ class MakeHooks:
         e.g. "vmdk" or "vagrant".
         """
         self.collect_chroot_hooks()
-        self.collect_binary_hooks(image_sets)
+        self.collect_binary_hooks(image_sets, explicit_sets=True)
         self.create_symlinks()
+        self.create_explicit_provides()

     def collect_chroot_hooks(self):
         """Chroot hooks are numbered and not explicitly mentioned in series
@@ -139,7 +150,7 @@ class MakeHooks:
                 continue
             self._hooks_list.append(os.path.join("chroot", entry))

-    def collect_binary_hooks(self, image_sets):
+    def collect_binary_hooks(self, image_sets, explicit_sets=False):
         """Search the series files for the given image_sets and parse them
         and their dependencies to generate a list of hook scripts to be run
         during image build.
@@ -150,6 +161,11 @@ class MakeHooks:

         Populates the internal list of paths to hook scripts in the order in
         which the scripts are to be run.
+
+        If "explicit_sets" is True, the files specified on lines starting
+        with "provides" will be added to self._provides to track explicit
+        output artifacts. This is only True for the initial images_sets
+        list, dependent image sets should set this to False.
         """
         for image_set in image_sets:
             series_file = self.find_series_file(image_set)
@@ -161,8 +177,9 @@ class MakeHooks:
         with open(series_file, "r", encoding="utf-8") as fp:
             for line in fp:
                 line = line.strip()
-                if not line:
+                if not line or line.startswith("#"):
                     continue
                 m = re.match(r"^\s*depends\s+(\S+.*)$", line)
                 if m:
                     include_set = m.group(1)
@@ -171,6 +188,13 @@ class MakeHooks:
                     self._included.add(include_set)
                     self.collect_binary_hooks([include_set,])
                     continue

+                m = re.match(r"^\s*provides\s+(\S+.*)$", line)
+                if m:
+                    if explicit_sets:
+                        self._provides.append(m.group(1))
+                    continue
+
                 if not line in self._hooks_list:
                     self._hooks_list.append(line)
@@ -195,13 +219,32 @@ class MakeHooks:
             hook_basename = m.group("basename")

             linkname = ("%03d-" % counter) + hook_basename
-            linksrc = os.path.join(self._hooks_dir, linkname)
-            linkdest = os.path.relpath(os.path.join(self._script_dir, hook),
+            linkdest = os.path.join(self._hooks_dir, linkname)
+            linksrc = os.path.relpath(os.path.join(self._script_dir, hook),
                 self._hooks_dir)

             if not self._quiet:
                 print("[HOOK] %s => %s" % (linkname, hook))
-            os.symlink(linkdest, linksrc)
+            os.symlink(linksrc, linkdest)
+
+    def create_explicit_provides(self):
+        """
+        Create a file named "explicit_provides" in self._script_dir
+        listing all files named on "provides" in the series files of
+        targets explicitly named by the user. The file is created but
+        left empty if there are no explicit "provides" keywords in the
+        targets (this is the case for 'all')
+        """
+        with open(os.path.join(self._script_dir, "explicit_provides"), "w",
+                  encoding="utf-8") as fp:
+            empty = True
+            for provides in self._provides:
+                if not self._quiet:
+                    print("[PROVIDES] %s" % provides)
+                fp.write("%s\n" % provides)
+                empty = False
+            if not empty:
+                fp.write('livecd.magic-proxy.log\n')

     def cli(self, args):
         """Command line interface to the hooks generator."""
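A note on the create_symlinks() fix above, going by standard-library semantics: os.symlink(src, dst) creates the new link at dst pointing to src, so the old argument order tried to create the link at the target's path. A standalone sketch with hypothetical paths, not repo code:

    import os
    import tempfile

    d = tempfile.mkdtemp()
    os.mkdir(os.path.join(d, "hooks.d"))
    os.mkdir(os.path.join(d, "hooks"))
    open(os.path.join(d, "hooks.d", "example.binary"), "w").close()

    # os.symlink(src, dst): dst is the link to create, src is what it points to
    src = os.path.relpath(os.path.join(d, "hooks.d", "example.binary"),
                          os.path.join(d, "hooks"))
    dst = os.path.join(d, "hooks", "000-example.binary")
    os.symlink(src, dst)
    print(os.readlink(dst))  # ../hooks.d/example.binary

Renaming the variables to match those roles and swapping the call is exactly what the hunk does.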
live-build/ubuntu-cpc/hooks.d/remove-implicit-artifacts (new executable file, 41 lines)

@@ -0,0 +1,41 @@
#!/usr/bin/env python3
#-*- encoding: utf-8 -*-
"""
Remove output files not created by explicitly specified image targets

This uses the 'explicit_provides' file generated by the 'make-hooks'
script. If the file is empty, all output will be saved.
"""
import glob
import os
import sys

if __name__ == "__main__":
    print('Running {}'.format(__file__))
    scriptname = os.path.basename(__file__)
    explicit = set()
    with open('./config/hooks.d/explicit_provides', 'r',
              encoding='utf-8') as fp:
        for filename in fp:
            explicit.add(filename.rstrip())

    if not explicit:
        print('{}: explicit_provides is empty. '
              'All binary output will be included'.format(scriptname))
        sys.exit(0)

    all = set(glob.glob('livecd.ubuntu-cpc.*'))
    implicit = all - explicit

    print('{}: all artifacts considered: {}'.format(scriptname, all))
    print('{}: explicit artifacts to keep: {}'.format(scriptname, explicit))
    print('{}: implicit artifacts to remove: {}'.format(scriptname, implicit))

    for file in implicit:
        if os.path.islink(file):
            print('{}: unlinking {}'.format(scriptname, file))
            os.unlink(file)
        elif os.path.isfile(file):
            print('{}: removing {} '
                  '{} bytes'.format(scriptname, file, os.stat(file).st_size))
            os.remove(file)
live-build/ubuntu/hooks/033-disk-image-uefi.binary (new symbolic link)

@@ -0,0 +1 @@
+../../ubuntu-cpc/hooks.d/base/disk-image-uefi.binary
live-build/ubuntu/hooks/040-hyperv-desktop-images.binary (new file, 128 lines)

@@ -0,0 +1,128 @@
#!/bin/bash -eux

echo "Creating Hyper-V image with Desktop..."

case ${SUBPROJECT:-} in
    minimized)
        echo "We don't create minimized images for $0."
        exit 0
        ;;
    *)
        ;;
esac

case "${ARCH}" in
    amd64)
        ;;
    *)
        echo "Hyper-V only supports amd64";
        exit 0
        ;;
esac

IMAGE_STR="# DESKTOP_IMG: This file was created/modified by the Desktop Image build process"
FS_LABEL="desktop-rootfs"

. config/functions

export DEBIAN_FRONTEND=noninteractive

create_derivative uefi hyperv
scratch_d=$(mktemp -d)
mount_disk_image "${derivative_img}" "${scratch_d}"

cleanup_hyperv() {
    umount_disk_image ${scratch_d}
    rm -rf ${scratch_d}
}
trap cleanup_hyperv EXIT

# Perform customisations

chroot "${scratch_d}" apt-get update -y
chroot "${scratch_d}" apt-get -y install xrdp linux-tools-virtual linux-cloud-tools-virtual

cat > ${scratch_d}/etc/modules-load.d/hyperv.conf << EOF
${IMAGE_STR}
hv_sock
EOF

cat << EOF >> "${scratch_d}/etc/fstab"
LABEL=$FS_LABEL	/	ext4	defaults	0 0
EOF

# Customise xrdp

CHANGED_FILE_SUFFIX=.replaced-by-desktop-img-build

# use vsock transport.
sed -i${CHANGED_FILE_SUFFIX} -e 's/use_vsock=false/use_vsock=true/g' "${scratch_d}/etc/xrdp/xrdp.ini"
# use rdp security.
sed -i${CHANGED_FILE_SUFFIX} -e 's/security_layer=negotiate/security_layer=rdp/g' "${scratch_d}/etc/xrdp/xrdp.ini"
# remove encryption validation.
sed -i${CHANGED_FILE_SUFFIX} -e 's/crypt_level=high/crypt_level=none/g' "${scratch_d}/etc/xrdp/xrdp.ini"
# disable bitmap compression since its local its much faster
sed -i${CHANGED_FILE_SUFFIX} -e 's/bitmap_compression=true/bitmap_compression=false/g' "${scratch_d}/etc/xrdp/xrdp.ini"

# Add script to setup the ubuntu session properly
cat > "${scratch_d}/etc/xrdp/startubuntu.sh" << EOF
#!/bin/sh
${IMAGE_STR}
export GNOME_SHELL_SESSION_MODE=ubuntu
export XDG_CURRENT_DESKTOP=ubuntu:GNOME
exec /etc/xrdp/startwm.sh
EOF
chmod a+x "${scratch_d}/etc/xrdp/startubuntu.sh"

# use the script to setup the ubuntu session
sed -i${CHANGED_FILE_SUFFIX} -e 's/startwm/startubuntu/g' "${scratch_d}/etc/xrdp/sesman.ini"

# rename the redirected drives to 'shared-drives'
sed -i${CHANGED_FILE_SUFFIX} -e 's/FuseMountName=thinclient_drives/FuseMountName=shared-drives/g' "${scratch_d}/etc/xrdp/sesman.ini"

# Changed the allowed_users
sed -i${CHANGED_FILE_SUFFIX} -e 's/allowed_users=console/allowed_users=anybody/g' "${scratch_d}/etc/X11/Xwrapper.config"

# Blacklist the vmw module
cat > "${scratch_d}/etc/modprobe.d/blacklist_vmw_vsock_vmci_transport.conf" << EOF
${IMAGE_STR}
blacklist vmw_vsock_vmci_transport
EOF

# Configure the policy xrdp session
cat > ${scratch_d}/etc/polkit-1/localauthority/50-local.d/45-allow-colord.pkla << EOF
${IMAGE_STR}
[Allow Colord all Users]
Identity=unix-user:*
Action=org.freedesktop.color-manager.create-device;org.freedesktop.color-manager.create-profile;org.freedesktop.color-manager.delete-device;org.freedesktop.color-manager.delete-profile;org.freedesktop.color-manager.modify-device;org.freedesktop.color-manager.modify-profile
ResultAny=no
ResultInactive=no
ResultActive=yes
EOF

sed -i${CHANGED_FILE_SUFFIX} -e 's|After=|ConditionPathExists=!/var/lib/oem-config/run\nAfter=|g' "${scratch_d}/lib/systemd/system/xrdp.service"

# End xrdp customisation

# Don't run gnome-initial-setup from gdm
sed -i${CHANGED_FILE_SUFFIX} "s|#WaylandEnable=false|#WaylandEnable=false\nInitialSetupEnable=false|" "${scratch_d}/etc/gdm3/custom.conf"
chroot "${scratch_d}" /usr/sbin/useradd -d /home/oem -m -N -u 29999 oem
chroot "${scratch_d}" /usr/sbin/oem-config-prepare --quiet
touch "${scratch_d}/var/lib/oem-config/run"

chroot "${scratch_d}" apt-get clean

# End customisations

cleanup_hyperv
trap - EXIT

raw_img=binary/boot/disk-hyperv-uefi.ext4
vhd_img=livecd.ubuntu-desktop-hyperv.vhdx

qemu-img convert -O vhdx "$raw_img" "$vhd_img"
rm "$raw_img"

apt-get install -y zip
zip "$vhd_img.zip" "$vhd_img"
rm "$vhd_img"
live-build/ubuntu/includes.chroot/etc/hosts (new file, 9 lines)

@@ -0,0 +1,9 @@
127.0.0.1 localhost.localdomain localhost
::1 localhost6.localdomain6 localhost6

# The following lines are desirable for IPv6 capable hosts
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
ff02::3 ip6-allhosts
@@ -843,6 +843,7 @@ class ProxyingHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
         """Return an HTTP error status and a message in the response body."""
         self.send_response(status)
         self.send_header("Content-Type", "text/plain; charset=utf-8")
+        self.end_headers()
         self.wfile.write(message.encode("utf-8"))
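This one-line hunk is the whole 2.578.4 "send headers on error" fix: Python's BaseHTTPRequestHandler buffers the status line and headers until end_headers() is called, so an error path that skips it writes the body into an unterminated header block. A minimal standalone handler following the same pattern (hypothetical names, not magic-proxy itself):

    import http.server

    class Handler(http.server.BaseHTTPRequestHandler):
        def do_GET(self):
            self.send_error_response(502, "upstream request failed")

        def send_error_response(self, status, message):
            # status line, headers, explicit end-of-headers, then the body
            self.send_response(status)
            self.send_header("Content-Type", "text/plain; charset=utf-8")
            self.end_headers()
            self.wfile.write(message.encode("utf-8"))

    if __name__ == "__main__":
        http.server.HTTPServer(("127.0.0.1", 8080), Handler).serve_forever()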
snap-tool (222 changed lines)

@@ -28,6 +28,7 @@ import re
 import shutil
 import subprocess
 import sys
+import time
 import urllib.error
 import urllib.request

@@ -50,6 +51,178 @@ class SnapAssertionError(SnapError):
     pass


+class ExpBackoffHTTPClient:
+    """This class is an abstraction layer on top of urllib with additional
+    retry logic for more reliable downloads."""
+
+    class Request:
+        """This is a convenience wrapper around urllib.request."""
+
+        def __init__(self, request, do_retry, base_interval, num_tries):
+            """
+            :param request:
+                An urllib.request.Request instance.
+            :param do_retry:
+                Whether to enable the exponential backoff and retry logic.
+            :param base_interval:
+                The initial interval to sleep after a failed attempt.
+            :param num_tries:
+                How many attempts to make.
+            """
+            self._request = request
+            self._do_retry = do_retry
+            self._base_interval = base_interval
+            self._num_tries = num_tries
+            self._response = None
+
+        def open(self):
+            """Open the connection."""
+            if not self._response:
+                self._response = self._retry_urlopen()
+
+        def close(self):
+            """Close the connection."""
+            if self._response:
+                self._response.close()
+                self._response = None
+
+        def data(self):
+            """Return the raw response body."""
+            with self:
+                return self.read()
+
+        def json(self):
+            """Return the deserialized response body interpreted as JSON."""
+            return json.loads(self.data(), encoding="utf-8")
+
+        def text(self):
+            """Return the response body as a unicode string."""
+            encoding = "utf-8"
+
+            with self:
+                content_type = self._response.getheader("Content-Type", "")
+
+                if content_type == "application/json":
+                    encoding = "utf-8"
+                else:
+                    m = re.match(r"text/\S+;\s*charset=(?P<charset>\S+)",
+                                 content_type)
+                    if m:
+                        encoding = m.group("charset")
+
+                return self.read().decode(encoding)
+
+        def read(self, size=None):
+            """Read size bytes from the response. If size is not set, the
+            complete response body is read in."""
+            return self._response.read(size)
+
+        def __enter__(self):
+            """Make this class a context manager."""
+            self.open()
+            return self
+
+        def __exit__(self, type, value, traceback):
+            """Make this class a context manager."""
+            self.close()
+
+        def _retry_urlopen(self):
+            """Try to open the HTTP connection as many times as configured
+            through the constructor. Every time an error occurs, double the
+            time to wait until the next attempt."""
+            for attempt in range(self._num_tries):
+                try:
+                    return urllib.request.urlopen(self._request)
+                except Exception as e:
+                    if isinstance(e, urllib.error.HTTPError) and e.code < 500:
+                        raise
+                    if attempt >= self._num_tries - 1:
+                        raise
+                    sys.stderr.write(
+                        "WARNING: failed to open URL '{}': {}\n"
+                        .format(self._request.full_url, str(e))
+                    )
+                else:
+                    break
+
+                sleep_interval = self._base_interval * 2**attempt
+                sys.stderr.write(
+                    "Retrying HTTP request in {} seconds...\n"
+                    .format(sleep_interval)
+                )
+                time.sleep(sleep_interval)
+
+    def __init__(self, do_retry=True, base_interval=2, num_tries=8):
+        """
+        :param do_retry:
+            Whether to enable the retry logic.
+        :param base_interval:
+            The initial interval to sleep after a failed attempt.
+        :param num_tries:
+            How many attempts to make.
+        """
+        self._do_retry = do_retry
+        self._base_interval = base_interval
+        self._num_tries = num_tries if do_retry else 1
+
+    def get(self, url, headers=None):
+        """Create a GET request that can be used to retrieve the resource
+        at the given URL.
+
+        :param url:
+            An HTTP URL.
+        :param headers:
+            A dictionary of extra headers to send along.
+        :return:
+            An ExpBackoffHTTPClient.Request instance.
+        """
+        return self._prepare_request(url, headers=headers)
+
+    def post(self, url, data=None, json=None, headers=None):
+        """Create a POST request that can be used to submit data to the
+        endpoint at the given URL."""
+        return self._prepare_request(
+            url, data=data, json_data=json, headers=headers
+        )
+
+    def _prepare_request(self, url, data=None, json_data=None, headers=None):
+        """Prepare a Request instance that can be used to retrieve data from
+        and/or send data to the endpoint at the given URL.
+
+        :param url:
+            An HTTP URL.
+        :param data:
+            Raw binary data to send along in the request body.
+        :param json_data:
+            A Python data structure to be serialized and sent out in JSON
+            format.
+        :param headers:
+            A dictionary of extra headers to send along.
+        :return:
+            An ExpBackoffHTTPClient.Request instance.
+        """
+        if data is not None and json_data is not None:
+            raise ValueError(
+                "Parameters 'data' and 'json_data' are mutually exclusive."
+            )
+
+        if json_data:
+            data = json.dumps(json_data, ensure_ascii=False)
+            if headers is None:
+                headers = {}
+            headers["Content-Type"] = "application/json"
+        if isinstance(data, str):
+            data = data.encode("utf-8")
+
+        return ExpBackoffHTTPClient.Request(
+            urllib.request.Request(url, data=data, headers=headers or {}),
+            self._do_retry,
+            self._base_interval,
+            self._num_tries
+        )
+
+
 class Snap:
     """This class provides methods to retrieve information about a snap and
     download it together with its assertions."""
@@ -115,13 +288,17 @@ class Snap:
             "Snap-CDN": "none",
         })

-        request = urllib.request.Request(snap_download_url, headers=headers)
-
         if not skip_snap_download:
-            with urllib.request.urlopen(request) as response, \
-                    open(snap_filename, "wb+") as fp:
+            http_client = ExpBackoffHTTPClient()
+            response = http_client.get(snap_download_url, headers=headers)
+            with response, open(snap_filename, "wb+") as fp:
                 shutil.copyfileobj(response, fp)

             if os.path.getsize(snap_filename) != snap_byte_size:
                 raise SnapError(
                     "The downloaded snap does not have the expected size."
                 )

         if not download_assertions:
             return

@@ -193,16 +370,20 @@ class Snap:
         elif self._cohort_key:
             data["actions"][0]["cohort-key"] = self._cohort_key

-        request_json = json.dumps(data, ensure_ascii=False).encode("utf-8")
-
         try:
-            response_dict = self._do_snapcraft_request(path, data=request_json)
+            response_dict = self._do_snapcraft_request(path, json_data=data)
         except SnapCraftError as e:
             raise SnapError("failed to get details for '{}': {}"
                             .format(self._name, str(e)))

         snap_data = response_dict["results"][0]

+        if snap_data.get("result") == "error":
+            raise SnapError(
+                "failed to get details for '{}': {}"
+                .format(self._name, snap_data.get("error", {}).get("message"))
+            )
+
         # Have "base" initialized to something meaningful.
         if self.is_core_snap():
             snap_data["snap"]["base"] = ""
@@ -296,40 +477,29 @@ class Snap:
             "Accept": "application/x.ubuntu.assertion",
         }

-        request = urllib.request.Request(url, headers=headers)
-
+        http_client = ExpBackoffHTTPClient()
         try:
-            with urllib.request.urlopen(request) as response:
-                body = response.read()
+            with http_client.get(url, headers=headers) as response:
+                return response.text()
         except urllib.error.HTTPError as e:
             raise SnapAssertionError(str(e))

-        return body.decode("utf-8")
-
-    def _do_snapcraft_request(self, path, data=None):
+    def _do_snapcraft_request(self, path, json_data=None):
         url = self._snapcraft_url + "/" + path

         headers = {
             "Snap-Device-Series": str(self._series),
             "Snap-Device-Architecture": self._arch,
             "Content-Type": "application/json",
         }

-        request = urllib.request.Request(url, data=data, headers=headers)
-
+        http_client = ExpBackoffHTTPClient()
         try:
-            with urllib.request.urlopen(request) as response:
-                body = response.read()
+            response = http_client.post(url, json=json_data, headers=headers)
+            with response:
+                return response.json()
         except urllib.error.HTTPError as e:
             raise SnapCraftError(str(e))

-        try:
-            response_data = json.loads(body, encoding="utf-8")
-        except json.JSONDecodeError as e:
-            raise SnapCraftError("failed to decode response body: " + str(e))
-
-        return response_data
-

 class SnapCli:
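For reference, the back-off in _retry_urlopen above grows geometrically: the sleep time is base_interval * 2**attempt. With the defaults (base_interval=2, num_tries=8), a persistently failing request waits 2, 4, 8, ... seconds between tries. A quick illustration of the schedule (not part of snap-tool):

    base_interval, num_tries = 2, 8
    schedule = [base_interval * 2**attempt for attempt in range(num_tries - 1)]
    print(schedule)       # [2, 4, 8, 16, 32, 64, 128]
    print(sum(schedule))  # 254 seconds of waiting before the final attempt

Only server-side (5xx) and network-level errors are retried; HTTP errors below 500 are re-raised immediately.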