# vi: ts=4 expandtab syntax=sh

# default imagesize = 2252*1024**2 = 2.2G (the current size we ship)
imagesize=${IMAGE_SIZE:-2361393152}
fs_label="${FS_LABEL:-rootfs}"

rootfs_dev_mapper=
loop_device=
loop_raw=
backing_img=

clean_loops() {
    local kpartx_ret
    local kpartx_stdout

    if [ -n "${backing_img}" ]; then
        # sync before removing loop to avoid "Device or resource busy" errors
        sync
        kpartx_ret=""
        kpartx_stdout=$(kpartx -v -d "${backing_img}") || kpartx_ret=$?
        echo "$kpartx_stdout"
        if [ -n "$kpartx_ret" ]; then
            if echo "$kpartx_stdout" | grep -q "loop deleted"; then
                echo "Suppressing kpartx returning error (#860894)"
            else
                exit $kpartx_ret
            fi
        fi
        unset backing_img
    fi

    if [ -z "${rootfs_dev_mapper}" ]; then
        return 0
    fi

    unset loop_device
    unset loop_raw
    unset rootfs_dev_mapper
}

create_empty_disk_image() {
    # Prepare an empty disk image
    dd if=/dev/zero of="$1" bs=1 count=0 seek="${imagesize}"
}

create_manifest() {
    local chroot_root=${1}
    local target_file=${2}
    echo "create_manifest chroot_root: ${chroot_root}"
    dpkg-query --show --admindir="${chroot_root}/var/lib/dpkg" > ${target_file}
    echo "create_manifest call to dpkg-query finished."
    ./config/snap-seed-parse "${chroot_root}" "${target_file}"
    echo "create_manifest call to snap_seed_parse finished."
    if [ "$PROJECT" = ubuntu-cpc ]; then
        echo "create_manifest creating file listing."
        local target_filelist=${2%.manifest}.filelist
        (cd "${chroot_root}" && find -xdev) > "${target_filelist}"
    fi
    echo "create_manifest finished"
}

make_ext4_partition() {
    device="$1"
    label=${fs_label:+-L "${fs_label}"}
    mkfs.ext4 -F -b 4096 -i 8192 -m 0 ${label} -E resize=536870912 "$device"
}

mount_image() {
    trap clean_loops EXIT
    backing_img="$1"
    local rootpart="$2"
    kpartx_mapping="$(kpartx -s -v -a ${backing_img})"

    # Find the loop device
    loop_p1="$(echo -e ${kpartx_mapping} | head -n1 | awk '{print$3}')"
    loop_device="/dev/${loop_p1%p[0-9]*}"
    if [ ! -b ${loop_device} ]; then
        echo "unable to find loop device for ${backing_img}"
        exit 1
    fi

    # Find the rootfs location
    rootfs_dev_mapper="/dev/mapper/${loop_p1%%[0-9]}${rootpart}"
    if [ ! -b "${rootfs_dev_mapper}" ]; then
        echo "${rootfs_dev_mapper} is not a block device";
        exit 1
    fi

    # Add some information to the debug logs
    echo "Mounted disk image ${backing_img} to ${rootfs_dev_mapper}"
    blkid ${rootfs_dev_mapper}

    return 0
}

setup_mountpoint() {
    local mountpoint="$1"

    if [ ! -c /dev/mem ]; then
        mknod -m 660 /dev/mem c 1 1
        chown root:kmem /dev/mem
    fi

    mount --rbind /dev "$mountpoint/dev"
    mount proc-live -t proc "$mountpoint/proc"
    mount sysfs-live -t sysfs "$mountpoint/sys"
    mount securityfs -t securityfs "$mountpoint/sys/kernel/security"
    # Provide more up to date apparmor features, matching target kernel
    mount -o bind /usr/share/livecd-rootfs/live-build/apparmor/generic "$mountpoint/sys/kernel/security/apparmor/features/"
    mount -o bind /usr/share/livecd-rootfs/live-build/seccomp/generic.actions_avail "$mountpoint/proc/sys/kernel/seccomp/actions_avail"
    mount -t tmpfs none "$mountpoint/tmp"
    mount -t tmpfs none "$mountpoint/var/lib/apt/lists"
    mount -t tmpfs none "$mountpoint/var/cache/apt"
    mv "$mountpoint/etc/resolv.conf" resolv.conf.tmp
    cp /etc/resolv.conf "$mountpoint/etc/resolv.conf"
    mv "$mountpoint/etc/nsswitch.conf" nsswitch.conf.tmp
    sed 's/systemd//g' nsswitch.conf.tmp > "$mountpoint/etc/nsswitch.conf"
    chroot "$mountpoint" apt-get update

}

teardown_mountpoint() {
    # Reverse the operations from setup_mountpoint
    local mountpoint="$1"

    # ensure we have exactly one trailing slash, and escape all slashes for awk
    mountpoint_match=$(echo "$mountpoint" | sed -e's,/$,,; s,/,\\/,g;')'\/'
    # sort -r ensures that deeper mountpoints are unmounted first
    for submount in $(awk </proc/self/mounts "\$2 ~ /$mountpoint_match/ \
                      { print \$2 }" | LC_ALL=C sort -r); do
        mount --make-private $submount
        umount $submount
    done
    mv resolv.conf.tmp "$mountpoint/etc/resolv.conf"
    mv nsswitch.conf.tmp "$mountpoint/etc/nsswitch.conf"
}

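# Worked example for the match above (hypothetical path, not part of the build
# flow): for mountpoint "/tmp/build/chroot", mountpoint_match becomes
# "\/tmp\/build\/chroot\/", so the awk expression selects every entry in
# /proc/self/mounts that lives below that directory and unmounts them
# deepest-first.
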
mount_partition() {
    partition="$1"
    mountpoint="$2"

    mount "$partition" "$mountpoint"
    setup_mountpoint "$mountpoint"
}

mount_overlay() {
    lower="$1"
    upper="$2"
    work="$2/../work"
    path="$3"

    mkdir -p "$work"
    mount -t overlay overlay \
        -olowerdir="$lower",upperdir="$upper",workdir="$work" \
        "$path"
}

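# Illustrative call (directory names are placeholders, not taken from a real
# build): stack an upper layer over a lower one and expose the merged tree:
#   mount_overlay "overlay.minimal/" "overlay.minimal.live/" "chroot/"
# The workdir is created next to the upper directory, as overlayfs requires.
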
get_lowerdirs_for_pass () {
    # Returns the colon-separated overlay lowerdir chain for a pass
    # $1 Name of the pass
    local curpass="$1"
    local lowerlayers=""

    while :; do
        curpass=$(get_parent_pass $curpass)
        # We climbed up the tree to the root layer, we are done
        [ -z "$curpass" ] && break

        lowerlayers="${lowerlayers}:overlay.${curpass}"
    done
    echo "${lowerlayers#:}"
}

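# Illustrative example (hypothetical pass names): for the pass
# "minimal.standard.live" the parent passes are "minimal.standard" and
# "minimal", so this prints "overlay.minimal.standard:overlay.minimal".
# The root pass has no parent and therefore yields an empty string.
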
mount_disk_image() {
    local disk_image=${1}
    local mountpoint=${2}
    mount_image ${disk_image} 1
    mount_partition "${rootfs_dev_mapper}" $mountpoint

    local uefi_dev="/dev/mapper${loop_device///dev/}p15"
    if [ -b ${uefi_dev} -a -e $mountpoint/boot/efi ]; then
        mount "${uefi_dev}" $mountpoint/boot/efi
    fi

    # This is needed to allow for certain operations, such as updating grub
    # and installing software, without starting services in the chroot
    cat > $mountpoint/usr/sbin/policy-rc.d << EOF
#!/bin/sh
# ${IMAGE_STR}
echo "All runlevel operations denied by policy" >&2
exit 101
EOF
    chmod 0755 $mountpoint/usr/sbin/policy-rc.d

}

umount_partition() {
    local mountpoint=${1}
    teardown_mountpoint $mountpoint
    mount --make-private $mountpoint
    umount $mountpoint
    udevadm settle
    # workaround for LP: 1960537
    sleep 30

    if [ -n "${rootfs_dev_mapper}" -a -b "${rootfs_dev_mapper}" ]; then
        # buildds don't have /etc/mtab symlinked
        # /etc/mtab is needed in order to zerofree space for ext4 filesystems
        [ -e /etc/mtab ] || ln -s /proc/mounts /etc/mtab

        # both of these are likely overkill, but it does result in a slightly
        # smaller ext4 filesystem
        e2fsck -y -E discard ${rootfs_dev_mapper}
        zerofree ${rootfs_dev_mapper}
    fi
}

umount_disk_image() {
    mountpoint="$1"

    local uefi_dev="/dev/mapper${loop_device///dev/}p15"
    if [ -e "$mountpoint/boot/efi" -a -b "$uefi_dev" ]; then
        # zero fill free space in UEFI partition
        cat < /dev/zero > "$mountpoint/boot/efi/bloat_file" 2> /dev/null || true
        rm "$mountpoint/boot/efi/bloat_file"
        mount --make-private "$mountpoint/boot/efi"
        umount --detach-loop "$mountpoint/boot/efi"
    fi

    if [ -e $mountpoint/usr/sbin/policy-rc.d ]; then
        rm $mountpoint/usr/sbin/policy-rc.d
    fi
    umount_partition $mountpoint
    clean_loops
}

modify_vmdk_header() {
    # Modify the VMDK headers so that both VirtualBox _and_ VMware can
    # read the vmdk and import it.

    vmdk_name="${1}"
    descriptor=$(mktemp)
    newdescriptor=$(mktemp)

    # Extract the vmdk header for manipulation
    dd if="${vmdk_name}" of="${descriptor}" bs=1 skip=512 count=1024
    echo "Cat'ing original vmdk disk descriptor to console for debugging."
    # cat header so we are aware of the original descriptor for debugging
    cat $descriptor

    # trim null bytes to treat as standard text file
    tr -d '\000' < $descriptor > $newdescriptor

    # Replace the vmdk-stream-converter comment with "# Disk DescriptorFile",
    # which VirtualBox expects, and remove the remaining vmdk-stream-converter
    # comments, which cause VirtualBox and others to fail VMDK validation
    sed -i -e 's|# Description file.*|# Disk DescriptorFile|' \
        -e '/# Believe this is random*/d' \
        -e '/# Indicates no parent/d' \
        -e '/# The Disk Data Base/d' \
        ${newdescriptor}

    # add newline to newdescriptor
    echo "" >> $newdescriptor

    # add required tools version
    echo -n 'ddb.toolsVersion = "2147483647"' >> $newdescriptor

    echo "Cat'ing modified descriptor for debugging."
    cat $newdescriptor

    # diff original descriptor and new descriptor for debugging
    # diff exits 1 if there is a difference. pipefail is not set, so piping
    # diff to cat prints the diff and swallows the exit 1
    echo "Printing diff of original and new descriptors."
    diff --text $descriptor $newdescriptor | cat

    # The header must be 1024 bytes or less before padding
    if ! expr $(stat --format=%s ${newdescriptor}) \< 1025 > /dev/null 2>&1; then
        echo "descriptor is too large, VMDK will be invalid!";
        exit 1
    fi

    # pad newdescriptor back out to exactly 1024 bytes
    truncate --no-create --size=1K $newdescriptor

    # Overwrite the vmdk header with our new, modified one
    dd conv=notrunc,nocreat \
        if="${newdescriptor}" of="${vmdk_name}" \
        bs=1 seek=512 count=1024

    rm ${descriptor} ${newdescriptor}
}

create_vmdk() {
    # There is no real good way to create a _compressed_ VMDK using open source
    # tooling that works across multiple VMDK-capable platforms. This function
    # uses vmdk-stream-converter and then calls modify_vmdk_header to produce a
    # compatible VMDK.

    src="$1"
    destination="$2"
    size="${3:-10240}"

    streamconverter="VMDKstream"
    scratch_d=$(mktemp -d)
    cp ${src} ${scratch_d}/resize.img

    truncate --size=${size}M ${scratch_d}/resize.img
    python3 -m ${streamconverter} ${scratch_d}/resize.img ${destination}
    modify_vmdk_header ${destination}

    qemu-img info ${destination}
    rm -rf ${scratch_d}
}

create_derivative() {
    # arg1 is the disk type
    # arg2 is the new name
    unset derivative_img
    case ${1} in
        uefi) disk_image="binary/boot/disk-uefi.ext4";
              dname="${disk_image//-uefi/-$2-uefi}";;
        *) disk_image="binary/boot/disk.ext4";
           dname="${disk_image//.ext4/-$2.ext4}";;
    esac

    if [ ! -e ${disk_image} ]; then
        echo "Did not find ${disk_image}!"; exit 1;
    fi

    cp ${disk_image} ${dname}
    export derivative_img=${dname}
}

convert_to_qcow2() {
    src="$1"
    destination="$2"
    qemu-img convert -c -O qcow2 -o compat=0.10 "$src" "$destination"
    qemu-img info "$destination"
}

replace_grub_root_with_label() {
    # When update-grub is run, it will detect the disks in the build system.
    # Instead, we want grub to use the right labelled disk
    CHROOT_ROOT="$1"

    # If boot by partuuid has been requested, don't override.
    if [ -f $CHROOT_ROOT/etc/default/grub.d/40-force-partuuid.cfg ] && \
        grep -q ^GRUB_FORCE_PARTUUID= $CHROOT_ROOT/etc/default/grub.d/40-force-partuuid.cfg
    then
        return 0
    fi
    sed -i -e "s,root=[^ ]*,root=LABEL=${fs_label}," \
        "$CHROOT_ROOT/boot/grub/grub.cfg"
}

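# Illustrative effect of the sed above (kernel command line is hypothetical):
# a grub.cfg entry such as
#   linux /boot/vmlinuz root=/dev/mapper/loop3p1 ro
# becomes
#   linux /boot/vmlinuz root=LABEL=rootfs ro
# when fs_label still has its default value of "rootfs".
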
# When running update-grub in a chroot on a build host, we don't want it to
# probe for disks or probe for other installed OSes. Extract common
# diversion wrappers, so this isn't reinvented differently for each image.
divert_grub() {
    CHROOT_ROOT="$1"

    # Don't divert all of grub-probe here; just the scripts we don't want
    # running. Otherwise, you may be missing part-uuids for the search
    # command, for example. ~cyphermox

    chroot "$CHROOT_ROOT" dpkg-divert --local \
        --divert /etc/grub.d/30_os-prober.dpkg-divert \
        --rename /etc/grub.d/30_os-prober

    # Divert systemd-detect-virt; /etc/kernel/postinst.d/zz-update-grub
    # no-ops if we are in a container, and the launchpad farm runs builds
    # in lxd. We therefore pretend that we're never in a container (by
    # exiting 1).
    chroot "$CHROOT_ROOT" dpkg-divert --local \
        --rename /usr/bin/systemd-detect-virt
    echo "exit 1" > "$CHROOT_ROOT"/usr/bin/systemd-detect-virt
    chmod +x "$CHROOT_ROOT"/usr/bin/systemd-detect-virt
}

undivert_grub() {
    CHROOT_ROOT="$1"

    chroot "$CHROOT_ROOT" dpkg-divert --remove --local \
        --divert /etc/grub.d/30_os-prober.dpkg-divert \
        --rename /etc/grub.d/30_os-prober

    if grep -q "^exit 1$" "$CHROOT_ROOT"/usr/bin/systemd-detect-virt; then
        rm "$CHROOT_ROOT"/usr/bin/systemd-detect-virt
    fi
    chroot "$CHROOT_ROOT" dpkg-divert --remove --local \
        --rename /usr/bin/systemd-detect-virt
}

recreate_initramfs() {
    # Regenerate the initramfs by running update-initramfs in the
    # chroot at $1 and copying the generated initramfs
    # around. Beware that this was written for a single use case
    # (live-server) and may not work in all cases without
    # tweaking...
    # config/common must be sourced before calling this function.
    CHROOT="$1"
    # Start by cargo culting bits of lb_chroot_hacks:
    if [ -n "$LB_INITRAMFS_COMPRESSION" ]; then
        echo "COMPRESS=$LB_INITRAMFS_COMPRESSION" > "$CHROOT"/etc/initramfs-tools/conf.d/livecd-rootfs.conf
    fi
    chroot "$CHROOT" sh -c "${UPDATE_INITRAMFS_OPTIONS:-} update-initramfs -k all -t -u"
    rm -rf "$CHROOT"/etc/initramfs-tools/conf.d/livecd-rootfs.conf
    # Then bits of lb_binary_linux-image:
    case "${LB_INITRAMFS}" in
        casper)
            DESTDIR="binary/casper"
            ;;

        live-boot)
            DESTDIR="binary/live"
            ;;

        *)
            DESTDIR="binary/boot"
            ;;
    esac
    mv "$CHROOT"/boot/initrd.img-* $DESTDIR
}

release_ver() {
    # Return the release version number
    distro-info --series="$LB_DISTRIBUTION" -r | awk '{ print $1 }'
}

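# Illustrative output (series name is hypothetical): for an LTS series,
# "distro-info --series=noble -r" prints "24.04 LTS", so the awk filter above
# reduces it to just "24.04".
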
# cribbed from cdimage, perhaps this should be a small helper script in germinate?
add_inheritance () {
    case " $inherit " in
        *" $1 "*)
            ;;
        *)
            inherit="${inherit:+$inherit }$1"
            ;;
    esac
}

expand_inheritance () {
    for seed in $(grep "^$1:" config/germinate-output/structure | cut -d: -f2); do
        expand_inheritance "$seed"
    done
    add_inheritance "$1"
}

inheritance () {
    inherit=
    expand_inheritance "$1"
    echo "$inherit"
}

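# Illustrative example (seed names and structure contents are hypothetical):
# given a config/germinate-output/structure containing
#   minimal:
#   standard: minimal
#   server: standard
# "inheritance server" expands the parents depth-first and prints
# "minimal standard server", with duplicates suppressed by add_inheritance.
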
_snap_post_process() {
    # Look for the 'core' snap. If it is not present, assume that the image
    # contains only snaps with bases >= core18. In that case snapd is
    # preseeded. However, when 'core' is being installed and snapd has not
    # been installed by a call to 'snap_preseed' (see below) then it is
    # removed again.
    local CHROOT_ROOT=$1
    local SNAP_NAME=$2

    local seed_dir="$CHROOT_ROOT/var/lib/snapd/seed"
    local snaps_dir="$seed_dir/snaps"
    local seed_yaml="$seed_dir/seed.yaml"
    local assertions_dir="$seed_dir/assertions"
    local snapd_install_stamp="$seed_dir/.snapd-explicit-install-stamp"

    case $SNAP_NAME in
        core[0-9]*)
            # If the 'core' snap is not present, assume we are coreXX-only and
            # install the snapd snap.
            if [ ! -f ${snaps_dir}/core_[0-9]*.snap ]; then
                _snap_preseed $CHROOT_ROOT snapd stable
            fi
            ;;
        core)
            # If the snapd snap has been seeded, but not marked as explicitly
            # installed (see snap_preseed below), then remove it.
            if [ -f ${snaps_dir}/snapd_[0-9]*.snap ] && \
               [ ! -f "$snapd_install_stamp" ]
            then
                # Remove snap, assertions and entry in seed.yaml
                rm -f ${snaps_dir}/snapd_[0-9]*.snap
                rm -f ${assertions_dir}/snapd_[0-9]*.assert
                sed -i -e'N;/name: snapd/,+2d' $seed_yaml
            fi
            ;;
        *)
            # ignore
            ;;
    esac
}

_snap_preseed() {
    # Download the snap/assertion and add to the preseed
    local CHROOT_ROOT=$1
    local SNAP=$2
    local SNAP_NAME=${SNAP%/*}
    local CHANNEL=${3:?Snap channel must be specified}

    local seed_dir="$CHROOT_ROOT/var/lib/snapd/seed"
    local snaps_dir="$seed_dir/snaps"
    local seed_yaml="$seed_dir/seed.yaml"
    local assertions_dir="$seed_dir/assertions"

    # Download the snap & assertion
    local snap_download_failed=0

    # Preseed a snap only once
    if [ -f ${snaps_dir}/${SNAP_NAME}_[0-9]*.snap ]; then
        return
    fi

    sh -c "
        set -x;
        cd \"$CHROOT_ROOT/var/lib/snapd/seed\";
        SNAPPY_STORE_NO_CDN=1 snap download \
            --cohort="${COHORT_KEY:-}" \
            --channel=\"$CHANNEL\" \"$SNAP_NAME\"" || snap_download_failed=1
    if [ $snap_download_failed = 1 ] ; then
        echo "If the channel ($CHANNEL) includes '*/ubuntu-##.##' track per "
        echo "Ubuntu policy (ex. stable/ubuntu-18.04) the publisher will need "
        echo "to temporarily create the channel/track to allow fallback during"
        echo "download (ex. stable/ubuntu-18.04 falls back to stable if the"
        echo "prior had been created in the past)."
        exit 1
    fi

    mv -v $seed_dir/*.assert $assertions_dir
    mv -v $seed_dir/*.snap $snaps_dir

    # Pre-seed snap's base
    case $SNAP_NAME in
        snapd)
            # snapd is self-contained, ignore base
            ;;
        core|core[0-9][0-9])
            # core and core## are self-contained, ignore base
            ;;
        *)
            # Determine which core snap is needed
            local snap_info

            # snap info doesn't have --channel, so it must run against the downloaded snap
            snap_info=$(snap info --verbose ${snaps_dir}/${SNAP_NAME}_[0-9]*.snap)

            if [ $? -ne 0 ]; then
                echo "Failed to retrieve base of $SNAP_NAME!"
                exit 1
            fi

            local snap_type=$(echo "$snap_info" | awk '/^type:/ { print $2 }')

            if [ "$snap_type" != base ]; then
                local core_snap=$(echo "$snap_info" | awk '/^base:/ {print $2}')

                # If snap info does not list a base the default is 'core'
                # which is now an error to use.
                if [ -z "$core_snap" ]; then
                    if [ -z "$ALLOW_CORE_SNAP" ]; then
                        echo "Legacy snap with no base declaration found, refusing to install 'core' snap"
                        exit 1
                    else
                        echo "Legacy snap with no base declaration found, but \$ALLOW_CORE_SNAP is set; continuing (but FIX YOUR SNAPS!)"
                        core_snap=${core_snap:-core}
                    fi
                fi

                _snap_preseed $CHROOT_ROOT $core_snap stable
            fi
            ;;
    esac

    # Add the snap to the seed.yaml
    ! [ -e $seed_yaml ] && echo "snaps:" > $seed_yaml
    cat <<EOF >> $seed_yaml
 -
   name: ${SNAP_NAME}
   channel: ${CHANNEL}
EOF

    case ${SNAP} in */classic) echo "   classic: true" >> $seed_yaml;; esac

    echo -n "   file: " >> $seed_yaml
    (cd $snaps_dir; ls -1 ${SNAP_NAME}_*.snap) >> $seed_yaml

    _snap_post_process $CHROOT_ROOT $SNAP_NAME
}

snap_prepare_assertions() {
    # Configure basic snapd assertions
    local CHROOT_ROOT=$1
    # A colon-separated string of brand:model to be used for the image's model
    # assertion
    local CUSTOM_BRAND_MODEL=$2

    local seed_dir="$CHROOT_ROOT/var/lib/snapd/seed"
    local snaps_dir="$seed_dir/snaps"
    local assertions_dir="$seed_dir/assertions"
    local model_assertion="$assertions_dir/model"
    local account_key_assertion="$assertions_dir/account-key"
    local account_assertion="$assertions_dir/account"

    local brand="$(echo $CUSTOM_BRAND_MODEL | cut -d: -f 1)"
    local model="$(echo $CUSTOM_BRAND_MODEL | cut -d: -f 2)"

    # Get existing model and brand assertions to compare with new parameters.
    # For customized images, snap_prepare_assertions is called several times
    # with a different brand or model. In this case we want to overwrite the
    # existing brand and model.
    local override_model_branch="false"
    if [ -e "$model_assertion" ] ; then
        existing_model=$(awk '/^model: / {print $2}' $model_assertion)
        existing_brand=$(awk '/^brand-id: / {print $2}' $model_assertion)

        if [ "$existing_model" != "$model" ] || [ "$existing_brand" != "$brand" ]; then
            override_model_branch="true"
        fi
    fi

    # Exit if assertions dir exists and we didn't change model or brand
    if [ -d "$assertions_dir" ] && [ "$override_model_branch" = "false" ]; then
        return
    fi

    mkdir -p "$assertions_dir"
    mkdir -p "$snaps_dir"

    # Clear the assertions if they already exist
    if [ -e "$model_assertion" ] ; then
        echo "snap_prepare_assertions: replacing $existing_brand:$existing_model with $brand:$model"
        rm "$model_assertion"
        rm "$account_key_assertion"
        rm "$account_assertion"
    fi

    if ! [ -e "$model_assertion" ] ; then
        snap known --remote model series=16 \
            model=$model brand-id=$brand \
            > "$model_assertion"
    fi

    if ! [ -e "$account_key_assertion" ] ; then
        local account_key=$(sed -n -e's/sign-key-sha3-384: //p' \
            < "$model_assertion")
        snap known --remote account-key \
            public-key-sha3-384="$account_key" \
            > "$account_key_assertion"
    fi

    if ! [ -e "$account_assertion" ] ; then
        local account=$(sed -n -e's/account-id: //p' < "$account_key_assertion")
        snap known --remote account account-id=$account \
            > "$account_assertion"
    fi
}

snap_prepare() {
    # Configure basic snapd assertions for the image
    local CHROOT_ROOT=$1
    # Optional. If set, should be a colon-separated string of brand:model to be
    # used for the image's model assertion
    local CUSTOM_BRAND_MODEL=${2:-generic:generic-classic}

    snap_prepare_assertions "$CHROOT_ROOT" "$CUSTOM_BRAND_MODEL"
}

snap_preseed() {
    # Preseed a snap in the image (snap_prepare must be called once prior)
    local CHROOT_ROOT=$1
    # $2 can be in the form of snap_name/classic=track/risk/branch
    local SNAP=$2
    # strip CHANNEL specification
    SNAP=${SNAP%=*}
    # strip /classic confinement
    local SNAP_NAME=${SNAP%/*}
    # Seed from the specified channel (e.g. core18 latest/stable)
    # Or the channel encoded in the snap name (e.g. lxd=4.0/stable/ubuntu-20.04)
    # Or the Ubuntu policy default channel latest/stable/ubuntu-$(release_ver)
    local CHANNEL=${3:-}
    if [ -z "$CHANNEL" ]; then
        case $2 in
            *=*)
                CHANNEL=${2#*=}
                ;;
            *)
                CHANNEL="stable/ubuntu-$(release_ver)"
                ;;
        esac
    fi

    # At this point:
    # SNAP_NAME is just the snap name
    # SNAP is either $SNAP_NAME or $SNAP_NAME/classic for classic confinement
    # CHANNEL is the channel

    if [ ! -e "$CHROOT_ROOT/var/lib/snapd/seed/assertions/model" ]; then
        echo "ERROR: Snap model assertion not present, snap_prepare must be called"
        exit 1
    fi

    _snap_preseed $CHROOT_ROOT $SNAP $CHANNEL

    # Mark this image as having snapd installed explicitly.
    case $SNAP_NAME in
        snapd)
            touch "$CHROOT_ROOT/var/lib/snapd/seed/.snapd-explicit-install-stamp"
            ;;
    esac

    # Do basic validation of the generated snapd seed.yaml. Doing it here
    # means we catch all the places(tm) that snaps are added, but the
    # downside is that each time a snap is added the seed must be valid,
    # i.e. snaps with bases need to add their bases first etc.
    #
    # Skip validation by setting SNAP_NO_VALIDATE_SEED=1.
    if [ -z "${SNAP_NO_VALIDATE_SEED:-}" ]; then
        snap_validate_seed "${CHROOT_ROOT}"
    fi
}

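# Illustrative calls (snap names and channels are placeholders):
#   snap_preseed chroot/ core18                        # default channel "stable/ubuntu-$(release_ver)"
#   snap_preseed chroot/ lxd=5.0/stable/ubuntu-22.04   # channel taken from the "=..." suffix
#   snap_preseed chroot/ some-classic-snap/classic     # seeds with "classic: true" in seed.yaml
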
snap_validate_seed() {
    local CHROOT_ROOT=$1

    if [ -e "${CHROOT_ROOT}/var/lib/snapd/seed/seed.yaml" ]; then
        snap debug validate-seed "${CHROOT_ROOT}/var/lib/snapd/seed/seed.yaml"
        /usr/lib/snapd/snap-preseed --reset $(realpath "${CHROOT_ROOT}")
        /usr/lib/snapd/snap-preseed $(realpath "${CHROOT_ROOT}")
        chroot "${CHROOT_ROOT}" apparmor_parser --skip-read-cache --write-cache --skip-kernel-load --verbose -j `nproc` /etc/apparmor.d
    fi
}

snap_from_seed() {
    local base_seed=$1
    local out=$2
    local all_snaps
    local seeds_expanded

    seeds_expanded=$(inheritance ${base_seed})
    for seed in ${seeds_expanded}; do
        echo "snap: considering ${seed}"
        file=config/germinate-output/${seed}.snaps
        [ -e "${file}" ] || continue
        # extract the first column (snap package name) from germinate's output
        # translate the human-readable "foo (classic)" into a
        # more machine readable "foo/classic"
        seed_snaps=$(sed -rn '1,/-----/d;/-----/,$d; s/(.*) \|.*/\1/; s, \(classic\),/classic,; p' "${file}")
        for snap in ${seed_snaps}; do
            echo "snap: found ${snap}"
            all_snaps="${all_snaps:+${all_snaps} }${snap}"
        done
    done
    if [ -n "${all_snaps}" ]; then
        echo "${all_snaps}" > $out
    fi
}

seed_from_task ()
{
    # Retrieve the name of the seed from a task name
    local task=$1
    local seed
    local seedfile
    local seedfiles

    seedfile="$(grep -lE "^Task-Key: +${task}\$" config/germinate-output/*seedtext|head -1)"
    if [ -n "$seedfile" ]; then
        basename $seedfile .seedtext
        return
    fi

    seedfiles="$(grep -lE "^Task-Per-Derivative: *1\$" config/germinate-output/*seedtext)"
    if [ -n "$seedfiles" ]; then
        for seed in $(echo $seedfiles | xargs basename -s .seedtext); do
            if [ ${PROJECT}-${seed} = $task ]; then
                echo ${seed}
                return
            fi
        done
    fi
}

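# Illustrative lookup (file contents are hypothetical): if
# config/germinate-output/desktop-minimal.seedtext contains the header
# "Task-Key: ubuntu-desktop-minimal", then
#   seed_from_task ubuntu-desktop-minimal
# prints "desktop-minimal". Failing that, seeds marked
# "Task-Per-Derivative: 1" match tasks named "${PROJECT}-<seed>".
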
list_packages_from_seed () {
    # List all packages for a given seed, including its seed dependencies
    # $1: Name of the seed to expand to a package list

    local all_seeds="$(inheritance $1)"

    for seed in $all_seeds; do
        head -n-2 config/germinate-output/${seed}.seed|tail -n+3|awk '{print $1}'
    done|sort -u
}

subtract_package_lists() {
    # Subtract a package list from another
    #
    # $1 source package list
    # $2 Package list to subtract from source package list
    local list1=$(mktemp)
    local list2=$(mktemp)

    list_packages_from_seed $1 > "$list1"
    list_packages_from_seed $2 > "$list2"
    comm -23 "$list1" "$list2"

    rm "$list1"
    rm "$list2"
}

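# Illustrative call (seed names are placeholders): print every package that is
# in the expanded "desktop" seed but not in the expanded "minimal" seed:
#   subtract_package_lists desktop minimal
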
clean_debian_chroot() {
    # remove crufty files that shouldn't be left in an image
    rm -f chroot/var/cache/debconf/*-old chroot/var/lib/dpkg/*-old
    Chroot chroot apt clean
    # For the docker images we remove even more stuff.
    if [ "${PROJECT}:${SUBPROJECT:-}" = "ubuntu-base:minimized" ] || [ "${PROJECT}:${SUBPROJECT:-}" = "ubuntu-oci:minimized" ]; then
        # Remove apt lists (that are currently removed downstream
        # anyway)
        rm -rf chroot/var/lib/apt/lists/*
        # Having device nodes in the docker image can cause problems
        # (https://github.com/tianon/docker-brew-ubuntu-core/issues/62)
        # so remove them. We only do this for docker out of an
        # abundance of caution.
        rm -rf chroot/dev/*
    fi
}

configure_universe() {
    if [ -f config/universe-enabled ]; then
        # This is cargo-culted almost verbatim (with some syntax changes for
        # preinstalled being slightly different in what it doesn't ask) from
        # debian-installer's apt-setup:

        cat > chroot/etc/apt/sources.list << EOF
# See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to
# newer versions of the distribution.
deb $LB_PARENT_MIRROR_BINARY $LB_DISTRIBUTION main restricted
# deb-src $LB_PARENT_MIRROR_BINARY $LB_DISTRIBUTION main restricted

## Major bug fix updates produced after the final release of the
## distribution.
deb $LB_PARENT_MIRROR_BINARY $LB_DISTRIBUTION-updates main restricted
# deb-src $LB_PARENT_MIRROR_BINARY $LB_DISTRIBUTION-updates main restricted

## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu
## team. Also, please note that software in universe WILL NOT receive any
## review or updates from the Ubuntu security team.
deb $LB_PARENT_MIRROR_BINARY $LB_DISTRIBUTION universe
# deb-src $LB_PARENT_MIRROR_BINARY $LB_DISTRIBUTION universe
deb $LB_PARENT_MIRROR_BINARY $LB_DISTRIBUTION-updates universe
# deb-src $LB_PARENT_MIRROR_BINARY $LB_DISTRIBUTION-updates universe

## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu
## team, and may not be under a free licence. Please satisfy yourself as to
## your rights to use the software. Also, please note that software in
## multiverse WILL NOT receive any review or updates from the Ubuntu
## security team.
deb $LB_PARENT_MIRROR_BINARY $LB_DISTRIBUTION multiverse
# deb-src $LB_PARENT_MIRROR_BINARY $LB_DISTRIBUTION multiverse
deb $LB_PARENT_MIRROR_BINARY $LB_DISTRIBUTION-updates multiverse
# deb-src $LB_PARENT_MIRROR_BINARY $LB_DISTRIBUTION-updates multiverse

## N.B. software from this repository may not have been tested as
## extensively as that contained in the main release, although it includes
## newer versions of some applications which may provide useful features.
## Also, please note that software in backports WILL NOT receive any review
## or updates from the Ubuntu security team.
deb $LB_PARENT_MIRROR_BINARY $LB_DISTRIBUTION-backports main restricted universe multiverse
# deb-src $LB_PARENT_MIRROR_BINARY $LB_DISTRIBUTION-backports main restricted universe multiverse

## Uncomment the following two lines to add software from Canonical's
## 'partner' repository.
## This software is not part of Ubuntu, but is offered by Canonical and the
## respective vendors as a service to Ubuntu users.
# deb http://archive.canonical.com/ubuntu $LB_DISTRIBUTION partner
# deb-src http://archive.canonical.com/ubuntu $LB_DISTRIBUTION partner

deb $LB_PARENT_MIRROR_BINARY_SECURITY $LB_DISTRIBUTION-security main restricted
# deb-src $LB_PARENT_MIRROR_BINARY_SECURITY $LB_DISTRIBUTION-security main restricted
deb $LB_PARENT_MIRROR_BINARY_SECURITY $LB_DISTRIBUTION-security universe
# deb-src $LB_PARENT_MIRROR_BINARY_SECURITY $LB_DISTRIBUTION-security universe
deb $LB_PARENT_MIRROR_BINARY_SECURITY $LB_DISTRIBUTION-security multiverse
# deb-src $LB_PARENT_MIRROR_BINARY_SECURITY $LB_DISTRIBUTION-security multiverse
EOF

    fi
}

configure_oci() {
    # configure a chroot to be an OCI/docker container
    # these changes are taken from the current Dockerfile modifications done
    # at https://github.com/tianon/docker-brew-ubuntu-core/blob/master/update.sh

    local chroot=$1
    local serial=$2

    if [ ! -d "${chroot}" ]; then
        echo "The chroot does not exist"
        exit 1
    fi

    echo "==== Configuring OCI ===="

    # https://github.com/docker/docker/blob/9a9fc01af8fb5d98b8eec0740716226fadb3735c/contrib/mkimage/debootstrap#L40-L48
    echo '#!/bin/sh' > ${chroot}/usr/sbin/policy-rc.d
    echo 'exit 101' >> ${chroot}/usr/sbin/policy-rc.d
    Chroot ${chroot} "chmod +x /usr/sbin/policy-rc.d"

    # Inject a build stamp into the image
    mkdir -p ${chroot}/etc/cloud
    cat > ${chroot}/etc/cloud/build.info << EOF
serial: $serial
EOF

    # https://github.com/docker/docker/blob/9a9fc01af8fb5d98b8eec0740716226fadb3735c/contrib/mkimage/debootstrap#L54-L56
    Chroot ${chroot} "dpkg-divert --local --rename --add /sbin/initctl"
    cp -a ${chroot}/usr/sbin/policy-rc.d ${chroot}/sbin/initctl
    sed -i 's/^exit.*/exit 0/' ${chroot}/sbin/initctl

    # https://github.com/docker/docker/blob/9a9fc01af8fb5d98b8eec0740716226fadb3735c/contrib/mkimage/debootstrap#L71-L78
    echo 'force-unsafe-io' > ${chroot}/etc/dpkg/dpkg.cfg.d/docker-apt-speedup

    # https://github.com/docker/docker/blob/9a9fc01af8fb5d98b8eec0740716226fadb3735c/contrib/mkimage/debootstrap#L85-L105
    echo 'DPkg::Post-Invoke { "rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true"; };' > ${chroot}/etc/apt/apt.conf.d/docker-clean

    echo 'APT::Update::Post-Invoke { "rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true"; };' >> ${chroot}/etc/apt/apt.conf.d/docker-clean

    echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";' >> ${chroot}/etc/apt/apt.conf.d/docker-clean

    # https://github.com/docker/docker/blob/9a9fc01af8fb5d98b8eec0740716226fadb3735c/contrib/mkimage/debootstrap#L109-L115
    echo 'Acquire::Languages "none";' > ${chroot}/etc/apt/apt.conf.d/docker-no-languages

    # https://github.com/docker/docker/blob/9a9fc01af8fb5d98b8eec0740716226fadb3735c/contrib/mkimage/debootstrap#L118-L130
    echo 'Acquire::GzipIndexes "true"; Acquire::CompressionTypes::Order:: "gz";' > ${chroot}/etc/apt/apt.conf.d/docker-gzip-indexes

    # https://github.com/docker/docker/blob/9a9fc01af8fb5d98b8eec0740716226fadb3735c/contrib/mkimage/debootstrap#L134-L151
    echo 'Apt::AutoRemove::SuggestsImportant "false";' > ${chroot}/etc/apt/apt.conf.d/docker-autoremove-suggests

    # delete all the apt list files since they're big and get stale quickly
    rm -rf ${chroot}/var/lib/apt/lists/*

    # verify that the APT lists files do not exist
    Chroot chroot "apt-get indextargets" > indextargets.out
    [ ! -s indextargets.out ]
    rm indextargets.out
    # (see https://bugs.launchpad.net/cloud-images/+bug/1699913)

    # make systemd-detect-virt return "docker"
    # See: https://github.com/systemd/systemd/blob/aa0c34279ee40bce2f9681b496922dedbadfca19/src/basic/virt.c#L434
    mkdir -p ${chroot}/run/systemd
    echo 'docker' > ${chroot}/run/systemd/container

    rm -rf ${chroot}/var/cache/apt/*.bin
    echo "==== Configuring OCI done ===="
}

configure_network_manager() {
    # If the image pre-installs network-manager, let it manage all devices by
    # default. Installing NM on an existing system only manages wifi and wwan via
    # /usr/lib/NetworkManager/conf.d/10-globally-managed-devices.conf. When setting
    # the global backend to NM, netplan overrides that file.
    if [ -e chroot/usr/sbin/NetworkManager -a ! -f chroot/etc/netplan/01-network-manager-all.yaml ]; then
        echo "===== Enabling all devices in NetworkManager ===="
        mkdir -p chroot/etc/netplan
        cat <<EOF > chroot/etc/netplan/01-network-manager-all.yaml
# Let NetworkManager manage all devices on this system
network:
  version: 2
  renderer: NetworkManager
EOF
    else
        echo "==== NetworkManager not installed ===="
    fi
}

get_parent_pass () {
    # Return the parent pass
    # $1 name of the pass
    # Prints the parent pass name, or '' if the pass is the root pass.
    local pass="$1"

    parent_pass=${pass%.*}
    if [ "${parent_pass}" = "${pass}" ]; then
        return
    fi
    echo ${pass%.*}
}

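# Illustrative example (hypothetical pass names):
#   get_parent_pass "minimal.standard.live"  -> "minimal.standard"
#   get_parent_pass "minimal"                -> "" (root pass, nothing printed)
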
setenv_file () {
    # Exposes an environment variable in a chroot
    # $1 Name of the variable
    # $2 Value of the variable
    # $3 Path to the environment file of the chroot
    local var="$1"
    local val="$2"
    local file="$3"

    grep -v "^$var" $file > $file.new || true
    echo "${var}=${val}" >> $file.new
    mv $file.new $file
}

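# Illustrative call (variable name and path are placeholders): append or
# replace FOO in the chroot's environment file:
#   setenv_file FOO bar chroot/etc/environment
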
divert_update_initramfs () {
    Chroot chroot "dpkg-divert --quiet --add \
        --divert /usr/sbin/update-initramfs.REAL --rename \
        /usr/sbin/update-initramfs"
    cat > chroot/usr/sbin/update-initramfs <<'EOF'
#! /bin/sh
if [ $# != 1 ] || [ "$1" != -u ]; then
    exec update-initramfs.REAL "$@"
fi
echo "update-initramfs: diverted by livecd-rootfs (will be called later)" >&2

exit 0
EOF
    chmod +x chroot/usr/sbin/update-initramfs
}

undivert_update_initramfs () {
    rm -f chroot/usr/sbin/update-initramfs
    Chroot chroot "dpkg-divert --quiet --remove --rename \
        /usr/sbin/update-initramfs"
}

is_root_layer () {
    local pass=$1
    if [ -z "$(get_parent_pass $pass)" ]; then
        return 0
    fi
    return 1
}

is_live_layer () {
    local pass=$1
    for livepass in $LIVE_PASSES; do
        [ "$livepass" != "$pass" ] && continue
        return 0
    done
    return 1
}

setup_cidata() {
    local cidata_dev=$1
    local mountpoint=$(mktemp -d)
    mkfs.vfat -F 32 -n CIDATA ${cidata_dev}
    mount ${cidata_dev} ${mountpoint}
    cp /usr/share/livecd-rootfs/live-build/cidata/* ${mountpoint}
    cat >>${mountpoint}/meta-data.sample <<END
#instance-id: iid-$(openssl rand -hex 8)

END
    umount ${mountpoint}
}

replace_kernel () {
    mountpoint=$1
    new_kernel=$2

    # Install custom kernel (N.B. the trailing + retains linux-base during this
    # operation)
    env DEBIAN_FRONTEND=noninteractive chroot "${mountpoint}" apt-get \
        remove --purge --assume-yes '^linux-.*' 'linux-base+'
    env DEBIAN_FRONTEND=noninteractive chroot "${mountpoint}" apt-get \
        update --assume-yes
    env DEBIAN_FRONTEND=noninteractive chroot "${mountpoint}" apt-get \
        install --assume-yes "${new_kernel}"
    env DEBIAN_FRONTEND=noninteractive chroot "${mountpoint}" apt-get \
        autoremove --purge --assume-yes

    # If running a custom kernel, we should try to boot without an initramfs
    # We do this by setting GRUB_FORCE_PARTUUID, which forces initramfs-less boot
    force_boot_without_initramfs ${mountpoint}
}

track_initramfs_boot_fallback() {
    mountpoint=$1
    cat <<END > "${mountpoint}/etc/grub.d/01_track_initrdless_boot_fallback"
#! /bin/sh
# ${IMAGE_STR}
# This will detect if we attempt to boot with an initramfs and fail.
# In the case of a failure, initrdless_boot_fallback_triggered is set to
# a non-zero value in the grubenv. This value can be checked after boot
# by looking in /boot/grub/grubenv or by using the grub-editenv list command.
set -e
END
    cat <<"END" >> "${mountpoint}/etc/grub.d/01_track_initrdless_boot_fallback"
cat <<"EOF"
if [ -n "${have_grubenv}" ]; then
    if [ -n "${initrdfail}" ]; then
        set initrdless_boot_fallback_triggered="${initrdfail}"
    else
        unset initrdless_boot_fallback_triggered
    fi
    save_env initrdless_boot_fallback_triggered
fi
EOF
END
    chmod +x "${mountpoint}/etc/grub.d/01_track_initrdless_boot_fallback"
}

force_boot_without_initramfs() {
    mountpoint=$1

    partuuid=$(blkid -s PARTUUID -o value $(findmnt -n -o SOURCE --target "${mountpoint}"))
    if [ -n "${partuuid}" ]; then
        echo "Force booting without an initramfs..."
        mkdir -p "${mountpoint}/etc/default/grub.d"
        cat << EOF >> "${mountpoint}/etc/default/grub.d/40-force-partuuid.cfg"
# Force boot without an initramfs by setting GRUB_FORCE_PARTUUID
# Remove this line to enable boot with an initramfs
GRUB_FORCE_PARTUUID=${partuuid}
EOF
        divert_grub "${mountpoint}"
        chroot "${mountpoint}" update-grub
        undivert_grub "${mountpoint}"
    fi
}