# vi: ts=4 expandtab syntax=sh

# default imagesize = 2252*1024**2 = 2.2G (the current size we ship)
imagesize=${IMAGE_SIZE:-2361393152}
fs_label="${FS_LABEL:-rootfs}"
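# Both can be overridden from the environment (illustrative values):
#   IMAGE_SIZE=4831838208 FS_LABEL=cloudimg-rootfs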

AUTOMATION_HEADER="# Automatically generated by installer build process"

rootfs_dev_mapper=
loop_device=
loop_raw=
backing_img=

clean_loops() {
    if [ -n "${loop_device}" ]; then
        # If something just finished writing to the device or a
        # partition (e.g. the zerofree in umount_partition) udev might
        # still be processing the device.
        udevadm settle
        sync
        losetup -v -d "${loop_device}"
        unset loop_device
        unset backing_img
    fi

    if [ -z "${rootfs_dev_mapper}" ]; then
        return 0
    fi

    unset loop_raw
    unset rootfs_dev_mapper
}

create_empty_disk_image() {
    # Prepare an empty disk image
    dd if=/dev/zero of="$1" bs=1 count=0 seek="${imagesize}"
}

create_manifest() {
    local chroot_root=${1}
    local target_file=${2}
    echo "create_manifest chroot_root: ${chroot_root}"
    dpkg-query --show --admindir="${chroot_root}/var/lib/dpkg" > "${target_file}"
    echo "create_manifest call to dpkg-query finished."
    ./config/snap-seed-parse "${chroot_root}" "${target_file}"
    echo "create_manifest call to snap-seed-parse finished."
    if [ "$PROJECT" = ubuntu-cpc ]; then
        echo "create_manifest creating file listing."
        local target_filelist=${2%.manifest}.filelist
        (cd "${chroot_root}" && find -xdev) | sort > "${target_filelist}"
    fi
    echo "create_manifest finished"
}

make_ext4_partition() {
    device="$1"
    label=${fs_label:+-L "${fs_label}"}
    mkfs.ext4 -F -b 4096 -i 8192 -m 0 ${label} -E resize=536870912 "$device"
}

mount_image() {
    trap clean_loops EXIT
    backing_img="$1"
    local rootpart="$2"

    loop_device=$(losetup --show -f -P -v ${backing_img})

    if [ ! -b ${loop_device} ]; then
        echo "unable to find loop device for ${backing_img}"
        exit 1
    fi

    # As explained in excruciating detail in LP: #2045586, losetup
    # races with udev in a way that can cause partition device files
    # to briefly vanish. systemd docs say we can hold udev off by using
    # flocks: https://systemd.io/BLOCK_DEVICE_LOCKING/
    # `udevadm lock` isn't yet usable in Ubuntu, so we'll use flock for now

    # Find the rootfs location
    rootfs_dev_mapper="${loop_device}p${rootpart}"
    if flock -x ${loop_device} [ ! -b "${rootfs_dev_mapper}" ]; then
        echo "${rootfs_dev_mapper} is not a block device";
        exit 1
    fi

    # Add some information to the debug logs
    echo "Mounted disk image ${backing_img} to ${rootfs_dev_mapper}"
    flock -x ${loop_device} blkid ${rootfs_dev_mapper} \
    || echo "blkid failed; continuing"

    return 0
}

use_lp_archives_in_sourceslist(){
    # Use the build environment apt mirror during the build,
    # for both archive and security.
    # live-build does this in the chroot (lb_chroot_archives)
    # but not for the binary hooks
    #
    # To restore the sourceslist back to the original, call
    # recover_sourceslist

    mountpoint="${1}"
    . config/bootstrap  # For the LB_MIRROR_* variables
    if [ -e "${mountpoint}/etc/apt/sources.list.d/ubuntu.sources" ]; then
        MOUNTPOINT_BACKUP_UBUNTU_SOURCES="ubuntu.sources.tmp"
        cp -a "${mountpoint}/etc/apt/sources.list.d/ubuntu.sources" "${MOUNTPOINT_BACKUP_UBUNTU_SOURCES}"
        sed -i "s#http://archive.ubuntu.com/ubuntu#${LB_PARENT_MIRROR_CHROOT}#g" \
            "${mountpoint}/etc/apt/sources.list.d/ubuntu.sources"
        sed -i "s#http://security.ubuntu.com/ubuntu#${LB_PARENT_MIRROR_CHROOT}#g" \
            "${mountpoint}/etc/apt/sources.list.d/ubuntu.sources"

        sha256sum "${mountpoint}/etc/apt/sources.list.d/ubuntu.sources" > ubuntu.sources.sha
    fi
    if [ -e "${mountpoint}/etc/apt/sources.list" ]; then
        MOUNTPOINT_BACKUP_SOURCES_LIST="sources.list.tmp"
        cp -a "${mountpoint}/etc/apt/sources.list" "${MOUNTPOINT_BACKUP_SOURCES_LIST}"
        sed -i "s#http://archive.ubuntu.com/ubuntu#${LB_PARENT_MIRROR_CHROOT}#g" \
            "${mountpoint}/etc/apt/sources.list"
        sed -i "s#http://security.ubuntu.com/ubuntu#${LB_PARENT_MIRROR_CHROOT}#g" \
            "${mountpoint}/etc/apt/sources.list"

        sha256sum "${mountpoint}/etc/apt/sources.list" > sources.list.sha
    fi
}

recover_sourceslist(){
    # Remove the build environment apt mirror from the image

    # Check that the sources.list has not changed.  If it has changed then the
    # binary hook has modified the file that will be discarded.  If the build
    # fails here the binary hook needs to alter sources.list.tmp and regenerate
    # sources.list.sha

    mountpoint="${1}"
    if [ -e "${MOUNTPOINT_BACKUP_UBUNTU_SOURCES:-/doesnotexist}" ]; then
        sha256sum --check ubuntu.sources.sha

        mv "${MOUNTPOINT_BACKUP_UBUNTU_SOURCES}" "${mountpoint}/etc/apt/sources.list.d/ubuntu.sources"
        unset MOUNTPOINT_BACKUP_UBUNTU_SOURCES
    fi
    if [ -e "${MOUNTPOINT_BACKUP_SOURCES_LIST:-/doesnotexist}" ]; then
        sha256sum --check sources.list.sha

        mv "${MOUNTPOINT_BACKUP_SOURCES_LIST}" "${mountpoint}/etc/apt/sources.list"
        unset MOUNTPOINT_BACKUP_SOURCES_LIST
    fi
}

setup_mountpoint() {
    local mountpoint="$1"

    if [ ! -c /dev/mem ]; then
        mknod -m 660 /dev/mem c 1 1
        chown root:kmem /dev/mem
    fi

    mount dev-live -t devtmpfs "$mountpoint/dev"
    mount devpts-live -t devpts -o nodev,nosuid "$mountpoint/dev/pts"
    mount proc-live -t proc "$mountpoint/proc"
    mount sysfs-live -t sysfs "$mountpoint/sys"
    mount securityfs -t securityfs "$mountpoint/sys/kernel/security"
    # Provide more up to date apparmor features, matching target kernel
    mount -o bind /usr/share/livecd-rootfs/live-build/apparmor/generic "$mountpoint/sys/kernel/security/apparmor/features/"
    mount -o bind /usr/share/livecd-rootfs/live-build/seccomp/generic.actions_avail "$mountpoint/proc/sys/kernel/seccomp/actions_avail"
    # cgroup2 mount for LP: 1944004
    mount -t cgroup2 none "$mountpoint/sys/fs/cgroup"
    mount -t tmpfs none "$mountpoint/tmp"
    mount -t tmpfs none "$mountpoint/var/lib/apt/lists"
    mount -t tmpfs none "$mountpoint/var/cache/apt"
    mv "$mountpoint/etc/resolv.conf" resolv.conf.tmp
    cp /etc/resolv.conf "$mountpoint/etc/resolv.conf"
    mv "$mountpoint/etc/nsswitch.conf" nsswitch.conf.tmp
    sed 's/systemd//g' nsswitch.conf.tmp > "$mountpoint/etc/nsswitch.conf"
    use_lp_archives_in_sourceslist "${mountpoint}"
    chroot "$mountpoint" apt-get update

}

teardown_mountpoint() {
    # Reverse the operations from setup_mountpoint
    local mountpoint=$(realpath "$1")

    # ensure we have exactly one trailing slash, and escape all slashes for awk
    mountpoint_match=$(echo "$mountpoint" | sed -e's,/$,,; s,/,\\/,g;')'\/'
    # sort -r ensures that deeper mountpoints are unmounted first
    for submount in $(awk </proc/self/mounts "\$2 ~ /$mountpoint_match/ \
                      { print \$2 }" | LC_ALL=C sort -r); do
        mount --make-private $submount
        umount $submount
    done
    recover_sourceslist "${mountpoint}"
    mv resolv.conf.tmp "$mountpoint/etc/resolv.conf"
    mv nsswitch.conf.tmp "$mountpoint/etc/nsswitch.conf"
}

mount_partition() {
    partition="$1"
    mountpoint="$2"

    mount "$partition" "$mountpoint"
    setup_mountpoint "$mountpoint"
}

mount_overlay() {
    lower="$1"
    upper="$2"
    work="$2/../work"
    path="$3"

    mkdir -p "$work"
    mount -t overlay overlay \
	-olowerdir="$lower",upperdir="$upper",workdir="$work" \
	"$path"
}

get_lowerdirs_for_pass () {
    # Echo the colon-separated chain of overlay lowerdirs for a pass
    # $1 Name of the pass
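    #
    # Example (hypothetical pass names):
    #   get_lowerdirs_for_pass minimal.standard.live
    #   -> "overlay.minimal.standard:overlay.minimal"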
    local curpass="$1"
    local lowerlayers=""

    while :; do
        curpass=$(get_parent_pass $curpass)
        # We climbed up the tree to the root layer, we are done
        [ -z "$curpass" ] && break

        lowerlayers="${lowerlayers}:overlay.${curpass}"
    done
    echo "${lowerlayers#:}"
}

mount_disk_image() {
    local disk_image=${1}
    local mountpoint=${2}
    local rootpart=${3:-1}
    mount_image ${disk_image} "${rootpart}"
    mount_partition "${rootfs_dev_mapper}" $mountpoint

    local boot_dev="${loop_device}p16"
    if flock -x ${loop_device} \
        [ -b ${boot_dev} -a -e $mountpoint/boot ]; then
        flock -x ${loop_device} mount "${boot_dev}" $mountpoint/boot
    fi

    # Having one partition mounted should avoid udev-triggered partition
    # rescans on that device, so we no longer need to flock.

    local uefi_dev="${loop_device}p15"
    if [ -b ${uefi_dev} -a -e $mountpoint/boot/efi ]; then
        mount "${uefi_dev}" $mountpoint/boot/efi
    fi

    # This is needed to allow for certain operations
    # such as updating grub and installing software
    cat > $mountpoint/usr/sbin/policy-rc.d << EOF
#!/bin/sh
# ${IMAGE_STR}
echo "All runlevel operations denied by policy" >&2
exit 101
EOF
    chmod 0755 $mountpoint/usr/sbin/policy-rc.d

}

umount_partition() {
    local mountpoint=${1}
    teardown_mountpoint $mountpoint
    mount --make-private $mountpoint
    umount $mountpoint
    udevadm settle
    # workaround for LP: 1960537
    sleep 30

    if [ -n "${rootfs_dev_mapper}" -a -b "${rootfs_dev_mapper}" ]; then
        # buildds don't have /etc/mtab symlinked;
        # /etc/mtab is needed in order to zerofree space on ext4 filesystems
        [ -e /etc/mtab ] || ln -s /proc/mounts /etc/mtab

        # both of these are likely overkill, but they do result in a slightly
        # smaller ext4 filesystem
        e2fsck -y -E discard ${rootfs_dev_mapper}
        zerofree ${rootfs_dev_mapper}
    fi
}

umount_disk_image() {
    mountpoint="$1"

    local uefi_dev="${loop_device}p15"
    if [ -e "$mountpoint/boot/efi" -a -b "$uefi_dev" ]; then
        # zero fill free space in UEFI partition
        cat < /dev/zero > "$mountpoint/boot/efi/bloat_file" 2> /dev/null || true
        rm "$mountpoint/boot/efi/bloat_file"
        mount --make-private "$mountpoint/boot/efi"
        umount --detach-loop "$mountpoint/boot/efi"
    fi

    if [ -e $mountpoint/usr/sbin/policy-rc.d ]; then
        rm $mountpoint/usr/sbin/policy-rc.d
    fi
    umount_partition $mountpoint
    clean_loops
}

modify_vmdk_header() {
    # Modify the VMDK headers so that both VirtualBox _and_ VMware can
    # read the vmdk and import them.

    vmdk_name="${1}"
    descriptor=$(mktemp)
    newdescriptor=$(mktemp)

    # Extract the vmdk header for manipulation
    dd if="${vmdk_name}" of="${descriptor}" bs=1 skip=512 count=1024
    echo "Cat'ing original descriptor to console for debugging."
    # cat header so we are aware of the original descriptor for debugging
    cat $descriptor

    # trim null bytes to treat as standard text file
    tr -d '\000' < $descriptor > $newdescriptor

    # add newline to newdescriptor
    echo "" >> $newdescriptor

    # add required tools version
    echo -n 'ddb.toolsVersion = "2147483647"' >> $newdescriptor

    # diff original descriptor and new descriptor for debugging
    # diff exits 1 if difference. pipefail not set so piping diff
    # to cat prints diff and swallows exit 1
    echo "Printing diff of original and new descriptors."
    diff --text $descriptor $newdescriptor | cat


    # The header must be 1024 or less before padding
    if ! expr $(stat --format=%s ${newdescriptor}) \< 1025 > /dev/null 2>&1; then
        echo "descriptor is too large, VMDK will be invalid!"; 
        exit 1
    fi

    # reset newdescriptor to be 1024
    truncate --no-create --size=1K $newdescriptor

    # Overwrite the vmdk header with our new, modified one
    dd conv=notrunc,nocreat \
        if="${newdescriptor}" of="${vmdk_name}" \
        bs=1 seek=512 count=1024

    rm ${descriptor} ${newdescriptor}
}

create_vmdk() {
    # There is no real good way to create a _compressed_ VMDK using open source
    # tooling that works across multiple VMDK-capable platforms. This function
    # uses vmdk-stream-converter and then calls modify_vmdk_header to produce a
    # compatible VMDK.
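    #
    # Example (illustrative arguments):
    #   create_vmdk binary/boot/disk.ext4 livecd.ubuntu-cpc.vmdk 10240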

    src="$1"
    destination="$2"
    size="${3:-10240}"

    streamconverter="VMDKstream"
    scratch_d=$(mktemp -d)
    cp ${src} ${scratch_d}/resize.img

    truncate --size=${size}M ${scratch_d}/resize.img
    python3 -m ${streamconverter} ${scratch_d}/resize.img ${destination}
    modify_vmdk_header ${destination}

    qemu-img info ${destination}
    rm -rf ${scratch_d}
}

create_squashfs() {
    local config_dir rootfs_dir squashfs_file
    rootfs_dir="$1"
    squashfs_file="$2"
    config_dir="$PWD/config"
    (cd $rootfs_dir &&
        mksquashfs . $squashfs_file -no-progress -xattrs -comp xz \
            -ef "$config_dir/squashfs-exclude-files")

}

create_derivative() {
    # arg1 is the disk type
    # arg2 is the new name
    unset derivative_img
    case ${1} in
           uefi) disk_image="binary/boot/disk-uefi.ext4";
                 dname="${disk_image//-uefi/-$2-uefi}";;
              *) disk_image="binary/boot/disk.ext4";
                 dname="${disk_image//.ext4/-$2.ext4}";;
    esac

    if [ ! -e ${disk_image} ]; then
        echo "Did not find ${disk_image}!"; exit 1;
    fi

    cp ${disk_image} ${dname}
    export derivative_img=${dname}
}

convert_to_qcow2() {
    src="$1"
    destination="$2"
    qemu-img convert -c -O qcow2 "$src" "$destination"
    qemu-img info "$destination"
}

replace_grub_root_with_label() {
    # When update-grub is run, it will detect the disks in the build system.
    # Instead, we want grub to use the right labelled disk
    CHROOT_ROOT="$1"

    # If boot by partuuid has been requested, don't override.
    if [ -f $CHROOT_ROOT/etc/default/grub.d/40-force-partuuid.cfg ] && \
           grep -q ^GRUB_FORCE_PARTUUID= $CHROOT_ROOT/etc/default/grub.d/40-force-partuuid.cfg
    then
        return 0
    fi
    sed -i -e "s,root=[^ ]*,root=LABEL=${fs_label}," \
        "$CHROOT_ROOT/boot/grub/grub.cfg"
}


# When running update-grub in a chroot on a build host, we don't want it to
# probe for disks or probe for other installed OSes.  Extract common
# diversion wrappers, so this isn't reinvented differently for each image.
divert_grub() {
	CHROOT_ROOT="$1"

	# Don't divert all of grub-probe here; just the scripts we don't want
	# running. Otherwise, you may be missing part-uuids for the search
	# command, for example. ~cyphermox

	chroot "$CHROOT_ROOT" dpkg-divert --local \
		--divert /etc/grub.d/30_os-prober.dpkg-divert \
		--rename /etc/grub.d/30_os-prober

	# Divert systemd-detect-virt; /etc/kernel/postinst.d/zz-update-grub
	# no-ops if we are in a container, and the launchpad farm runs builds
	# in lxd.  We therefore pretend that we're never in a container (by
	# exiting 1).
	chroot "$CHROOT_ROOT" dpkg-divert --local \
		--rename /usr/bin/systemd-detect-virt
	echo "exit 1" > "$CHROOT_ROOT"/usr/bin/systemd-detect-virt
	chmod +x "$CHROOT_ROOT"/usr/bin/systemd-detect-virt
}

undivert_grub() {
	CHROOT_ROOT="$1"

	chroot "$CHROOT_ROOT" dpkg-divert --remove --local \
		--divert /etc/grub.d/30_os-prober.dpkg-divert \
		--rename /etc/grub.d/30_os-prober

	if grep -q "^exit 1$" "$CHROOT_ROOT"/usr/bin/systemd-detect-virt; then
		rm "$CHROOT_ROOT"/usr/bin/systemd-detect-virt
	fi
	chroot "$CHROOT_ROOT" dpkg-divert --remove --local \
		--rename /usr/bin/systemd-detect-virt
}

recreate_initramfs() {
	# Regenerate the initramfs by running update-initramfs in the
	# chroot at $1 and copying the generated initramfs
	# around. Beware that this was written for a single use case
	# (live-server) and may not work in all cases without
	# tweaking...
	# config/common must be sourced before calling this function.
	CHROOT="$1"
	# Start by cargo culting bits of lb_chroot_hacks:
	if [ -n "$LB_INITRAMFS_COMPRESSION" ]; then
		echo "COMPRESS=$LB_INITRAMFS_COMPRESSION" > "$CHROOT"/etc/initramfs-tools/conf.d/livecd-rootfs.conf
	fi
	chroot "$CHROOT" sh -c "${UPDATE_INITRAMFS_OPTIONS:-} update-initramfs -k all -t -u"
	rm -rf "$CHROOT"/etc/initramfs-tools/conf.d/livecd-rootfs.conf
	# Then bits of lb_binary_linux-image:
	case "${LB_INITRAMFS}" in
		casper)
			DESTDIR="binary/casper"
			;;

		live-boot)
			DESTDIR="binary/live"
			;;

		*)
			DESTDIR="binary/boot"
			;;
	esac
	mv "$CHROOT"/boot/initrd.img-* $DESTDIR
}

release_ver() {
    # Return the release version number
    distro-info --series="$LB_DISTRIBUTION" -r | awk '{ print $1 }'
}

# cribbed from cdimage, perhaps this should be a small helper script in germinate?
add_inheritance () {
	case " $inherit " in
		*" $1 "*)
			;;
		*)
			inherit="${inherit:+$inherit }$1"
			;;
	esac
}

expand_inheritance () {
	for seed in $(grep "^$1:" config/germinate-output/structure | cut -d: -f2); do
		expand_inheritance "$seed"
	done
	add_inheritance "$1"
}

inheritance () {
	inherit=
	expand_inheritance "$1"
	echo "$inherit"
}
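
# Example (hypothetical seeds, given a config/germinate-output/structure in
# which "server" inherits "standard" and "standard" inherits "minimal"):
#   inheritance server   ->  "minimal standard server"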

_snap_post_process() {
    # Look for the 'core' snap. If it is not present, assume that the image
    # contains only snaps with bases >= core18. In that case snapd is
    # preseeded. However, when 'core' is being installed and snapd has not
    # been installed by a call to 'snap_preseed' (see below) then it is
    # removed again.
    local CHROOT_ROOT=$1
    local SNAP_NAME=$2

    local seed_dir="$CHROOT_ROOT/var/lib/snapd/seed"
    local snaps_dir="$seed_dir/snaps"
    local seed_yaml="$seed_dir/seed.yaml"
    local assertions_dir="$seed_dir/assertions"
    local snapd_install_stamp="$seed_dir/.snapd-explicit-install-stamp"

    case $SNAP_NAME in
        core[0-9]*)
            # If the 'core' snap is not present, assume we are coreXX-only and
            # install the snapd snap.
            if [ ! -f ${snaps_dir}/core_[0-9]*.snap ]; then
                _snap_preseed $CHROOT_ROOT snapd stable
            fi
            ;;
        core)
            # If the snapd snap has been seeded, but not marked as explicitly
            # installed (see snap_preseed below), then remove it.
            if [ -f ${snaps_dir}/snapd_[0-9]*.snap ] && \
                    [ ! -f "$snapd_install_stamp" ]
            then
                # Remove snap, assertions and entry in seed.yaml
                rm -f ${snaps_dir}/snapd_[0-9]*.snap
                rm -f ${assertions_dir}/snapd_[0-9]*.assert
                sed -i -e'N;/name: snapd/,+2d' $seed_yaml
            fi
            ;;
        *)
            # ignore
            ;;
    esac
}

_snap_preseed() {
    # Download the snap/assertion and add to the preseed
    local CHROOT_ROOT=$1
    local SNAP=$2
    local SNAP_NAME=${SNAP%/*}
    local CHANNEL=${3:?Snap channel must be specified}

    local seed_dir="$CHROOT_ROOT/var/lib/snapd/seed"
    local snaps_dir="$seed_dir/snaps"
    local seed_yaml="$seed_dir/seed.yaml"
    local assertions_dir="$seed_dir/assertions"

    # Download the snap & assertion
    local snap_download_failed=0

    # Preseed a snap only once
    if [ -f ${snaps_dir}/${SNAP_NAME}_[0-9]*.snap ]; then
        return
    fi

    sh -c "
        set -x;
        cd \"$CHROOT_ROOT/var/lib/snapd/seed\";
        UBUNTU_STORE_ARCH=${ARCH:-} SNAPPY_STORE_NO_CDN=1 snap download \
            --cohort="${COHORT_KEY:-}" \
            --channel=\"$CHANNEL\" \"$SNAP_NAME\"" || snap_download_failed=1
    if [ $snap_download_failed = 1 ] ; then
        echo "If the channel ($CHANNEL) includes '*/ubuntu-##.##' track per "
        echo "Ubuntu policy (ex. stable/ubuntu-18.04) the publisher will need "
        echo "to temporarily create the channel/track to allow fallback during"
        echo "download (ex. stable/ubuntu-18.04 falls back to stable if the"
        echo "prior had been created in the past)."
        exit 1
    fi

    mv -v $seed_dir/*.assert $assertions_dir
    mv -v $seed_dir/*.snap $snaps_dir

    # Pre-seed snap's base
    case $SNAP_NAME in
        snapd)
            # snapd is self-contained, ignore base
            ;;
        core|core[0-9][0-9])
            # core and core## are self-contained, ignore base
            ;;
        *)
            # Determine which core snap is needed
            local snap_info

            # snap info doesn't have --channel, so it must run against the downloaded snap
            snap_info=$(snap info --verbose ${snaps_dir}/${SNAP_NAME}_[0-9]*.snap)

            if [ $? -ne 0 ]; then
                echo "Failed to retrieve base of $SNAP_NAME!"
                exit 1
            fi

            local snap_type=$(echo "$snap_info" | awk '/^type:/ { print $2 }')

            if [ "$snap_type" != base ]; then
                local core_snap=$(echo "$snap_info" | awk '/^base:/ {print $2}')

                # If snap info does not list a base the default is 'core'
                # which is now an error to use.
                if [ -z "$core_snap" ]; then
		    if [ -z "$ALLOW_CORE_SNAP" ]; then
			echo "Legacy snap with no base declaration found, refusing to install 'core' snap"
			exit 1
		    else
			echo "Legacy snap with no base declaration found, but \$ALLOW_CORE_SNAP set. continue (but FIX YOUR SNAPS!)"
			core_snap=${core_snap:-core}
		    fi
                fi

                _snap_preseed $CHROOT_ROOT $core_snap stable
            fi
            ;;
    esac

    # Add the snap to the seed.yaml
    ! [ -e $seed_yaml ] && echo "snaps:" > $seed_yaml
    cat <<EOF >> $seed_yaml
  -
    name: ${SNAP_NAME}
    channel: ${CHANNEL}
EOF

    case ${SNAP} in */classic) echo "    classic: true" >> $seed_yaml;; esac

    echo -n "    file: " >> $seed_yaml
    (cd $snaps_dir; ls -1 ${SNAP_NAME}_*.snap) >> $seed_yaml

    _snap_post_process $CHROOT_ROOT $SNAP_NAME
}

snap_prepare_assertions() {
    # Configure basic snapd assertions
    local CHROOT_ROOT=$1
    # A colon-separated string of brand:model to be used for the image's model
    # assertion
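    # (e.g. "generic:generic-classic", the default used by snap_prepare below)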
    local CUSTOM_BRAND_MODEL=$2

    local seed_dir="$CHROOT_ROOT/var/lib/snapd/seed"
    local snaps_dir="$seed_dir/snaps"
    local assertions_dir="$seed_dir/assertions"
    local model_assertion="$assertions_dir/model"
    local account_key_assertion="$assertions_dir/account-key"
    local account_assertion="$assertions_dir/account"

    local brand="$(echo $CUSTOM_BRAND_MODEL | cut -d: -f 1)"
    local model="$(echo $CUSTOM_BRAND_MODEL | cut -d: -f 2)"

    # Get existing model and brand assertions to compare with new parameters
    # For customized images, snap_prepare_assertions is called several times
    # with a different brand or model. In that case we want to overwrite the
    # existing brand and model.
    local override_model_brand="false"
    if [ -e "$model_assertion" ] ; then
        existing_model=$(awk '/^model: / {print $2}' $model_assertion)
        existing_brand=$(awk '/^brand-id: / {print $2}' $model_assertion)

        if [ "$existing_model" != "$model" ] || [ "$existing_brand" != "$brand" ]; then
            override_model_branch="true"
        fi
    fi

    # Return early if the assertions dir exists and we didn't change model or brand
    if [ -d "$assertions_dir" ] && [ "$override_model_brand" = "false" ]; then
        return
    fi

    mkdir -p "$assertions_dir"
    mkdir -p "$snaps_dir"

    # Clear the assertions if they already exist
    if [ -e "$model_assertion" ] ; then
        echo "snap_prepare_assertions: replacing $existing_brand:$existing_model with $brand:$model"
        rm "$model_assertion"
        rm "$account_key_assertion"
        rm "$account_assertion"
    fi

    if ! [ -e "$model_assertion" ] ; then
        snap known --remote model series=16 \
            model=$model brand-id=$brand \
            > "$model_assertion"
    fi

    if ! [ -e "$account_key_assertion" ] ; then
        local account_key=$(sed -n -e's/sign-key-sha3-384: //p' \
            < "$model_assertion")
        snap known --remote account-key \
            public-key-sha3-384="$account_key" \
            > "$account_key_assertion"
    fi

    if ! [ -e "$account_assertion" ] ; then
        local account=$(sed -n -e's/account-id: //p' < "$account_key_assertion")
        snap known --remote account account-id=$account \
            > "$account_assertion"
    fi
}

snap_prepare() {
    # Configure basic snapd assertions and pre-seeds the 'core' snap
    local CHROOT_ROOT=$1
    # Optional. If set, should be a colon-separated string of brand:model to be
    # used for the image's model assertion
    local CUSTOM_BRAND_MODEL=${2:-generic:generic-classic}

    snap_prepare_assertions "$CHROOT_ROOT" "$CUSTOM_BRAND_MODEL"
}

snap_preseed() {
    # Preseed a snap in the image (snap_prepare must be called once prior)
    local CHROOT_ROOT=$1
    # $2 can be in the form of snap_name/classic=track/risk/branch
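    # Examples (illustrative snap names and channels):
    #   snap_preseed "$CHROOT_ROOT" core20
    #   snap_preseed "$CHROOT_ROOT" some-classic-snap/classic=latest/stable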
    local SNAP=$2
    # strip CHANNEL specification
    SNAP=${SNAP%=*}
    # strip /classic confinement
    local SNAP_NAME=${SNAP%/*}

    # For snap preseed to work, we need to ensure that fuse3 is installed in the chroot.
    # fuse3 is a recommends of snapd but if this is a minimized image then recommends are not installed
    # and preseeding will fail.
    chroot "${CHROOT_ROOT}" apt-get install --assume-yes --no-install-recommends fuse3

    # Seed from the specified channel (e.g. core18 latest/stable)
    # Or the channel encoded in the snap name (e.g. lxd=4.0/stable/ubuntu-20.04)
    # Or Ubuntu policy default channel latest/stable/ubuntu-$(release_ver)
    local CHANNEL=${3:-}
    if [ -z "$CHANNEL" ]; then
        case $2 in
            *=*)
                CHANNEL=${2#*=}
                ;;
            *)
                CHANNEL="stable/ubuntu-$(release_ver)"
                ;;
        esac
    fi

    # At this point:
    # SNAP_NAME is just the snap name
    # SNAP is either $SNAP_NAME or $SNAP_NAME/classic for classic confined
    # CHANNEL is the channel

    if [ ! -e "$CHROOT_ROOT/var/lib/snapd/seed/assertions/model" ]; then
        echo "ERROR: Snap model assertion not present, snap_prepare must be called"
        exit 1
    fi

    _snap_preseed $CHROOT_ROOT $SNAP $CHANNEL

    # Mark this image as having snapd installed explicitly.
    case $SNAP_NAME in
        snapd)
            touch "$CHROOT_ROOT/var/lib/snapd/seed/.snapd-explicit-install-stamp"
            ;;
    esac

    # Do basic validation of generated snapd seed.yaml, doing it here
    # means we catch all the places(tm) that snaps are added but the
    # downside is that each time a snap is added the seed must be valid,
    # i.e. snaps with bases need to add bases first etc
    #
    # Skip validation by setting SNAP_NO_VALIDATE_SEED=1.
    if [ -z "${SNAP_NO_VALIDATE_SEED:-}" ]; then
        snap_validate_seed "${CHROOT_ROOT}"
    fi
}

snap_validate_seed() {
    local CHROOT_ROOT=$1
    local kern_major_min=undefined
    local boot_filename=undefined

    # ppc64el still uses /boot/vmlinux, so we need to determine the boot file name; non-ppc64el
    # architectures use /boot/vmlinuz. We don't need to query the arch, as we can use the existence
    # of the file to determine the boot file name: both will never be present at the same time.
    if [ -e ${CHROOT_ROOT}/boot/vmlinuz ]; then
        boot_filename=vmlinuz
    elif [ -e ${CHROOT_ROOT}/boot/vmlinux ]; then
        boot_filename=vmlinux
    fi
    if [ ${boot_filename} != undefined ]; then  # we have a known boot file so we can proceed with checking for features to mount
        kern_major_min=$(readlink --canonicalize --no-newline ${CHROOT_ROOT}/boot/${boot_filename} | grep  --extended-regexp --only-matching --max-count 1 '[0-9]+\.[0-9]+')
        if [ -d /usr/share/livecd-rootfs/live-build/apparmor/${kern_major_min} ]; then
            # If an Ubuntu version has different kernel apparmor features between LTS and HWE
            # kernels, a snap pre-seeding issue can occur where the incorrect apparmor features
            # are reported. Work around this by overriding the "generic" feature set (which is
            # tied to the LTS kernel) with a copy of the directory structure for the target kernel.

            # Bind kernel apparmor directory to feature directory for snap preseeding
            umount "${CHROOT_ROOT}/sys/kernel/security/apparmor/features/"
            mount --bind /usr/share/livecd-rootfs/live-build/apparmor/${kern_major_min} "${CHROOT_ROOT}/sys/kernel/security/apparmor/features/"
        fi
    fi

    if [ -e "${CHROOT_ROOT}/var/lib/snapd/seed/seed.yaml" ]; then
        snap debug validate-seed "${CHROOT_ROOT}/var/lib/snapd/seed/seed.yaml"
        /usr/lib/snapd/snap-preseed --reset $(realpath "${CHROOT_ROOT}")
        /usr/lib/snapd/snap-preseed $(realpath "${CHROOT_ROOT}")
        chroot "${CHROOT_ROOT}" apparmor_parser --skip-read-cache --write-cache --skip-kernel-load --verbose  -j `nproc` /etc/apparmor.d
    fi

    # Unmount kernel specific apparmor feature
    # mount generic apparmor feature again (cleanup)
    if [ -d /usr/share/livecd-rootfs/live-build/apparmor/${kern_major_min} ]; then
        umount "${CHROOT_ROOT}/sys/kernel/security/apparmor/features/"
        mount -o bind /usr/share/livecd-rootfs/live-build/apparmor/generic "${CHROOT_ROOT}/sys/kernel/security/apparmor/features/"
    fi

}

list_packages_from_seed () {
    # Store all packages for a given seed, including its seed dependency
    # $1: Name of the seed to expand to a package list
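    #
    # Example (hypothetical seed name): list_packages_from_seed minimal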

    local all_seeds="$(inheritance $1)"

    for seed in $all_seeds; do
        head -n-2 config/germinate-output/${seed}.seed|tail -n+3|awk '{print $1}'
    done|sort -u
}

subtract_package_lists() {
    # Subtract a package list from another
    #
    # $1 source package list
    # $2 Package list to subtract from source package list
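    #
    # Example (hypothetical seed names): subtract_package_lists standard minimal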
    local list1=$(mktemp)
    local list2=$(mktemp)

    list_packages_from_seed $1 > "$list1"
    list_packages_from_seed $2 > "$list2"
    comm -23 "$list1" "$list2"

    rm "$list1" "$list2"
}

clean_debian_chroot() {
    # remove crufty files that shouldn't be left in an image
    rm -f chroot/var/cache/debconf/*-old chroot/var/lib/dpkg/*-old
    Chroot chroot apt clean
    # For the docker images we remove even more stuff.
    if [ "${PROJECT}:${SUBPROJECT:-}" = "ubuntu-base:minimized" ] || [ "${PROJECT}:${SUBPROJECT:-}" = "ubuntu-oci:minimized" ]; then
        # Remove apt lists (that are currently removed downstream
        # anyway)
        rm -rf chroot/var/lib/apt/lists/*
        # Having device nodes in the docker image can cause problems
        # (https://github.com/tianon/docker-brew-ubuntu-core/issues/62)
        # so remove them.  We only do this for docker out of an
        # abundance of caution.
        rm -rf chroot/dev/*
    fi
}

configure_universe() {
    if [ -f config/universe-enabled ]; then
        # This is cargo-culted almost verbatim (with some syntax changes for
        # preinstalled being slightly different in what it doesn't ask) from
        # debian-installer's apt-setup:

        cat > chroot/etc/apt/sources.list << EOF
# Ubuntu sources have moved to the /etc/apt/sources.list.d/ubuntu.sources
# file, which uses the deb822 format. Use deb822-formatted .sources files
# to manage package sources in the /etc/apt/sources.list.d/ directory.
# See the sources.list(5) manual page for details.
EOF

        cat > chroot/etc/apt/sources.list.d/ubuntu.sources << EOF
# See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to
# newer versions of the distribution.

## Ubuntu distribution repository
##
## The following settings can be adjusted to configure which packages to use from Ubuntu.
## Mirror your choices (except for URIs and Suites) in the security section below to
## ensure timely security updates.
##
## Types: Append deb-src to enable the fetching of source package.
## URIs: A URL to the repository (you may add multiple URLs)
## Suites: The following additional suites can be configured
##   <name>-updates   - Major bug fix updates produced after the final release of the
##                      distribution.
##   <name>-backports - software from this repository may not have been tested as
##                      extensively as that contained in the main release, although it includes
##                      newer versions of some applications which may provide useful features.
##                      Also, please note that software in backports WILL NOT receive any review
##                      or updates from the Ubuntu security team.
## Components: Aside from main, the following components can be added to the list
##   restricted  - Software that may not be under a free license, or protected by patents.
##   universe    - Community maintained packages. Software in this repository receives maintenance
##                 from volunteers in the Ubuntu community, or a 10 year security maintenance
##                 commitment from Canonical when an Ubuntu Pro subscription is attached.
##   multiverse  - Community maintained of restricted. Software from this repository is
##                 ENTIRELY UNSUPPORTED by the Ubuntu team, and may not be under a free
##                 licence. Please satisfy yourself as to your rights to use the software.
##                 Also, please note that software in multiverse WILL NOT receive any
##                 review or updates from the Ubuntu security team.
##
## See the sources.list(5) manual page for further settings.
Types: deb
URIs: $LB_PARENT_MIRROR_BINARY
Suites: $LB_DISTRIBUTION $LB_DISTRIBUTION-updates $LB_DISTRIBUTION-backports
Components: main universe restricted multiverse
Signed-By: /usr/share/keyrings/ubuntu-archive-keyring.gpg

## Ubuntu security updates. Aside from URIs and Suites,
## this should mirror your choices in the previous section.
Types: deb
URIs: $LB_PARENT_MIRROR_BINARY_SECURITY
Suites: $LB_DISTRIBUTION-security
Components: main universe restricted multiverse
Signed-By: /usr/share/keyrings/ubuntu-archive-keyring.gpg
EOF

    fi
}

configure_oci() {
    # Configure a chroot to be an OCI/Docker container.
    # These changes are taken from the current Dockerfile modifications done
    # at https://github.com/tianon/docker-brew-ubuntu-core/blob/master/update.sh

    local chroot=$1

    if [ ! -d "${chroot}" ]; then
        echo "The chroot does not exist"
        exit 1
    fi

    echo "==== Configuring OCI ===="

    # https://github.com/docker/docker/blob/9a9fc01af8fb5d98b8eec0740716226fadb3735c/contrib/mkimage/debootstrap#L40-L48
    echo '#!/bin/sh' > ${chroot}/usr/sbin/policy-rc.d
    echo 'exit 101' >> ${chroot}/usr/sbin/policy-rc.d
    Chroot ${chroot} "chmod +x /usr/sbin/policy-rc.d"


    # https://github.com/docker/docker/blob/9a9fc01af8fb5d98b8eec0740716226fadb3735c/contrib/mkimage/debootstrap#L54-L56
    Chroot ${chroot} "dpkg-divert --local --rename --add /sbin/initctl"
    cp -a ${chroot}/usr/sbin/policy-rc.d ${chroot}/sbin/initctl
    sed -i 's/^exit.*/exit 0/' ${chroot}/sbin/initctl

    # https://github.com/docker/docker/blob/9a9fc01af8fb5d98b8eec0740716226fadb3735c/contrib/mkimage/debootstrap#L71-L78
    echo 'force-unsafe-io' > ${chroot}/etc/dpkg/dpkg.cfg.d/docker-apt-speedup

    # https://github.com/docker/docker/blob/9a9fc01af8fb5d98b8eec0740716226fadb3735c/contrib/mkimage/debootstrap#L85-L105
    echo 'DPkg::Post-Invoke { "rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true"; };' > ${chroot}/etc/apt/apt.conf.d/docker-clean

    echo 'APT::Update::Post-Invoke { "rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true"; };' >> ${chroot}/etc/apt/apt.conf.d/docker-clean

    echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";' >> ${chroot}/etc/apt/apt.conf.d/docker-clean

    # https://github.com/docker/docker/blob/9a9fc01af8fb5d98b8eec0740716226fadb3735c/contrib/mkimage/debootstrap#L109-L115
    echo 'Acquire::Languages "none";' > ${chroot}/etc/apt/apt.conf.d/docker-no-languages

    # https://github.com/docker/docker/blob/9a9fc01af8fb5d98b8eec0740716226fadb3735c/contrib/mkimage/debootstrap#L118-L130
    echo 'Acquire::GzipIndexes "true"; Acquire::CompressionTypes::Order:: "gz";' > ${chroot}/etc/apt/apt.conf.d/docker-gzip-indexes

    # https://github.com/docker/docker/blob/9a9fc01af8fb5d98b8eec0740716226fadb3735c/contrib/mkimage/debootstrap#L134-L151
    echo 'Apt::AutoRemove::SuggestsImportant "false";' > ${chroot}/etc/apt/apt.conf.d/docker-autoremove-suggests

    # https://bugs.launchpad.net/cloud-images/+bug/1810451
    echo 'APT::Periodic::Enable "0";' > ${chroot}/etc/apt/apt.conf.d/docker-disable-periodic-update

    # delete all the apt list files since they're big and get stale quickly
    rm -rf ${chroot}/var/lib/apt/lists/*

    # verify that the APT lists files do not exist
    Chroot chroot "apt-get indextargets" > indextargets.out
    [ ! -s indextargets.out ]
    rm indextargets.out
    # (see https://bugs.launchpad.net/cloud-images/+bug/1699913)

    # make systemd-detect-virt return "docker"
    # See: https://github.com/systemd/systemd/blob/aa0c34279ee40bce2f9681b496922dedbadfca19/src/basic/virt.c#L434
    mkdir -p ${chroot}/run/systemd
    echo 'docker' > ${chroot}/run/systemd/container

    # Create Ubuntu user
    Chroot ${chroot} useradd ubuntu -U -u 1000 --comment Ubuntu --groups adm,audio,cdrom,dialout,dip,floppy,plugdev,sudo,video --shell /bin/bash -m
    rm -rf ${chroot}/var/cache/apt/*.bin

    echo "==== Configuring OCI done ===="
}

configure_network_manager() {
    # If the image pre-installs network-manager, let it manage all devices by
    # default. Installing NM on an existing system only manages wifi and wwan via
    # /usr/lib/NetworkManager/conf.d/10-globally-managed-devices.conf. When setting
    # the global backend to NM, netplan overrides that file.
    if [ -e chroot/usr/sbin/NetworkManager -a ! -f chroot/etc/netplan/01-network-manager-all.yaml -a "$SUBPROJECT" != "desktop-preinstalled" ]; then
        echo "===== Enabling all devices in NetworkManager ===="
        mkdir -p chroot/etc/netplan
        cat <<EOF > chroot/etc/netplan/01-network-manager-all.yaml
# Let NetworkManager manage all devices on this system
network:
  version: 2
  renderer: NetworkManager
EOF
        # Do not limit cloud-init renderers to network-manager as suggested
        # in LP: #1982855 because subiquity needs to render full networking
        # in ephemeral boot time when autoinstall.network is provided.
        # Neither subiquity nor netplan is aware of /etc/NetworkManager config
        # artifacts emitted by cloud-init. It's best if cloud-init reports
        # network config directly to /etc/netplan with the configured netplan
        # backend: NetworkManager per 01-network-manager-all.yaml above.

        # cloud-init's default renderer discovery will prefer netplan.
        # Any time subiquity needs to write and apply network config
        # it disables all previous network config in /etc/netplan so
        # any previous 50-cloud-init.yaml will be rendered inert.

        # Position cloud-init.service After=NetworkManager.service.
        # (LP: #2008952).
        # We override the entire cloud-init.service definition because
        # systemd drop-in files only allow adding Before/After constraints
        # yet we are dropping the Before=sysinit.target from the upstream
        # cloud-init.service.
        # This override can be dropped when NetworkManager.service can run
        # Before=sysinit.target when it drops strict dbus.service dependency.
        cat <<EOF > chroot/lib/systemd/system/cloud-init.service
${AUTOMATION_HEADER}
# Based on cloud-init 23.1 for Desktop LiveCD
[Unit]
Description=Initial cloud-init job (metadata service crawler)
DefaultDependencies=no
Wants=cloud-init-local.service
Wants=sshd-keygen.service
Wants=sshd.service
After=cloud-init-local.service
After=systemd-networkd-wait-online.service
# Installer Added After=NetworkManager* ordering
After=NetworkManager.service
After=NetworkManager-wait-online.service
After=networking.service
Before=network-online.target
Before=sshd-keygen.service
Before=sshd.service
# Before=sysinit.target  Installer removed to allow for NM dependency
Before=shutdown.target
Conflicts=shutdown.target
Before=systemd-user-sessions.service

[Service]
Type=oneshot
ExecStart=/usr/bin/cloud-init init
RemainAfterExit=yes
TimeoutSec=0

# Output needs to appear in instance console output
StandardOutput=journal+console

[Install]
WantedBy=cloud-init.target
EOF

        # Allow cloud-init clean to inform of strict network-manager config
        mkdir -p chroot/etc/cloud/clean.d
        cat <<EOF > chroot/etc/cloud/clean.d/99-installer-use-networkmanager
#!/bin/sh
# Inform clone image creators about strict network-manager cfg for cloud-init
if [ -f /etc/cloud/cloud.cfg.d/99-installer-use-networkmanager.cfg ]; then
  echo "WARNING: cloud-init network config is limited to using network-manager."
  echo "If this is undesirable: rm /etc/cloud/cloud.cfg.d/99-installer-use-networkmanager.cfg"
fi
EOF
        chmod +x chroot/etc/cloud/clean.d/99-installer-use-networkmanager
    else
        echo "==== NetworkManager not installed ===="
    fi
}

get_parent_pass () {
    # return parent pass
    # $1 name of the pass
    # return parent pass name or '' if pass is root pass.
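    #
    # e.g. "minimal.standard" -> "minimal"; "minimal" -> "" (root pass)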
    local pass="$1"

    parent_pass=${pass%.*}
    if [ "${parent_pass}" = "${pass}" ]; then
        return
    fi
    echo ${pass%.*}
}

setenv_file () {
	# Exposes an environment variable in a chroot
	# $1 Name of the variable
	# $2 Value of the variable
	# $3 Path to the environment file of the chroot
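	#
	# Example (illustrative): setenv_file LANG C.UTF-8 chroot/etc/environment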
	local var="$1"
	local val="$2"
	local file="$3"

	grep -v "^$var" $file || true > $file.new
	echo "${var}=${val}" >> $file.new
	mv $file.new $file
}

divert_update_initramfs () {
    Chroot chroot "dpkg-divert --quiet --add \
		--divert /usr/sbin/update-initramfs.REAL --rename \
		/usr/sbin/update-initramfs"
	cat > chroot/usr/sbin/update-initramfs <<'EOF'
#! /bin/sh
if [ $# != 1 ] || [ "$1" != -u ]; then
	exec update-initramfs.REAL "$@"
fi
echo "update-initramfs: diverted by livecd-rootfs (will be called later)" >&2

exit 0
EOF
	chmod +x chroot/usr/sbin/update-initramfs
}

undivert_update_initramfs () {
	rm -f chroot/usr/sbin/update-initramfs
	Chroot chroot "dpkg-divert --quiet --remove --rename \
		/usr/sbin/update-initramfs"
}

is_root_layer () {
    local pass=$1
    if [ -z "$(get_parent_pass $pass)" ]; then
        return 0
    fi
    return 1
}

is_live_layer () {
    local pass=$1
    for livepass in $LIVE_PASSES; do
	    [ "$livepass" != "$pass" ] && continue
        return 0
    done
    return 1
}

setup_cidata() {
    local cidata_dev=$1
    local mountpoint=$(mktemp -d)
    mkfs.vfat -F 32 -n CIDATA ${cidata_dev}
    mount ${cidata_dev} ${mountpoint}
    cp /usr/share/livecd-rootfs/live-build/cidata/* ${mountpoint}
    cat >>${mountpoint}/meta-data.sample <<END
#instance-id: iid-$(openssl rand -hex 8)

END
    umount ${mountpoint}
}

setup_cinocloud() {
    if [ "${IMAGE_HAS_HARDCODED_PASSWORD:-}" != "1" ] || ( [ "${IMAGE_TARGETS:-}" != "disk1-img-xz" ] && [ "${IMAGE_TARGETS:-}" != "disk-image-non-cloud" ] ); then
        echo "unexpected attempt to add a hardcoded password to an image"
        exit 1
    fi
    local mountpoint=$1
    mkdir -p $mountpoint/var/lib/cloud/seed/nocloud-net
    cat <<EOF >$mountpoint/var/lib/cloud/seed/nocloud-net/meta-data
instance-id: iid-$(openssl rand -hex 8)
EOF
    cat <<EOF >$mountpoint/var/lib/cloud/seed/nocloud-net/user-data
#cloud-config
chpasswd:
    expire: True
    list:
        - ubuntu:ubuntu
ssh_pwauth: True
EOF
    cat <<EOF >$mountpoint/var/lib/cloud/seed/nocloud-net/network-config
# This is the initial network config.
# It can be overwritten by cloud-init.
version: 2
ethernets:
  zz-all-en:
    match:
      name: "en*"
    dhcp4: true
    optional: true
  zz-all-eth:
    match:
      name: "eth*"
    dhcp4: true
    optional: true
EOF
}

replace_kernel () {
    mountpoint=$1
    new_kernel=$2
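
    # Example (illustrative kernel package):
    #   replace_kernel "$mountpoint" linux-kvm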

    # Install custom kernel (N.B. the trailing + retains linux-base during this
    # operation)
    env DEBIAN_FRONTEND=noninteractive chroot "${mountpoint}" apt-get \
        remove --purge --assume-yes '^linux-.*' 'linux-base+'
    env DEBIAN_FRONTEND=noninteractive chroot "${mountpoint}" apt-get \
        update --assume-yes
    env DEBIAN_FRONTEND=noninteractive chroot "${mountpoint}" apt-get \
        install --assume-yes "${new_kernel}"
    env DEBIAN_FRONTEND=noninteractive chroot "${mountpoint}" apt-get \
        autoremove --purge --assume-yes

    # If running a custom kernel, we should try to boot without an initramfs
    # We do this by setting GRUB_FORCE_PARTUUID, which forces initramfs-less boot
    force_boot_without_initramfs ${mountpoint}
}

track_initramfs_boot_fallback() {
    mountpoint=$1
    cat <<END > "${mountpoint}/etc/grub.d/01_track_initrdless_boot_fallback"
#! /bin/sh
# ${IMAGE_STR}
# This will detect if we attempt to boot with an initramfs and fail.
# In the case of a failure, initrdless_boot_fallback_triggered is set to
# a non-zero value in the grubenv. This value can be checked after boot
# by looking in /boot/grub/grubenv or by using the grub-editenv list command.
set -e
END
    cat <<"END" >> "${mountpoint}/etc/grub.d/01_track_initrdless_boot_fallback"
cat <<"EOF"
if [ -n "${have_grubenv}" ]; then
  if [ -n "${initrdfail}" ]; then
    set initrdless_boot_fallback_triggered="${initrdfail}"
  else
    unset initrdless_boot_fallback_triggered
  fi
  save_env initrdless_boot_fallback_triggered
fi
EOF
END
    chmod +x "${mountpoint}/etc/grub.d/01_track_initrdless_boot_fallback"
}

force_boot_without_initramfs() {
    mountpoint=$1

    partuuid=$(blkid -s PARTUUID -o value $(findmnt -n -o SOURCE --target "${mountpoint}"))
    if [ -n "${partuuid}" ]; then
        echo "Force booting without an initramfs..."
        mkdir -p "${mountpoint}/etc/default/grub.d"
        cat << EOF >> "${mountpoint}/etc/default/grub.d/40-force-partuuid.cfg"
# Force boot without an initramfs by setting GRUB_FORCE_PARTUUID
# Remove this line to enable boot with an initramfs
GRUB_FORCE_PARTUUID=${partuuid}
EOF
        divert_grub "${mountpoint}"
        chroot "${mountpoint}" update-grub
        undivert_grub "${mountpoint}"
    fi
}

# find all files under /var/lib/snapd in the target directory that aren't
# shipped by the snapd package itself, and remove them
reset_snapd_state() {
    rootdir="$1"

    /usr/lib/snapd/snap-preseed --reset $(realpath "$rootdir") || true
    rm -rf "$rootdir/var/lib/snapd"
    setup_mountpoint "$rootdir"
    chroot "$rootdir" apt-get install --reinstall -y snapd
    teardown_mountpoint "$rootdir"
}