Work in progress for making iso images for the core-desktop installer.

ubuntu-core-desktop
Łukasz 'sil2100' Zemczak 11 months ago
parent 30418f67ce
commit cade6ef3b6

debian/changelog

@@ -1,4 +1,4 @@
-livecd-rootfs (23.10.56~core1) mantic; urgency=medium
+livecd-rootfs (1:24.04.20~core1) noble; urgency=medium
   * Experiment with ubuntu-core-desktop livefs builds.
   * This branch auto-checks-out the related git branch.

@@ -155,6 +155,22 @@ if [ "${IMAGEFORMAT:-}" = "ubuntu-image" ]; then
    exit 0
fi

if [ "$PROJECT" = "ubuntu-core-desktop" ]; then
    # Special case for ubuntu-core-desktop - we need to call
    # ubuntu-image twice, once for the core image payload and
    # then for the actual image.
    echo "Building the core image payload"
    cd config/
    /snap/bin/ubuntu-image snap $UBUNTU_IMAGE_PAYLOAD_ARGS \
        -O output ubuntu-core-desktop*.model-assertion
    mv output/*.img pc.img
    xz -0 -T4 pc.img
    rm -rf output/
    # We need to rename the image file so that it can be pulled
    # by the builder.
    mv pc.img.xz "$PREFIX".img.xz
fi

# Setup cleanup function
Setup_cleanup
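
A quick way to sanity-check the payload this hook leaves behind (purely illustrative, not part of the commit): xz can both verify the archive and report the uncompressed image size, which is also the figure the install-sources catalog further down wants for its @SIZE@ placeholder.

    # Hypothetical local check; "$PREFIX" is set by the surrounding build machinery.
    xz -t "$PREFIX".img.xz     # verify integrity of the compressed payload
    xz -lv "$PREFIX".img.xz    # report compressed and uncompressed sizes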

@@ -810,6 +810,31 @@ case $PROJECT in
    esac
    ;;

ubuntu-core-desktop)
    touch config/universe-enabled
    KERNEL_FLAVOURS='generic-hwe-22.04'
    PASSES_TO_LAYERS="true"

    # the live layer contains all packages for the live session installer
    add_task live minimal
    add_snap live ubuntu-core-desktop-installer/classic core22 snapd
    add_package live linux-$KERNEL_FLAVOURS plymouth-theme-spinner squashfs-tools snapd cloud-init

    # now let's create the necessary catalog files
    cat <<-EOF > config/install-sources-in.yaml
	- default: true
	  description:
	    en: Ubuntu Core Desktop.
	  id: ubuntu-core-desktop
	  locale_support: none
	  name:
	    en: Ubuntu Core Desktop
	  path: pc.img.xz
	  type: dd-xz:file
	  size: @SIZE@
	  variant: core
	EOF
    ;;
ubuntu-oem)
    touch config/universe-enabled
    PASSES_TO_LAYERS="true"
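
Nothing in this hunk substitutes the @SIZE@ placeholder written into install-sources-in.yaml; presumably a later build step fills it in. A minimal sketch of such a substitution, assuming subiquity's dd-xz:file source wants the uncompressed size of the payload (paths illustrative):

    # Hypothetical post-processing step, not shown in this commit.
    size=$(xz -dc config/pc.img.xz | wc -c)   # uncompressed size in bytes
    sed -i "s/@SIZE@/${size}/" config/install-sources-in.yaml
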
@@ -1435,7 +1460,7 @@ EOF
;;
ubuntu-cpc:*|ubuntu-server:live|ubuntu:desktop-preinstalled| \
-ubuntu-wsl:*|ubuntu-mini-iso:*|ubuntu:|ubuntu-oem:*)
+ubuntu-wsl:*|ubuntu-mini-iso:*|ubuntu:|ubuntu-oem:*|ubuntu-core-desktop:*)
    # Ensure that most things e.g. includes.chroot are copied as is
    for entry in /usr/share/livecd-rootfs/live-build/${PROJECT}/*; do
        case $entry in

@@ -0,0 +1,19 @@
#! /bin/sh

set -eu

case ${PASS:-} in
    live)
        ;;
    *)
        exit 0
        ;;
esac

cat <<EOF > /etc/initramfs-tools/conf.d/casperize.conf
export CASPER_GENERATE_UUID=1
EOF

cat <<EOF > /etc/initramfs-tools/conf.d/default-layer.conf
LAYERFS_PATH=${PASS}.squashfs
EOF
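
This hook only acts on the live pass: CASPER_GENERATE_UUID makes the initramfs embed a UUID that casper later matches against the boot medium, and LAYERFS_PATH names the squashfs layer casper should treat as the top of the stack for this pass. A rough sketch of where these artifacts end up on the final ISO (layout assumed from how layered casper images are usually arranged, not shown in this commit):

    casper/
        live.squashfs         # the layer named by LAYERFS_PATH=live.squashfs
        initrd                # carries casperize.conf and default-layer.conf
        vmlinuz
    .disk/
        casper-uuid-generic   # emitted because CASPER_GENERATE_UUID=1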

@@ -0,0 +1,117 @@
# The top level settings are used as module
# and system configuration.

# A set of users which may be applied and/or used by various modules
# when a 'default' entry is found it will reference the 'default_user'
# from the distro configuration specified below
users:
  - default

# If this is set, 'root' will not be able to ssh in and they
# will get a message to login instead as the default $user
disable_root: true

# This will cause the set+update hostname module to not operate (if true)
preserve_hostname: true

ssh_pwauth: yes

chpasswd:
  expire: false

# This is the initial network config.
# It can be overwritten by cloud-init or subiquity.
network:
  version: 2
  ethernets:
    zz-all-en:
      match:
        name: "en*"
      dhcp4: true
    zz-all-eth:
      match:
        name: "eth*"
      dhcp4: true

# We used to have a custom final_message here. Just use the default instead.

# Example datasource config
# datasource:
#   Ec2:
#     metadata_urls: [ 'blah.com' ]
#     timeout: 5 # (defaults to 50 seconds)
#     max_wait: 10 # (defaults to 120 seconds)

# The modules that run in the 'init' stage
cloud_init_modules:
  - bootcmd
  - write-files
  - ca-certs
  - rsyslog
  - users-groups
  - ssh

# The modules that run in the 'config' stage
cloud_config_modules:
  # Emit the cloud config ready event
  # this can be used by upstart jobs for 'start on cloud-config'.
  - ssh-import-id
  - set-passwords
  - timezone
  - disable-ec2-metadata
  - runcmd

# The modules that run in the 'final' stage
cloud_final_modules:
  - scripts-per-once
  - scripts-user
  - ssh-authkey-fingerprints
  - keys-to-console
  - phone-home
  - final-message

# System and/or distro specific settings
# (not accessible to handlers/transforms)
system_info:
  # This will affect which distro class gets used
  distro: ubuntu
  # Default user name + that default users groups (if added/used)
  default_user:
    name: installer
    lock_passwd: false
    gecos: Ubuntu
    groups: [adm, audio, cdrom, dialout, dip, floppy, lxd, netdev, plugdev, sudo, video]
    sudo: ["ALL=(ALL) NOPASSWD:ALL"]
    shell: /usr/bin/subiquity-shell
  # Automatically discover the best ntp_client
  ntp_client: auto
  # Other config here will be given to the distro class and/or path classes
  paths:
    cloud_dir: /var/lib/cloud/
    templates_dir: /etc/cloud/templates/
    upstart_dir: /etc/init/
  package_mirrors:
    - arches: [i386, amd64]
      failsafe:
        primary: http://archive.ubuntu.com/ubuntu
        security: http://security.ubuntu.com/ubuntu
      search:
        primary:
          - http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/
          - http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/
          - http://%(region)s.clouds.archive.ubuntu.com/ubuntu/
        security: []
    - arches: [arm64, armel, armhf]
      failsafe:
        primary: http://ports.ubuntu.com/ubuntu-ports
        security: http://ports.ubuntu.com/ubuntu-ports
      search:
        primary:
          - http://%(ec2_region)s.ec2.ports.ubuntu.com/ubuntu-ports/
          - http://%(availability_zone)s.clouds.ports.ubuntu.com/ubuntu-ports/
          - http://%(region)s.clouds.ports.ubuntu.com/ubuntu-ports/
        security: []
    - arches: [default]
      failsafe:
        primary: http://ports.ubuntu.com/ubuntu-ports
        security: http://ports.ubuntu.com/ubuntu-ports
  ssh_svcname: ssh
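
Since a syntax error in this file would only surface at first boot of the live session, it can be worth validating it on the build host; a quick local check, assuming cloud-init is available there (path illustrative):

    # Hypothetical validation step; `cloud-init schema` checks user-data-style
    # keys, which covers most of this file.
    cloud-init schema --config-file etc/cloud/cloud.cfg --annotate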

@@ -0,0 +1,4 @@
output: {all: '>> /var/log/cloud-init-output.log'}
no_ssh_fingerprints: true
ssh:
  emit_keys_to_console: false

@@ -0,0 +1,4 @@
# systemd in 23.04+ uses a newer "compact" format by default which is not
# understood by the systemd libraries from jammy used in the subiquity snap.
[Service]
Environment="SYSTEMD_JOURNAL_COMPACT=0"

@@ -0,0 +1,4 @@
# systemd in 22.04+ uses "hash table hardening" by default which is not
# understood by the systemd libraries from focal used in the subiquity snap.
[Service]
Environment="SYSTEMD_JOURNAL_KEYED_HASH=0"

@@ -0,0 +1,3 @@
[Service]
ExecStart=
ExecStart=-/sbin/agetty --noclear -n --autologin ubuntu-server %I $TERM
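
This drop-in presumably targets serial-getty@.service or getty@.service (the unit name is not shown in the diff); the empty ExecStart= clears the stock command before the autologin variant is added. On a booted image, the merged result can be inspected (unit instance assumed):

    # Hypothetical inspection of the effective unit after the drop-in applies:
    systemctl cat serial-getty@ttyS0.service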

@@ -0,0 +1,9 @@
[Unit]
Description=Subiquity, the installer for Ubuntu Server %I
After=snapd.seeded.service
StartLimitInterval=0

[Service]
Environment=SNAP_REEXEC=0
ExecStart=
ExecStart=/usr/bin/snap run subiquity.subiquity-service %I

@@ -0,0 +1,4 @@
[Service]
StandardOutput=tty
ExecStart=
ExecStart=/usr/bin/snap run subiquity --ssh

@@ -0,0 +1,14 @@
[Unit]
IgnoreOnIsolate=yes
After=systemd-user-sessions.service plymouth-quit-wait.service snapd.seeded.service

[Service]
Environment=SNAP_REEXEC=0
UtmpIdentifier=tty1
TTYPath=/dev/tty1
TTYReset=yes
TTYVHangup=yes
TTYVTDisallocate=yes
KillMode=process
IgnoreSIGPIPE=no
SendSIGHUP=yes

@@ -0,0 +1,4 @@
[Mount]
What=/dev/disk/by-uuid/00c629d6-06ab-4dfd-b21e-c3186f34105d
Where=/subiquity_config
Type=ext4
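
The mount unit hard-codes a filesystem UUID, so it only fires if a config volume was created with exactly that UUID; systemd also requires the unit file to be named subiquity_config.mount to match Where=. A sketch of preparing a matching volume out of band (device name illustrative):

    # Hypothetical provisioning step on a test host, not part of the image build:
    mkfs.ext4 -U 00c629d6-06ab-4dfd-b21e-c3186f34105d /dev/vdb1
    # Once the device appears, systemd resolves /dev/disk/by-uuid/00c629d6-...
    # and mounts it on /subiquity_config.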

@@ -0,0 +1,9 @@
127.0.0.1 localhost.localdomain localhost
::1 localhost6.localdomain6 localhost6

# The following lines are desirable for IPv6 capable hosts
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
ff02::3 ip6-allhosts