Merge branch 'ubuntu/master' into live-server-serial-argh

delete-obsolete-ubuntu-core
Michael Hudson-Doyle 4 years ago
commit 7cd7f1e0bf

33
debian/changelog vendored

@ -1,9 +1,38 @@
livecd-rootfs (2.720) UNRELEASED; urgency=medium
livecd-rootfs (2.722) UNRELEASED; urgency=medium
* Simplify how getty services are customized / overridden in the live server
installer environment. (LP: #1888497)
-- Michael Hudson-Doyle <michael.hudson@ubuntu.com> Wed, 21 Apr 2021 11:37:17 +1200
-- Michael Hudson-Doyle <michael.hudson@ubuntu.com> Thu, 06 May 2021 10:27:51 +1200
livecd-rootfs (2.721) impish; urgency=medium
[ Dimitri John Ledkov ]
* 999-cpc-fixes: enable more code on grub2 armhf & arm64 (LP: #1925780)
* Add support for generic preinstalled images. LP: #1923832
* Change iptables calls, to query rules and quickly check that
connectivity works after transparent proxy has been installed. LP:
#1917920
* magic-proxy: replace http.client with urllib calls
* buildd: Re-enable merged /usr by default, as it is now required
[ Michael Hudson-Doyle ]
* remove the workaround for bug #1893818
[ Łukasz 'sil2100' Zemczak ]
* Start using the ubuntu-server-raspi seeds for preinstalled raspi server
images, similarly to how it's done for desktop-preinstalled images.
* Drop the unused imx6 support.
-- Dimitri John Ledkov <xnox@ubuntu.com> Wed, 05 May 2021 19:08:21 +0100
livecd-rootfs (2.720) impish; urgency=medium
[ Thomas Bechtold ]
* Add a new ubuntu-oci project that contains the customizations currently
performed downstream for the official Ubuntu images on dockerhub.
-- Michael Hudson-Doyle <michael.hudson@ubuntu.com> Fri, 30 Apr 2021 11:48:32 +1200
livecd-rootfs (2.719) hirsute; urgency=medium

@ -35,6 +35,18 @@ run_iptables () {
kver="${kver#*.}"
kver_minor="${kver%%.*}"
# LP: #1917920
# I'm seeing issues after iptables got upgraded from 1.8.5 to
# 1.8.7 Somehow installing our nat rule doesn't get activated, and
# no networking is happening at all.
# But somehow calling both iptables -S makes things start working.
# Maybe no default chains are installed in our network namespace?!
# Or 1.8.7 is somehow broken?
iptables -v -t nat -S
iptables-legacy -v -t nat -S
if [ "$kver_major" -lt 4 ] || \
([ "$kver_major" = 4 ] && [ "$kver_minor" -lt 15 ]); then
iptables-legacy "$@"
@ -52,10 +64,11 @@ if [ -n "$REPO_SNAPSHOT_STAMP" ]; then
apt-get -qyy install iptables
# Redirect all outgoing traffic to port 80 to proxy instead.
run_iptables -t nat -A OUTPUT -p tcp --dport 80 \
run_iptables -v -t nat -A OUTPUT -p tcp --dport 80 \
-m owner ! --uid-owner daemon -j REDIRECT --to 8080
# Run proxy as "daemon" to avoid infinite loop.
LB_PARENT_MIRROR_BOOTSTRAP=$LB_PARENT_MIRROR_BOOTSTRAP \
/usr/share/livecd-rootfs/magic-proxy \
--address="127.0.0.1" \
--port=8080 \
@ -65,6 +78,9 @@ if [ -n "$REPO_SNAPSHOT_STAMP" ]; then
--pid-file=config/magic-proxy.pid \
--background \
--setsid
# Quick check that magic proxy & iptables chains are working
timeout 3m apt-get update
fi
# Link output files somewhere launchpad-buildd will be able to find them.
@ -245,7 +261,7 @@ if [ "$(dpkg-divert --truename /usr/bin/man)" = "/usr/bin/man.REAL" ]; then
fi
EOF
if [ "$PROJECT" != "ubuntu-base" ]; then
if [ "$PROJECT" != "ubuntu-base" ] && [ "$PROJECT" != "ubuntu-oci" ]; then
# ubuntu-minimal is too much for a docker container (it contains
# systemd and other things)
cat >> chroot/usr/local/sbin/unminimize <<'EOF'
@ -441,6 +457,10 @@ serial: $BUILDSTAMP
EOF
fi
if [ "$PROJECT" = "ubuntu-oci" ]; then
configure_oci chroot
fi
configure_network_manager
echo "===== Checking size of /usr/share/doc ====="

@ -281,7 +281,7 @@ if [ -z "${IMAGEFORMAT:-}" ]; then
case $PROJECT:${SUBPROJECT:-} in
ubuntu-cpc:*|ubuntu:desktop-preinstalled)
case $SUBARCH in
raspi|imx6)
raspi)
IMAGEFORMAT=ubuntu-image
;;
*)
@ -295,6 +295,19 @@ if [ -z "${IMAGEFORMAT:-}" ]; then
esac
fi
# Configure preinstalled ubuntu-cpc images with included password
# one also must request disk1-img-xz image format
if [ "$IMAGEFORMAT" = "ext4" ] && [ "$PROJECT" = "ubuntu-cpc" ]; then
case $ARCH:$SUBARCH in
armhf:raspi2|riscv64:sifive_*|*:generic)
IMAGE_HAS_HARDCODED_PASSWORD=1
if [ -z "${IMAGE_TARGETS:-}" ]; then
export IMAGE_TARGETS="disk1-img-xz"
fi
;;
esac
fi
skip_lb_stage() {
STAGE="$1"
mkdir -p .build
@ -339,8 +352,6 @@ case $IMAGEFORMAT in
MODEL=pi3-arm64 ;;
armhf+cm3)
MODEL=cm3 ;;
armhf+imx6)
MODEL=nitrogen6x ;;
*)
echo "Model $ARCH+${SUBARCH:-} unknown to livecd-rootfs" >&2
exit 1
@ -514,7 +525,7 @@ if [ "$PREINSTALLED" = "true" ]; then
ubuntu-server)
add_package live oem-config-debconf ubiquity-frontend-debconf
;;
ubuntu-core|ubuntu-base|base|ubuntu-cpc)
ubuntu-core|ubuntu-base|ubuntu-oci|base|ubuntu-cpc)
;;
ubuntu)
add_package live oem-config-gtk ubiquity-frontend-gtk
@ -796,6 +807,10 @@ case $PROJECT in
OPTS="${OPTS:+$OPTS }--bootstrap-flavour=minimal"
;;
ubuntu-oci)
OPTS="${OPTS:+$OPTS }--bootstrap-flavour=minimal"
;;
ubuntu-cpc)
KERNEL_FLAVOURS=virtual
@ -814,15 +829,26 @@ case $PROJECT in
BINARY_REMOVE_LINUX=false
OPTS="${OPTS:+$OPTS }--initramfs=none"
case $ARCH in
armhf)
case "$ARCH+${SUBARCH:-}" in
*+raspi)
add_task install ubuntu-server-raspi
;;
armhf*)
KERNEL_FLAVOURS=generic-lpae
add_package install flash-kernel
;;
arm64)
arm64*)
add_package install flash-kernel
if [ "${SUBARCH:-}" = "generic" ]; then
KERNEL_FLAVOURS=generic
fi
;;
riscv64)
amd64)
if [ "${SUBARCH:-}" = "generic" ]; then
KERNEL_FLAVOURS=generic
fi
;;
riscv64*)
if [ -n "$SUBARCH" ]; then
KERNEL_FLAVOURS=generic
fi
@ -940,29 +966,21 @@ case $PROJECT in
;;
esac
case $ARCH in
armhf|arm64)
KERNEL_FLAVOURS="${SUBARCH:-$KERNEL_FLAVOURS}"
case $SUBARCH in
raspi)
# Generic Raspberry Pi images
case "$ARCH+${SUBARCH:-}" in
arm*+raspi)
# Common configuration for all Raspberry Pi image variants (server,
# desktop etc.)
KERNEL_FLAVOURS="$SUBARCH"
COMPONENTS='main restricted universe multiverse'
add_package install linux-firmware-raspi2 pi-bluetooth u-boot-rpi flash-kernel u-boot-tools wpasupplicant ubuntu-raspi-settings
BINARY_REMOVE_LINUX=false
;;
imx6)
COMPONENTS='main restricted universe multiverse'
KERNEL_FLAVOURS=generic
add_package install flash-kernel u-boot-tools wpasupplicant
# Most Pi-specific package installation is handled via the seeds in the
# per-project/subproject cases above
add_package install linux-firmware-raspi2 pi-bluetooth u-boot-rpi u-boot-tool
BINARY_REMOVE_LINUX=false
;;
esac
;;
esac
case $PROJECT:${SUBPROJECT:-} in
ubuntu-server:*|ubuntu-base:*)
ubuntu-server:*|ubuntu-base:*|ubuntu-oci:*)
OPTS="${OPTS:+$OPTS }--linux-packages=none --initramfs=none"
KERNEL_FLAVOURS=none
BINARY_REMOVE_LINUX=false
@ -1028,8 +1046,15 @@ echo "BUILDSTAMP=\"$NOW\"" >> config/binary
echo "SUBPROJECT=\"${SUBPROJECT:-}\"" >> config/binary
echo "LB_DISTRIBUTION=\"$SUITE\"" >> config/binary
if [ "${IMAGE_HAS_HARDCODED_PASSWORD:-}" = "1" ]; then
echo IMAGE_HAS_HARDCODED_PASSWORD=1 >> config/binary
if [ -n "${IMAGE_TARGETS:-}" ]; then
echo "IMAGE_TARGETS=\"${IMAGE_TARGETS:-}\"" >> config/binary
fi
fi
case $PROJECT in
ubuntu-cpc|ubuntu-core|ubuntu-base|base)
ubuntu-cpc|ubuntu-core|ubuntu-base|ubuntu-oci|base)
# ubuntu-cpc gets this added in 025-create-groups.chroot, and we do
# not want this group in projects that are effectively just chroots
;;
@ -1225,11 +1250,6 @@ esac
case $SUBPROJECT in
buildd)
cp -af /usr/share/livecd-rootfs/live-build/buildd/* config/
# Disable merged /usr to avoid building packages with
# hardcoded paths that assume it.
echo 'DEBOOTSTRAP_OPTIONS="$DEBOOTSTRAP_OPTIONS --no-merged-usr"' \
>> config/common
;;
esac

@ -833,7 +833,7 @@ clean_debian_chroot() {
rm -f chroot/var/cache/debconf/*-old chroot/var/lib/dpkg/*-old
Chroot chroot apt clean
# For the docker images we remove even more stuff.
if [ "${PROJECT}:${SUBPROJECT:-}" = "ubuntu-base:minimized" ]; then
if [ "${PROJECT}:${SUBPROJECT:-}" = "ubuntu-base:minimized" ] || [ "${PROJECT}:${SUBPROJECT:-}" = "ubuntu-oci:minimized" ]; then
# Remove apt lists (that are currently removed downstream
# anyway)
rm -rf chroot/var/lib/apt/lists/*
@ -906,6 +906,62 @@ EOF
fi
}
configure_oci() {
    # Configure a chroot to be an OCI/docker container.
    # These changes are taken from the current Dockerfile modifications done
    # at https://github.com/tianon/docker-brew-ubuntu-core/blob/master/update.sh
    local chroot=$1

    echo "==== Configuring OCI ===="

    # Prevent daemons from being started inside the chroot during installs.
    # https://github.com/docker/docker/blob/9a9fc01af8fb5d98b8eec0740716226fadb3735c/contrib/mkimage/debootstrap#L40-L48
    echo '#!/bin/sh' > ${chroot}/usr/sbin/policy-rc.d
    echo 'exit 101' >> ${chroot}/usr/sbin/policy-rc.d
    Chroot ${chroot} "chmod +x /usr/sbin/policy-rc.d"

    # Divert initctl so upstart jobs are no-ops inside the container.
    # https://github.com/docker/docker/blob/9a9fc01af8fb5d98b8eec0740716226fadb3735c/contrib/mkimage/debootstrap#L54-L56
    Chroot ${chroot} "dpkg-divert --local --rename --add /sbin/initctl"
    cp -a ${chroot}/usr/sbin/policy-rc.d ${chroot}/sbin/initctl
    sed -i 's/^exit.*/exit 0/' ${chroot}/sbin/initctl

    # https://github.com/docker/docker/blob/9a9fc01af8fb5d98b8eec0740716226fadb3735c/contrib/mkimage/debootstrap#L71-L78
    echo 'force-unsafe-io' > ${chroot}/etc/dpkg/dpkg.cfg.d/docker-apt-speedup

    # Keep apt caches out of the image to shrink it.
    # https://github.com/docker/docker/blob/9a9fc01af8fb5d98b8eec0740716226fadb3735c/contrib/mkimage/debootstrap#L85-L105
    echo 'DPkg::Post-Invoke { "rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true"; };' > ${chroot}/etc/apt/apt.conf.d/docker-clean
    echo 'APT::Update::Post-Invoke { "rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true"; };' >> ${chroot}/etc/apt/apt.conf.d/docker-clean
    echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";' >> ${chroot}/etc/apt/apt.conf.d/docker-clean

    # https://github.com/docker/docker/blob/9a9fc01af8fb5d98b8eec0740716226fadb3735c/contrib/mkimage/debootstrap#L109-L115
    echo 'Acquire::Languages "none";' > ${chroot}/etc/apt/apt.conf.d/docker-no-languages

    # https://github.com/docker/docker/blob/9a9fc01af8fb5d98b8eec0740716226fadb3735c/contrib/mkimage/debootstrap#L118-L130
    echo 'Acquire::GzipIndexes "true"; Acquire::CompressionTypes::Order:: "gz";' > ${chroot}/etc/apt/apt.conf.d/docker-gzip-indexes

    # https://github.com/docker/docker/blob/9a9fc01af8fb5d98b8eec0740716226fadb3735c/contrib/mkimage/debootstrap#L134-L151
    echo 'Apt::AutoRemove::SuggestsImportant "false";' > ${chroot}/etc/apt/apt.conf.d/docker-autoremove-suggests

    # delete all the apt list files since they're big and get stale quickly
    rm -rf ${chroot}/var/lib/apt/lists/*

    # verify that the APT lists files do not exist
    # FIX: was `Chroot chroot ...` — the literal path "chroot" instead of
    # the ${chroot} parameter; it only worked when callers passed "chroot".
    Chroot ${chroot} "apt-get indextargets" > indextargets.out
    [ ! -s indextargets.out ]
    rm indextargets.out

    # (see https://bugs.launchpad.net/cloud-images/+bug/1699913)
    # make systemd-detect-virt return "docker"
    # See: https://github.com/systemd/systemd/blob/aa0c34279ee40bce2f9681b496922dedbadfca19/src/basic/virt.c#L434
    mkdir -p ${chroot}/run/systemd
    echo 'docker' > ${chroot}/run/systemd/container

    rm -rf ${chroot}/var/cache/apt/*.bin

    echo "==== Configuring OCI done ===="
}
configure_network_manager() {
# If the image pre-installs network-manager, let it manage all devices by
# default. Installing NM on an existing system only manages wifi and wwan via
@ -1004,6 +1060,42 @@ END
umount ${mountpoint}
}
# Seed a preinstalled image with a NoCloud cloud-init datasource that sets a
# hardcoded ubuntu:ubuntu password, so the image is usable on first boot even
# without any metadata service.  Guarded so it can only run for builds that
# explicitly opted in (IMAGE_HAS_HARDCODED_PASSWORD + disk1-img-xz target).
# $1: mountpoint of the image's root filesystem.
setup_cinocloud() {
# Refuse to plant a well-known password unless the build was explicitly
# configured for it — this must never leak into regular images.
if [ "${IMAGE_HAS_HARDCODED_PASSWORD:-}" != "1" ] || [ "${IMAGE_TARGETS:-}" != "disk1-img-xz" ]; then
echo "unexpected attempt to add a hardcoded password to an image"
exit 1
fi
local mountpoint=$1
mkdir -p $mountpoint/var/lib/cloud/seed/nocloud-net
# Random instance-id so each built image looks like a distinct instance.
cat <<EOF >$mountpoint/var/lib/cloud/seed/nocloud-net/meta-data
instance-id: iid-$(openssl rand -hex 8)
EOF
# Hardcoded ubuntu:ubuntu credentials, expired so first login forces a change.
cat <<EOF >$mountpoint/var/lib/cloud/seed/nocloud-net/user-data
#cloud-config
chpasswd:
expire: True
list:
- ubuntu:ubuntu
ssh_pwauth: True
EOF
# DHCP on any en*/eth* interface; cloud-init may overwrite this later.
cat <<EOF >$mountpoint/var/lib/cloud/seed/nocloud-net/network-config
# This is the initial network config.
# It can be overwritten by cloud-init.
version: 2
ethernets:
zz-all-en:
match:
name: "en*"
dhcp4: true
optional: true
zz-all-eth:
match:
name: "eth*"
dhcp4: true
optional: true
EOF
}
replace_kernel () {
mountpoint=$1
new_kernel=$2

@ -21,9 +21,12 @@ case ${PROJECT:-} in
;;
esac
if [ "$ARCH" = "riscv64" ] && [ -n "${SUBARCH:-}" ]; then
# Change image size for preinstalled generic images & all preinstalled riscv64 images
if [ -n "${SUBARCH:-}" ]; then
if [ "${SUBARCH:-}" = "generic" ] || [ "$ARCH" = "riscv64" ]; then
IMAGE_SIZE=3758096384 # bump to 3.5G (3584*1024**2), due to linux-generic instead of virtual
fi
fi
. config/binary
@ -34,10 +37,20 @@ create_partitions() {
sgdisk "${disk_image}" --zap-all
case $ARCH in
arm64|armhf)
if [ "${SUBARCH:-}" = "generic" ]; then
sgdisk "${disk_image}" \
--new=15:0:204800 \
--typecode=15:ef00 \
--attributes=15:set:2 \
--new=14::+4M \
--change-name=14:CIDATA \
--new=1:
else
sgdisk "${disk_image}" \
--new=15:0:204800 \
--typecode=15:ef00 \
--new=1:
fi
;;
riscv64)
# same as arm64/armhf, but set bit 2 legacy bios bootable
@ -72,6 +85,17 @@ create_partitions() {
fi
;;
amd64)
if [ "${SUBARCH:-}" = "generic" ]; then
sgdisk "${disk_image}" \
--new=14::+4M \
--typecode=14:ef02 \
--attributes=14:set:2 \
--new=15::+106M \
--typecode=15:ef00 \
--new=13::+4M \
--change-name=13:CIDATA \
--new=1::
else
sgdisk "${disk_image}" \
--new=14::+4M \
--new=15::+106M \
@ -79,6 +103,7 @@ create_partitions() {
sgdisk "${disk_image}" \
-t 14:ef02 \
-t 15:ef00
fi
;;
esac
sgdisk "${disk_image}" \
@ -119,14 +144,38 @@ install_grub() {
arm64)
chroot mountpoint apt-get -qqy install --no-install-recommends shim-signed grub-efi-arm64-signed
efi_target=arm64-efi
if [ "${SUBARCH:-}" = "generic" ]; then
# Server preinstalled image
# Setup cidata sample data & nocloud fallback
# Allows login on first boot with or without metadata
cidata_dev="/dev/mapper${loop_device///dev/}p14"
setup_cidata "${cidata_dev}"
setup_cinocloud mountpoint
fi
;;
armhf)
chroot mountpoint apt-get -qqy install --no-install-recommends grub-efi-arm grub-efi-arm-bin
efi_target=arm-efi
if [ "${SUBARCH:-}" = "generic" ]; then
# Server preinstalled image
# Setup cidata sample data & nocloud fallback
# Allows login on first boot with or without metadata
cidata_dev="/dev/mapper${loop_device///dev/}p14"
setup_cidata "${cidata_dev}"
setup_cinocloud mountpoint
fi
;;
amd64)
chroot mountpoint apt-get install -qqy grub-pc shim-signed
efi_target=x86_64-efi
if [ "${SUBARCH:-}" = "generic" ]; then
# Server preinstalled image
# Setup cidata sample data & nocloud fallback
# Allows login on first boot with or without metadata
cidata_dev="/dev/mapper${loop_device///dev/}p13"
setup_cidata "${cidata_dev}"
setup_cinocloud mountpoint
fi
;;
riscv64)
# TODO grub-efi-riscv64 does not exist yet on riscv64
@ -153,34 +202,7 @@ install_grub() {
# Provide stock nocloud datasource
# Allow interactive login on baremetal SiFive board,
# without a cloud datasource.
mkdir -p mountpoint/var/lib/cloud/seed/nocloud-net
cat <<EOF >mountpoint/var/lib/cloud/seed/nocloud-net/meta-data
instance-id: iid-$(openssl rand -hex 8)
EOF
cat <<EOF >mountpoint/var/lib/cloud/seed/nocloud-net/user-data
#cloud-config
chpasswd:
expire: True
list:
- ubuntu:ubuntu
ssh_pwauth: True
EOF
cat <<EOF >mountpoint/var/lib/cloud/seed/nocloud-net/network-config
# This is the initial network config.
# It can be overwritten by cloud-init.
version: 2
ethernets:
zz-all-en:
match:
name: "en*"
dhcp4: true
optional: true
zz-all-eth:
match:
name: "eth*"
dhcp4: true
optional: true
EOF
setup_cinocloud mountpoint
fi
## TODO remove below once we have grub-efi-riscv64
rm mountpoint/tmp/device.map

@ -31,6 +31,10 @@ case $ARCH:$SUBARCH in
echo "We only create EFI images for $ARCH."
exit 0
;;
amd64:generic)
echo "We only create EFI images for $SUBARCH."
exit 0
;;
*)
;;
esac

@ -0,0 +1,17 @@
#!/bin/bash -ex
# Binary hook: compress the preinstalled disk image into
# livecd.ubuntu-cpc.disk1.img.xz.  Only valid for builds that were
# configured with the hardcoded-password NoCloud seed, since the
# resulting image is directly bootable with known credentials.

. config/functions
. config/binary

if [ "${IMAGE_HAS_HARDCODED_PASSWORD:-}" != "1" ]; then
    echo ".disk1.img.xz preinstalled image requested to be built"
    echo "but the build is not generated with hardcoded password"
    # FIX: spelling of the error message ("missconfigured").
    echo "the build is misconfigured"
    exit 1
fi

# Prefer the UEFI-capable image when both exist.
if [ -f binary/boot/disk-uefi.ext4 ]; then
    xz -T4 -c binary/boot/disk-uefi.ext4 > livecd.ubuntu-cpc.disk1.img.xz
elif [ -f binary/boot/disk.ext4 ]; then
    xz -T4 -c binary/boot/disk.ext4 > livecd.ubuntu-cpc.disk1.img.xz
fi

@ -1,18 +1,5 @@
#!/bin/bash -ex
case $ARCH:$SUBARCH in
# Not sure if any other cloud images use subarch for something that
# should take qcow2 format, so only skipping this on raspi2 for now.
armhf:raspi2)
xz -T4 -c binary/boot/disk.ext4 > livecd.ubuntu-cpc.disk1.img.xz
exit 0
;;
riscv64:hifive|riscv64:sifive_*)
xz -T4 -c binary/boot/disk-uefi.ext4 > livecd.ubuntu-cpc.disk1.img.xz
exit 0
;;
esac
. config/functions
if [ -f binary/boot/disk-uefi.ext4 ]; then

@ -0,0 +1,3 @@
depends disk-image
base/disk1-img-xz.binary
provides livecd.ubuntu-cpc.disk1.img.xz

@ -117,7 +117,7 @@ fi
case $arch in
# ARM, ppc, riscv64 and s390x images are special
armhf|arm64|powerpc|ppc64el|s390x|riscv64)
powerpc|ppc64el|s390x|riscv64)
exit 0
;;
esac
@ -208,17 +208,20 @@ GRUB_CMDLINE_LINUX_DEFAULT="console=tty1 console=ttyS0"
# Set the grub console type
GRUB_TERMINAL=console
EOF
_xchroot "${rootd}" update-grub2
# since this is a disk image, we technically don't need to install all the
# grub modules, as the image itself is not bootable. This makes for a small
# disk image
_xchroot "${rootd}" update-grub
# Sometimes grub is not installed yet (i.e. uefi arm images). Overall
# it is odd that we run this out of chroot hooks, instead of binary
# hooks. I wonder if we can move all of this to disk-image hooks.
if [ -x "${rootd}/usr/sbin/update-grub" ]; then
_xchroot "${rootd}" update-grub
fi
# reconfigure grub so that upgrades to grub-pc do not force a debconf config
# changed prompt (LP: #1009294). This re-runs update-grub
_xchroot "${rootd}" env DEBIAN_FRONTEND=noninteractive \
if [ -n "`_xchroot "${rootd}" dpkg-query -W grub-pc 2>/dev/null`" ]; then
_xchroot "${rootd}" env DEBIAN_FRONTEND=noninteractive \
dpkg-reconfigure grub-pc
fi
grub2cfg="${rootd}/boot/grub/grub.cfg"
[ ! -f "${grub2cfg}" ] ||

@ -75,11 +75,6 @@ chroot $INSTALLER_ROOT apt-get clean
# "helpful" casper script that mounts any swap partitions it finds.
rm -f $INSTALLER_ROOT/usr/share/initramfs-tools/scripts/casper-bottom/*swap
# For bug #1893818 "Several blockprobe errors if trying to install the
# groovy daily live on LPAR", remove a udev rule that removes
# partition nodes for multipathed disks that breaks the installer.
rm -f $INSTALLER_ROOT/lib/udev/rules.d/68-del-part-nodes.rules
# Preseed subiquity into installer layer
snap_prepare $INSTALLER_ROOT
snap_preseed $INSTALLER_ROOT subiquity/classic

@ -68,6 +68,45 @@ class LPInReleaseCacheError(LPInReleaseBaseError):
class LPInReleaseProxyError(LPInReleaseBaseError):
pass
# True when the build is running inside Launchpad's build farm (the parent
# bootstrap mirror points at ftpmaster.internal).
IN_LP = "http://ftpmaster.internal/ubuntu" in os.environ.get(
    "LB_PARENT_MIRROR_BOOTSTRAP", "")


# We cannot proxy & rewrite https requests, so apt talks to us over plain
# http.  Outside of Launchpad, private-PPA traffic must be upgraded back to
# https; this helper rewrites URLs accordingly.
def get_uri(host, path):
    """Return the URL to fetch *path* from *host*, rewriting private PPAs."""
    if host not in ("private-ppa.launchpad.net", "private-ppa.buildd"):
        # TODO add split mirror handling for ftpmaster.internal =>
        # (ports|archive).ubuntu.com
        return "http://" + host + path
    if IN_LP:
        base = "http://private-ppa.buildd"
    else:
        base = "https://private-ppa.launchpad.net"
    return base + path
def initialize_auth():
    """Install a global urllib opener holding private-PPA credentials.

    Scans /etc/apt/sources.list for URLs carrying embedded basic-auth
    credentials (http://user:token@private-ppa...) and registers each
    user/token pair for both the external (private-ppa.launchpad.net)
    and the Launchpad-internal (private-ppa.buildd) host, so proxied
    requests authenticate whichever mirror get_uri() selects.
    """
    handler = urllib.request.HTTPBasicAuthHandler()
    with open('/etc/apt/sources.list') as sources:
        for line in sources:
            for token in line.split():
                if not token.startswith('http'):
                    continue
                url = urllib.parse.urlparse(token)
                if not url.username:
                    continue
                if url.hostname not in ("private-ppa.launchpad.net",
                                        "private-ppa.buildd"):
                    continue
                # Register the same credentials under both host spellings.
                for base in ("https://private-ppa.launchpad.net",
                             "http://private-ppa.buildd"):
                    handler.add_password("Token Required", base + url.path,
                                         url.username, url.password)
                print("add password for", url.path)
    urllib.request.install_opener(urllib.request.build_opener(handler))
initialize_auth()
class InRelease:
"""This class represents an InRelease file."""
@ -97,7 +136,8 @@ class InRelease:
this is set explicitly to correspond to the Last-Modified header spat
out by the Web server.
"""
self.mirror = mirror
parsed = urllib.parse.urlparse(mirror)
self.mirror = get_uri(parsed.hostname, parsed.path)
self.suite = suite
self.data = data
self.dict = {}
@ -363,7 +403,7 @@ class LPInReleaseCache:
suite."""
with self._lock:
url_obj = urllib.parse.urlparse(mirror)
address = url_obj.hostname + url_obj.path.rstrip("/")
address = url_obj.scheme + url_obj.hostname + url_obj.path.rstrip("/")
inrel_by_hash = self._data\
.get(address, {})\
@ -403,7 +443,8 @@ class LPInReleaseIndex:
which case all look-ups will first go to the cache and only cache
misses will result in requests to the Web server.
"""
self._mirror = mirror
parsed = urllib.parse.urlparse(mirror)
self._mirror = get_uri(parsed.hostname, parsed.path)
self._suite = suite
self._cache = cache
@ -528,7 +569,8 @@ class LPInReleaseIndex:
return [inrel.hash for inrel in cache_entry]
try:
with urllib.request.urlopen(self._base_url) as response:
request=urllib.request.Request(self._base_url)
with urllib.request.urlopen(request) as response:
content_encoding = self._guess_content_encoding_for_response(
response)
@ -744,6 +786,23 @@ class ProxyingHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
"""Process a GET request."""
self.__get_request()
def sanitize_requestline(self):
    """Strip embedded credentials out of URLs in the HTTP request line.

    The request line may contain full URIs with user:password@ in the
    netloc; rebuild each URL from its hostname only, so credentials
    never show up in logs or forwarded requests.
    """
    def _scrub(word):
        if not word.startswith('http'):
            return word
        parts = urllib.parse.urlparse(word)
        cleaned = urllib.parse.ParseResult(
            parts.scheme,
            parts.hostname,  # not netloc, to sanitize username/password
            parts.path,
            parts.params,
            parts.query,
            parts.fragment)
        return urllib.parse.urlunparse(cleaned)

    self.requestline = ' '.join(
        _scrub(word) for word in self.requestline.split())
def __get_request(self, verb="GET"):
"""Pass all requests on to the destination server 1:1 except when the
target is an InRelease file or a resource listed in an InRelease files.
@ -756,15 +815,18 @@ class ProxyingHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
happening here, the client does not know that what it receives is not
exactly what it requested."""
host, path = self.__get_host_path()
uri = self.headers.get("host") + self.path
parsed = urllib.parse.urlparse(uri)
self.sanitize_requestline()
m = re.match(
r"^(?P<base>.*?)/dists/(?P<suite>[^/]+)/(?P<target>.*)$",
path
parsed.path
)
if m:
mirror = "http://" + host + m.group("base")
mirror = get_uri(parsed.hostname, m.group("base"))
base = m.group("base")
suite = m.group("suite")
target = m.group("target")
@ -775,27 +837,17 @@ class ProxyingHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
self.server.snapshot_stamp)
if inrelease is None:
self.__send_error(404, "No InRelease file found for given "
self.log_message(
"InRelease not found for {}/{}".format(parsed.hostname, parsed.path))
self.send_error(404, "No InRelease file found for given "
"mirror, suite and timestamp.")
return
if target == "InRelease":
# If target is InRelease, send back contents directly.
data = inrelease.data.encode("utf-8")
self.log_message(
"Inject InRelease '{}'".format(inrelease.hash))
self.send_response(200)
self.send_header("Content-Length", len(data))
self.end_headers()
if verb == "GET":
self.wfile.write(data)
hash_ = None
return
if target == "InRelease":
hash_ = inrelease.hash
else:
# If target hash is listed, then redirect to by-hash URL.
hash_ = inrelease.get_hash_for(target)
if hash_:
@ -804,21 +856,30 @@ class ProxyingHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
target_path = target.rsplit("/", 1)[0]
path = "{}/dists/{}/{}/by-hash/SHA256/{}"\
.format(base, suite, target_path, hash_)
uri = "{}/dists/{}/by-hash/SHA256/{}"\
.format(mirror, suite, hash_)
else:
uri = get_uri(parsed.hostname, parsed.path)
## use requests such that authentication via password database happens
## reuse all the headers that we got asked to provide
try:
client = http.client.HTTPConnection(host)
client.request(verb, path)
except Exception as e:
self.log_error("Failed to retrieve http://{}{}: {}"
.format(host, path, str(e)))
return
with urllib.request.urlopen(
urllib.request.Request(
uri,
method=verb,
headers=self.headers)) as response:
self.__send_response(response)
except urllib.error.HTTPError as e:
if e.code not in (304,):
self.log_message(
"urlopen() failed for {} with {}".format(uri, e.reason))
self.__send_response(e)
except urllib.error.URLError as e:
self.log_message(
"urlopen() failed for {} with {}".format(uri, e.reason))
self.send_error(501, e.reason)
try:
self.__send_response(client.getresponse())
except Exception as e:
self.log_error("Error delivering response: {}".format(str(e)))
def __get_host_path(self):
"""Figure out the host to contact and the path of the resource that is
@ -831,21 +892,27 @@ class ProxyingHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
def __send_response(self, response):
    """Pass on upstream response headers and body to the client.

    *response* may be an http.client-style response, a urllib response,
    or a urllib.error.HTTPError; these expose the status code and the
    headers under different attribute names, so probe each spelling.
    """
    if hasattr(response, "status"):
        status = response.status
    # FIX: was "hassattr" — a NameError at runtime whenever the response
    # object lacked a .status attribute.
    elif hasattr(response, "code"):
        status = response.code
    elif hasattr(response, "getstatus"):
        status = response.getstatus()

    if hasattr(response, "headers"):
        headers = response.headers
    elif hasattr(response, "info"):
        headers = response.info()

    self.send_response(status)
    for name, value in headers.items():
        self.send_header(name, value)
    self.end_headers()

    # HTTPError objects are file-like too, so copy a body whenever the
    # response exposes read().
    if hasattr(response, "read"):
        shutil.copyfileobj(response, self.wfile)
def __send_error(self, status, message):
    """Return an HTTP error status and a message in the response body."""
    body = message.encode("utf-8")
    self.send_response(status)
    self.send_header("Content-Type", "text/plain; charset=utf-8")
    self.end_headers()
    self.wfile.write(body)
class MagicHTTPProxy(socketserver.ThreadingMixIn, http.server.HTTPServer):
"""Tiny HTTP server using ProxyingHTTPRequestHandler instances to provide

Loading…
Cancel
Save