diff --git a/debian/changelog b/debian/changelog
index e7de211d..bc0015de 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,24 @@
+livecd-rootfs (2.721) impish; urgency=medium
+
+  [ Dimitri John Ledkov ]
+  * 999-cpc-fixes: enable more code on grub2 armhf & arm64 (LP: #1925780)
+  * Add support for generic preinstalled images. LP: #1923832
+  * Change iptables calls, to query rules and quickly check that
+    connectivity works after transparent proxy has been installed. LP:
+    #1917920
+  * magic-proxy: replace http.client with urllib calls
+  * buildd: Re-enable merged /usr by default, as it is now required
+
+  [ Michael Hudson-Doyle ]
+  * remove the workaround for bug #1893818
+
+  [ Ɓukasz 'sil2100' Zemczak ]
+  * Start using the ubuntu-server-raspi seeds for preinstalled raspi server
+    images, similarly to how it's done for desktop-preinstalled images.
+  * Drop the unused imx6 support.
+
+ -- Dimitri John Ledkov <xnox@ubuntu.com>  Wed, 05 May 2021 19:08:21 +0100
+
 livecd-rootfs (2.720) impish; urgency=medium
 
   [ Thomas Bechtold ]
diff --git a/live-build/auto/build b/live-build/auto/build
index f1b7bc15..a20f26d2 100755
--- a/live-build/auto/build
+++ b/live-build/auto/build
@@ -35,6 +35,18 @@ run_iptables () {
 
     kver="${kver#*.}"
     kver_minor="${kver%%.*}"
+
+    # LP: #1917920
+    # I'm seeing issues after iptables got upgraded from 1.8.5 to
+    # 1.8.7. Somehow installing our nat rule doesn't get activated, and
+    # no networking is happening at all.
+
+    # But somehow calling both iptables -S makes things start working.
+    # Maybe no default chains are installed in our network namespace?!
+    # Or 1.8.7 is somehow broken?
+    iptables -v -t nat -S
+    iptables-legacy -v -t nat -S
+
     if [ "$kver_major" -lt 4 ] || \
        ([ "$kver_major" = 4 ] && [ "$kver_minor" -lt 15 ]); then
         iptables-legacy "$@"
@@ -52,10 +64,11 @@ if [ -n "$REPO_SNAPSHOT_STAMP" ]; then
     apt-get -qyy install iptables
 
     # Redirect all outgoing traffic to port 80 to proxy instead.
-    run_iptables -t nat -A OUTPUT -p tcp --dport 80 \
+    run_iptables -v -t nat -A OUTPUT -p tcp --dport 80 \
         -m owner ! --uid-owner daemon -j REDIRECT --to 8080
 
     # Run proxy as "daemon" to avoid infinite loop.
+    LB_PARENT_MIRROR_BOOTSTRAP=$LB_PARENT_MIRROR_BOOTSTRAP \
     /usr/share/livecd-rootfs/magic-proxy \
         --address="127.0.0.1" \
         --port=8080 \
@@ -65,6 +78,9 @@ if [ -n "$REPO_SNAPSHOT_STAMP" ]; then
         --pid-file=config/magic-proxy.pid \
         --background \
         --setsid
+
+    # Quick check that magic proxy & iptables chains are working
+    timeout 3m apt-get update
 fi
 
 # Link output files somewhere launchpad-buildd will be able to find them.
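The auto/build change above queries the nat tables (`iptables -v -t nat -S`), installs an owner-matched REDIRECT rule so that all non-daemon port-80 traffic lands on magic-proxy at 127.0.0.1:8080, and then smoke-tests the result with a bounded `apt-get update`. The sketch below replays that check outside the build environment; it assumes a root shell with iptables available, and the helper names and probe URL are illustrative rather than part of livecd-rootfs.

#!/usr/bin/env python3
"""Illustrative check that the transparent-proxy REDIRECT rule is in place.

Hypothetical helpers: they mirror the `iptables -v -t nat -S` query and the
bounded `apt-get update` smoke test from live-build/auto/build.
"""
import subprocess
import urllib.request


def nat_redirect_present(port=8080):
    # Same query the build script issues; -S lists rules in iptables-save syntax.
    out = subprocess.run(
        ["iptables", "-t", "nat", "-S", "OUTPUT"],
        check=True, capture_output=True, text=True,
    ).stdout
    return any("REDIRECT" in line and str(port) in line for line in out.splitlines())


def proxy_answers(url="http://archive.ubuntu.com/ubuntu/dists/"):
    # With the OUTPUT redirect installed, this plain port-80 request is
    # transparently handled by the local magic-proxy instance.
    try:
        with urllib.request.urlopen(url, timeout=30) as resp:
            return resp.status == 200
    except OSError:
        return False


if __name__ == "__main__":
    print("redirect rule present:", nat_redirect_present())
    print("proxy answers:", proxy_answers())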
diff --git a/live-build/auto/config b/live-build/auto/config
index de77478b..0017e799 100755
--- a/live-build/auto/config
+++ b/live-build/auto/config
@@ -281,7 +281,7 @@ if [ -z "${IMAGEFORMAT:-}" ]; then
     case $PROJECT:${SUBPROJECT:-} in
         ubuntu-cpc:*|ubuntu:desktop-preinstalled)
             case $SUBARCH in
-                raspi|imx6)
+                raspi)
                     IMAGEFORMAT=ubuntu-image
                     ;;
                 *)
@@ -295,6 +295,19 @@
     esac
 fi
 
+# Configure preinstalled ubuntu-cpc images with included password;
+# one also must request the disk1-img-xz image format
+if [ "$IMAGEFORMAT" = "ext4" ] && [ "$PROJECT" = "ubuntu-cpc" ]; then
+    case $ARCH:$SUBARCH in
+        armhf:raspi2|riscv64:sifive_*|*:generic)
+            IMAGE_HAS_HARDCODED_PASSWORD=1
+            if [ -z "${IMAGE_TARGETS:-}" ]; then
+                export IMAGE_TARGETS="disk1-img-xz"
+            fi
+            ;;
+    esac
+fi
+
 skip_lb_stage() {
     STAGE="$1"
     mkdir -p .build
@@ -339,8 +352,6 @@ case $IMAGEFORMAT in
             MODEL=pi3-arm64 ;;
         armhf+cm3)
             MODEL=cm3 ;;
-        armhf+imx6)
-            MODEL=nitrogen6x ;;
         *)
             echo "Model $ARCH+${SUBARCH:-} unknown to livecd-rootfs" >&2
             exit 1
@@ -818,15 +829,26 @@
             BINARY_REMOVE_LINUX=false
             OPTS="${OPTS:+$OPTS }--initramfs=none"
 
-            case $ARCH in
-                armhf)
+            case "$ARCH+${SUBARCH:-}" in
+                *+raspi)
+                    add_task install ubuntu-server-raspi
+                    ;;
+                armhf*)
                     KERNEL_FLAVOURS=generic-lpae
                     add_package install flash-kernel
                     ;;
-                arm64)
+                arm64*)
                     add_package install flash-kernel
+                    if [ "${SUBARCH:-}" = "generic" ]; then
+                        KERNEL_FLAVOURS=generic
+                    fi
+                    ;;
+                amd64)
+                    if [ "${SUBARCH:-}" = "generic" ]; then
+                        KERNEL_FLAVOURS=generic
+                    fi
                     ;;
-                riscv64)
+                riscv64*)
                     if [ -n "$SUBARCH" ]; then
                         KERNEL_FLAVOURS=generic
@@ -944,24 +966,16 @@ case $PROJECT in
         ;;
 esac
 
-case $ARCH in
-    armhf|arm64)
-        KERNEL_FLAVOURS="${SUBARCH:-$KERNEL_FLAVOURS}"
-        case $SUBARCH in
-            raspi)
-                # Generic Raspberry Pi images
-                COMPONENTS='main restricted universe multiverse'
-                add_package install linux-firmware-raspi2 pi-bluetooth u-boot-rpi flash-kernel u-boot-tools wpasupplicant ubuntu-raspi-settings
-                BINARY_REMOVE_LINUX=false
-                ;;
-            imx6)
-                COMPONENTS='main restricted universe multiverse'
-                KERNEL_FLAVOURS=generic
-                add_package install flash-kernel u-boot-tools wpasupplicant
-                BINARY_REMOVE_LINUX=false
-                ;;
-
-        esac
+case "$ARCH+${SUBARCH:-}" in
+    arm*+raspi)
+        # Common configuration for all Raspberry Pi image variants (server,
+        # desktop etc.)
+        KERNEL_FLAVOURS="$SUBARCH"
+        COMPONENTS='main restricted universe multiverse'
+        # Most Pi-specific package installation is handled via the seeds in the
+        # per-project/subproject cases above
+        add_package install linux-firmware-raspi2 pi-bluetooth u-boot-rpi u-boot-tools
+        BINARY_REMOVE_LINUX=false
         ;;
 esac
 
@@ -1032,6 +1046,13 @@
 echo "BUILDSTAMP=\"$NOW\"" >> config/binary
 echo "SUBPROJECT=\"${SUBPROJECT:-}\"" >> config/binary
 echo "LB_DISTRIBUTION=\"$SUITE\"" >> config/binary
+if [ "${IMAGE_HAS_HARDCODED_PASSWORD:-}" = "1" ]; then
+    echo IMAGE_HAS_HARDCODED_PASSWORD=1 >> config/binary
+    if [ -n "${IMAGE_TARGETS:-}" ]; then
+        echo "IMAGE_TARGETS=\"${IMAGE_TARGETS:-}\"" >> config/binary
+    fi
+fi
+
 case $PROJECT in
     ubuntu-cpc|ubuntu-core|ubuntu-base|ubuntu-oci|base)
         # ubuntu-cpc gets this added in 025-create-groups.chroot, and we do
@@ -1229,11 +1250,6 @@ esac
 
 case $SUBPROJECT in
     buildd)
         cp -af /usr/share/livecd-rootfs/live-build/buildd/* config/
-
-        # Disable merged /usr to avoid building packages with
-        # hardcoded paths that assume it.
-        echo 'DEBOOTSTRAP_OPTIONS="$DEBOOTSTRAP_OPTIONS --no-merged-usr"' \
-            >> config/common
         ;;
 esac
diff --git a/live-build/functions b/live-build/functions
index 4ad3b15f..d1be603b 100644
--- a/live-build/functions
+++ b/live-build/functions
@@ -1060,6 +1060,42 @@ END
     umount ${mountpoint}
 }
 
+setup_cinocloud() {
+    if [ "${IMAGE_HAS_HARDCODED_PASSWORD:-}" != "1" ] || [ "${IMAGE_TARGETS:-}" != "disk1-img-xz" ]; then
+        echo "unexpected attempt to add a hardcoded password to an image"
+        exit 1
+    fi
+    local mountpoint=$1
+    mkdir -p $mountpoint/var/lib/cloud/seed/nocloud-net
+    cat <<EOF > $mountpoint/var/lib/cloud/seed/nocloud-net/meta-data
+instance-id: iid-$(openssl rand -hex 8)
+EOF
+    cat <<EOF > $mountpoint/var/lib/cloud/seed/nocloud-net/user-data
+#cloud-config
+chpasswd:
+  expire: True
+  list:
+  - ubuntu:ubuntu
+ssh_pwauth: True
+EOF
+    cat <<EOF > $mountpoint/var/lib/cloud/seed/nocloud-net/network-config
+# This is the initial network config.
+# It can be overwritten by cloud-init.
+version: 2
+ethernets:
+  zz-all-en:
+    match:
+      name: "en*"
+    dhcp4: true
+    optional: true
+  zz-all-eth:
+    match:
+      name: "eth*"
+    dhcp4: true
+    optional: true
+EOF
+}
+
 replace_kernel () {
     mountpoint=$1
     new_kernel=$2
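setup_cinocloud() above seeds cloud-init's NoCloud datasource with a hardcoded ubuntu:ubuntu password and DHCP network config, and is guarded so it can only run for builds that explicitly requested disk1-img-xz with IMAGE_HAS_HARDCODED_PASSWORD=1. The Python sketch below writes the same three seed files to show the layout the NoCloud datasource expects; write_nocloud_seed and the /tmp/rootfs path are hypothetical, not part of the shell helper.

#!/usr/bin/env python3
"""Sketch of the NoCloud seed layout that setup_cinocloud() writes."""
import secrets
from pathlib import Path

USER_DATA = """\
#cloud-config
chpasswd:
  expire: True
  list:
  - ubuntu:ubuntu
ssh_pwauth: True
"""

NETWORK_CONFIG = """\
# This is the initial network config.
# It can be overwritten by cloud-init.
version: 2
ethernets:
  zz-all-en:
    match:
      name: "en*"
    dhcp4: true
    optional: true
  zz-all-eth:
    match:
      name: "eth*"
    dhcp4: true
    optional: true
"""


def write_nocloud_seed(mountpoint: str) -> None:
    # cloud-init's NoCloud datasource reads these three files from
    # /var/lib/cloud/seed/nocloud-net inside the target root filesystem.
    seed = Path(mountpoint) / "var/lib/cloud/seed/nocloud-net"
    seed.mkdir(parents=True, exist_ok=True)
    (seed / "meta-data").write_text(f"instance-id: iid-{secrets.token_hex(8)}\n")
    (seed / "user-data").write_text(USER_DATA)
    (seed / "network-config").write_text(NETWORK_CONFIG)


if __name__ == "__main__":
    write_nocloud_seed("/tmp/rootfs")  # e.g. a mounted image root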
diff --git a/live-build/ubuntu-cpc/hooks.d/base/disk-image-uefi.binary b/live-build/ubuntu-cpc/hooks.d/base/disk-image-uefi.binary
index a619299b..c73d0fe2 100755
--- a/live-build/ubuntu-cpc/hooks.d/base/disk-image-uefi.binary
+++ b/live-build/ubuntu-cpc/hooks.d/base/disk-image-uefi.binary
@@ -21,8 +21,11 @@ case ${PROJECT:-} in
         ;;
 esac
 
-if [ "$ARCH" = "riscv64" ] && [ -n "${SUBARCH:-}" ]; then
-    IMAGE_SIZE=3758096384 # bump to 3.5G (3584*1024**2), due to linux-generic instead of virtual
+# Change image size for preinstalled generic images & all preinstalled riscv64 images
+if [ -n "${SUBARCH:-}" ]; then
+    if [ "${SUBARCH:-}" = "generic" ] || [ "$ARCH" = "riscv64" ]; then
+        IMAGE_SIZE=3758096384 # bump to 3.5G (3584*1024**2), due to linux-generic instead of virtual
+    fi
 fi
 
 . config/binary
@@ -34,10 +37,20 @@ create_partitions() {
     sgdisk "${disk_image}" --zap-all
     case $ARCH in
         arm64|armhf)
-            sgdisk "${disk_image}" \
-                --new=15:0:204800 \
-                --typecode=15:ef00 \
-                --new=1:
+            if [ "${SUBARCH:-}" = "generic" ]; then
+                sgdisk "${disk_image}" \
+                    --new=15:0:204800 \
+                    --typecode=15:ef00 \
+                    --attributes=15:set:2 \
+                    --new=14::+4M \
+                    --change-name=14:CIDATA \
+                    --new=1:
+            else
+                sgdisk "${disk_image}" \
+                    --new=15:0:204800 \
+                    --typecode=15:ef00 \
+                    --new=1:
+            fi
             ;;
         riscv64)
             # same as arm64/armhf, but set bit 2 legacy bios bootable
@@ -72,13 +85,25 @@
             fi
             ;;
         amd64)
-            sgdisk "${disk_image}" \
-                --new=14::+4M \
-                --new=15::+106M \
-                --new=1::
-            sgdisk "${disk_image}" \
-                -t 14:ef02 \
-                -t 15:ef00
+            if [ "${SUBARCH:-}" = "generic" ]; then
+                sgdisk "${disk_image}" \
+                    --new=14::+4M \
+                    --typecode=14:ef02 \
+                    --attributes=14:set:2 \
+                    --new=15::+106M \
+                    --typecode=15:ef00 \
+                    --new=13::+4M \
+                    --change-name=13:CIDATA \
+                    --new=1::
+            else
+                sgdisk "${disk_image}" \
+                    --new=14::+4M \
+                    --new=15::+106M \
+                    --new=1::
+                sgdisk "${disk_image}" \
+                    -t 14:ef02 \
+                    -t 15:ef00
+            fi
             ;;
     esac
     sgdisk "${disk_image}" \
@@ -119,14 +144,38 @@ install_grub() {
         arm64)
             chroot mountpoint apt-get -qqy install --no-install-recommends shim-signed grub-efi-arm64-signed
             efi_target=arm64-efi
+            if [ "${SUBARCH:-}" = "generic" ]; then
+                # Server preinstalled image
+                # Setup cidata sample data & nocloud fallback
+                # Allows login on first boot with or without metadata
+                cidata_dev="/dev/mapper${loop_device///dev/}p14"
+                setup_cidata "${cidata_dev}"
+                setup_cinocloud mountpoint
+            fi
             ;;
         armhf)
            chroot mountpoint apt-get -qqy install --no-install-recommends grub-efi-arm grub-efi-arm-bin
            efi_target=arm-efi
+            if [ "${SUBARCH:-}" = "generic" ]; then
+                # Server preinstalled image
+                # Setup cidata sample data & nocloud fallback
+                # Allows login on first boot with or without metadata
+                cidata_dev="/dev/mapper${loop_device///dev/}p14"
+                setup_cidata "${cidata_dev}"
+                setup_cinocloud mountpoint
+            fi
             ;;
         amd64)
             chroot mountpoint apt-get install -qqy grub-pc shim-signed
             efi_target=x86_64-efi
+            if [ "${SUBARCH:-}" = "generic" ]; then
+                # Server preinstalled image
+                # Setup cidata sample data & nocloud fallback
+                # Allows login on first boot with or without metadata
+                cidata_dev="/dev/mapper${loop_device///dev/}p13"
+                setup_cidata "${cidata_dev}"
+                setup_cinocloud mountpoint
+            fi
             ;;
         riscv64)
             # TODO grub-efi-riscv64 does not exist yet on riscv64
@@ -153,34 +202,7 @@ install_grub() {
             # Provide stock nocloud datasource
             # Allow interactive login on baremetal SiFive board,
             # without a cloud datasource.
-            mkdir -p mountpoint/var/lib/cloud/seed/nocloud-net
-            cat <<EOF > mountpoint/var/lib/cloud/seed/nocloud-net/meta-data
-instance-id: iid-$(openssl rand -hex 8)
-EOF
-            cat <<EOF > mountpoint/var/lib/cloud/seed/nocloud-net/user-data
-#cloud-config
-chpasswd:
-  expire: True
-  list:
-  - ubuntu:ubuntu
-ssh_pwauth: True
-EOF
-            cat <<EOF > mountpoint/var/lib/cloud/seed/nocloud-net/network-config
-# This is the initial network config.
-# It can be overwritten by cloud-init.
-version: 2
-ethernets:
-  zz-all-en:
-    match:
-      name: "en*"
-    dhcp4: true
-    optional: true
-  zz-all-eth:
-    match:
-      name: "eth*"
-    dhcp4: true
-    optional: true
-EOF
+            setup_cinocloud mountpoint
         fi
         ## TODO remove below once we have grub-efi-riscv64
         rm mountpoint/tmp/device.map
diff --git a/live-build/ubuntu-cpc/hooks.d/base/disk-image.binary b/live-build/ubuntu-cpc/hooks.d/base/disk-image.binary
index fda732da..7c8bc9b7 100755
--- a/live-build/ubuntu-cpc/hooks.d/base/disk-image.binary
+++ b/live-build/ubuntu-cpc/hooks.d/base/disk-image.binary
@@ -31,6 +31,10 @@ case $ARCH:$SUBARCH in
         echo "We only create EFI images for $ARCH."
         exit 0
         ;;
+    amd64:generic)
+        echo "We only create EFI images for $SUBARCH."
+        exit 0
+        ;;
     *)
         ;;
 esac
diff --git a/live-build/ubuntu-cpc/hooks.d/base/disk1-img-xz.binary b/live-build/ubuntu-cpc/hooks.d/base/disk1-img-xz.binary
new file mode 100755
index 00000000..b97fe2e8
--- /dev/null
+++ b/live-build/ubuntu-cpc/hooks.d/base/disk1-img-xz.binary
@@ -0,0 +1,17 @@
+#!/bin/bash -ex
+
+. config/functions
+. config/binary
+
+if [ "${IMAGE_HAS_HARDCODED_PASSWORD:-}" != "1" ]; then
+    echo ".disk1.img.xz preinstalled image requested to be built"
+    echo "but the build is not generated with hardcoded password"
+    echo "the build is misconfigured"
+    exit 1
+fi
+
+if [ -f binary/boot/disk-uefi.ext4 ]; then
+    xz -T4 -c binary/boot/disk-uefi.ext4 > livecd.ubuntu-cpc.disk1.img.xz
+elif [ -f binary/boot/disk.ext4 ]; then
+    xz -T4 -c binary/boot/disk.ext4 > livecd.ubuntu-cpc.disk1.img.xz
+fi
diff --git a/live-build/ubuntu-cpc/hooks.d/base/qcow2-image.binary b/live-build/ubuntu-cpc/hooks.d/base/qcow2-image.binary
index 5b38fe69..8dbbb9ae 100755
--- a/live-build/ubuntu-cpc/hooks.d/base/qcow2-image.binary
+++ b/live-build/ubuntu-cpc/hooks.d/base/qcow2-image.binary
@@ -1,18 +1,5 @@
 #!/bin/bash -ex
 
-case $ARCH:$SUBARCH in
-    # Not sure if any other cloud images use subarch for something that
-    # should take qcow2 format, so only skipping this on raspi2 for now.
-    armhf:raspi2)
-        xz -T4 -c binary/boot/disk.ext4 > livecd.ubuntu-cpc.disk1.img.xz
-        exit 0
-        ;;
-    riscv64:hifive|riscv64:sifive_*)
-        xz -T4 -c binary/boot/disk-uefi.ext4 > livecd.ubuntu-cpc.disk1.img.xz
-        exit 0
-        ;;
-esac
-
 . config/functions
 
 if [ -f binary/boot/disk-uefi.ext4 ]; then
diff --git a/live-build/ubuntu-cpc/hooks.d/base/series/disk1-img-xz b/live-build/ubuntu-cpc/hooks.d/base/series/disk1-img-xz
new file mode 100644
index 00000000..dfbdf1b2
--- /dev/null
+++ b/live-build/ubuntu-cpc/hooks.d/base/series/disk1-img-xz
@@ -0,0 +1,3 @@
+depends disk-image
+base/disk1-img-xz.binary
+provides livecd.ubuntu-cpc.disk1.img.xz
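The new series/disk1-img-xz file declares that the disk1-img-xz hook depends on the disk-image hooks and provides livecd.ubuntu-cpc.disk1.img.xz; the raspi2/riscv64 special cases removed from qcow2-image.binary now live in that dedicated hook. As a reading aid for the three-line series format shown above, here is a toy parser; the real dependency handling lives in livecd-rootfs' hook machinery, and read_series is only illustrative.

#!/usr/bin/env python3
"""Toy reader for an ubuntu-cpc hook series file such as series/disk1-img-xz."""
from pathlib import Path


def read_series(path):
    depends, provides, hooks = [], [], []
    for line in Path(path).read_text().splitlines():
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        if line.startswith("depends "):
            depends.append(line.split(None, 1)[1])
        elif line.startswith("provides "):
            provides.append(line.split(None, 1)[1])
        else:
            hooks.append(line)  # e.g. base/disk1-img-xz.binary
    return depends, provides, hooks


if __name__ == "__main__":
    # Example against the new series file added in this change.
    print(read_series("live-build/ubuntu-cpc/hooks.d/base/series/disk1-img-xz"))
    # -> (['disk-image'], ['livecd.ubuntu-cpc.disk1.img.xz'], ['base/disk1-img-xz.binary'])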
diff --git a/live-build/ubuntu-cpc/hooks.d/chroot/999-cpc-fixes.chroot b/live-build/ubuntu-cpc/hooks.d/chroot/999-cpc-fixes.chroot
index 3e1a9131..3071e01b 100755
--- a/live-build/ubuntu-cpc/hooks.d/chroot/999-cpc-fixes.chroot
+++ b/live-build/ubuntu-cpc/hooks.d/chroot/999-cpc-fixes.chroot
@@ -117,7 +117,7 @@
 
 case $arch in
     # ARM, ppc, riscv64 and s390x images are special
-    armhf|arm64|powerpc|ppc64el|s390x|riscv64)
+    powerpc|ppc64el|s390x|riscv64)
         exit 0
         ;;
 esac
@@ -208,17 +208,20 @@
 GRUB_CMDLINE_LINUX_DEFAULT="console=tty1 console=ttyS0"
 # Set the grub console type
 GRUB_TERMINAL=console
 EOF
-_xchroot "${rootd}" update-grub2
-# since this is a disk image, we technically don't need to install all the
-# grub modules, as the image itself is not bootable. This makes for a small
-# disk image
-_xchroot "${rootd}" update-grub
+
+# Sometimes grub is not installed yet (i.e. uefi arm images). Overall
+# it is odd that we run this out of chroot hooks, instead of binary
+# hooks. I wonder if we can move all of this to disk-image hooks.
+if [ -x "${rootd}/usr/sbin/update-grub" ]; then
+    _xchroot "${rootd}" update-grub
+fi
 
 # reconfigure grub so that upgrades to grub-pc do not force a debconf config
 # changed prompt (LP: #1009294). This re-runs update-grub
-_xchroot "${rootd}" env DEBIAN_FRONTEND=noninteractive \
-    dpkg-reconfigure grub-pc
+if [ -n "`_xchroot "${rootd}" dpkg-query -W grub-pc 2>/dev/null`" ]; then
+    _xchroot "${rootd}" env DEBIAN_FRONTEND=noninteractive \
+        dpkg-reconfigure grub-pc
+fi
 
 grub2cfg="${rootd}/boot/grub/grub.cfg"
 [ ! -f "${grub2cfg}" ] ||
diff --git a/live-build/ubuntu-server/hooks/032-installer-squashfs.binary b/live-build/ubuntu-server/hooks/032-installer-squashfs.binary
index b28fb6c7..a86deb35 100755
--- a/live-build/ubuntu-server/hooks/032-installer-squashfs.binary
+++ b/live-build/ubuntu-server/hooks/032-installer-squashfs.binary
@@ -75,11 +75,6 @@
 chroot $INSTALLER_ROOT apt-get clean
 
 # "helpful" casper script that mounts any swap partitions it finds.
 rm -f $INSTALLER_ROOT/usr/share/initramfs-tools/scripts/casper-bottom/*swap
 
-# For bug #1893818 "Several blockprobe errors if trying to install the
-# groovy daily live on LPAR", remove a udev rule that removes
-# partition nodes for multipathed disks that breaks the installer.
-rm -f $INSTALLER_ROOT/lib/udev/rules.d/68-del-part-nodes.rules
-
 # Preseed subiquity into installer layer
 snap_prepare $INSTALLER_ROOT
 snap_preseed $INSTALLER_ROOT subiquity/classic
diff --git a/magic-proxy b/magic-proxy
index e2d0c28d..29d95ab4 100755
--- a/magic-proxy
+++ b/magic-proxy
@@ -68,6 +68,45 @@ class LPInReleaseCacheError(LPInReleaseBaseError): pass
 
 class LPInReleaseProxyError(LPInReleaseBaseError): pass
 
+IN_LP = "http://ftpmaster.internal/ubuntu" in os.environ.get("LB_PARENT_MIRROR_BOOTSTRAP", "")
+
+# We cannot proxy & rewrite https requests. Thus apt will talk to us
+# over http. But we must upgrade to https for private-ppas, outside of
+# launchpad, hence use this helper to re-write urls.
+def get_uri(host, path):
+    if host in ("private-ppa.launchpad.net", "private-ppa.buildd"):
+        if IN_LP:
+            return "http://private-ppa.buildd" + path
+        else:
+            return "https://private-ppa.launchpad.net" + path
+    # TODO add split mirror handling for ftpmaster.internal =>
+    # (ports|archive).ubuntu.com
+    return "http://" + host + path
+
+def initialize_auth():
+    auth_handler = urllib.request.HTTPBasicAuthHandler()
+    with open('/etc/apt/sources.list') as f:
+        for line in f.readlines():
+            for word in line.split():
+                if not word.startswith('http'):
+                    continue
+                parse = urllib.parse.urlparse(word)
+                if not parse.username:
+                    continue
+                if parse.hostname not in ("private-ppa.launchpad.net", "private-ppa.buildd"):
+                    continue
+                auth_handler.add_password(
+                    "Token Required", "https://private-ppa.launchpad.net" + parse.path,
+                    parse.username, parse.password)
+                auth_handler.add_password(
+                    "Token Required", "http://private-ppa.buildd" + parse.path,
+                    parse.username, parse.password)
+                print("add password for", parse.path)
+    opener = urllib.request.build_opener(auth_handler)
+    urllib.request.install_opener(opener)
+
+initialize_auth()
+
 
 class InRelease:
     """This class represents an InRelease file."""
@@ -97,7 +136,8 @@
         this is set explicitly to correspond to the Last-Modified header spat
         out by the Web server.
         """
-        self.mirror = mirror
+        parsed = urllib.parse.urlparse(mirror)
+        self.mirror = get_uri(parsed.hostname, parsed.path)
         self.suite = suite
         self.data = data
         self.dict = {}
@@ -363,7 +403,7 @@
         suite."""
         with self._lock:
             url_obj = urllib.parse.urlparse(mirror)
-            address = url_obj.hostname + url_obj.path.rstrip("/")
+            address = url_obj.scheme + url_obj.hostname + url_obj.path.rstrip("/")
 
             inrel_by_hash = self._data\
                 .get(address, {})\
@@ -403,7 +443,8 @@
         which case all look-ups will first go to the cache and only cache
         misses will result in requests to the Web server.
         """
-        self._mirror = mirror
+        parsed = urllib.parse.urlparse(mirror)
+        self._mirror = get_uri(parsed.hostname, parsed.path)
         self._suite = suite
         self._cache = cache
@@ -528,7 +569,8 @@
             return [inrel.hash for inrel in cache_entry]
 
         try:
-            with urllib.request.urlopen(self._base_url) as response:
+            request = urllib.request.Request(self._base_url)
+            with urllib.request.urlopen(request) as response:
                 content_encoding = self._guess_content_encoding_for_response(
                     response)
@@ -744,6 +786,23 @@
         """Process a GET request."""
         self.__get_request()
 
+    def sanitize_requestline(self):
+        requestline = []
+        for word in self.requestline.split():
+            if word.startswith('http'):
+                parse = urllib.parse.urlparse(word)
+                parse = urllib.parse.ParseResult(
+                    parse.scheme,
+                    parse.hostname,  # not netloc, to sanitize username/password
+                    parse.path,
+                    parse.params,
+                    parse.query,
+                    parse.fragment)
+                requestline.append(urllib.parse.urlunparse(parse))
+            else:
+                requestline.append(word)
+        self.requestline = ' '.join(requestline)
+
     def __get_request(self, verb="GET"):
         """Pass all requests on to the destination server 1:1 except when the
         target is an InRelease file or a resource listed in an InRelease files.
@@ -756,15 +815,18 @@
         happening here, the client does not know that what it receives is not
         exactly what it requested."""
 
-        host, path = self.__get_host_path()
+        uri = self.headers.get("host") + self.path
+        parsed = urllib.parse.urlparse(uri)
+
+        self.sanitize_requestline()
 
         m = re.match(
             r"^(?P<base>.*?)/dists/(?P<suite>[^/]+)/(?P<target>.*)$",
-            path
+            parsed.path
         )
 
         if m:
-            mirror = "http://" + host + m.group("base")
+            mirror = get_uri(parsed.hostname, m.group("base"))
             base = m.group("base")
             suite = m.group("suite")
             target = m.group("target")
@@ -775,50 +837,49 @@
                 self.server.snapshot_stamp)
 
             if inrelease is None:
-                self.__send_error(404, "No InRelease file found for given "
-                                  "mirror, suite and timestamp.")
-                return
-
-            if target == "InRelease":
-                # If target is InRelease, send back contents directly.
-                data = inrelease.data.encode("utf-8")
-                self.log_message(
-                    "Inject InRelease '{}'".format(inrelease.hash))
-
-                self.send_response(200)
-                self.send_header("Content-Length", len(data))
-                self.end_headers()
+                self.log_message("InRelease not found for {}/{}".format(parsed.hostname, parsed.path))
+                self.send_error(404, "No InRelease file found for given "
+                                "mirror, suite and timestamp.")
+                return
 
-                if verb == "GET":
-                    self.wfile.write(data)
+            hash_ = None
 
-                return
+            if target == "InRelease":
+                hash_ = inrelease.hash
             else:
-                # If target hash is listed, then redirect to by-hash URL.
                 hash_ = inrelease.get_hash_for(target)
 
-                if hash_:
-                    self.log_message(
-                        "Inject {} for {}".format(hash_, target))
+            if hash_:
+                self.log_message(
+                    "Inject {} for {}".format(hash_, target))
 
-                    target_path = target.rsplit("/", 1)[0]
+                target_path = target.rsplit("/", 1)[0]
 
-                    path = "{}/dists/{}/{}/by-hash/SHA256/{}"\
-                        .format(base, suite, target_path, hash_)
+                uri = "{}/dists/{}/by-hash/SHA256/{}"\
+                    .format(mirror, suite, hash_)
+        else:
+            uri = get_uri(parsed.hostname, parsed.path)
 
+        ## use requests such that authentication via password database happens
+        ## reuse all the headers that we got asked to provide
         try:
-            client = http.client.HTTPConnection(host)
-            client.request(verb, path)
-        except Exception as e:
-            self.log_error("Failed to retrieve http://{}{}: {}"
-                           .format(host, path, str(e)))
-            return
+            with urllib.request.urlopen(
+                    urllib.request.Request(
+                        uri,
+                        method=verb,
+                        headers=self.headers)) as response:
+                self.__send_response(response)
+        except urllib.error.HTTPError as e:
+            if e.code not in (304,):
+                self.log_message(
+                    "urlopen() failed for {} with {}".format(uri, e.reason))
+            self.__send_response(e)
+        except urllib.error.URLError as e:
+            self.log_message(
+                "urlopen() failed for {} with {}".format(uri, e.reason))
+            self.send_error(501, e.reason)
 
-        try:
-            self.__send_response(client.getresponse())
-        except Exception as e:
-            self.log_error("Error delivering response: {}".format(str(e)))
 
     def __get_host_path(self):
         """Figure out the host to contact and the path of the resource that is
@@ -831,20 +892,26 @@
 
     def __send_response(self, response):
         """Pass on upstream response headers and body to the client."""
-        self.send_response(response.status)
+        if hasattr(response, "status"):
+            status = response.status
+        elif hasattr(response, "code"):
+            status = response.code
+        elif hasattr(response, "getstatus"):
+            status = response.getstatus()
+
+        if hasattr(response, "headers"):
+            headers = response.headers
+        elif hasattr(response, "info"):
+            headers = response.info()
 
-        for name, value in response.getheaders():
-            self.send_header(name, value)
+        self.send_response(status)
 
-        self.end_headers()
-        shutil.copyfileobj(response, self.wfile)
+        for name, value in headers.items():
+            self.send_header(name, value)
 
-    def __send_error(self, status, message):
-        """Return an HTTP error status and a message in the response body."""
-        self.send_response(status)
-        self.send_header("Content-Type", "text/plain; charset=utf-8")
         self.end_headers()
-        self.wfile.write(message.encode("utf-8"))
+
+        if hasattr(response, "read"):
+            shutil.copyfileobj(response, self.wfile)
 
 
 class MagicHTTPProxy(socketserver.ThreadingMixIn, http.server.HTTPServer):
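The magic-proxy changes replace http.client with urllib, rewrite mirror URLs through get_uri() (keeping private-ppa access on https://private-ppa.launchpad.net outside Launchpad and on http://private-ppa.buildd inside), register PPA credentials found in /etc/apt/sources.list, and scrub user:password pairs from logged request lines via sanitize_requestline(). The standalone sketch below reproduces only the rewriting and scrubbing rules with worked examples; IN_LP is hardcoded here instead of being derived from LB_PARENT_MIRROR_BOOTSTRAP, and strip_credentials is an illustrative stand-in for sanitize_requestline, not the proxy's actual method.

#!/usr/bin/env python3
"""Worked examples for the URL handling added to magic-proxy."""
import urllib.parse

IN_LP = False  # the proxy derives this from LB_PARENT_MIRROR_BOOTSTRAP


def get_uri(host, path):
    # Same rewriting rules as the patch: private-ppa hosts are pinned to the
    # appropriate endpoint, everything else is proxied over plain http.
    if host in ("private-ppa.launchpad.net", "private-ppa.buildd"):
        if IN_LP:
            return "http://private-ppa.buildd" + path
        return "https://private-ppa.launchpad.net" + path
    return "http://" + host + path


def strip_credentials(url):
    # Rebuild the URL with hostname only, dropping any user:password@ part,
    # which is what sanitize_requestline() does before logging request lines.
    p = urllib.parse.urlparse(url)
    return urllib.parse.urlunparse(
        (p.scheme, p.hostname or "", p.path, p.params, p.query, p.fragment))


if __name__ == "__main__":
    print(get_uri("archive.ubuntu.com", "/ubuntu/dists/impish/InRelease"))
    # http://archive.ubuntu.com/ubuntu/dists/impish/InRelease
    print(get_uri("private-ppa.launchpad.net", "/owner/ppa/ubuntu/dists/impish/InRelease"))
    # https://private-ppa.launchpad.net/owner/ppa/ubuntu/dists/impish/InRelease
    print(strip_credentials("http://user:secret@private-ppa.buildd/owner/ppa/ubuntu/"))
    # http://private-ppa.buildd/owner/ppa/ubuntu/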