Mirror of https://git.launchpad.net/livecd-rootfs, synced 2025-03-13 12:21:15 +00:00
Imported 2.664.33

No reason for CPC update specified.

parent 9d01f6c218
commit b9cdfdb1e2
38	debian/changelog	vendored
@@ -1,3 +1,41 @@
+livecd-rootfs (2.664.33) focal; urgency=medium
+
+  * Install cloud-initramfs-growroot to actually enable rootfs resize.
+  * Fix a grub error by making sure the unicode.pf2 font is installed in the
+    right path for preinstalled amd64 desktop images.
+
+ -- Łukasz 'sil2100' Zemczak <lukasz.zemczak@ubuntu.com>  Fri, 29 Oct 2021 15:33:34 +0200
+
+livecd-rootfs (2.664.32) focal; urgency=medium
+
+  * 099-ubuntu-image-customization.chroot: fix a typo in it.
+
+ -- Brian Murray <brian@ubuntu.com>  Thu, 28 Oct 2021 11:12:32 -0700
+
+livecd-rootfs (2.664.31) focal; urgency=medium
+
+  [ Łukasz 'sil2100' Zemczak ]
+  * Add the 099-ubuntu-image-customization.chroot for
+    desktop-preinstalled images similar to what we have in groovy+ (for the pi
+    desktop), but improved for amd64 platforms. We need it to generate a valid
+    grub.cfg on the rootfs (similar to ubuntu-cpc) and then use that instead
+    of a static configuration locked on the boot partition (LP: #1949102).
+
+  [ Brian Murray ]
+  * Properly check ARCH when setting the intel-iot model.
+
+ -- Łukasz 'sil2100' Zemczak <lukasz.zemczak@ubuntu.com>  Thu, 28 Oct 2021 17:35:12 +0200
+
+livecd-rootfs (2.664.30) focal; urgency=medium
+
+  [ Thomas Bechtold ]
+  * magic-proxy: Replace http.client with urllib calls. live-build/auto/build:
+    change iptables calls to query rules and quickly check that connectivity
+    works after transparent proxy has been installed. (LP: #1917920)
+  * magic-proxy: fix TypeError when trying to call get_uri() (LP: #1944906)
+
+ -- Brian Murray <brian@ubuntu.com>  Thu, 21 Oct 2021 11:55:24 -0700
+
 livecd-rootfs (2.664.29) focal; urgency=medium
 
   * Generate manifest for HyperV desktop image (LP: #1940136)
live-build/auto/build

@@ -35,6 +35,18 @@ run_iptables () {
 	kver="${kver#*.}"
 	kver_minor="${kver%%.*}"
 
+	# LP: #1917920
+	# I'm seeing issues after iptables got upgraded from 1.8.5 to
+	# 1.8.7. Somehow installing our nat rule doesn't get activated, and
+	# no networking is happening at all.
+	#
+	# But somehow calling both iptables -S makes things start working.
+	# Maybe no default chains are installed in our network namespace?!
+	# Or 1.8.7 is somehow broken?
+	iptables -v -t nat -S
+	iptables-legacy -v -t nat -S
+
 	if [ "$kver_major" -lt 4 ] || \
 		([ "$kver_major" = 4 ] && [ "$kver_minor" -lt 15 ]); then
 		iptables-legacy "$@"
@@ -52,10 +64,11 @@ if [ -n "$REPO_SNAPSHOT_STAMP" ]; then
 	apt-get -qyy install iptables
 
 	# Redirect all outgoing traffic to port 80 to proxy instead.
-	run_iptables -t nat -A OUTPUT -p tcp --dport 80 \
+	run_iptables -v -t nat -A OUTPUT -p tcp --dport 80 \
 		-m owner ! --uid-owner daemon -j REDIRECT --to 8080
 
 	# Run proxy as "daemon" to avoid infinite loop.
 	LB_PARENT_MIRROR_BOOTSTRAP=$LB_PARENT_MIRROR_BOOTSTRAP \
 		/usr/share/livecd-rootfs/magic-proxy \
 		--address="127.0.0.1" \
 		--port=8080 \
@@ -65,6 +78,9 @@ if [ -n "$REPO_SNAPSHOT_STAMP" ]; then
 		--pid-file=config/magic-proxy.pid \
 		--background \
 		--setsid
+
+	# Quick check that magic proxy & iptables chains are working
+	timeout 3m apt-get update
 fi
 
 # Link output files somewhere launchpad-buildd will be able to find them.
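The hunks above wire a transparent proxy into the build: plain-http traffic from the build is redirected to magic-proxy on port 8080, and `timeout 3m apt-get update` serves as a connectivity smoke test. Below is a minimal standalone sketch of the same smoke-test idea in Python; the URL and timeout are illustrative assumptions, not values from livecd-rootfs.

import urllib.request

def proxy_smoke_test(url="http://archive.ubuntu.com/ubuntu/dists/focal/InRelease",
                     timeout=30):
    """Fetch one plain-http URL; with the REDIRECT rule active, the
    transparent proxy on port 8080 answers instead of the real mirror."""
    try:
        with urllib.request.urlopen(url, timeout=timeout) as response:
            return response.getcode() == 200
    except OSError:
        return False

if __name__ == "__main__":
    print("proxy reachable:", proxy_smoke_test())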
live-build/auto/config

@@ -601,6 +601,9 @@ case $PROJECT in
 	desktop-preinstalled)
 		add_task install minimal standard ubuntu-desktop
 		if [ "$SUBARCH" = "intel-iot" ]; then
+			# Since for non-pi we don't have any seeds yet but we want to be able to
+			# grow the rootfs, manually install cloud-initramfs-growroot during build
+			add_package install cloud-initramfs-growroot
 			KERNEL_FLAVOURS='image-intel'
 			COMPONENTS='main restricted universe'
 			OPTS="${OPTS:+$OPTS }--initramfs=none"
@@ -835,7 +838,7 @@ case $PROJECT in
 			arm64)
 				add_package install flash-kernel
 				;;
-			amd64)
+			amd64*)
 				if [ "${SUBARCH:-}" = "intel-iot" ]; then
 					KERNEL_FLAVOURS=image-intel
 					COMPONENTS='main restricted universe'
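The `amd64)` to `amd64*)` change matters because shell `case` labels are glob patterns: `amd64*` also matches extended architecture strings. A small sketch of the matching rule; the `amd64+mac` value is a hypothetical example, not taken from this diff.

from fnmatch import fnmatch  # fnmatch uses the same * glob semantics as shell case labels

for arch in ("amd64", "amd64+mac", "arm64"):
    print(arch, "matches amd64*:", fnmatch(arch, "amd64*"))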
122	live-build/ubuntu/hooks/099-ubuntu-image-customization.chroot	Normal file
@@ -0,0 +1,122 @@
+#!/bin/bash -ex
+
+. /root/config/chroot
+
+# Specific ubuntu-image chroot configuration goes here.
+if [ "$IMAGEFORMAT" == "none" ]; then
+    if [ "$SUBPROJECT" == "desktop-preinstalled" ]; then
+        # Create files/dirs Ubiquity requires
+        mkdir -p /var/log/installer
+        touch /var/log/installer/debug
+        touch /var/log/syslog
+        chown syslog:adm /var/log/syslog
+
+        # Create the oem user account
+        if [ -e "/usr/sbin/oem-config-prepare" ]; then
+            /usr/sbin/useradd -d /home/oem -G adm,sudo -m -N -u 29999 oem
+
+            /usr/sbin/oem-config-prepare --quiet
+            touch "/var/lib/oem-config/run"
+        fi
+
+        # Make the writable partition grow
+        echo "LABEL=writable / ext4 defaults,x-systemd.growfs 0 0" >>/etc/fstab
+
+        # Create a 1GB swapfile
+        dd if=/dev/zero of=/swapfile bs=1M count=1024
+        chmod 0600 /swapfile
+        mkswap /swapfile
+
+        echo "/swapfile none swap sw 0 0" >>/etc/fstab
+
+        if [ $(dpkg --print-architecture) == "amd64" ]; then
+            # We need to get a grub.cfg generated for our image
+            # This is copy-pasted from 999-cpc-fixes.chroot
+            # TODO: Make this better. One idea would be to have this exported
+            # in functions or something, and then reused by both the cpc-fixes
+            # and here. Another possibility is to actually trim down the
+            # pseudo_grub_probe to only work for a regular preinstalled
+            # desktop.
+            # But short term it's safer to use a known code-base.
+            psuedo_grub_probe() {
+                cat <<"PSUEDO_GRUB_PROBE"
+#!/bin/sh
+Usage() {
+   cat <<EOF
+Usage: euca-psuedo-grub-probe
+   this is a wrapper around grub-probe to provide the answers for an ec2 guest
+EOF
+}
+bad_Usage() { Usage 1>&2; fail "$@"; }
+
+short_opts=""
+long_opts="device-map:,target:,device"
+getopt_out=$(getopt --name "${0##*/}" \
+   --options "${short_opts}" --long "${long_opts}" -- "$@") &&
+   eval set -- "${getopt_out}" ||
+   bad_Usage
+
+device_map=""
+target=""
+device=0
+arg=""
+
+while [ $# -ne 0 ]; do
+   cur=${1}; next=${2};
+   case "$cur" in
+      --device-map) device_map=${next}; shift;;
+      --device) device=1;;
+      --target) target=${next}; shift;;
+      --) shift; break;;
+   esac
+   shift;
+done
+arg=${1}
+
+case "${target}:${device}:${arg}" in
+   device:*:/*) echo "/dev/sda1"; exit 0;;
+   fs:*:*) echo "ext2"; exit 0;;
+   partmap:*:*)
+      # older versions of grub (lucid) want 'part_msdos' written
+      # rather than 'msdos'
+      legacy_pre=""
+      grubver=$(dpkg-query --show --showformat '${Version}\n' grub-pc 2>/dev/null) &&
+         dpkg --compare-versions "${grubver}" lt 1.98+20100804-5ubuntu3 &&
+         legacy_pre="part_"
+      echo "${legacy_pre}msdos";
+      exit 0;;
+   abstraction:*:*) echo ""; exit 0;;
+   drive:*:/dev/sda) echo "(hd0)";;
+   drive:*:/dev/sda*) echo "(hd0,1)";;
+   fs_uuid:*:*) exit 1;;
+esac
+PSUEDO_GRUB_PROBE
+            }
+
+            gprobe="/usr/sbin/grub-probe"
+            moved=0
+            if [ -f "${gprobe}" ]; then
+                mv "${gprobe}" "${gprobe}.dist"
+                moved=1
+            fi
+            psuedo_grub_probe > "${gprobe}"
+            chmod 755 "${gprobe}"
+
+            # Generate grub.cfg
+            /usr/sbin/update-grub2
+
+            # Somehow grub doesn't copy unicode.pf2 to the right fonts
+            # directory.
+            if [ ! -e "/boot/grub/fonts/unicode.pf2" ]; then
+                mkdir -p /boot/grub/fonts
+                ln /boot/grub/unicode.pf2 /boot/grub/fonts/unicode.pf2
+            fi
+
+            grub2cfg="/boot/grub/grub.cfg"
+            [ ! -f "${grub2cfg}" ] ||
+                sed -i -e "s,root=/dev/[hs]da1,root=LABEL=writable," "${grub2cfg}"
+
+            [ ${moved} -eq 0 ] || mv "${gprobe}.dist" "${gprobe}"
+        fi
+    fi
+fi
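The final `sed` call in this hook rewrites any `root=/dev/sda1` (or `hda1`) kernel argument in the generated grub.cfg to `root=LABEL=writable`, so the image boots by filesystem label rather than by device name. The same substitution sketched in Python; the sample command line is an assumption for illustration.

import re

line = "linux /boot/vmlinuz root=/dev/sda1 ro quiet splash"
print(re.sub(r"root=/dev/[hs]da1", "root=LABEL=writable", line))
# -> linux /boot/vmlinuz root=LABEL=writable ro quiet splash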
173	magic-proxy
@@ -68,6 +68,45 @@ class LPInReleaseCacheError(LPInReleaseBaseError):
 class LPInReleaseProxyError(LPInReleaseBaseError):
     pass
 
+IN_LP = "http://ftpmaster.internal/ubuntu" in os.environ.get("LB_PARENT_MIRROR_BOOTSTRAP", "")
+
+# We cannot proxy & rewrite https requests, thus apt will talk to us
+# over http. But we must upgrade to https for private-ppas outside of
+# launchpad, hence use this helper to re-write urls.
+def get_uri(host, path):
+    if host in ("private-ppa.launchpad.net", "private-ppa.buildd"):
+        if IN_LP:
+            return "http://private-ppa.buildd" + path
+        else:
+            return "https://private-ppa.launchpad.net" + path
+    # TODO add split mirror handling for ftpmaster.internal =>
+    # (ports|archive).ubuntu.com
+    return "http://" + host + path
+
+def initialize_auth():
+    auth_handler = urllib.request.HTTPBasicAuthHandler()
+    with open('/etc/apt/sources.list') as f:
+        for line in f.readlines():
+            for word in line.split():
+                if not word.startswith('http'):
+                    continue
+                parse = urllib.parse.urlparse(word)
+                if not parse.username:
+                    continue
+                if parse.hostname not in ("private-ppa.launchpad.net", "private-ppa.buildd"):
+                    continue
+                auth_handler.add_password(
+                    "Token Required", "https://private-ppa.launchpad.net" + parse.path,
+                    parse.username, parse.password)
+                auth_handler.add_password(
+                    "Token Required", "http://private-ppa.buildd" + parse.path,
+                    parse.username, parse.password)
+                print("add password for", parse.path)
+    opener = urllib.request.build_opener(auth_handler)
+    urllib.request.install_opener(opener)
+
+initialize_auth()
+
 class InRelease:
     """This class represents an InRelease file."""
 
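A standalone sketch of the rewriting rule get_uri() implements, with IN_LP pinned to False for illustration. The hostnames are the ones used in the code; the paths are made-up examples.

IN_LP = False  # assumption for the sketch; the real value is derived from the environment

def get_uri(host, path):
    # private PPAs: plain http inside Launchpad, https everywhere else
    if host in ("private-ppa.launchpad.net", "private-ppa.buildd"):
        if IN_LP:
            return "http://private-ppa.buildd" + path
        return "https://private-ppa.launchpad.net" + path
    return "http://" + host + path

print(get_uri("private-ppa.launchpad.net", "/ubuntu/ppa/dists/focal/InRelease"))
# https://private-ppa.launchpad.net/ubuntu/ppa/dists/focal/InRelease
print(get_uri("archive.ubuntu.com", "/ubuntu/dists/focal/InRelease"))
# http://archive.ubuntu.com/ubuntu/dists/focal/InRelease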
@@ -97,7 +136,8 @@ class InRelease:
         this is set explicitly to correspond to the Last-Modified header spat
         out by the Web server.
         """
-        self.mirror = mirror
+        parsed = urllib.parse.urlparse(mirror)
+        self.mirror = get_uri(parsed.hostname, parsed.path)
         self.suite = suite
         self.data = data
         self.dict = {}
@@ -363,7 +403,7 @@ class LPInReleaseCache:
         suite."""
         with self._lock:
             url_obj = urllib.parse.urlparse(mirror)
-            address = url_obj.hostname + url_obj.path.rstrip("/")
+            address = url_obj.scheme + url_obj.hostname + url_obj.path.rstrip("/")
 
             inrel_by_hash = self._data\
                 .get(address, {})\
@@ -403,7 +443,8 @@ class LPInReleaseIndex:
         which case all look-ups will first go to the cache and only cache
         misses will result in requests to the Web server.
         """
-        self._mirror = mirror
+        parsed = urllib.parse.urlparse(mirror)
+        self._mirror = get_uri(parsed.hostname, parsed.path)
         self._suite = suite
         self._cache = cache
 
@@ -528,7 +569,8 @@ class LPInReleaseIndex:
             return [inrel.hash for inrel in cache_entry]
 
         try:
-            with urllib.request.urlopen(self._base_url) as response:
+            request = urllib.request.Request(self._base_url)
+            with urllib.request.urlopen(request) as response:
                 content_encoding = self._guess_content_encoding_for_response(
                     response)
 
@@ -744,6 +786,23 @@ class ProxyingHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
         """Process a GET request."""
         self.__get_request()
 
+    def sanitize_requestline(self):
+        requestline = []
+        for word in self.requestline.split():
+            if word.startswith('http'):
+                parse = urllib.parse.urlparse(word)
+                parse = urllib.parse.ParseResult(
+                    parse.scheme,
+                    parse.hostname,  # not netloc, to sanitize username/password
+                    parse.path,
+                    parse.params,
+                    parse.query,
+                    parse.fragment)
+                requestline.append(urllib.parse.urlunparse(parse))
+            else:
+                requestline.append(word)
+        self.requestline = ' '.join(requestline)
+
     def __get_request(self, verb="GET"):
         """Pass all requests on to the destination server 1:1 except when the
         target is an InRelease file or a resource listed in an InRelease file.
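sanitize_requestline() rebuilds each URL with `.hostname` rather than `.netloc`, which drops any `user:password@` credentials before the request line is logged. The core of that trick, standalone; the credentials and path below are made-up.

import urllib.parse

word = "http://user:secret@private-ppa.launchpad.net/ubuntu/ppa/dists/focal/InRelease"
p = urllib.parse.urlparse(word)
clean = urllib.parse.ParseResult(
    p.scheme, p.hostname, p.path, p.params, p.query, p.fragment)
print(urllib.parse.urlunparse(clean))  # credentials are gone from the output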
@@ -755,16 +814,25 @@ class ProxyingHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
         It is important to understand that there is no status 3xx HTTP redirect
         happening here, the client does not know that what it receives is not
         exactly what it requested."""
+        host = self.headers.get("host")
+
         host, path = self.__get_host_path()
+        # the host does not start with http(s):// which results in urlparse
+        # not detecting the host & path correctly (LP: #1944906)
+        if not host.startswith("http"):
+            host = "http://{}".format(host)
+        uri = host + self.path
+
+        parsed = urllib.parse.urlparse(uri)
+
+        self.sanitize_requestline()
 
         m = re.match(
             r"^(?P<base>.*?)/dists/(?P<suite>[^/]+)/(?P<target>.*)$",
-            path
+            parsed.path
         )
 
         if m:
-            mirror = "http://" + host + m.group("base")
+            mirror = get_uri(parsed.hostname, m.group("base"))
             base = m.group("base")
             suite = m.group("suite")
             target = m.group("target")
@@ -775,50 +843,49 @@ class ProxyingHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
                                              self.server.snapshot_stamp)
 
             if inrelease is None:
-                self.__send_error(404, "No InRelease file found for given "
-                                  "mirror, suite and timestamp.")
+                self.log_message(
+                    "InRelease not found for {}/{}".format(parsed.hostname, parsed.path))
+                self.send_error(404, "No InRelease file found for given "
+                                "mirror, suite and timestamp.")
                 return
 
-            hash_ = None
-
             if target == "InRelease":
-                # If target is InRelease, send back contents directly.
-                data = inrelease.data.encode("utf-8")
-
-                self.log_message(
-                    "Inject InRelease '{}'".format(inrelease.hash))
-
-                self.send_response(200)
-                self.send_header("Content-Length", len(data))
-                self.end_headers()
-
-                if verb == "GET":
-                    self.wfile.write(data)
-
-                return
+                hash_ = inrelease.hash
             else:
                 # If target hash is listed, then redirect to by-hash URL.
                 hash_ = inrelease.get_hash_for(target)
 
             if hash_:
                 self.log_message(
                     "Inject {} for {}".format(hash_, target))
 
                 target_path = target.rsplit("/", 1)[0]
 
-                path = "{}/dists/{}/{}/by-hash/SHA256/{}"\
-                    .format(base, suite, target_path, hash_)
+                uri = "{}/dists/{}/by-hash/SHA256/{}"\
+                    .format(mirror, suite, hash_)
+        else:
+            uri = get_uri(parsed.hostname, parsed.path)
 
+        ## use urllib.request so that authentication via the password database happens
+        ## reuse all the headers that we got asked to provide
         try:
-            client = http.client.HTTPConnection(host)
-            client.request(verb, path)
-        except Exception as e:
-            self.log_error("Failed to retrieve http://{}{}: {}"
-                           .format(host, path, str(e)))
-            return
-
-        try:
-            self.__send_response(client.getresponse())
-        except Exception as e:
-            self.log_error("Error delivering response: {}".format(str(e)))
+            with urllib.request.urlopen(
+                    urllib.request.Request(
+                        uri,
+                        method=verb,
+                        headers=self.headers)) as response:
+                self.__send_response(response)
+        except urllib.error.HTTPError as e:
+            if e.code not in (304,):
+                self.log_message(
+                    "urlopen() failed for {} with {}".format(uri, e.reason))
+            self.__send_response(e)
+        except urllib.error.URLError as e:
+            self.log_message(
+                "urlopen() failed for {} with {}".format(uri, e.reason))
+            self.send_error(501, e.reason)
 
     def __get_host_path(self):
         """Figure out the host to contact and the path of the resource that is
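The rewritten handler redirects hashed targets to immutable by-hash URLs instead of issuing a raw http.client request. How such a URL is assembled, with assumed example values:

mirror = "http://archive.ubuntu.com/ubuntu"   # example mirror, assumption
suite = "focal"
hash_ = "ab" * 32                             # stand-in SHA256 digest, assumption
print("{}/dists/{}/by-hash/SHA256/{}".format(mirror, suite, hash_))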
@@ -831,20 +898,26 @@ class ProxyingHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
 
     def __send_response(self, response):
         """Pass on upstream response headers and body to the client."""
-        self.send_response(response.status)
+        if hasattr(response, "status"):
+            status = response.status
+        elif hasattr(response, "code"):
+            status = response.code
+        elif hasattr(response, "getstatus"):
+            status = response.getstatus()
 
-        for name, value in response.getheaders():
+        if hasattr(response, "headers"):
+            headers = response.headers
+        elif hasattr(response, "info"):
+            headers = response.info()
+
+        self.send_response(status)
+
+        for name, value in headers.items():
             self.send_header(name, value)
 
         self.end_headers()
-        shutil.copyfileobj(response, self.wfile)
 
-    def __send_error(self, status, message):
-        """Return an HTTP error status and a message in the response body."""
-        self.send_response(status)
-        self.send_header("Content-Type", "text/plain; charset=utf-8")
-        self.end_headers()
-        self.wfile.write(message.encode("utf-8"))
+        if hasattr(response, "read"):
+            shutil.copyfileobj(response, self.wfile)
 
 
 class MagicHTTPProxy(socketserver.ThreadingMixIn, http.server.HTTPServer):
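__send_response() now serves two kinds of objects: a normal urlopen() response and a urllib.error.HTTPError, which expose status and headers under different attribute names. A tiny standalone probe of that duck-typing approach; FakeError is a hypothetical stand-in.

class FakeError:
    """Mimics urllib.error.HTTPError: status under .code, headers via .info()."""
    code = 404
    def info(self):
        return {"Content-Type": "text/plain"}

resp = FakeError()
status = getattr(resp, "status", None) or getattr(resp, "code", None)
headers = resp.headers if hasattr(resp, "headers") else resp.info()
print(status, headers)  # 404 {'Content-Type': 'text/plain'}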