Imported 2.719.3

No reason for CPC update specified.
2.719.3
CloudBuilder 3 years ago
parent 4a77957183
commit 0ec9f393ff

10
debian/changelog vendored

@ -1,3 +1,13 @@
livecd-rootfs (2.719.3) hirsute; urgency=medium
[ Thomas Bechtold ]
* magic-proxy: Replace http.client with urllib calls. live-build/auto/build:
change iptables calls to query rules and quickly check that connectivity
works after transparent proxy has been installed. (LP: #1917920)
* magic-proxy: fix TypeError when trying to call get_uri() (LP: #1944906)
-- Brian Murray <brian@ubuntu.com> Thu, 21 Oct 2021 11:17:58 -0700
livecd-rootfs (2.719.2) hirsute; urgency=medium
  * Generate manifest for HyperV desktop image (LP: #1940136)

@ -35,6 +35,18 @@ run_iptables () {
kver="${kver#*.}" kver="${kver#*.}"
kver_minor="${kver%%.*}" kver_minor="${kver%%.*}"
# LP: #1917920
# We are seeing issues after iptables got upgraded from 1.8.5 to
# 1.8.7. Somehow the nat rule we install doesn't get activated, and
# no networking is happening at all.
# But somehow calling both iptables variants with -S makes things start working.
# Maybe no default chains are installed in our network namespace?!
# Or 1.8.7 is somehow broken?
iptables -v -t nat -S
iptables-legacy -v -t nat -S
if [ "$kver_major" -lt 4 ] || \ if [ "$kver_major" -lt 4 ] || \
([ "$kver_major" = 4 ] && [ "$kver_minor" -lt 15 ]); then ([ "$kver_major" = 4 ] && [ "$kver_minor" -lt 15 ]); then
iptables-legacy "$@" iptables-legacy "$@"
@ -52,10 +64,11 @@ if [ -n "$REPO_SNAPSHOT_STAMP" ]; then
apt-get -qyy install iptables apt-get -qyy install iptables
# Redirect all outgoing traffic to port 80 to proxy instead. # Redirect all outgoing traffic to port 80 to proxy instead.
run_iptables -t nat -A OUTPUT -p tcp --dport 80 \ run_iptables -v -t nat -A OUTPUT -p tcp --dport 80 \
-m owner ! --uid-owner daemon -j REDIRECT --to 8080 -m owner ! --uid-owner daemon -j REDIRECT --to 8080
# Run proxy as "daemon" to avoid infinite loop. # Run proxy as "daemon" to avoid infinite loop.
LB_PARENT_MIRROR_BOOTSTRAP=$LB_PARENT_MIRROR_BOOTSTRAP \
/usr/share/livecd-rootfs/magic-proxy \ /usr/share/livecd-rootfs/magic-proxy \
--address="127.0.0.1" \ --address="127.0.0.1" \
--port=8080 \ --port=8080 \
@ -65,6 +78,9 @@ if [ -n "$REPO_SNAPSHOT_STAMP" ]; then
--pid-file=config/magic-proxy.pid \ --pid-file=config/magic-proxy.pid \
--background \ --background \
--setsid --setsid
# Quick check that magic proxy & iptables chains are working
timeout 3m apt-get update
fi fi
# Link output files somewhere launchpad-buildd will be able to find them. # Link output files somewhere launchpad-buildd will be able to find them.

@ -68,6 +68,45 @@ class LPInReleaseCacheError(LPInReleaseBaseError):
class LPInReleaseProxyError(LPInReleaseBaseError): class LPInReleaseProxyError(LPInReleaseBaseError):
pass pass
# True when running inside Launchpad's build farm, where the parent
# bootstrap mirror points at ftpmaster.internal.
IN_LP = "http://ftpmaster.internal/ubuntu" in os.environ.get("LB_PARENT_MIRROR_BOOTSTRAP", "")

# We cannot proxy & rewrite https requests, thus apt will talk to us
# over http. But we must upgrade to https for private PPAs outside of
# Launchpad, hence this helper re-writes URLs.
def get_uri(host, path):
    """Return the full URL from which *path* should be fetched on *host*.

    Private-PPA hosts are rewritten to the internal plain-http mirror
    when running inside Launchpad and to the public https endpoint
    otherwise; every other host is reached over plain http.
    """
    if host in ("private-ppa.launchpad.net", "private-ppa.buildd"):
        if IN_LP:
            return "http://private-ppa.buildd" + path
        return "https://private-ppa.launchpad.net" + path
    # TODO add split mirror handling for ftpmaster.internal =>
    # (ports|archive).ubuntu.com
    return "http://" + host + path
def initialize_auth():
    """Register HTTP basic-auth credentials found in sources.list.

    Scans /etc/apt/sources.list for private-PPA URLs embedding
    ``username:password`` credentials and registers them with a global
    urllib opener — against both the public https endpoint and the
    internal http endpoint, since get_uri() may rewrite one into the
    other — so later urlopen() calls authenticate transparently.
    """
    auth_handler = urllib.request.HTTPBasicAuthHandler()
    with open('/etc/apt/sources.list') as f:
        # Iterate the file lazily instead of readlines()-ing it whole.
        for line in f:
            for word in line.split():
                if not word.startswith('http'):
                    continue
                parse = urllib.parse.urlparse(word)
                if not parse.username:
                    continue
                if parse.hostname not in ("private-ppa.launchpad.net", "private-ppa.buildd"):
                    continue
                auth_handler.add_password(
                    "Token Required", "https://private-ppa.launchpad.net" + parse.path,
                    parse.username, parse.password)
                auth_handler.add_password(
                    "Token Required", "http://private-ppa.buildd" + parse.path,
                    parse.username, parse.password)
                print("add password for", parse.path)
    opener = urllib.request.build_opener(auth_handler)
    urllib.request.install_opener(opener)

initialize_auth()
class InRelease: class InRelease:
"""This class represents an InRelease file.""" """This class represents an InRelease file."""
@ -97,7 +136,8 @@ class InRelease:
this is set explicitly to correspond to the Last-Modified header spat this is set explicitly to correspond to the Last-Modified header spat
out by the Web server. out by the Web server.
""" """
self.mirror = mirror parsed = urllib.parse.urlparse(mirror)
self.mirror = get_uri(parsed.hostname, parsed.path)
self.suite = suite self.suite = suite
self.data = data self.data = data
self.dict = {} self.dict = {}
@ -363,7 +403,7 @@ class LPInReleaseCache:
suite.""" suite."""
with self._lock: with self._lock:
url_obj = urllib.parse.urlparse(mirror) url_obj = urllib.parse.urlparse(mirror)
address = url_obj.hostname + url_obj.path.rstrip("/") address = url_obj.scheme + url_obj.hostname + url_obj.path.rstrip("/")
inrel_by_hash = self._data\ inrel_by_hash = self._data\
.get(address, {})\ .get(address, {})\
@ -403,7 +443,8 @@ class LPInReleaseIndex:
which case all look-ups will first go to the cache and only cache which case all look-ups will first go to the cache and only cache
misses will result in requests to the Web server. misses will result in requests to the Web server.
""" """
self._mirror = mirror parsed = urllib.parse.urlparse(mirror)
self._mirror = get_uri(parsed.hostname, parsed.path)
self._suite = suite self._suite = suite
self._cache = cache self._cache = cache
@ -528,7 +569,8 @@ class LPInReleaseIndex:
return [inrel.hash for inrel in cache_entry] return [inrel.hash for inrel in cache_entry]
try: try:
with urllib.request.urlopen(self._base_url) as response: request=urllib.request.Request(self._base_url)
with urllib.request.urlopen(request) as response:
content_encoding = self._guess_content_encoding_for_response( content_encoding = self._guess_content_encoding_for_response(
response) response)
@ -744,6 +786,23 @@ class ProxyingHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
"""Process a GET request.""" """Process a GET request."""
self.__get_request() self.__get_request()
def sanitize_requestline(self):
    """Strip credentials (``user:password@``) from any URL in the
    request line so they never end up in the proxy's logs.

    Unlike substituting ``parse.hostname`` for the whole netloc, this
    preserves an explicit port while still dropping the userinfo.
    """
    requestline = []
    for word in self.requestline.split():
        if word.startswith('http'):
            parse = urllib.parse.urlparse(word)
            # Rebuild the netloc from hostname and port only: this
            # removes username/password but keeps the port (bare
            # parse.hostname would silently lose it).
            netloc = parse.hostname or ""
            if parse.port:
                netloc = "{}:{}".format(netloc, parse.port)
            parse = urllib.parse.ParseResult(
                parse.scheme,
                netloc,
                parse.path,
                parse.params,
                parse.query,
                parse.fragment)
            requestline.append(urllib.parse.urlunparse(parse))
        else:
            requestline.append(word)
    self.requestline = ' '.join(requestline)
def __get_request(self, verb="GET"): def __get_request(self, verb="GET"):
"""Pass all requests on to the destination server 1:1 except when the """Pass all requests on to the destination server 1:1 except when the
target is an InRelease file or a resource listed in an InRelease files. target is an InRelease file or a resource listed in an InRelease files.
@ -755,16 +814,25 @@ class ProxyingHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
It is important to understand that there is no status 3xx HTTP redirect It is important to understand that there is no status 3xx HTTP redirect
happening here, the client does not know that what it receives is not happening here, the client does not know that what it receives is not
exactly what it requested.""" exactly what it requested."""
host = self.headers.get("host")
# the host header does not start with http(s)://, which results in
# urlparse not detecting the host & path correctly (LP:#1944906)
if not host.startswith("http"):
host = "http://{}".format(host)
uri = host + self.path
host, path = self.__get_host_path() parsed = urllib.parse.urlparse(uri)
self.sanitize_requestline()
m = re.match( m = re.match(
r"^(?P<base>.*?)/dists/(?P<suite>[^/]+)/(?P<target>.*)$", r"^(?P<base>.*?)/dists/(?P<suite>[^/]+)/(?P<target>.*)$",
path parsed.path
) )
if m: if m:
mirror = "http://" + host + m.group("base") mirror = get_uri(parsed.hostname, m.group("base"))
base = m.group("base") base = m.group("base")
suite = m.group("suite") suite = m.group("suite")
target = m.group("target") target = m.group("target")
@ -775,27 +843,17 @@ class ProxyingHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
self.server.snapshot_stamp) self.server.snapshot_stamp)
if inrelease is None: if inrelease is None:
self.__send_error(404, "No InRelease file found for given " self.log_message(
"InRelease not found for {}/{}".format(parsed.hostname, parsed.path))
self.send_error(404, "No InRelease file found for given "
"mirror, suite and timestamp.") "mirror, suite and timestamp.")
return return
if target == "InRelease": hash_ = None
# If target is InRelease, send back contents directly.
data = inrelease.data.encode("utf-8")
self.log_message(
"Inject InRelease '{}'".format(inrelease.hash))
self.send_response(200)
self.send_header("Content-Length", len(data))
self.end_headers()
if verb == "GET":
self.wfile.write(data)
return if target == "InRelease":
hash_ = inrelease.hash
else: else:
# If target hash is listed, then redirect to by-hash URL.
hash_ = inrelease.get_hash_for(target) hash_ = inrelease.get_hash_for(target)
if hash_: if hash_:
@ -804,21 +862,30 @@ class ProxyingHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
target_path = target.rsplit("/", 1)[0] target_path = target.rsplit("/", 1)[0]
path = "{}/dists/{}/{}/by-hash/SHA256/{}"\ uri = "{}/dists/{}/by-hash/SHA256/{}"\
.format(base, suite, target_path, hash_) .format(mirror, suite, hash_)
else:
uri = get_uri(parsed.hostname, parsed.path)
## use requests such that authentication via password database happens
## reuse all the headers that we got asked to provide
try: try:
client = http.client.HTTPConnection(host) with urllib.request.urlopen(
client.request(verb, path) urllib.request.Request(
except Exception as e: uri,
self.log_error("Failed to retrieve http://{}{}: {}" method=verb,
.format(host, path, str(e))) headers=self.headers)) as response:
return self.__send_response(response)
except urllib.error.HTTPError as e:
if e.code not in (304,):
self.log_message(
"urlopen() failed for {} with {}".format(uri, e.reason))
self.__send_response(e)
except urllib.error.URLError as e:
self.log_message(
"urlopen() failed for {} with {}".format(uri, e.reason))
self.send_error(501, e.reason)
try:
self.__send_response(client.getresponse())
except Exception as e:
self.log_error("Error delivering response: {}".format(str(e)))
def __get_host_path(self): def __get_host_path(self):
"""Figure out the host to contact and the path of the resource that is """Figure out the host to contact and the path of the resource that is
@ -831,21 +898,27 @@ class ProxyingHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
def __send_response(self, response):
    """Pass on upstream response headers and body to the client.

    ``response`` may be an http.client.HTTPResponse, the file-like
    object returned by urllib.request.urlopen(), or an
    urllib.error.HTTPError, so probe for the attributes each of those
    exposes instead of assuming one concrete type.
    """
    if hasattr(response, "status"):
        status = response.status
    elif hasattr(response, "code"):   # fixed typo: was 'hassattr' (NameError)
        status = response.code
    elif hasattr(response, "getstatus"):
        status = response.getstatus()

    if hasattr(response, "headers"):
        headers = response.headers
    elif hasattr(response, "info"):
        headers = response.info()

    self.send_response(status)

    for name, value in headers.items():
        self.send_header(name, value)
    self.end_headers()

    # HTTPError for e.g. 304 may have no body to copy.
    if hasattr(response, "read"):
        shutil.copyfileobj(response, self.wfile)
def __send_error(self, status, message):
"""Return an HTTP error status and a message in the response body."""
self.send_response(status)
self.send_header("Content-Type", "text/plain; charset=utf-8")
self.end_headers()
self.wfile.write(message.encode("utf-8"))
class MagicHTTPProxy(socketserver.ThreadingMixIn, http.server.HTTPServer): class MagicHTTPProxy(socketserver.ThreadingMixIn, http.server.HTTPServer):
"""Tiny HTTP server using ProxyingHTTPRequestHandler instances to provide """Tiny HTTP server using ProxyingHTTPRequestHandler instances to provide

Loading…
Cancel
Save