# Copyright 2011 Corentin Chary <corentin.chary@gmail.com>
# Copyright 2020-2023 src_prepare group
# Distributed under the terms of the GNU General Public License v2

import errno
import os
import re
import urllib
import urllib.error
import urllib.parse
import urllib.request
import urllib.robotparser
from xml.dom.minidom import Document

import portage
from portage import dep

import euscan
from euscan import BLACKLIST_VERSIONS, CONFIG, ROBOTS_TXT_BLACKLIST_DOMAINS
from euscan.version import parse_version


def htop_vercmp(a, b):
    def fixver(v):
        if v in ["0.11", "0.12", "0.13"]:
            v = "0.1." + v[3:]
        return v

    return simple_vercmp(fixver(a), fixver(b))


VERSION_CMP_PACKAGE_QUIRKS = {"sys-process/htop": htop_vercmp}

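# Illustrative note (not in the original source): the quirk rewrites htop's
# historical upstream versions "0.11"/"0.12"/"0.13" to "0.1.1"/"0.1.2"/"0.1.3"
# before comparing, so vercmp("sys-process/htop", "0.11", "0.2.0") should
# report "0.11" as the older version instead of the newer one.

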
_v_end = r"(?:(?:-|_)(?:pre|p|beta|b|alpha|a|rc|r)\d*)"
_v = r"((?:\d+)(?:(?:\.\d+)*)(?:[a-zA-Z]*?)(?:" + _v_end + "*))"

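# Illustrative example (not in the original source): _v is meant to capture a
# complete version string when embedded in a larger pattern, e.g.
# re.match(_v, "1.2.3_rc2").group(1) should be "1.2.3_rc2".

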
def cast_int_components(version):
    for i, obj in enumerate(version):
        try:
            version[i] = int(obj)
        except ValueError:
            pass
    return version

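# Illustrative example (not in the original source):
# cast_int_components(["1", "2", "b3"]) should return [1, 2, "b3"]; purely
# numeric components become ints, the rest are left as strings, and the list
# is modified in place.

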
def simple_vercmp(a, b):
    if a == b:
        return 0

    # For sane versions
    r = portage.versions.vercmp(a, b)

    if r is not None:
        return r

    # Fallback
    a = parse_version(a)
    b = parse_version(b)

    if a < b:
        return -1
    else:
        return 1

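# Illustrative behaviour (not in the original source): simple_vercmp("1.0", "1.1")
# should return a negative value, simple_vercmp("1.1", "1.0") a positive one,
# and equal strings return 0; callers only rely on the sign.

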
def vercmp(package, a, b):
    if package in VERSION_CMP_PACKAGE_QUIRKS:
        return VERSION_CMP_PACKAGE_QUIRKS[package](a, b)
    return simple_vercmp(a, b)


def version_is_nightly(a, b):
    a = parse_version(a)
    b = parse_version(b)

    # Try to skip nightly builds when not wanted (www-apps/moodle)
    if len(a) != len(b) and len(b) == 2 and len(b[0]) == len("yyyymmdd"):
        if b[0][:4] != "0000":
            return True
    return False

def version_blacklisted(cp, version):
    rule = None
    cpv = "%s-%s" % (cp, version)

    # Check that the generated cpv can be used by portage
    if not portage.versions.catpkgsplit(cpv):
        return False

    for bv in BLACKLIST_VERSIONS:
        if dep.match_from_list(bv, [cpv]):
            rule = bv
            break

    if rule:
        euscan.output.einfo("%s is blacklisted by rule %s" % (cpv, rule))
    return rule is not None

def version_change_end_sep(version):
    match = re.match(r".*(%s)" % _v_end, version)
    if not match:
        return None
    end = match.group(1)
    if end[0] == "_":
        newend = end.replace("_", "-")
    elif end[0] == "-":
        newend = end.replace("-", "_")
    else:
        return None
    return version.replace(end, newend)

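# Illustrative example (not in the original source): the suffix separator is
# flipped, so version_change_end_sep("1.0_rc1") should return "1.0-rc1" and
# version_change_end_sep("1.0-rc1") should return "1.0_rc1"; a version without
# such a suffix returns None.

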
def version_filtered(cp, base, version, vercmp=vercmp):
    if vercmp(cp, base, version) >= 0:
        return True

    if version_blacklisted(cp, version):
        return True

    if version_is_nightly(base, version):
        return True

    return False

def generate_templates_vars(version):
    ret = []

    part = split_version(version)
    for i in range(2, len(part)):
        ver = []
        var = []
        for j in range(i):
            ver.append(str(part[j]))
            var.append("${%d}" % j)

        ret.append((".".join(ver), ".".join(var)))
    ret.append((version, "${PV}"))
    ret.reverse()
    return ret

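# Illustrative example (not in the original source):
# generate_templates_vars("1.2.3") should return
# [("1.2.3", "${PV}"), ("1.2", "${0}.${1}")], i.e. the most specific
# substitution first.

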
def template_from_url(url, version):
    prefix, chunks = url.split("://")
    chunks = chunks.split("/")

    for i in range(len(chunks)):
        chunk = chunks[i]

        subs = generate_templates_vars(version)
        for sub in subs:
            chunk = chunk.replace(sub[0], sub[1])

        chunks[i] = chunk

    return prefix + "://" + "/".join(chunks)

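# Illustrative example (not in the original source, hypothetical URL):
# template_from_url("http://example.com/foo-1.2.3.tar.gz", "1.2.3") should
# return "http://example.com/foo-${PV}.tar.gz".

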
def url_from_template(url, version):
    components = split_version(version)

    url = url.replace("${PV}", version)
    for i in range(len(components)):
        url = url.replace("${%d}" % i, str(components[i]))

    return url

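# Illustrative example (not in the original source, hypothetical URL):
# url_from_template("http://example.com/foo-${PV}.tar.gz", "1.2.4") should
# return "http://example.com/foo-1.2.4.tar.gz", i.e. the inverse of
# template_from_url() for the same template.

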
# Stolen from distutils.LooseVersion
# Used for brute force to increment the version
def split_version(version):
    component_re = re.compile(r"(\d+ | [a-z]+ | \.)", re.VERBOSE)
    components = [x for x in component_re.split(version) if x and x != "."]
    for i in range(len(components)):
        try:
            components[i] = int(components[i])
        except ValueError:
            pass
    return components


def join_version(components):
    version = ""
    for i in range(len(components)):
        version += str(components[i])
        if i >= len(components) - 1:
            break
        if not isinstance(components[i], str) and not isinstance(
            components[i + 1], str
        ):
            version += "."
    return version

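# Illustrative examples (not in the original source):
# split_version("1.2.3b") should return [1, 2, 3, "b"], and
# join_version([1, 2, 3, "b"]) should return "1.2.3b" (a dot is only inserted
# between two numeric components).

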
def increment_version(components, level):
    n = len(components)

    if level > n - 1 or level < 0:
        raise Exception("invalid increment level %d for %d components" % (level, n))

    for i in range(n, level + 1, -1):
        if isinstance(components[i - 1], int):
            components[i - 1] = 0

    if isinstance(components[level], int):
        components[level] += 1

    return components

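# Illustrative example (not in the original source):
# increment_version([1, 2, 3], 1) should return [1, 3, 0]: the component at
# the given level is incremented and every numeric component to its right is
# reset to 0 (the list is modified in place).

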
def gen_versions(components, level):
    n = len(components)
    depth = level
    level = min(level, n)

    if not n:
        return []

    versions = []

    for i in range(n, n - level, -1):
        increment_version(components, i - 1)
        for j in range(depth):
            versions.append(list(components))
            increment_version(components, i - 1)

    return versions

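# Illustrative example (not in the original source): gen_versions([1, 2, 3], 2)
# should return [[1, 2, 4], [1, 2, 5], [1, 3, 0], [1, 4, 0]], i.e. candidate
# versions obtained by bumping the last two components; note that the input
# list is mutated while the candidates are generated.

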
def timeout_for_url(url):
    if "sourceforge" in url:
        timeout = 15
    else:
        timeout = 5
    return timeout


class HeadRequest(urllib.request.Request):
    def get_method(self):
        return "HEAD"


# RobotParser cache
rpcache = {}

def urlallowed(url):
    if CONFIG["skip-robots-txt"]:
        return True

    protocol, domain = urllib.parse.urlparse(url)[:2]

    for bd in ROBOTS_TXT_BLACKLIST_DOMAINS:
        if re.match(bd, domain):
            return True

    for d in ["sourceforge", "berlios", "github.com"]:
        if d in domain:
            return True

    if protocol == "ftp":
        return True

    baseurl = "%s://%s" % (protocol, domain)
    robotsurl = urllib.parse.urljoin(baseurl, "robots.txt")

    if baseurl in rpcache:
        rp = rpcache[baseurl]
    else:
        from socket import getdefaulttimeout, setdefaulttimeout

        timeout = getdefaulttimeout()
        setdefaulttimeout(5)

        rp = urllib.robotparser.RobotFileParser()
        rp.set_url(robotsurl)
        try:
            rp.read()
            rpcache[baseurl] = rp
        except IOError:
            rp = None

        setdefaulttimeout(timeout)

    return rp.can_fetch(CONFIG["user-agent"], url) if rp else True

def urlopen(url, timeout=None, verb="GET"):
    if not urlallowed(url):
        euscan.output.einfo("Url '%s' blocked by robots.txt" % url)
        return None

    if not timeout:
        timeout = timeout_for_url(url)

    if verb == "GET":
        request = urllib.request.Request(url)
    elif verb == "HEAD":
        request = HeadRequest(url)
    else:
        return None

    request.add_header("User-Agent", CONFIG["user-agent"])

    handlers = []

    if CONFIG["cache"]:
        from cache import CacheHandler

        handlers.append(CacheHandler(CONFIG["cache"]))

    if CONFIG["verbose"]:
        debuglevel = CONFIG["verbose"] - 1
        handlers.append(urllib.request.HTTPHandler(debuglevel=debuglevel))

    opener = urllib.request.build_opener(*handlers)

    return opener.open(request, None, timeout)

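# Illustrative usage (not in the original source, hypothetical URL): a HEAD
# probe such as urlopen("http://example.com/foo.tar.gz", verb="HEAD") should
# return an opened response that honours robots.txt and the per-host timeout,
# or None when the URL is disallowed or the verb is unknown.

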
def tryurl(fileurl, template):
    result = True

    if not urlallowed(fileurl):
        euscan.output.einfo("Url '%s' blocked by robots.txt" % fileurl)
        return None

    euscan.output.ebegin("Trying: " + fileurl)

    try:
        basename = os.path.basename(fileurl)

        fp = urlopen(fileurl, verb="HEAD")
        if not fp:
            euscan.output.eend(errno.EPERM)
            return None

        headers = fp.info()

        # Some URLs return Content-disposition with a different filename.
        # Disable the check for now (I have not seen false positives).
        # if 'Content-disposition' in headers and \
        #         basename not in headers['Content-disposition']:
        #     result = None
        if "Content-Length" in headers and headers["Content-Length"] == "0":
            result = None
        elif "Content-Type" in headers and "text/html" in headers["Content-Type"]:
            result = None
        elif (
            "Content-Type" in headers
            and "application/x-httpd-php" in headers["Content-Type"]
        ):
            result = None
        elif fp.geturl() != fileurl:
            regex = regex_from_template(template)
            baseregex = regex_from_template(os.path.basename(template))
            basename2 = os.path.basename(fp.geturl())

            # Redirect to another (earlier?) version
            if basename != basename2 and (
                re.match(regex, fp.geturl()) or re.match(baseregex, basename2)
            ):
                result = None

        if result:
            result = (fp.geturl(), fp.info())

    except urllib.error.URLError:
        result = None
    except IOError:
        result = None

    euscan.output.eend(errno.ENOENT if not result else 0)

    return result

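# Illustrative behaviour (not in the original source): tryurl() returns a
# (final_url, headers) tuple when the HEAD probe looks like a real download,
# and None when the response looks like an error page, an empty body, or a
# redirect to a differently-versioned file name.

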
def regex_from_template(template):
    # Escape
    regexp = re.escape(template)

    # Unescape specific stuff
    regexp = regexp.replace(r"\$\{", "${")
    regexp = regexp.replace(r"\}", "}")
    regexp = regexp.replace(r"}\.$", "}.$")

    # Replace ${\d+}
    # regexp = regexp.replace('${0}', r'([\d]+?)')
    regexp = re.sub(r"(\$\{\d+\}(\.?))+", r"([\\w\.]+?)", regexp)

    # regexp = re.sub(r'(\$\{\d+\}\.?)+', r'([\w]+?)', regexp)
    # regexp = re.sub(r'(\$\{\d+\}\.+)+', '(.+?)\.', regexp)
    # regexp = re.sub(r'(\$\{\d+\})+', '(.+?)', regexp)

    # Full version
    regexp = regexp.replace("${PV}", _v)

    # End
    regexp = regexp + r"/?$"

    return regexp

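# Illustrative example (not in the original source, hypothetical template):
# the pattern returned by regex_from_template("foo-${PV}.tar.gz") should match
# "foo-1.2.3.tar.gz" and capture "1.2.3" as the version group.

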
def basedir_from_template(template):
    idx = template.find("${")
    if idx == -1:
        return template

    idx = template[0:idx].rfind("/")
    if idx == -1:
        return ""

    return template[0:idx]

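# Illustrative example (not in the original source, hypothetical template):
# basedir_from_template("http://example.com/dist/${PV}/foo-${PV}.tar.gz")
# should return "http://example.com/dist", the deepest path that contains no
# version placeholder.

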
def generate_scan_paths(url):
    prefix, chunks = url.split("://")
    chunks = chunks.split("/")

    steps = []

    path = prefix + ":/"
    for chunk in chunks:
        if "${" in chunk:
            steps.append((path, "^(?:|.*/)" + regex_from_template(chunk)))
            path = ""
        else:
            path += "/"
            path += chunk

    return steps

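# Illustrative example (not in the original source, hypothetical template):
# for "http://example.com/dist/${PV}/foo-${PV}.tar.gz", generate_scan_paths()
# should yield two (directory, regex) steps: first the static base
# "http://example.com/dist" with a pattern for the "${PV}" directory, then an
# empty base with a pattern for the "foo-${PV}.tar.gz" file name.

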
def parse_mirror(uri):
    from random import shuffle

    mirrors = portage.settings.thirdpartymirrors()

    if not uri.startswith("mirror://"):
        return uri

    eidx = uri.find("/", 9)
    if eidx == -1:
        euscan.output.einfo("Invalid mirror definition in SRC_URI:\n")
        euscan.output.einfo(" %s\n" % (uri))
        return None

    mirrorname = uri[9:eidx]
    path = uri[eidx + 1 :]

    if mirrorname in mirrors:
        mirrors = mirrors[mirrorname]
        shuffle(mirrors)
        uri = mirrors[0].strip("/") + "/" + path
    else:
        euscan.output.einfo("No known mirror by the name: %s" % (mirrorname))
        return None

    return uri

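# Illustrative behaviour (not in the original source): given a hypothetical
# SRC_URI such as "mirror://gnu/foo/foo-1.0.tar.gz", parse_mirror() should
# pick a random mirror registered for "gnu" in portage's thirdpartymirrors and
# return "<mirror>/foo/foo-1.0.tar.gz"; non-mirror URIs are returned unchanged.

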
def dict_to_xml(data, indent):
    doc = Document()
    root = doc.createElement("euscan")
    doc.appendChild(root)

    def _set_value(parent, value):
        if isinstance(value, dict):
            for k, v in list(value.items()):
                node = doc.createElement(k)
                _set_value(node, v)
                parent.appendChild(node)
        elif isinstance(value, list):
            for item in value:
                node = doc.createElement("value")
                text = doc.createTextNode(item)
                node.appendChild(text)
                parent.appendChild(node)
        else:
            text = doc.createTextNode(str(value))
            parent.appendChild(text)

    for key, value in list(data.items()):
        node = doc.createElement("package")
        node.setAttribute("name", key)
        _set_value(node, value)
        root.appendChild(node)

    return doc.toprettyxml(indent=" " * indent)
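

# Illustrative example (not in the original source, hypothetical data):
# dict_to_xml({"app-foo/bar": {"upstream": ["1.1"]}}, 2) should produce roughly
#
#   <euscan>
#     <package name="app-foo/bar">
#       <upstream>
#         <value>1.1</value>
#       </upstream>
#     </package>
#   </euscan>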