2011-08-31 15:38:32 +02:00
|
|
|
import os
|
|
|
|
import re
|
|
|
|
import errno
|
2011-09-06 16:32:29 +02:00
|
|
|
import urllib2
|
|
|
|
|
2012-05-24 23:44:05 +02:00
|
|
|
import portage
|
|
|
|
from portage import dep
|
|
|
|
|
2011-09-06 16:32:29 +02:00
|
|
|
try:
|
|
|
|
from urllib import robotparser
|
|
|
|
from urllib import urlparse
|
|
|
|
except ImportError:
|
|
|
|
import robotparser
|
|
|
|
import urlparse
|
|
|
|
|
2011-12-11 14:32:38 +01:00
|
|
|
import euscan
|
2012-07-02 11:20:52 +02:00
|
|
|
from euscan import CONFIG, BLACKLIST_VERSIONS, ROBOTS_TXT_BLACKLIST_DOMAINS
|
|
|
|
from euscan.version import parse_version
|
2012-04-28 18:16:05 +02:00
|
|
|
|
2012-07-07 18:09:14 +02:00
|
|
|
|
2011-08-31 15:38:32 +02:00
|
|
|
def htop_vercmp(a, b):
    """Version comparison quirk for sys-process/htop.

    Upstream's 0.11/0.12/0.13 releases would sort after 0.9 under the
    normal rules; remap them to 0.1.x before comparing.
    """
    def fixver(v):
        # '0.11' -> '0.1.1', '0.12' -> '0.1.2', '0.13' -> '0.1.3'
        if v in ('0.11', '0.12', '0.13'):
            return '0.1.' + v[3:]
        return v

    return simple_vercmp(fixver(a), fixver(b))
|
|
|
|
|
|
|
|
# Per-package overrides for version comparison, keyed by category/package.
# vercmp() consults this table before falling back to simple_vercmp().
VERSION_CMP_PACKAGE_QUIRKS = {
    'sys-process/htop': htop_vercmp
}
|
|
|
|
|
2012-02-20 08:21:07 +01:00
|
|
|
# Matches one trailing version suffix, e.g. '-rc1', '_beta2', '-p3'.
# Raw strings: the originals relied on '\d' being an unrecognized escape,
# which is a DeprecationWarning (W605) on modern Python; values unchanged.
_v_end = r'((-|_)(pre|p|beta|b|alpha|a|rc|r)\d*)'
# Matches a full version: leading digits, dotted components, an optional
# letter, then any number of _v_end suffixes.
_v = r'((\d+)((\.\d+)*)([a-zA-Z]*?)(' + _v_end + '*))'
|
2011-08-31 15:38:32 +02:00
|
|
|
|
2012-04-28 18:16:05 +02:00
|
|
|
|
2012-07-17 12:27:22 +02:00
|
|
|
def is_version_stable(version):
    """Return True unless *version* carries a pre-release suffix."""
    version_type = get_version_type(version)
    return is_version_type_stable(version_type)
|
|
|
|
|
|
|
|
|
|
|
|
def is_version_type_stable(version_type):
    """A version type is stable unless it marks a pre-release."""
    unstable = ("alpha", "beta", "pre", "rc")
    return version_type not in unstable
|
2012-07-17 12:27:22 +02:00
|
|
|
|
|
|
|
|
2012-05-23 23:08:07 +02:00
|
|
|
def get_version_type(version):
    """Classify *version* by its first Gentoo-style suffix.

    Returns one of 'alpha', 'beta', 'pre', 'rc', 'p', or 'release'
    when no known suffix is present.
    """
    gentoo_types = ("alpha", "beta", "pre", "rc", "p")

    # Alphabetic tokens introduced by '.', '_' or '-' that name a
    # recognised suffix type.
    types = [token
             for token in re.findall("[\._-]([a-zA-Z]+)", version)
             if token in gentoo_types]

    if types:
        return types[0]  # TODO: consider returning all types
    return "release"
|
|
|
|
|
|
|
|
|
2012-04-19 15:36:44 +02:00
|
|
|
# Stolen from g-pypi
def gentoo_mangle_version(up_pv):
    """Convert PV to MY_PV if needed

    :param up_pv: Upstream package version
    :type up_pv: string
    :returns: pv
    :rtype: string

    Can't determine PV from upstream's version.
    Do our best with some well-known versioning schemes:

    * 1.0a1 (1.0_alpha1)
    * 1.0-a1 (1.0_alpha1)
    * 1.0b1 (1.0_beta1)
    * 1.0-b1 (1.0_beta1)
    * 1.0-r1234 (1.0_pre1234)
    * 1.0dev-r1234 (1.0_pre1234)
    * 1.0.dev-r1234 (1.0_pre1234)
    * 1.0dev-20091118 (1.0_pre20091118)

    Regex match.groups():

    * pkgfoo-1.0.dev-r1234
    * group 1 pv major (1.0)
    * group 2 replace this with portage suffix (.dev-r)
    * group 3 suffix version (1234)

    The order of the regexes is significant. For instance if you have
    .dev-r123, dev-r123 and -r123 you should order your regex's in
    that order.

    The chronological portage release versions are:

    * _alpha
    * _beta
    * _pre
    * _rc
    * release
    * _p

    **Example:**

    >>> gentoo_mangle_version('1.0b2')
    '1.0_beta2'

    .. note::
      The number of regex's could have been reduced, but we use four
      number of match.groups every time to simplify the code

    """
    # Suffixes with no numeric part carry no version information and are
    # simply stripped (e.g. '1.0-stable' -> '1.0').
    bad_suffixes = re.compile(
        r'((?:[._-]*)(?:dev|devel|final|stable|snapshot)$)', re.I)
    # Trailing revision markers (r/patch/p + digits) become a '_p<rev>'
    # tail appended after the main mangling.
    revision_suffixes = re.compile(
        r'(.*?)([\._-]*(?:r|patch|p)[\._-]*)([0-9]*)$', re.I)
    # Per-portage-suffix lists of upstream spellings; within each list
    # the order is significant (most specific first, see docstring).
    suf_matches = {
        '_pre': [
            r'(.*?)([\._-]*dev[\._-]*r?)([0-9]+)$',
            r'(.*?)([\._-]*(?:pre|preview)[\._-]*)([0-9]*)$',
        ],
        '_alpha': [
            r'(.*?)([\._-]*(?:alpha|test)[\._-]*)([0-9]*)$',
            r'(.*?)([\._-]*a[\._-]*)([0-9]*)$',
            r'(.*[^a-z])(a)([0-9]*)$',
        ],
        '_beta': [
            r'(.*?)([\._-]*beta[\._-]*)([0-9]*)$',
            r'(.*?)([\._-]*b)([0-9]*)$',
            r'(.*[^a-z])(b)([0-9]*)$',
        ],
        '_rc': [
            r'(.*?)([\._-]*rc[\._-]*)([0-9]*)$',
            r'(.*?)([\._-]*c[\._-]*)([0-9]*)$',
            r'(.*[^a-z])(c[\._-]*)([0-9]+)$',
        ],
    }
    rs_match = None
    pv = up_pv
    additional_version = ""

    # First peel off a trailing revision; it is re-appended as '_p<rev>'
    # at the very end, after the main suffix has been mangled.
    rev_match = revision_suffixes.search(up_pv)
    if rev_match:
        pv = up_pv = rev_match.group(1)
        replace_me = rev_match.group(2)
        rev = rev_match.group(3)
        additional_version = '_p' + rev

    # Try each suffix family until one of its regexes matches.
    # NOTE(review): iteration order over this dict is arbitrary on older
    # Pythons; ambiguity across families (if any) is unhandled.
    for this_suf in suf_matches.keys():
        if rs_match:
            break
        for regex in suf_matches[this_suf]:
            rsuffix_regex = re.compile(regex, re.I)
            rs_match = rsuffix_regex.match(up_pv)
            if rs_match:
                portage_suffix = this_suf
                break

    if rs_match:
        # e.g. 1.0.dev-r1234
        major_ver = rs_match.group(1)  # 1.0
        replace_me = rs_match.group(2)  # .dev-r
        rev = rs_match.group(3)  # 1234
        pv = major_ver + portage_suffix + rev
    else:
        # Single suffixes with no numeric component are simply removed.
        match = bad_suffixes.search(up_pv)
        if match:
            suffix = match.groups()[0]
            pv = up_pv[: - (len(suffix))]

    # Re-attach the '_p<rev>' tail stripped at the top, if any.
    pv = pv + additional_version

    return pv
|
|
|
|
|
2012-04-28 18:16:05 +02:00
|
|
|
|
2011-08-31 15:38:32 +02:00
|
|
|
def cast_int_components(version):
    """Convert numeric components of *version* to int, in place.

    Components that cannot be parsed as integers are left untouched.
    Returns the same list for convenience.
    """
    for idx, component in enumerate(version):
        try:
            version[idx] = int(component)
        except ValueError:
            # non-numeric component: keep as-is
            pass
    return version
|
|
|
|
|
2012-04-28 18:16:05 +02:00
|
|
|
|
2011-08-31 15:38:32 +02:00
|
|
|
def simple_vercmp(a, b):
    """Compare two version strings; returns a negative value, 0 or a
    positive value, like a classic cmp function."""
    if a == b:
        return 0

    # portage handles every sane Gentoo-style version
    r = portage.versions.vercmp(a, b)
    if r is not None:
        return r

    # Fallback for versions portage cannot parse
    pa = parse_version(a)
    pb = parse_version(b)
    return -1 if pa < pb else 1
|
|
|
|
|
2012-04-28 18:16:05 +02:00
|
|
|
|
2011-08-31 15:38:32 +02:00
|
|
|
def vercmp(package, a, b):
    """Compare versions *a* and *b*, honouring per-package quirks."""
    quirk = VERSION_CMP_PACKAGE_QUIRKS.get(package)
    if quirk is not None:
        return quirk(a, b)
    return simple_vercmp(a, b)
|
|
|
|
|
2012-04-28 18:16:05 +02:00
|
|
|
|
2011-08-31 15:38:32 +02:00
|
|
|
def version_is_nightly(a, b):
    """Guess whether *b* is a nightly/datestamped build relative to *a*.

    Try to skip nightly builds when not wanted (www-apps/moodle).
    """
    a = parse_version(a)
    b = parse_version(b)

    if len(a) == len(b) or len(b) != 2:
        return False
    if len(b[0]) != len('yyyymmdd'):
        return False
    # '0000'-prefixed stamps are not real dates
    return b[0][:4] != '0000'
|
|
|
|
|
2012-04-28 18:16:05 +02:00
|
|
|
|
2011-08-31 15:38:32 +02:00
|
|
|
def version_blacklisted(cp, version):
    """Return True when ``cp-version`` matches a blacklist rule.

    :param cp: category/package name
    :param version: version string
    :returns: True if a BLACKLIST_VERSIONS rule matches, else False
    """
    rule = None
    cpv = '%s-%s' % (cp, version)

    ''' Check that the generated cpv can be used by portage '''
    if not portage.versions.catpkgsplit(cpv):
        return False

    for bv in BLACKLIST_VERSIONS:
        if dep.match_from_list(bv, [cpv]):
            rule = bv
            # BUGFIX: was a stray 'None' expression statement; stop at
            # the first matching rule.
            break

    if rule:
        # BUGFIX: log the matched rule; the old code logged the loop
        # variable, i.e. the *last* blacklist entry, not the match.
        euscan.output.einfo("%s is blacklisted by rule %s" % (cpv, rule))
    return rule is not None
|
|
|
|
|
2012-04-28 18:16:05 +02:00
|
|
|
|
2012-02-20 08:21:07 +01:00
|
|
|
def version_change_end_sep(version):
    """Toggle the separator of the trailing version suffix.

    '_' becomes '-' and vice versa; returns the new version string, or
    None when no suffix is found or the separator is unrecognised.
    """
    match = re.match('.*' + _v_end, version)
    if not match:
        return None

    end = match.group(1)
    flip = {'_': '-', '-': '_'}
    sep = end[0]
    if sep not in flip:
        return None

    newend = end.replace(sep, flip[sep])
    return version.replace(end, newend)
|
|
|
|
|
2012-04-28 18:16:05 +02:00
|
|
|
|
2012-04-23 17:05:23 +02:00
|
|
|
def version_filtered(cp, base, version, vercmp=vercmp):
    """Return True when *version* should be ignored for package *cp*.

    A version is filtered when it is not strictly newer than *base*,
    when it is blacklisted, or when it looks like an unwanted nightly.
    """
    return (vercmp(cp, base, version) >= 0
            or version_blacklisted(cp, version)
            or version_is_nightly(base, version))
|
|
|
|
|
2012-04-28 18:16:05 +02:00
|
|
|
|
2011-08-31 15:38:32 +02:00
|
|
|
def generate_templates_vars(version):
    """Build (literal, template-var) substitution pairs for *version*.

    Longest substitutions come first (list is reversed) so they are
    applied before their own prefixes.
    """
    ret = []
    part = split_version(version)

    for i in range(2, len(part)):
        literal = ".".join(str(part[j]) for j in range(i))
        template = ".".join('${%d}' % j for j in range(i))
        ret.append((literal, template))

    ret.append((version, '${PV}'))
    ret.reverse()
    return ret
|
|
|
|
|
2012-04-28 18:16:05 +02:00
|
|
|
|
2011-08-31 15:38:32 +02:00
|
|
|
def template_from_url(url, version):
    """Replace occurrences of *version* in *url* with template variables."""
    prefix, rest = url.split('://')
    chunks = rest.split('/')

    # Substitutions are the same for every chunk; compute them once.
    subs = generate_templates_vars(version)

    for i, chunk in enumerate(chunks):
        for literal, var in subs:
            chunk = chunk.replace(literal, var)
        chunks[i] = chunk

    return prefix + "://" + "/".join(chunks)
|
|
|
|
|
2012-04-28 18:16:05 +02:00
|
|
|
|
2011-08-31 15:38:32 +02:00
|
|
|
def url_from_template(url, version):
    """Expand '${PV}' and '${N}' placeholders in *url* using *version*."""
    components = split_version(version)

    url = url.replace('${PV}', version)
    for i, component in enumerate(components):
        url = url.replace('${%d}' % i, str(component))

    return url
|
|
|
|
|
2012-04-28 18:16:05 +02:00
|
|
|
|
2011-08-31 15:38:32 +02:00
|
|
|
# Stolen from distutils.LooseVersion
# Used for brute force to increment the version
def split_version(version):
    """Split *version* into a list of int and str components.

    Dots are dropped and purely numeric parts become ints, e.g.
    '1.2a3' -> [1, 2, 'a', 3].
    """
    component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)
    # BUGFIX: materialize as a list.  Under Python 3, filter() returns an
    # iterator, which would break the index assignment below (identical
    # behavior under Python 2).
    components = [c for c in component_re.split(version) if c and c != '.']
    for i, component in enumerate(components):
        try:
            components[i] = int(component)
        except ValueError:
            pass
    return components
|
|
|
|
|
2012-04-28 18:16:05 +02:00
|
|
|
|
2011-08-31 15:38:32 +02:00
|
|
|
def join_version(components):
    """Join components from split_version() back into a version string.

    A dot is inserted only between two adjacent non-string (numeric)
    components; string components act as their own separators.
    """
    parts = []
    last = len(components) - 1
    for i, component in enumerate(components):
        parts.append(str(component))
        if i >= last:
            break
        # isinstance instead of the original `type(...) != str` check;
        # ''.join avoids the quadratic string `+=` pattern.
        if not isinstance(component, str) \
                and not isinstance(components[i + 1], str):
            parts.append(".")
    return "".join(parts)
|
|
|
|
|
2012-04-28 18:16:05 +02:00
|
|
|
|
2011-08-31 15:38:32 +02:00
|
|
|
def increment_version(components, level):
    """Increment the numeric component at *level*, zeroing deeper ones.

    Operates in place on *components* (as returned by split_version)
    and returns the same list.

    :raises ValueError: when *level* is out of range (was a bare
        ``raise Exception``; ValueError is more precise and remains
        caught by any ``except Exception`` caller)
    """
    n = len(components)

    if level > n - 1 or level < 0:
        raise ValueError("invalid increment level %d for %r"
                         % (level, components))

    # Reset every numeric component deeper than *level*.
    for i in range(n - 1, level, -1):
        if isinstance(components[i], int):
            components[i] = 0

    if isinstance(components[level], int):
        components[level] += 1

    return components
|
|
|
|
|
2012-04-28 18:16:05 +02:00
|
|
|
|
2011-08-31 15:38:32 +02:00
|
|
|
def gen_versions(components, level):
    """Brute-force a list of candidate version component lists.

    Mutates *components* while generating (via increment_version) and
    returns snapshots of it; empty input yields an empty list.
    """
    n = len(components)
    depth = level
    level = min(level, n)

    if not n:
        return []

    versions = []

    for i in range(n, n - level, -1):
        increment_version(components, i - 1)
        for _ in range(depth):
            versions.append(list(components))
            increment_version(components, i - 1)

    return versions
|
|
|
|
|
2012-04-28 18:16:05 +02:00
|
|
|
|
2011-09-06 15:47:54 +02:00
|
|
|
def timeout_for_url(url):
    """Pick a network timeout for *url*; sourceforge gets extra slack."""
    return 15 if 'sourceforge' in url else 5
|
|
|
|
|
2012-04-28 18:16:05 +02:00
|
|
|
|
2011-09-06 15:47:54 +02:00
|
|
|
class HeadRequest(urllib2.Request):
    # urllib2.Request issues GET (or POST with data); override the verb
    # so urlopen()/tryurl() can do lightweight HEAD probes.
    def get_method(self):
        return "HEAD"
|
2011-08-31 15:38:32 +02:00
|
|
|
|
2012-04-28 18:16:05 +02:00
|
|
|
|
2011-09-06 16:32:29 +02:00
|
|
|
""" RobotParser cache """
|
|
|
|
rpcache = {}
|
|
|
|
|
2012-04-28 18:16:05 +02:00
|
|
|
|
2011-09-06 16:32:29 +02:00
|
|
|
def urlallowed(url):
    """Check whether robots.txt permits fetching *url*.

    Always allows: when 'skip-robots-txt' is configured, blacklisted
    robots.txt domains, big hosting sites, and ftp urls.  Otherwise
    fetches and caches robots.txt per scheme://domain and asks it.
    Returns False when robots.txt could not be fetched.
    """
    if CONFIG['skip-robots-txt']:
        return True

    protocol, domain = urlparse.urlparse(url)[:2]

    # Domains whose robots.txt is known to be unhelpful for us.
    for bd in ROBOTS_TXT_BLACKLIST_DOMAINS:
        if re.match(bd, domain):
            return True

    # Big hosting sites are always allowed.
    for d in ['sourceforge', 'berlios', 'github.com']:
        if d in domain:
            return True

    # robots.txt does not apply to ftp.
    if protocol == 'ftp':
        return True

    baseurl = '%s://%s' % (protocol, domain)
    robotsurl = urlparse.urljoin(baseurl, 'robots.txt')

    if baseurl in rpcache:
        rp = rpcache[baseurl]
    else:
        from socket import setdefaulttimeout, getdefaulttimeout

        timeout = getdefaulttimeout()
        setdefaulttimeout(5)

        rp = robotparser.RobotFileParser()
        rp.set_url(robotsurl)
        try:
            rp.read()
            rpcache[baseurl] = rp
        except Exception:
            # Network failure or malformed robots.txt: treat as blocked.
            # BUGFIX: was a bare ``except:``, which also swallowed
            # SystemExit and KeyboardInterrupt.
            rp = None
        finally:
            # BUGFIX: always restore the process-wide socket timeout,
            # even if rp.read() raised.
            setdefaulttimeout(timeout)

    return rp.can_fetch(CONFIG['user-agent'], url) if rp else False
|
2011-09-06 16:32:29 +02:00
|
|
|
|
2012-04-28 18:16:05 +02:00
|
|
|
|
2011-09-06 15:47:54 +02:00
|
|
|
def urlopen(url, timeout=None, verb="GET"):
|
2011-09-06 16:32:29 +02:00
|
|
|
if not urlallowed(url):
|
2011-12-11 14:32:38 +01:00
|
|
|
euscan.output.einfo("Url '%s' blocked by robots.txt" % url)
|
2011-09-06 16:32:29 +02:00
|
|
|
return None
|
|
|
|
|
2011-08-31 15:38:32 +02:00
|
|
|
if not timeout:
|
2011-09-06 15:47:54 +02:00
|
|
|
timeout = timeout_for_url(url)
|
|
|
|
|
|
|
|
if verb == 'GET':
|
|
|
|
request = urllib2.Request(url)
|
|
|
|
elif verb == 'HEAD':
|
|
|
|
request = HeadRequest(url)
|
|
|
|
else:
|
|
|
|
return None
|
2011-08-31 15:38:32 +02:00
|
|
|
|
|
|
|
request.add_header('User-Agent', CONFIG['user-agent'])
|
2011-09-21 10:09:50 +02:00
|
|
|
|
2011-10-02 10:04:44 +02:00
|
|
|
handlers = []
|
|
|
|
|
|
|
|
if CONFIG['cache']:
|
|
|
|
from cache import CacheHandler
|
|
|
|
handlers.append(CacheHandler(CONFIG['cache']))
|
|
|
|
|
2011-09-21 10:09:50 +02:00
|
|
|
if CONFIG['verbose']:
|
|
|
|
debuglevel = CONFIG['verbose'] - 1
|
2011-10-02 10:04:44 +02:00
|
|
|
handlers.append(urllib2.HTTPHandler(debuglevel=debuglevel))
|
2011-09-21 10:09:50 +02:00
|
|
|
|
|
|
|
opener = urllib2.build_opener(*handlers)
|
|
|
|
|
|
|
|
return opener.open(request, None, timeout)
|
2011-08-31 15:38:32 +02:00
|
|
|
|
2012-04-28 18:16:05 +02:00
|
|
|
|
2011-08-31 15:38:32 +02:00
|
|
|
def tryurl(fileurl, template):
    """Probe *fileurl* with a HEAD request and decide if it is a real file.

    Returns (final_url, headers) on success, None when the url is
    blocked, unreachable, or looks like an HTML page / empty body /
    redirect to a different version rather than a downloadable file.
    *template* is the templated url used to recognise version-bearing
    redirects.
    """
    result = True

    if not urlallowed(fileurl):
        euscan.output.einfo("Url '%s' blocked by robots.txt" % fileurl)
        return None

    euscan.output.ebegin("Trying: " + fileurl)

    try:
        basename = os.path.basename(fileurl)

        fp = urlopen(fileurl, verb='HEAD')
        if not fp:
            euscan.output.eend(errno.EPERM)
            return None

        headers = fp.info()

        # Server serves a different file name than requested.
        if 'Content-disposition' in headers and \
                basename not in headers['Content-disposition']:
            result = None
        # Empty body: not a real distfile.
        elif 'Content-Length' in headers and headers['Content-Length'] == '0':
            result = None
        # HTML or PHP responses are landing/error pages, not archives.
        elif 'Content-Type' in headers and \
                'text/html' in headers['Content-Type']:
            result = None
        elif 'Content-Type' in headers and \
                'application/x-httpd-php' in headers['Content-Type']:
            result = None
        elif fp.geturl() != fileurl:
            regex = regex_from_template(template)
            baseregex = regex_from_template(os.path.basename(template))
            basename2 = os.path.basename(fp.geturl())

            # Redirect to another (earlier?) version
            if basename != basename2 and (re.match(regex, fp.geturl()) or \
                    re.match(baseregex, basename2)):
                result = None

        if result:
            result = (fp.geturl(), fp.info())

    except urllib2.URLError:
        result = None
    except IOError:
        result = None

    euscan.output.eend(errno.ENOENT if not result else 0)

    return result
|
|
|
|
|
2012-04-28 18:16:05 +02:00
|
|
|
|
2011-08-31 15:38:32 +02:00
|
|
|
def regex_from_template(template):
    """Turn a download-url template into a regex matching concrete urls.

    '${PV}' expands to the full version pattern (_v); runs of '${N}'
    placeholders become a generic component group; everything else is
    matched literally.
    """
    # Escape everything...
    template = re.escape(template)

    # ...then unescape the template syntax we want to keep.  Raw strings:
    # the originals relied on unrecognized escapes ('\$' etc.), which
    # warn on modern Python; the string values are unchanged.
    template = template.replace(r'\$\{', '${')
    template = template.replace(r'\}', '}')
    template = template.replace(r'}\.$', '}.$')

    # Replace runs of ${\d+} placeholders with a single version-component
    # capture group.
    template = re.sub(r'(\$\{\d+\}(\.?))+', r'([\w\.]+?)', template)

    # Full version
    template = template.replace('${PV}', _v)

    # Anchor at end of url, tolerating a trailing slash.
    template = template + r'/?$'
    return template
|
|
|
|
|
2012-04-28 18:16:05 +02:00
|
|
|
|
2011-08-31 15:38:32 +02:00
|
|
|
def basedir_from_template(template):
    """Return the path part of *template* preceding the first '${' chunk.

    The whole template is returned when it contains no placeholder, and
    '' when the placeholder sits in the very first path component.
    """
    marker = template.find('${')
    if marker == -1:
        return template

    slash = template[:marker].rfind('/')
    return "" if slash == -1 else template[:slash]
|
|
|
|
|
2012-04-28 18:16:05 +02:00
|
|
|
|
2011-08-31 15:38:32 +02:00
|
|
|
def generate_scan_paths(url):
    """Split a templated url into (base-path, chunk-regex) scan steps.

    Literal chunks accumulate into the path; each templated chunk
    produces a step pairing the path collected so far with a regex for
    that chunk.
    """
    prefix, rest = url.split('://')

    steps = []
    path = prefix + ":/"

    for chunk in rest.split('/'):
        if '${' in chunk:
            steps.append((path, regex_from_template(chunk)))
            path = ""
        else:
            path = path + "/" + chunk

    return steps
|
|
|
|
|
|
|
|
def parse_mirror(uri):
    """Resolve a 'mirror://name/path' uri to a concrete url.

    Non-mirror uris are returned unchanged; malformed or unknown
    mirrors yield None.  A random mirror host is picked.
    """
    from random import shuffle

    mirrors = portage.settings.thirdpartymirrors()

    if not uri.startswith("mirror://"):
        return uri

    # The mirror name ends at the first '/' after the scheme.
    eidx = uri.find("/", 9)
    if eidx == -1:
        euscan.output.einfo("Invalid mirror definition in SRC_URI:\n")
        euscan.output.einfo("  %s\n" % (uri))
        return None

    mirrorname = uri[9:eidx]
    path = uri[eidx + 1:]

    if mirrorname not in mirrors:
        euscan.output.einfo("No known mirror by the name: %s" % (mirrorname))
        return None

    candidates = mirrors[mirrorname]
    shuffle(candidates)
    return candidates[0].strip("/") + "/" + path
|