More python3 fixes

Bernard Cafarelli 2019-12-05 18:22:38 +01:00
parent 94c3eeba49
commit 9afec1a034
No known key found for this signature in database
GPG Key ID: 5A761FC3AEC20F13
3 changed files with 6 additions and 6 deletions


@@ -158,7 +158,7 @@ def scan_url(pkg, urls, options, on_progress=None):
         except Exception as e:
             output.ewarn(
                 "Handler failed: [%s] %s" %
-                (e.__class__.__name__, e.message)
+                (e.__class__.__name__, str(e))
             )

         if versions and CONFIG['oneshot']:
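
A note on this hunk: Python 3 removed the exception "message" attribute
(deprecated since Python 2.6), so e.message raises AttributeError there,
while str(e) works the same on both versions. A minimal standalone sketch:

    try:
        raise RuntimeError("handler blew up")
    except Exception as e:
        # Python 3: no e.message; str(e) yields the message text
        print("Handler failed: [%s] %s" % (e.__class__.__name__, str(e)))
    # -> Handler failed: [RuntimeError] handler blew up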


@@ -22,7 +22,7 @@ def scan_pkg(pkg, options):
     output.einfo("Using FreeCode handler: " + package)

     fp = urllib.request.urlopen("http://freecode.com/projects/%s/releases" % package)
-    content = fp.read()
+    content = str(fp.read())

     result = re.findall(
         r'<a href="/projects/%s/releases/(\d+)">([^<]+)</a>' % package,
@@ -36,10 +36,10 @@ def scan_pkg(pkg, options):
             continue
         fp = urllib.request.urlopen("http://freecode.com/projects/%s/releases/%s" %
                                     (package, release_id))
-        content = fp.read()
+        content = str(fp.read())
         download_page = re.findall(r'<a href="(/urls/[^"]+)"', content)[0]
         fp = urllib.request.urlopen("http://freecode.com%s" % download_page)
-        content = fp.read()
+        content = str(fp.read())
         url = re.findall(
             r'In case it doesn\'t, click here: <a href="([^"]+)"',
             content
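
Context for the two freecode hunks above: in Python 3,
urllib.request.urlopen(...).read() returns bytes, and re.findall with a str
pattern raises TypeError on a bytes argument. Wrapping the payload in str()
yields its repr ("b'...'"), which still contains the ASCII markup these
regexes look for; raw.decode() would give the plain text instead. A minimal
sketch with a stand-in byte string rather than a live URL:

    import re

    raw = b'<a href="/projects/foo/releases/12345">1.0</a>'  # as fp.read() returns it
    # re.findall(r'<a ...', raw) would raise:
    # TypeError: cannot use a string pattern on a bytes-like object
    content = str(raw)  # "b'<a href=...'" -- a repr, but the markup survives
    print(re.findall(r'<a href="/projects/%s/releases/(\d+)">([^<]+)</a>' % 'foo',
                     content))
    # -> [('12345', '1.0')]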


@@ -52,7 +52,7 @@ def confidence_score(found, original, minimum=CONFIDENCE):


 def scan_html(data, url, pattern):
-    soup = BeautifulSoup(data)
+    soup = BeautifulSoup(data, features="lxml")
     results = []

     for link in soup.findAll('a'):
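
On the BeautifulSoup change: since bs4 4.4, building a soup without naming a
parser emits a "no parser was explicitly specified" warning and picks
whichever parser happens to be installed, so results can differ between
machines; features="lxml" pins the choice. A minimal sketch, assuming the
lxml package is available:

    from bs4 import BeautifulSoup

    data = '<html><body><a href="/pkg-1.0.tar.gz">pkg-1.0</a></body></html>'
    soup = BeautifulSoup(data, features="lxml")  # explicit parser, no warning
    for link in soup.findAll('a'):
        print(link.get('href'))  # -> /pkg-1.0.tar.gz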
@@ -114,7 +114,7 @@ def scan_directory_recursive(cp, ver, rev, url, steps, orig_url, options):
     results = []

-    if re.search("<\s*a\s+[^>]*href", data, re.I):
+    if re.search(b"<\s*a\s+[^>]*href", data, re.I):
         results.extend(scan_html(data, url, pattern))
     elif url.startswith('ftp://'):
         results.extend(scan_ftp(data, url, pattern))
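
And on the last hunk: data here is the raw bytes from fp.read(), and Python
3's re module refuses to match a str pattern against bytes, so the pattern
itself has to be a bytes literal. A minimal sketch:

    import re

    data = b'<html><A HREF="/files/">files</A></html>'  # raw page payload
    # re.search("<\s*a\s+[^>]*href", data, re.I) -> TypeError under Python 3
    # (rb"..." also avoids the invalid-escape warning a plain b"\s" would draw)
    if re.search(rb"<\s*a\s+[^>]*href", data, re.I):  # bytes pattern for bytes data
        print("looks like an HTML directory listing")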