"Fossies" - the Fresh Open Source Software Archive  

Source code changes of the file "modules/pymol/plugins/repository.py" between
pymol-v1.8.6.0.tar.bz2 and pymol-v2.1.0.tar.bz2

About: PyMOL is a Python-enhanced molecular graphics tool. It excels at 3D visualization of proteins, small molecules, density, surfaces, and trajectories. It also includes molecular editing, ray tracing, and movies. Open Source version.

repository.py (pymol-v1.8.6.0.tar.bz2) vs. repository.py (pymol-v2.1.0.tar.bz2)
skipping to change at line 38

    The timeout does not effect the "urlopen" call itself, that takes 20sec
    for unavailable urls in my tests. Also "socket.setdefaulttimeout" does
    not change that.
    '''
    from . import pref_get
    timeout = pref_get('network_timeout', 10.0)
    return urllib2.urlopen(url, timeout=timeout)

+def urlreadstr(url, encoding='iso-8859-1'):
+    '''
+    Download helper to obtain 'str' content with Python 3.
+    '''
+    handle = urlopen(url)
+    content = handle.read()
+    if sys.version_info[0] > 2:
+        charset = handle.headers.get_content_charset() or encoding
+        content = content.decode(charset, errors='ignore')
+    return content
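As a quick illustration of the new helper (the URL below is a placeholder, not one used by PyMOL): on Python 3, urlreadstr() decodes the response bytes with the charset announced in the Content-Type header, falling back to ISO-8859-1, so callers get 'str' on both major Python versions.

    # hypothetical usage of the urlreadstr() helper added above
    html = urlreadstr('http://example.com/plugins/index.html')  # placeholder URL
    assert isinstance(html, str)   # 'str' on Python 2 and on Python 3
    lines = html.splitlines()      # plain text operations, no bytes handling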
class Repository():
    '''
    Abstract repository class

    All open/read operations should raise IOError on failure.
    '''
    def __init__(self, url):
        self.url = url

    def list(self):
        '''
        Return a list of filenames
        '''
        try:
            return self.list_indexfile()
        except:
            return self.list_scan()

    def list_indexfile(self):
-        s = self.retrieve('pluginindex.txt')
+        s = self.retrieve('pluginindex.txt', binary=False)
        return s.splitlines()

    def list_scan(self):
        raise NotImplementedError

-    def retrieve(self, name):
+    def retrieve(self, name, binary=True):
        '''
        Return file content as string
        '''
        raise NotImplementedError

    def copy(self, name, dst):
        '''
        Copy file. The destination may be a directory. Returns the copy file name.
        '''
        import os
skipping to change at line 106 (v1.8.6.0) / line 119 (v2.1.0)

        return baseurl + '/' + name

class HttpRepository(Repository):
    '''
    HTML page over HTTP
    '''
    def list_scan(self):
        import re

        # fetch as string
-        handle = urlopen(self.url)
-        content = handle.read()
+        content = urlreadstr(self.url)

        # clear comments
        re_comment = re.compile(r'<!\s*--.*?--\s*>', re.DOTALL)
        content = re_comment.sub('', content)

        # find links
        names = []
        re_a = re.compile(r'<a\s+(.*?)>')
        re_href = re.compile(r'''href\s*=\s*(\S+|".+?"|'.+?')''')
        re_anchor = re.compile(r'#.*')
skipping to change at line 132 (v1.8.6.0) / line 144 (v2.1.0)

                continue
            name = name[1:-1]
            if '#' in name:
                name = re_anchor.sub('', name)
            # filter for supported types
            if self.is_supported(name):
                names.append(name)
        return names
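For reference, a minimal self-contained sketch of how the three patterns above cooperate when scanning a page for plugin links; the HTML snippet is invented for illustration:

    import re
    re_a = re.compile(r'<a\s+(.*?)>')
    re_href = re.compile(r'''href\s*=\s*(\S+|".+?"|'.+?')''')
    html = '<a href="plugin_a.py">A</a> <a href=plugin_b.zip>B</a>'
    for attrs in re_a.findall(html):        # attribute string of each <a> tag
        m = re_href.search(attrs)
        if m:
            print(m.group(1).strip('\'"'))  # plugin_a.py, then plugin_b.zip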
-    def retrieve(self, name):
+    def retrieve(self, name, binary=True):
        url = self.get_full_url(name)
-        handle = urlopen(url)
-        content = handle.read()
-        handle.close()
+        if binary:
+            handle = urlopen(url)
+            content = handle.read()
+            handle.close()
+        else:
+            content = urlreadstr(url)
        return content
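A hedged sketch of what the new flag means for callers (the repository URL is invented): binary retrieval keeps raw bytes, suitable for .zip or .py downloads, while binary=False routes through urlreadstr() and yields decoded text, which is what list_indexfile() now relies on.

    repo = HttpRepository('http://example.com/pymol-plugins/')  # placeholder URL
    raw = repo.retrieve('some_plugin.zip')                  # binary=True: raw bytes
    index = repo.retrieve('pluginindex.txt', binary=False)  # decoded text ('str')
    names = index.splitlines()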
    def get_full_url(self, name):
        import re
        if '://' in name:
            return name
        if name.startswith('/'):
            baseurl = '/'.join(self.url.split('/')[:3])
        else:
skipping to change at line 194 (v1.8.6.0) / line 210 (v2.1.0)

            name = d['path']
            if self.is_supported(name):
                names.append(name)
        return names

    list = list_scan
    def fetchjson(self, url):
        import json
-        handle = urlopen('https://api.github.com' + url)
-        return json.loads(handle.read())
+        content = urlreadstr('https://api.github.com' + url)
+        return json.loads(content)
class LocalRepository(Repository):
    def __init__(self, url):
        r = urlparse(url)
        self.url = r.path

    def list_scan(self):
        import os
        names = os.listdir(self.url)
        return list(filter(self.is_supported, names))
-    def retrieve(self, name):
+    def retrieve(self, name, binary=True):
        url = self.get_full_url(name)
-        handle = open(url, "rb")
+        handle = open(url, "rb" if binary else "rU")
        content = handle.read()
        handle.close()
        return content
    def copy(self, name, dst):
        import shutil
        url = self.get_full_url(name)
        shutil.copy(url, dst)
        return dst
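For completeness, a small usage sketch with invented paths. __init__ resolves the file:// URL to a plain filesystem path, and "rU" is Python 2's universal-newlines text mode:

    repo = LocalRepository('file:///home/user/.pymol/plugins')  # hypothetical path
    data = repo.retrieve('myplugin.zip')                   # "rb": raw bytes
    text = repo.retrieve('pluginindex.txt', binary=False)  # "rU": decoded text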
skipping to change at line 307 (v1.8.6.0) / line 323 (v2.1.0)

    if os.path.exists(filename):
        if not quiet:
            print('File "%s" exists, will not redownload')
    else:
        if not quiet:
            print('Downloading', url)
        # get page content
        try:
-            handle = urlopen(url)
-            content = handle.read()
+            content = urlreadstr(url)
        except IOError as e:
            raise CmdException(e, "Plugin-Error")
    if not rawscript:
        # redirect
        redirect = re.match(r'\s*#REDIRECT\s*\[\[(.*?)\]\]', content)
        if redirect is not None:
            return fetchscript(redirect.group(1), dest, run, quiet)

        # parse Infobox
End of changes. 10 change blocks; 14 lines changed or deleted, 29 lines changed or added.
