"Fossies" - the Fresh Open Source Software Archive  

Source code changes of the file "scripts/import_sf.py" between
roundup-1.6.1.tar.gz and roundup-2.0.0.tar.gz

About: Roundup is a highly customisable issue-tracking system with command-line, web and e-mail interfaces (written in Python).

import_sf.py (roundup-1.6.1) vs. import_sf.py (roundup-2.0.0)
skipping to change at line 24

    this will generate a directory "/tmp/imported" which contains the
    data to be imported into a Roundup tracker.
 4. Import the data:
    roundup-admin -i <tracker home> import /tmp/imported
 And you're done!
 """
-import sys, os, csv, time, urllib2, httplib, mimetypes, urlparse
+import sys, os, csv, time, mimetypes
 try:
     import cElementTree as ElementTree
 except ImportError:
     from elementtree import ElementTree
 from roundup import instance, hyperdb, date, support, password
+from roundup.anypy import http_, urllib_
+from roundup.anypy.strings import s2b, us2s
 today = date.Date('.')
 DL_URL = 'http://sourceforge.net/tracker/download.php?group_id=%(group_id)s&atid=%(atid)s&aid=%(aid)s'
 def get_url(aid):
     """ so basically we have to jump through hoops, given an artifact id, to
     figure what the URL should be to access that artifact, and hence any
     attached files."""
     # first we hit this URL...
-    conn = httplib.HTTPConnection("sourceforge.net")
+    conn = http_.client.HTTPConnection("sourceforge.net")
     conn.request("GET", "/support/tracker.php?aid=%s"%aid)
     response = conn.getresponse()
     # which should respond with a redirect to the correct url which has the
     # magic "group_id" and "atid" values in it that we need
     assert response.status == 302, 'response code was %s'%response.status
     location = response.getheader('location')
-    query = urlparse.urlparse(response.getheader('location'))[-2]
+    query = urllib_.urlparse(response.getheader('location'))[-2]
     info = dict([param.split('=') for param in query.split('&')])
     return DL_URL%info
 def fetch_files(xml_file, file_dir):
     """ Fetch files referenced in the xml_file into the dir file_dir. """
     root = ElementTree.parse(xml_file).getroot()
     to_fetch = set()
     deleted = set()
     for artifact in root.find('artifacts'):
         for field in artifact.findall('field'):
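The hunk above is the heart of the Python 3 port: the Python 2-only modules urllib2, httplib and urlparse are dropped in favour of Roundup's compatibility wrappers, roundup.anypy.http_ and roundup.anypy.urllib_, so a single import site works on both interpreters. A minimal sketch of the pattern such a wrapper follows (illustrative only; the real roundup.anypy modules are more complete):

    # Illustrative two-way import shim in the style of roundup.anypy.
    # Both branches use real stdlib names; the shim layout itself is
    # an assumption, not roundup's actual source.
    try:
        import http.client as client            # Python 3 locations
        from urllib.parse import urlparse
        from urllib.request import urlopen
    except ImportError:
        import httplib as client                 # Python 2 locations
        from urlparse import urlparse
        from urllib2 import urlopen

Callers can then write client.HTTPConnection(...), urlparse(...) and urlopen(...) without knowing which interpreter is running, which is exactly how the rewritten get_url() reaches http_.client and urllib_.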
skipping to change at line 88 (1.6.1) / line 90 (2.0.0)

     # load cached urls (sigh)
     urls = {}
     if os.path.exists(os.path.join(file_dir, 'urls.txt')):
         for line in open(os.path.join(file_dir, 'urls.txt')):
             aid, url = line.strip().split()
             urls[aid] = url
     for aid, fid in support.Progress('Fetching files', list(to_fetch)):
         if fid in got: continue
-        if not urls.has_key(aid):
+        if aid not in urls:
             urls[aid] = get_url(aid)
             f = open(os.path.join(file_dir, 'urls.txt'), 'a')
             f.write('%s %s\n'%(aid, urls[aid]))
             f.close()
         url = urls[aid] + '&file_id=' + fid
-        f = urllib2.urlopen(url)
+        f = urllib_.urlopen(url)
         data = f.read()
         n = open(os.path.join(file_dir, fid), 'w')
         n.write(data)
         f.close()
         n.close()
 def import_xml(tracker_home, xml_file, file_dir):
     """ Generate Roundup tracker import files based on the tracker schema,
     sf.net xml export and downloaded files from sf.net. """
     tracker = instance.open(tracker_home)
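Two things change in fetch_files(): dict.has_key(), which was removed in Python 3, gives way to the in operator (valid on both interpreters and the idiomatic spelling since Python 2.2), and urllib2.urlopen() is reached through the urllib_ wrapper. A quick illustration of the membership test with a made-up cache entry:

    urls = {'1001': 'http://sourceforge.net/tracker/download.php?...'}
    # urls.has_key('1001')  # Python 2 only; AttributeError on Python 3
    '1001' in urls          # portable: evaluates to True
    '2002' not in urls      # True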
skipping to change at line 178 (1.6.1) / line 180 (2.0.0)

                 users.add(field.text)
             elif name == 'open_date':
                 thedate = to_date(field.text)
                 op['adddate'] = thedate
                 d[name] = thedate
             else:
                 d[name] = field.text
         categories.add(d['category'])
-        if op.has_key('body'):
+        if 'body' in op:
             l = d.setdefault('messages', [])
             l.insert(0, op)
     add_files -= remove_files
     # create users
     userd = {'nobody': '2'}
     users.remove('nobody')
     data = [
         {'id': '1', 'username': 'admin', 'password': password.Password('admin'),
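The same has_key-to-in conversion is applied to op here. The surrounding logic deserves a note: the artifact's original submission text (gathered into op earlier in the loop) is prepended to the issue's message list, which is created on demand with dict.setdefault. A toy sketch with hypothetical values:

    d = {}                                   # the issue record being assembled
    op = {'content': 'original report text'} # hypothetical stand-in for the body
    msgs = d.setdefault('messages', [])      # creates the list on first access
    msgs.insert(0, op)                       # the description becomes message 0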
skipping to change at line 281 (1.6.1) / line 283 (2.0.0)

         if d['status'] == unread:
             d['status'] = chatting
         # add import message
         m = {'content': 'IMPORT FROM SOURCEFORGE', 'author': '1',
              'date': today, 'creation': today}
         message_data.append(m)
         # sort messages and assign ids
         d['messages'] = []
-        message_data.sort(lambda a,b:cmp(a['date'],b['date']))
+        message_data.sort(key=lambda a:a['date'])
         for message in message_data:
             message_id += 1
             message['id'] = str(message_id)
             d['messages'].append(message_id)
         d['nosy'] = list(nosy)
         files = []
         for event in artifact.get('history', []):
             if event['field_name'] == 'File Added':
                 fid, name = event['old_value'].split(':', 1)
                 if fid in add_files:
                     files.append(fid)
                     name = name.strip()
                     try:
-                        f = open(os.path.join(file_dir, fid))
+                        f = open(os.path.join(file_dir, fid), 'rb')
                         content = f.read()
                         f.close()
                     except:
                         content = 'content missing'
                     file_data.append({
                         'id': fid,
                         'creation': event['entrydate'],
                         'creator': users[event['mod_by']],
                         'name': name,
                         'type': mimetypes.guess_type(name)[0],
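This hunk fixes two more Python 3 incompatibilities. First, list.sort() lost its cmp argument (and the cmp() builtin is gone), so the comparison-function sort becomes a key sort, which is also faster because each element's key is computed once rather than on every comparison. Second, attachment files are now opened with 'rb' so their content is read as bytes. A sketch of the sort change with toy data:

    message_data = [{'date': 20}, {'date': 5}, {'date': 12}]   # toy values

    # Python 2 style, removed in Python 3:
    #   message_data.sort(lambda a, b: cmp(a['date'], b['date']))

    # Portable replacement, as used in the new code:
    message_data.sort(key=lambda m: m['date'])

    # If a genuine comparison function had to be preserved instead:
    from functools import cmp_to_key
    def by_date(a, b):
        return (a['date'] > b['date']) - (a['date'] < b['date'])
    message_data.sort(key=cmp_to_key(by_date))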
skipping to change at line 364 (1.6.1) / line 366 (2.0.0)

         writer = csv.writer(f, colon_separated)
         propnames = klass.export_propnames()
         propnames.append('is retired')
         writer.writerow(propnames)
         for entry in data:
             row = []
             for name in propnames:
                 if name == 'is retired':
                     continue
                 prop = props[name]
-                if entry.has_key(name):
+                if name in entry:
                     if isinstance(prop, hyperdb.Date) or \
                             isinstance(prop, hyperdb.Interval):
                         row.append(repr(entry[name].get_tuple()))
                     elif isinstance(prop, hyperdb.Password):
                         row.append(repr(str(entry[name])))
                     else:
                         row.append(repr(entry[name]))
                 elif isinstance(prop, hyperdb.Multilink):
                     row.append('[]')
                 elif name in ('creator', 'actor'):
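The export loop serialises each property with repr() into a colon-separated CSV file so the import side can reconstruct the values. The colon_separated dialect is defined in a part of the script this diff does not show; a minimal equivalent, assuming it simply overrides the delimiter, might look like:

    import csv

    class colon_separated(csv.excel):   # assumption: mirrors the dialect
        delimiter = ':'                 # defined elsewhere in the script

    with open('/tmp/example.csv', 'w') as f:   # on Python 3, add newline=''
        writer = csv.writer(f, colon_separated)
        writer.writerow(['id', 'title', 'is retired'])
        writer.writerow(['1', repr('first issue'), False])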
skipping to change at line 386 (1.6.1) / line 388 (2.0.0)

                 elif name in ('created', 'activity'):
                     row.append(repr(today.get_tuple()))
                 else:
                     row.append('None')
             row.append(entry.get('is retired', False))
             writer.writerow(row)
             if isinstance(klass, hyperdb.FileClass) and entry.get('content'):
                 fname = klass.exportFilename('/tmp/imported/', entry['id'])
                 support.ensureParentsExist(fname)
-                c = open(fname, 'w')
+                c = open(fname, 'wb')
-                if isinstance(entry['content'], unicode):
+                if isinstance(entry['content'], bytes):
-                    c.write(entry['content'].encode('utf8'))
-                else:
                     c.write(entry['content'])
+                else:
+                    c.write(s2b(us2s(entry['content'])))
                 c.close()
         f.close()
         f = open('/tmp/imported/%s-journals.csv'%klass.classname, 'w')
         f.close()
 if __name__ == '__main__':
     if sys.argv[1] == 'import':
         import_xml(*sys.argv[2:])
     elif sys.argv[1] == 'files':
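The last hunk makes the attachment export byte-safe: the file is opened in binary mode and the content is normalised to bytes before writing, using s2b() and us2s() from roundup.anypy.strings. A sketch of the behaviour the export relies on (the real helpers differ in detail and handle more cases):

    import sys

    def us2s(value):
        # "unknown string to native str": on Python 2, encode unicode
        # down to a byte str; on Python 3, str is already native.
        if sys.version_info[0] == 2 and not isinstance(value, str):
            return value.encode('utf-8')
        return value

    def s2b(value):
        # "str to bytes": UTF-8 encode a native str (a no-op on
        # Python 2, where str is already a byte string).
        if isinstance(value, bytes):
            return value
        return value.encode('utf-8')

    content = u'r\xe9sum\xe9 attached'           # may be str or bytes in practice
    data = content if isinstance(content, bytes) else s2b(us2s(content))
    open('/tmp/example.bin', 'wb').write(data)   # always writes bytes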
End of changes. 12 change blocks; 13 lines changed or deleted, 15 lines changed or added.
