"Fossies" - the Fresh Open Source Software Archive  

Source code changes of the file "src/Tardis/Client.py" between
Tardis-1.1.5.tar.gz and Tardis-1.2.1.tar.gz

About: Tardis is a system for making encrypted, incremental backups of filesystems.

Client.py (Tardis-1.1.5) : Client.py (Tardis-1.2.1)
skipping to change at line 60 (1.1.5) / line 60 (1.2.1)
import io import io
import shlex import shlex
import urllib.parse import urllib.parse
import functools import functools
import stat import stat
import uuid import uuid
import errno import errno
import unicodedata import unicodedata
import pprint import pprint
import traceback import traceback
import hmac import threading
import cProfile import cProfile
import socket
import concurrent.futures
from binascii import hexlify from binascii import hexlify
import magic import magic
import pid import pid
import parsedatetime import parsedatetime
import srp import srp
import colorlog import colorlog
from pathmatch import wildmatch from pathmatch import wildmatch
from functools import reduce from functools import reduce
from collections import defaultdict
import Tardis import Tardis
import Tardis.TardisCrypto as TardisCrypto import Tardis.TardisCrypto as TardisCrypto
import Tardis.CompressedBuffer as CompressedBuffer import Tardis.CompressedBuffer as CompressedBuffer
import Tardis.Connection as Connection import Tardis.Connection as Connection
import Tardis.Util as Util import Tardis.Util as Util
import Tardis.Defaults as Defaults import Tardis.Defaults as Defaults
import Tardis.librsync as librsync import Tardis.librsync as librsync
import Tardis.MultiFormatter as MultiFormatter import Tardis.MultiFormatter as MultiFormatter
import Tardis.StatusBar as StatusBar import Tardis.StatusBar as StatusBar
import Tardis.Backend as Backend
#import Tardis.Throttler as Throttler
import Tardis.ThreadedScheduler as ThreadedScheduler
features = Tardis.check_features() features = Tardis.check_features()
support_xattr = 'xattr' in features support_xattr = 'xattr' in features
support_acl = 'pylibacl' in features support_acl = 'pylibacl' in features
if support_xattr: if support_xattr:
import xattr import xattr
if support_acl: if support_acl:
import posix1e import posix1e
globalExcludeFile = Defaults.getDefault('TARDIS_GLOBAL_EXCLUDES') globalExcludeFile = Defaults.getDefault('TARDIS_GLOBAL_EXCLUDES')
local_config = Defaults.getDefault('TARDIS_LOCAL_CONFIG') local_config = Defaults.getDefault('TARDIS_LOCAL_CONFIG')
if not os.path.exists(local_config): if not os.path.exists(local_config):
local_config = Defaults.getDefault('TARDIS_DAEMON_CONFIG') local_config = Defaults.getDefault('TARDIS_DAEMON_CONFIG')
configDefaults = { configDefaults = {
# Remote Socket connectionk params
'Server': Defaults.getDefault('TARDIS_SERVER'), 'Server': Defaults.getDefault('TARDIS_SERVER'),
'Port': Defaults.getDefault('TARDIS_PORT'), 'Port': Defaults.getDefault('TARDIS_PORT'),
# Local Direct connect params
'BaseDir': Defaults.getDefault('TARDIS_DB'),
'DBDir': Defaults.getDefault('TARDIS_DBDIR'),
'DBName': Defaults.getDefault('TARDIS_DBNAME'),
'Schema': Defaults.getDefault('TARDIS_SCHEMA'),
'Local': '',
'Client': Defaults.getDefault('TARDIS_CLIENT'), 'Client': Defaults.getDefault('TARDIS_CLIENT'),
'Force': str(False), 'Force': str(False),
'Full': str(False), 'Full': str(False),
'Timeout': str(300.0), 'Timeout': str(300.0),
'Password': None, 'Password': None,
'PasswordFile': Defaults.getDefault('TARDIS_PWFILE'), 'PasswordFile': Defaults.getDefault('TARDIS_PWFILE'),
'PasswordProg': None, 'PasswordProg': None,
'Crypt': str(True), 'Crypt': str(True),
'KeyFile': Defaults.getDefault('TARDIS_KEYFILE'), 'KeyFile': Defaults.getDefault('TARDIS_KEYFILE'),
'SendClientConfig': Defaults.getDefault('TARDIS_SEND_CONFIG'), 'SendClientConfig': Defaults.getDefault('TARDIS_SEND_CONFIG'),
'CompressData': 'none', 'CompressData': 'none',
'CompressMin': str(4096), 'CompressMin': str(4096),
'NoCompressFile': Defaults.getDefault('TARDIS_NOCOMPRESS'), 'NoCompressFile': Defaults.getDefault('TARDIS_NOCOMPRESS'),
'NoCompress': '', 'NoCompress': '',
'Local': str(False),
'LocalServerCmd': 'tardisd --config ' + local_config,
'CompressMsgs': 'none', 'CompressMsgs': 'none',
'Purge': str(False), 'Purge': str(False),
'IgnoreCVS': str(False), 'IgnoreCVS': str(False),
'SkipCaches': str(False), 'SkipCaches': str(False),
'SendSig': str(False), 'SendSig': str(False),
'ExcludePatterns': '', 'ExcludePatterns': '',
'ExcludeFiles': '',
'ExcludeDirs': '', 'ExcludeDirs': '',
'GlobalExcludeFileName':Defaults.getDefault('TARDIS_GLOBAL_EXCLUDES'), 'GlobalExcludeFileName':Defaults.getDefault('TARDIS_GLOBAL_EXCLUDES'),
'ExcludeFileName': Defaults.getDefault('TARDIS_EXCLUDES'), 'ExcludeFileName': Defaults.getDefault('TARDIS_EXCLUDES'),
'LocalExcludeFileName': Defaults.getDefault('TARDIS_LOCAL_EXCLUDES'), 'LocalExcludeFileName': Defaults.getDefault('TARDIS_LOCAL_EXCLUDES'),
'SkipFileName': Defaults.getDefault('TARDIS_SKIP'), 'SkipFileName': Defaults.getDefault('TARDIS_SKIP'),
'ExcludeNoAccess': str(True), 'ExcludeNoAccess': str(True),
'LogFiles': '', 'LogFiles': '',
'Verbosity': str(0), 'Verbosity': str(0),
'Stats': str(False), 'Stats': str(False),
'Report': str(False), 'Report': 'none',
'Directories': '.', 'Directories': '.',
# Backend parameters
'Formats' : 'Monthly-%Y-%m, Weekly-%Y-%U, Daily-%Y-%m-%d',
'Priorities' : '40, 30, 20',
'KeepDays' : '0, 180, 30',
'ForceFull' : '0, 0, 0',
'Umask' : '027',
'User' : '',
'Group' : '',
'CksContent' : '65536',
'AutoPurge' : str(False),
'SaveConfig' : str(True),
'AllowClientOverrides' : str(True),
'AllowSchemaUpgrades' : str(False),
'JournalFile' : Defaults.getDefault('TARDIS_JOURNAL'),
'SaveFull' : str(False),
'MaxDeltaChain' : '5',
'MaxChangePercent' : '50',
'DBBackups' : '0',
'LinkBasis' : str(False),
'RequirePassword' : str(False),
} }
excludeDirs = [] excludeDirs = []
starttime = None starttime = None
encoding = None encoding = None
encoder = None encoder = None
decoder = None decoder = None
purgePriority = None purgePriority = None
purgeTime = None purgeTime = None
globalExcludes = [] globalExcludes = set()
cvsExcludes = ["RCS", "SCCS", "CVS", "CVS.adm", "RCSLOG", "cvslog.*", "t ags", "TAGS", ".make.state", ".nse_depinfo", cvsExcludes = ["RCS", "SCCS", "CVS", "CVS.adm", "RCSLOG", "cvslog.*", "t ags", "TAGS", ".make.state", ".nse_depinfo",
"*~", "#*", ".#*", ",*", "_$*", "*$", "*.old", "*.bak", " *.BAK", "*.orig", "*.rej", ".del-*", "*.a", "*~", "#*", ".#*", ",*", "_$*", "*$", "*.old", "*.bak", " *.BAK", "*.orig", "*.rej", ".del-*", "*.a",
"*.olb", "*.o", "*.obj", "*.so", "*.exe", "*.Z", "*.elc", "*.ln", "core", ".*.swp", ".*.swo", "*.olb", "*.o", "*.obj", "*.so", "*.exe", "*.Z", "*.elc", "*.ln", "core", ".*.swp", ".*.swo",
".svn", ".git", ".hg", ".bzr"] ".svn", ".git", ".hg", ".bzr"]
verbosity = 0 verbosity = 0
conn = None conn = None
args = None args = None
config = None config = None
skipping to change at line 195 (1.1.5) / line 229 (1.2.1)
# backed == Total size of data represented by the backup.
# dataSent == Number of data bytes sent this run (not including messages)
# dataBacked == Number of bytes backed up this run
# Example: If you have 100 files, and 99 of them are already backed up (ie, one new), backed would be 100, but new would be 1.
# dataSent is the compressed and encrypted size of the files (or deltas) sent in this run, but dataBacked is the total size of
# the files.
stats = { 'dirs' : 0, 'files' : 0, 'links' : 0, 'backed' : 0, 'dataSent': 0, 'dataBacked': 0, 'new': 0, 'delta': 0, 'gone': 0, 'denied': 0 }
report = {}
(Tardis-1.1.5)
inodeDB = {}

(Tardis-1.2.1)
class InodeEntry:
    def __init__(self):
        self.paths = []
        self.numEntries = 0
        self.finfo = None

class InodeDB:
    def __init__(self):
        self.db = defaultdict(InodeEntry)

    def insert(self, inode, finfo, path):
        entry = self.db[inode]
        entry.numEntries += 1
        entry.paths.append(path)
        entry.finfo = finfo

    def get(self, inode, num=0):
        if not inode in self.db:
            return (None, None)
        entry = self.db[inode]
        if num >= len(entry.paths):
            return (entry.finfo, None)
        return (entry.finfo, entry.paths[num])

    def delete(self, inode, path=None):
        if inode in self.db:
            entry = self.db[inode]
            entry.numEntries -= 1
            if entry.numEntries == 0:
                self.db.pop(inode)
            if path:
                entry.paths.remove(path)
            else:
                entry.paths.pop(0)

inodeDB = InodeDB()
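The change from a bare dict to InodeDB adds reference counting per (inode, device) key, so hard links (several paths sharing one inode) keep all their paths instead of overwriting one another. A minimal usage sketch (values made up), assuming the class above:

    db = InodeDB()
    key = (1234, 1)                                  # an (st_ino, st_dev) pair, as built in mkFileInfo
    db.insert(key, {'size': 42}, '/data/a.txt')
    db.insert(key, {'size': 42}, '/data/link-to-a')  # hard link: same inode, second path
    finfo, path = db.get(key)                        # ({'size': 42}, '/data/a.txt')
    finfo, path = db.get(key, num=1)                 # ({'size': 42}, '/data/link-to-a')
    db.delete(key, '/data/a.txt')                    # one reference remains; entry survives
    db.delete(key)                                   # count reaches zero; key is dropped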
dirHashes = {} dirHashes = {}
# Logging Formatter that allows us to specify formats that won't have a levelname header, ie, those that
# will only have a message
class MessageOnlyFormatter(logging.Formatter): class MessageOnlyFormatter(logging.Formatter):
def __init__(self, fmt = '%(levelname)s: %(message)s', levels=[logging.INFO]):
logging.Formatter.__init__(self, fmt) logging.Formatter.__init__(self, fmt)
self.levels = levels self.levels = levels
def format(self, record): def format(self, record):
skipping to change at line 238 (1.1.5) / line 307 (1.2.1)
class ProtocolError(Exception): class ProtocolError(Exception):
pass pass
class AuthenticationFailed(Exception): class AuthenticationFailed(Exception):
pass pass
class ExitRecursionException(Exception): class ExitRecursionException(Exception):
def __init__(self, rootException): def __init__(self, rootException):
self.rootException = rootException self.rootException = rootException
class FakeDirEntry:
def __init__(self, dirname, filename):
self.name = filename
self.path = os.path.join(dirname, filename)
def stat(self, follow_symlinks=True):
if follow_symlinks:
return os.stat(self.path)
else:
return os.lstat(self.path)
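FakeDirEntry mimics the small slice of the os.DirEntry interface that mkFileInfo consumes (name, path, stat()), so paths that never came from os.scandir(), notably the prefix directories built in createPrefixPath further down, can reuse the same code path. Illustrative use:

    entry = FakeDirEntry('/home', 'user')
    st = entry.stat(follow_symlinks=False)           # equivalent to os.lstat('/home/user')
    print(entry.name, entry.path, st.st_ino)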
def setEncoder(format): def setEncoder(format):
global encoder, encoding, decoder global encoder, encoding, decoder
if format == 'base64': if format == 'base64':
encoding = "base64" encoding = "base64"
encoder = base64.b64encode encoder = base64.b64encode
decoder = base64.b64decode decoder = base64.b64decode
elif format == 'bin': elif format == 'bin':
encoding = "bin" encoding = "bin"
encoder = lambda x: x encoder = lambda x: x
decoder = lambda x: x decoder = lambda x: x
skipping to change at line 267 (1.1.5) / line 347 (1.2.1)
return val return val
def checkMessage(message, expected): def checkMessage(message, expected):
""" Check that a message is of the expected type. Throw an exception if not """ """ Check that a message is of the expected type. Throw an exception if not """
if not message['message'] == expected: if not message['message'] == expected:
logger.critical("Expected {} message, received {}".format(expected, mess age['message'])) logger.critical("Expected {} message, received {}".format(expected, mess age['message']))
raise ProtocolError("Expected {} message, received {}".format(expected, message['message'])) raise ProtocolError("Expected {} message, received {}".format(expected, message['message']))
(Tardis-1.1.5)
def filelist(dirname, excludes):
    """ List the files in a directory, except those that match something in a set of patterns """
    files = os.listdir(dirname)
    for p in excludes:
        # This has to be listifed. If it doesn't, it seems to not do the filtering. Not sure why
        files = list(itertools.filterfalse(lambda x: p.match(os.path.join(dirname, x)), files))
    return files

(Tardis-1.2.1)
def filelist(dirname, excludes):
    """ List the files in a directory, except those that match something in a set of patterns """
    files = os.scandir(dirname)
    for f in files:
        if all(not p.match(f.path) for p in excludes):
            yield f
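The rewrite replaces an os.listdir() list that was re-filtered once per pattern with a single lazy os.scandir() pass: an entry is yielded only if no exclude pattern matches its full path, and the returned DirEntry objects carry cached stat information. A sketch of calling the 1.2.1 generator, using re patterns as stand-ins for the wildmatch pattern objects (both expose .match()):

    import re
    excludes = [re.compile(r'.*\.tmp$'), re.compile(r'.*/\.git$')]   # hypothetical patterns
    for entry in filelist('.', excludes):
        print(entry.name, entry.stat(follow_symlinks=False).st_size)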
#_deletedInodes = {} #_deletedInodes = {}
def delInode(inode):
if args.loginodes:
args.loginodes.write(f"Del {str(inode)}\n".encode('utf8'))
if inode in inodeDB:
del inodeDB[inode]
#_deletedInodes[inode] = (currentResponse, currentBatch)
def msgInfo(resp=None, batch=None): def msgInfo(resp=None, batch=None):
if resp is None: resp = currentResponse if resp is None: resp = currentResponse
if batch is None: batch = currentBatch if batch is None: batch = currentBatch
respId = resp['respid'] respId = resp['respid']
respType = resp['message'] respType = resp['message']
if batch: if batch:
batchId = batch['respid'] batchId = batch['respid']
else: else:
batchId = None batchId = None
return (respId, respType, batchId) return (respId, respType, batchId)
(Tardis-1.1.5)
def processChecksums(inodes):
    """ Generate checksums for requested checksum files """
    files = []
    for inode in inodes:
        try:
            (_, pathname) = inodeDB[inode]
            setProgress("File [C]:", pathname)
            m = crypt.getHash()
            s = os.lstat(pathname)
            mode = s.st_mode
            if stat.S_ISLNK(mode):
                m.update(fs_encode(os.readlink(pathname)))
            else:
                try:
                    with open(pathname, "rb") as f:
                        for chunk in iter(functools.partial(f.read, args.chunksize), b''):
                            if chunk:
                                m.update(chunk)
                            else:
                                break
                    checksum = m.hexdigest()
                    files.append({ "inode": inode, "checksum": checksum })
                except IOError as e:
                    logger.error("Unable to generate checksum for %s: %s", pathname, str(e))
                    exceptionLogger.log(e)
                    # TODO: Add an error response?
        except KeyError as e:
            (rId, rType, bId) = msgInfo()
            logger.error("Unable to process checksum for %s, not found in inodeDB (%s, %s -- %s)", str(inode), rId, rType, bId)
            exceptionLogger.log(e)
            # TODO: Add an error response?
            #if inode in _deletedInodes:
            #    (resp, batch) = _deletedInodes[inode]
            #    (rId, rType, bId) = msgInfo(resp, batch)
            #
            #    logger.error("Already deleted inode %s in message: %s %s -- %s", str(inode), rId, rType, bId)
            logger.debug(repr(traceback.format_stack()))
        except FileNotFoundError as e:
            logger.error("Unable to stat %s. File not found", pathname)
            exceptionLogger.log(e)
            # TODO: Add an error response?
    message = {
        "message": "CKS",
        "files": files
    }
    #response = sendAndReceive(message)
    #handleAckSum(response)
    batchMessage(message)

(Tardis-1.2.1)
pool = concurrent.futures.ThreadPoolExecutor()

def genChecksum(inode):
    checksum = None
    try:
        (_, pathname) = inodeDB.get(inode)
        setProgress("File [C]:", pathname)
        m = crypt.getHash()
        s = os.lstat(pathname)
        mode = s.st_mode
        if stat.S_ISLNK(mode):
            m.update(fs_encode(os.readlink(pathname)))
        else:
            try:
                with open(pathname, "rb") as f:
                    for chunk in iter(functools.partial(f.read, args.chunksize), b''):
                        if chunk:
                            m.update(chunk)
                        else:
                            break
                checksum = m.hexdigest()
                # files.append({ "inode": inode, "checksum": checksum })
            except IOError as e:
                logger.error("Unable to generate checksum for %s: %s", pathname, str(e))
                exceptionLogger.log(e)
                # TODO: Add an error response?
    except KeyError as e:
        (rId, rType, bId) = msgInfo()
        logger.error("Unable to process checksum for %s, not found in inodeDB (%s, %s -- %s)", str(inode), rId, rType, bId)
        exceptionLogger.log(e)
    except FileNotFoundError as e:
        logger.error("Unable to stat %s. File not found", pathname)
        exceptionLogger.log(e)
        # TODO: Add an error response?
    return inode, checksum

def processChecksums(inodes):
    """ Generate checksums for requested checksum files """
    files = []
    jobs = pool.map(genChecksum, inodes)
    for job in jobs:
        inode, checksum = job
        files.append({ "inode": inode, "checksum": checksum })
    message = {
        "message": "CKS",
        "files": files
    }
    #response = sendAndReceive(message)
    #handleAckSum(response)
    batchMessage(message)
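This is the most structural change in the file: per-file hashing moves into genChecksum so a shared ThreadPoolExecutor can hash many files concurrently, while processChecksums consumes the results in submission order. The same pattern in isolation (hash_file is a hypothetical stand-in for genChecksum, sha256 for crypt.getHash()):

    import concurrent.futures
    import hashlib

    pool = concurrent.futures.ThreadPoolExecutor()

    def hash_file(path):
        try:
            m = hashlib.sha256()
            with open(path, 'rb') as f:
                for chunk in iter(lambda: f.read(65536), b''):
                    m.update(chunk)
            return path, m.hexdigest()
        except OSError:
            return path, None                        # like genChecksum: report, never raise

    # pool.map() yields results in input order, so the CKS reply stays deterministic
    for path, cksum in pool.map(hash_file, ['/etc/hostname', '/etc/hosts']):
        print(path, cksum)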
def logFileInfo(i, c): def logFileInfo(i, c):
if i in inodeDB: (x, name) = inodeDB.get(i)
(x, name) = inodeDB[i] if name:
if "size" in x: if "size" in x:
size = x["size"] size = x["size"]
else: else:
size = 0 size = 0
size = Util.fmtSize(size, formats=['','KB','MB','GB', 'TB', 'PB']) size = Util.fmtSize(size, formats=['','KB','MB','GB', 'TB', 'PB'])
logger.log(logging.FILES, "[%c]: %s (%s)", c, Util.shortPath(name), size ) logger.log(logging.FILES, "[%c]: %s (%s)", c, Util.shortPath(name), size )
cname = crypt.encryptPath(name) cname = crypt.encryptPath(name)
logger.debug("Filename: %s => %s", Util.shortPath(name), Util.shortPath( cname)) logger.debug("Filename: %s => %s", Util.shortPath(name), Util.shortPath( cname))
def handleAckSum(response): def handleAckSum(response):
checkMessage(response, 'ACKSUM') checkMessage(response, 'ACKSUM')
logfiles = logger.isEnabledFor(logging.FILES) logfiles = logger.isEnabledFor(logging.FILES)
done = response.setdefault('done', {}) done = response.setdefault('done', {})
content = response.setdefault('content', {}) content = response.setdefault('content', {})
delta = response.setdefault('delta', {}) delta = response.setdefault('delta', {})
# First, delete all the files which are "done", ie, matched # First, delete all the files which are "done", ie, matched
for i in [tuple(x) for x in done]: for i in [tuple(x) for x in done]:
if logfiles: if logfiles:
if i in inodeDB: (x, name) = inodeDB.get(i)
(x, name) = inodeDB[i] if name:
logger.log(logging.FILES, "[C]: %s", Util.shortPath(name)) logger.log(logging.FILES, "[C]: %s", Util.shortPath(name))
delInode(i) inodeDB.delete(i)
# First, then send content for any files which don't # First, then send content for any files which don't
# FIXME: TODO: There should be a test in here for Delta's # FIXME: TODO: There should be a test in here for Delta's
for i in [tuple(x) for x in content]: for i in [tuple(x) for x in content]:
if logfiles: if logfiles:
logFileInfo(i, 'n') logFileInfo(i, 'n')
sendContent(i, 'Full') sendContent(i, 'Full')
delInode(i) inodeDB.delete(i)
signatures = None signatures = None
if not args.full and len(delta) != 0: if not args.full and len(delta) != 0:
signatures = prefetchSigFiles(delta) signatures = prefetchSigFiles(delta)
for i in [tuple(x) for x in delta]: for i in [tuple(x) for x in delta]:
if logfiles: if logfiles:
logFileInfo(i, 'd') logFileInfo(i, 'd')
processDelta(i, signatures) processDelta(i, signatures)
delInode(i) inodeDB.delete(i)
def makeEncryptor(): def makeEncryptor():
iv = crypt.getIV() iv = crypt.getIV()
encryptor = crypt.getContentEncryptor(iv) encryptor = crypt.getContentEncryptor(iv)
return (encryptor, iv) return (encryptor, iv)
def prefetchSigFiles(inodes): def prefetchSigFiles(inodes):
logger.debug("Requesting signature files: %s", str(inodes)) logger.debug("Requesting signature files: %s", str(inodes))
signatures = {} signatures = {}
skipping to change at line 456 (1.1.5) / line 529 (1.2.1)
checksum = sigmessage['checksum'] checksum = sigmessage['checksum']
else: else:
(_, pathname) = inodeDB[inode] (_, pathname) = inodeDB[inode]
logger.warning("No signature file received for %s: %s", inode, pathname) logger.warning("No signature file received for %s: %s", inode, pathname)
sigfile = None sigfile = None
checksum = None checksum = None
return (sigfile, None) return (sigfile, None)
def getInodeDBName(inode): def getInodeDBName(inode):
if inode in inodeDB: (_, name) = inodeDB.get(inode)
return inodeDB[inode][1] if name:
return name
else: else:
return "Unknown" return "Unknown"
def processDelta(inode, signatures): def processDelta(inode, signatures):
""" Generate a delta and send it """ """ Generate a delta and send it """
if verbosity > 3: if verbosity > 3:
logger.debug("ProcessDelta: %s %s", inode, getInodeDBName(inode)) logger.debug("ProcessDelta: %s %s", inode, getInodeDBName(inode))
if args.loginodes: if args.loginodes:
args.loginodes.write(f"ProcessDelta {str(inode)} {getInodeDBName(inode)} \n".encode('utf8')) args.loginodes.write(f"ProcessDelta {str(inode)} {getInodeDBName(inode)} \n".encode('utf8'))
try: try:
(_, pathname) = inodeDB[inode] (_, pathname) = inodeDB.get(inode)
setProgress("File [D]:", pathname) setProgress("File [D]:", pathname)
logger.debug("Processing delta: %s :: %s", str(inode), pathname) logger.debug("Processing delta: %s :: %s", str(inode), pathname)
if signatures and inode in signatures: if signatures and inode in signatures:
(sigfile, oldchksum) = signatures[inode] (sigfile, oldchksum) = signatures[inode]
else: else:
(sigfile, oldchksum) = fetchSignature(inode) (sigfile, oldchksum) = fetchSignature(inode)
if sigfile is not None: if sigfile is not None:
try: try:
skipping to change at line 544 (1.1.5) / line 618 (1.2.1)
message = { message = {
"message" : "SIG", "message" : "SIG",
"checksum": checksum "checksum": checksum
} }
sendMessage(message) sendMessage(message)
#batchMessage(message, flush=True, batch=False, response=Fal se) #batchMessage(message, flush=True, batch=False, response=Fal se)
# Send the signature, generated above # Send the signature, generated above
(sigsize, _, _) = Util.sendData(conn.sender, newsig, TardisCrypto.NullEncryptor(), chunksize=args.chunksize, compress=False, stats=stats)      # Don't bother to encrypt the signature
newsig.close() newsig.close()
if args.report: if args.report != 'none':
x = { 'type': 'Delta', 'size': sent, 'sigsize': sigsize } x = { 'type': 'Delta', 'size': sent, 'sigsize': sigsize }
# Convert to Unicode, and normalize any characters, so lengths become reasonable
name = unicodedata.normalize('NFD', pathname) name = unicodedata.normalize('NFD', pathname)
report[os.path.split(pathname)] = x report[os.path.split(pathname)] = x
logger.debug("Completed %s -- Checksum %s -- %s bytes, %s signat ure bytes", Util.shortPath(pathname), checksum, sent, sigsize) logger.debug("Completed %s -- Checksum %s -- %s bytes, %s signat ure bytes", Util.shortPath(pathname), checksum, sent, sigsize)
else: else:
if logger.isEnabledFor(logging.DEBUG): if logger.isEnabledFor(logging.DEBUG):
logger.debug("Delta size for %s is too large. Sending full content: Delta: %d File: %d", Util.shortPath(pathname, 40), deltasize, filesize) logger.debug("Delta size for %s is too large. Sending full content: Delta: %d File: %d", Util.shortPath(pathname, 40), deltasize, filesize)
sendContent(inode, 'Full') sendContent(inode, 'Full')
else: else:
skipping to change at line 573 (1.1.5) / line 647 (1.2.1)
def sendContent(inode, reportType): def sendContent(inode, reportType):
""" Send the content of a file. Compress and encrypt, as specified by the o ptions. """ """ Send the content of a file. Compress and encrypt, as specified by the o ptions. """
if verbosity > 3: if verbosity > 3:
logger.debug("SendContent: %s %s %s", inode, reportType, getInodeDBName( inode)) logger.debug("SendContent: %s %s %s", inode, reportType, getInodeDBName( inode))
if args.loginodes: if args.loginodes:
args.loginodes.write(f"SendContent: {inode} {reportType} {getInodeDBName (inode)}\n".encode('utf8')) args.loginodes.write(f"SendContent: {inode} {reportType} {getInodeDBName (inode)}\n".encode('utf8'))
#if inode in inodeDB: #if inode in inodeDB:
try: try:
checksum = None checksum = None
(fileInfo, pathname) = inodeDB[inode] (fileInfo, pathname) = inodeDB.get(inode)
if pathname: if pathname:
mode = fileInfo["mode"] mode = fileInfo["mode"]
filesize = fileInfo["size"] filesize = fileInfo["size"]
if logger.isEnabledFor(logging.DEBUG): if logger.isEnabledFor(logging.DEBUG):
logger.debug("Sending content for %s (%s) -- %s", inode, Util.fm tSize(filesize), Util.shortPath(pathname, 60)) logger.debug("Sending content for %s (%s) -- %s", inode, Util.fm tSize(filesize), Util.shortPath(pathname, 60))
setProgress("File [N]:", pathname) setProgress("File [N]:", pathname)
if stat.S_ISDIR(mode): if stat.S_ISDIR(mode):
skipping to change at line 643 (1.1.5) / line 717 (1.2.1)
stats=stats) stats=stats)
if sig: if sig:
sig.seek(0) sig.seek(0)
message = { message = {
"message" : "SIG", "message" : "SIG",
"checksum": checksum "checksum": checksum
} }
sendMessage(message) sendMessage(message)
#batchMessage(message, batch=False, flush=True, response=Fal se) #batchMessage(message, batch=False, flush=True, response=Fal se)
(sigsize, _, _) = Util.sendData(conn, sig, TardisCrypto.NullEncryptor(), chunksize=args.chunksize, stats=stats)           # Don't bother to encrypt the signature    (1.1.5)
(sigsize, _, _) = Util.sendData(conn.sender, sig, TardisCrypto.NullEncryptor(), chunksize=args.chunksize, stats=stats)    # Don't bother to encrypt the signature    (1.2.1)
except Exception as e: except Exception as e:
logger.error("Caught exception during sending of data in %s: %s" , pathname, e) logger.error("Caught exception during sending of data in %s: %s" , pathname, e)
exceptionLogger.log(e) exceptionLogger.log(e)
#raise e #raise e
finally: finally:
if data is not None: if data is not None:
data.close() data.close()
if sig is not None: if sig is not None:
sig.close() sig.close()
Util.accumulateStat(stats, 'new') Util.accumulateStat(stats, 'new')
if args.report: if args.report != 'none':
repInfo = { 'type': reportType, 'size': size, 'sigsize': sigsize } repInfo = { 'type': reportType, 'size': size, 'sigsize': sigsize }
report[os.path.split(pathname)] = repInfo report[os.path.split(pathname)] = repInfo
logger.debug("Completed %s -- Checksum %s -- %s bytes, %s signature bytes", Util.shortPath(pathname), checksum, size, sigsize) logger.debug("Completed %s -- Checksum %s -- %s bytes, %s signature bytes", Util.shortPath(pathname), checksum, size, sigsize)
except KeyError as e: except KeyError as e:
logger.error("SendContent: No inode entry for %s", inode) logger.error("SendContent: No inode entry for %s", inode)
logger.debug(repr(traceback.format_stack())) logger.debug(repr(traceback.format_stack()))
if args.loginodes: if args.loginodes:
args.loginodes.write(f"SendContent: No inode entry for {inode}\n".en code('utf8')) args.loginodes.write(f"SendContent: No inode entry for {inode}\n".en code('utf8'))
exceptionLogger.log(e) exceptionLogger.log(e)
skipping to change at line 718 (1.1.5) / line 792 (1.2.1)
del dirHashes[i] del dirHashes[i]
except KeyError as e: except KeyError as e:
pass pass
# This kindof isn't an error. The BatchMessages call can cause the sendDirHashes to be sent again, which ends up deleteing
# the message before it's deleted here.
#logger.warning("Unable to delete Directory Hash for %s", i) #logger.warning("Unable to delete Directory Hash for %s", i)
#if args.exceptions: #if args.exceptions:
# logger.exception("No directory hash entry for %s", i) # logger.exception("No directory hash entry for %s", i)
def cksize(i, threshhold): def cksize(i, threshhold):
if i in inodeDB: (f, _) = inodeDB.get(i)
(f, _) = inodeDB[i] if f and f['size'] > threshhold:
if f['size'] > threshhold: return True
return True
return False return False
allContent = [] allContent = []
allDelta = [] allDelta = []
allCkSum = [] allCkSum = []
allRefresh = [] allRefresh = []
allDone = []
def handleAckDir(message): def handleAckDir(message):
global allContent, allDelta, allCkSum, allRefresh global allContent, allDelta, allCkSum, allRefresh, allDone
checkMessage(message, 'ACKDIR') checkMessage(message, 'ACKDIR')
content = message.setdefault("content", {}) content = message.setdefault("content", {})
done = message.setdefault("done", {}) done = message.setdefault("done", {})
delta = message.setdefault("delta", {}) delta = message.setdefault("delta", {})
cksum = message.setdefault("cksum", {}) cksum = message.setdefault("cksum", {})
refresh = message.setdefault("refresh", {}) refresh = message.setdefault("refresh", {})
if verbosity > 2: if verbosity > 2:
path = message['path'] path = message['path']
if crypt: if crypt:
path = crypt.decryptPath(path) path = crypt.decryptPath(path)
logger.debug("Processing ACKDIR: Up-to-date: %3d New Content: %3d Delta: %3d ChkSum: %3d -- %s", len(done), len(content), len(delta), len(cksum), Util.s hortPath(path, 40)) logger.debug("Processing ACKDIR: Up-to-date: %3d New Content: %3d Delta: %3d ChkSum: %3d -- %s", len(done), len(content), len(delta), len(cksum), Util.s hortPath(path, 40))
# Prune the messages
for i in [tuple(x) for x in done]:
delInode(i)
if args.loginodes: if args.loginodes:
args.loginodes.write(f"Adding to AllContent: ({len(allContent)}):: {len( content)}: {str(content)}\n".encode('utf8')) args.loginodes.write(f"Adding to AllContent: ({len(allContent)}):: {len( content)}: {str(content)}\n".encode('utf8'))
args.loginodes.write(f"Adding to AllRefresh: ({len(allRefresh)}):: {len( refresh)}: {str(refresh)}\n".encode('utf8')) args.loginodes.write(f"Adding to AllRefresh: ({len(allRefresh)}):: {len( refresh)}: {str(refresh)}\n".encode('utf8'))
args.loginodes.write(f"Adding to AllDelta: ({len(allDelta)}):: {len(de lta)}: {str(delta)}\n".encode('utf8')) args.loginodes.write(f"Adding to AllDelta: ({len(allDelta)}):: {len(de lta)}: {str(delta)}\n".encode('utf8'))
args.loginodes.write(f"Adding to AllCkSum: ({len(allCkSum)}):: {len(ck sum)}: {str(cksum)}\n".encode('utf8')) args.loginodes.write(f"Adding to AllCkSum: ({len(allCkSum)}):: {len(ck sum)}: {str(cksum)}\n".encode('utf8'))
allContent += content allContent += content
allDelta += delta allDelta += delta
allCkSum += cksum allCkSum += cksum
allRefresh += refresh allRefresh += refresh
allDone += done
def pushFiles(): def pushFiles():
global allContent, allDelta, allCkSum, allRefresh global allContent, allDelta, allCkSum, allRefresh, allDone
logger.debug("Pushing files") logger.debug("Pushing files")
# If checksum content in NOT specified, send the data for each file # If checksum content in NOT specified, send the data for each file
if args.loginodes: if args.loginodes:
args.loginodes.write(f"Pushing Files\n".encode('utf8')) args.loginodes.write(f"Pushing Files\n".encode('utf8'))
args.loginodes.write(f"AllContent: {len(allContent)}: {str(allContent)}\ n".encode('utf8')) args.loginodes.write(f"AllContent: {len(allContent)}: {str(allContent)}\ n".encode('utf8'))
args.loginodes.write(f"AllRefresh: {len(allRefresh)}: {str(allRefresh)}\ n".encode('utf8')) args.loginodes.write(f"AllRefresh: {len(allRefresh)}: {str(allRefresh)}\ n".encode('utf8'))
args.loginodes.write(f"AllDelta: {len(allDelta)}: {str(allDelta)}\n".e ncode('utf8')) args.loginodes.write(f"AllDelta: {len(allDelta)}: {str(allDelta)}\n".e ncode('utf8'))
args.loginodes.write(f"AllCkSum: {len(allCkSum)}: {str(allCkSum)}\n".e ncode('utf8')) args.loginodes.write(f"AllCkSum: {len(allCkSum)}: {str(allCkSum)}\n".e ncode('utf8'))
processed = []
for i in [tuple(x) for x in allContent]: for i in [tuple(x) for x in allContent]:
try: try:
if logger.isEnabledFor(logging.FILES): if logger.isEnabledFor(logging.FILES):
logFileInfo(i, 'N') logFileInfo(i, 'N')
sendContent(i, 'New') sendContent(i, 'New')
processed.append(i)
except Exception as e: except Exception as e:
logger.error("Unable to backup %s: %s", str(i), str(e)) logger.error("Unable to backup %s: %s", str(i), str(e))
delInode(i)
# clear it out
allContent = []
for i in [tuple(x) for x in allRefresh]: for i in [tuple(x) for x in allRefresh]:
if logger.isEnabledFor(logging.FILES): if logger.isEnabledFor(logging.FILES):
logFileInfo(i, 'N') logFileInfo(i, 'R')
try: try:
sendContent(i, 'Full') sendContent(i, 'Full')
processed.append(i)
except Exception as e: except Exception as e:
logger.error("Unable to backup %s: %s", str(i), str(e)) logger.error("Unable to backup %s: %s", str(i), str(e))
delInode(i)
# clear it out
allRefresh = []
# If there are any delta files requested, ask for them # If there are any delta files requested, ask for them
signatures = None signatures = None
if not args.full and len(allDelta) != 0: if not args.full and len(allDelta) != 0:
signatures = prefetchSigFiles(allDelta) signatures = prefetchSigFiles(allDelta)
for i in [tuple(x) for x in allDelta]: for i in [tuple(x) for x in allDelta]:
# If doing a full backup, send the full file, else just a delta. # If doing a full backup, send the full file, else just a delta.
try: try:
if args.full: if args.full:
if logger.isEnabledFor(logging.FILES): if logger.isEnabledFor(logging.FILES):
logFileInfo(i, 'N') logFileInfo(i, 'N')
sendContent(i, 'Full') sendContent(i, 'Full')
else: else:
if logger.isEnabledFor(logging.FILES): if logger.isEnabledFor(logging.FILES):
if i in inodeDB: (x, name) = inodeDB.get(i)
(x, name) = inodeDB[i] if name:
logger.log(logging.FILES, "[D]: %s", Util.shortPath(name )) logger.log(logging.FILES, "[D]: %s", Util.shortPath(name ))
processDelta(i, signatures) processDelta(i, signatures)
processed.append(i)
except Exception as e: except Exception as e:
logger.error("Unable to backup %s: ", str(i), str(e)) logger.error("Unable to backup %s: ", str(i), str(e))
delInode(i)
# clear it out
for i in processed:
inodeDB.delete(i)
for i in [tuple(x) for x in allDone]:
inodeDB.delete(i)
allRefresh = []
allContent = []
allDelta = [] allDelta = []
allDone = []
# If checksum content is specified, concatenate the checksums and content requests, and handle checksums
# for all of them.
if len(allCkSum) > 0: if len(allCkSum) > 0:
cksums = [tuple(x) for x in allCkSum] cksums = [tuple(x) for x in allCkSum]
allCkSum = [] # Clear it out to avoid processing loop allCkSum = [] # Clear it out to avoid processing loop
processChecksums(cksums) processChecksums(cksums)
logger.debug("Done pushing") logger.debug("Done pushing")
skipping to change at line 846 (1.1.5) / line 921 (1.2.1)
if meta in metaCache: if meta in metaCache:
return metaCache[meta] return metaCache[meta]
else: else:
m = crypt.getHash() m = crypt.getHash()
m.update(bytes(meta, 'utf8')) m.update(bytes(meta, 'utf8'))
digest = m.hexdigest() digest = m.hexdigest()
metaCache[meta] = digest metaCache[meta] = digest
newmeta.append(digest) newmeta.append(digest)
return digest return digest
def mkFileInfo(dir, name): def mkFileInfo(f):
pathname = os.path.join(dir, name) pathname = f.path
s = f.stat(follow_symlinks=False)
# Cleanup any bogus characters # Cleanup any bogus characters
name = name.encode('utf8', 'backslashreplace').decode('utf8') name = f.name.encode('utf8', 'backslashreplace').decode('utf8')
s = os.lstat(pathname)
mode = s.st_mode mode = s.st_mode
# If we don't want to even create dir entries for things we can't access, just return None
# if we can't access the file itself
if args.skipNoAccess and (not Util.checkPermission(s.st_uid, s.st_gid, mode)):
return None return None
if stat.S_ISREG(mode) or stat.S_ISDIR(mode) or stat.S_ISLNK(mode): if stat.S_ISREG(mode) or stat.S_ISDIR(mode) or stat.S_ISLNK(mode):
#name = crypt.encryptFilename(name) #name = crypt.encryptFilename(name)
finfo = { finfo = {
'name': name, 'name': name,
'inode': s.st_ino, 'inode': s.st_ino,
skipping to change at line 907 (1.1.5) / line 983 (1.2.1)
cks = addMeta(str(acl)) cks = addMeta(str(acl))
finfo['acl'] = cks finfo['acl'] = cks
except: except:
logger.warning("Could not read ACL's from %s. Ignoring", pathn ame.encode('utf8', 'backslashreplace').decode('utf8')) logger.warning("Could not read ACL's from %s. Ignoring", pathn ame.encode('utf8', 'backslashreplace').decode('utf8'))
# Insert into the inode DB # Insert into the inode DB
inode = (s.st_ino, s.st_dev) inode = (s.st_ino, s.st_dev)
if args.loginodes: if args.loginodes:
args.loginodes.write(f"Add {str(inode)} {pathname}\n".encode('utf8') ) args.loginodes.write(f"Add {str(inode)} {pathname}\n".encode('utf8') )
inodeDB[inode] = (finfo, pathname) inodeDB.insert(inode, finfo, pathname)
else: else:
if verbosity: if verbosity:
logger.info("Skipping special file: %s", pathname) logger.info("Skipping special file: %s", pathname)
finfo = None finfo = None
return finfo return finfo
def getDirContents(dir, dirstat, excludes=[]): def getDirContents(dirname, dirstat, excludes=set()):
""" Read a directory, load any new exclusions, delete the excluded files, an d return a list """ Read a directory, load any new exclusions, delete the excluded files, an d return a list
of the files, a list of sub directories, and the new list of excluded pa tterns """ of the files, a list of sub directories, and the new list of excluded pa tterns """
#logger.debug("Processing directory : %s", dir) #logger.debug("Processing directory : %s", dir)
Util.accumulateStat(stats, 'dirs') Util.accumulateStat(stats, 'dirs')
device = dirstat.st_dev device = dirstat.st_dev
# Process an exclude file which will be passed on down to the receivers # Process an exclude file which will be passed on down to the receivers
newExcludes = loadExcludeFile(os.path.join(dir, excludeFile)) newExcludes = loadExcludeFile(os.path.join(dirname, excludeFile))
newExcludes.extend(excludes) newExcludes = newExcludes.union(excludes)
excludes = newExcludes excludes = newExcludes
# Add a list of local files to exclude. These won't get passed to lower directories
localExcludes = list(excludes)                                                                   (1.1.5)
localExcludes.extend(loadExcludeFile(os.path.join(dir, args.localexcludefile)))                  (1.1.5)
localExcludes = excludes.union(loadExcludeFile(os.path.join(dirname, args.localexcludefile)))    (1.2.1)
files = [] files = []
subdirs = [] subdirs = []
try: try:
for f in filelist(dir, localExcludes): for f in filelist(dirname, localExcludes):
try: try:
fInfo = mkFileInfo(dir, f) fInfo = mkFileInfo(f)
if fInfo and (args.crossdev or device == fInfo['dev']): if fInfo and (args.crossdev or device == fInfo['dev']):
mode = fInfo["mode"] mode = fInfo["mode"]
if stat.S_ISLNK(mode): if stat.S_ISLNK(mode):
Util.accumulateStat(stats, 'links') Util.accumulateStat(stats, 'links')
elif stat.S_ISREG(mode): elif stat.S_ISREG(mode):
Util.accumulateStat(stats, 'files') Util.accumulateStat(stats, 'files')
Util.accumulateStat(stats, 'backed', fInfo['size']) Util.accumulateStat(stats, 'backed', fInfo['size'])
if stat.S_ISDIR(mode): if stat.S_ISDIR(mode):
sub = os.path.join(dir, f) sub = os.path.join(dirname, f)
if sub in excludeDirs: if sub in excludeDirs:
logger.debug("%s excluded. Skipping", sub) logger.debug("%s excluded. Skipping", sub)
continue continue
else: else:
subdirs.append(sub) subdirs.append(sub)
files.append(fInfo) files.append(fInfo)
except (IOError, OSError) as e: except (IOError, OSError) as e:
logger.error("Error processing %s: %s", os.path.join(dir, f), st r(e)) logger.error("Error processing %s: %s", os.path.join(dirname, f) , str(e))
except Exception as e: except Exception as e:
## Is this necessary? Fold into above? ## Is this necessary? Fold into above?
logger.error("Error processing %s: %s", os.path.join(dir, f), st r(e)) logger.error("Error processing %s: %s", os.path.join(dirname, f) , str(e))
exceptionLogger.log(e) exceptionLogger.log(e)
except (IOError, OSError) as e: except (IOError, OSError) as e:
logger.error("Error reading directory %s: %s" ,dir, str(e)) logger.error("Error reading directory %s: %s" ,dir, str(e))
return (files, subdirs, excludes) return (files, subdirs, excludes)
def handleAckClone(message): def handleAckClone(message):
checkMessage(message, 'ACKCLN') checkMessage(message, 'ACKCLN')
if verbosity > 2: if verbosity > 2:
logger.debug("Processing ACKCLN: Up-to-date: %d New Content: %d", len(me ssage['done']), len(message['content'])) logger.debug("Processing ACKCLN: Up-to-date: %d New Content: %d", len(me ssage['done']), len(message['content']))
skipping to change at line 983 (1.1.5) / line 1058 (1.2.1)
content = message.setdefault('content', {}) content = message.setdefault('content', {})
done = message.setdefault('done', {}) done = message.setdefault('done', {})
# Purge out what hasn't changed # Purge out what hasn't changed
for i in done: for i in done:
inode = tuple(i) inode = tuple(i)
if inode in cloneContents: if inode in cloneContents:
(path, files) = cloneContents[inode] (path, files) = cloneContents[inode]
for f in files: for f in files:
key = (f['inode'], f['dev']) key = (f['inode'], f['dev'])
delInode(key) inodeDB.delete(key)
del cloneContents[inode] del cloneContents[inode]
else: else:
logger.error("Unable to locate info for %s", inode) logger.error("Unable to locate info for %s", inode)
# And the directory. # And the directory.
delInode(inode) inodeDB.delete(inode)
# Process the directories that have changed # Process the directories that have changed
for i in content: for i in content:
finfo = tuple(i) finfo = tuple(i)
if finfo in cloneContents: if finfo in cloneContents:
(path, files) = cloneContents[finfo] (path, files) = cloneContents[finfo]
if logdirs: if logdirs:
logger.log(logging.DIRS, "[R]: %s", Util.shortPath(path)) logger.log(logging.DIRS, "[R]: %s", Util.shortPath(path))
sendDirChunks(path, finfo, files) sendDirChunks(path, finfo, files)
del cloneContents[finfo] del cloneContents[finfo]
skipping to change at line 1126 (1.1.5) / line 1201 (1.2.1)
global newmeta global newmeta
message = { message = {
'message': 'META', 'message': 'META',
'metadata': newmeta 'metadata': newmeta
} }
newmeta = [] newmeta = []
return message return message
statusBar = None statusBar = None
(Tardis-1.1.5)
def initProgressBar():
    statusBar = ShortPathStatusBar("{__elapsed__} | Dirs: {dirs} | Files: {files} | Full: {new} | Delta: {delta} | Data: {dataSent!B} | {mode} ", stats)

(Tardis-1.2.1)
def initProgressBar(scheduler):
    statusBar = ShortPathStatusBar("{__elapsed__} | Dirs: {dirs} | Files: {files} | Full: {new} | Delta: {delta} | Data: {dataSent!B} | {mode} ", stats, scheduler=scheduler)
statusBar.setValue('mode', '') statusBar.setValue('mode', '')
statusBar.setTrailer('') statusBar.setTrailer('')
statusBar.start() statusBar.start()
return statusBar return statusBar
def setProgress(mode, name): def setProgress(mode, name):
if statusBar: if statusBar:
statusBar.setValue('mode', mode) statusBar.setValue('mode', mode)
statusBar.setTrailer(name) statusBar.setTrailer(name)
skipping to change at line 1174 (1.1.5) / line 1249 (1.2.1)
return return
if args.skipcaches and os.path.lexists(os.path.join(dir, 'CACHEDIR.TAG') ): if args.skipcaches and os.path.lexists(os.path.join(dir, 'CACHEDIR.TAG') ):
logger.debug("CACHEDIR.TAG file found. Analyzing") logger.debug("CACHEDIR.TAG file found. Analyzing")
try: try:
with open(os.path.join(dir, 'CACHEDIR.TAG'), 'r') as f: with open(os.path.join(dir, 'CACHEDIR.TAG'), 'r') as f:
line = f.readline() line = f.readline()
if line.startswith('Signature: 8a477f597d28d172789f06886806bc55'):
    logger.debug("Valid CACHEDIR.TAG file found. Skipping %s", dir)
return return
except: except Exception as e:
logger.warning("Could not read %s. Backing up directory %s", os .path.join(dir, 'CACHEDIR.TAG'), dir) logger.warning("Could not read %s. Backing up directory %s", os .path.join(dir, 'CACHEDIR.TAG'), dir)
exceptionLogger.log(e)
(files, subdirs, subexcludes) = getDirContents(dir, s, excludes) (files, subdirs, subexcludes) = getDirContents(dir, s, excludes)
h = Util.hashDir(crypt, files) h = Util.hashDir(crypt, files)
#logger.debug("Dir: %s (%d, %d): Hash: %s Size: %d.", Util.shortPath(dir ), s.st_ino, s.st_dev, h[0], h[1]) #logger.debug("Dir: %s (%d, %d): Hash: %s Size: %d.", Util.shortPath(dir ), s.st_ino, s.st_dev, h[0], h[1])
dirHashes[(s.st_ino, s.st_dev)] = h dirHashes[(s.st_ino, s.st_dev)] = h
# Figure out which files to clone, and which to update # Figure out which files to clone, and which to update
if files and args.clones: if files and args.clones:
if len(files) > args.clonethreshold: if len(files) > args.clonethreshold:
skipping to change at line 1302 (1.1.5) / line 1378 (1.2.1)
purgeTime = args.purgehours * 3600 purgeTime = args.purgehours * 3600
if args.purgetime: if args.purgetime:
cal = parsedatetime.Calendar() cal = parsedatetime.Calendar()
(then, success) = cal.parse(args.purgetime) (then, success) = cal.parse(args.purgetime)
if success: if success:
purgeTime = time.mktime(then) purgeTime = time.mktime(then)
else: else:
#logger.error("Could not parse --keep-time argument: %s", args.p urgetime) #logger.error("Could not parse --keep-time argument: %s", args.p urgetime)
raise Exception("Could not parse --keep-time argument: {} ".form at(args.purgetime)) raise Exception("Could not parse --keep-time argument: {} ".form at(args.purgetime))
@functools.lru_cache(maxsize=128)
def mkExcludePattern(pattern): def mkExcludePattern(pattern):
logger.debug("Excluding {}", pattern) logger.debug("Excluding {}", pattern)
if not pattern.startswith('/'): if not pattern.startswith('/'):
pattern = '/**/' + pattern pattern = '/**/' + pattern
return wildmatch.translate(pattern) return wildmatch.translate(pattern)
def loadExcludeFile(name): def loadExcludeFile(name):
""" Load a list of patterns to exclude from a file. """ """ Load a list of patterns to exclude from a file. """
try: try:
with open(name) as f: with open(name) as f:
excludes = [mkExcludePattern(x.rstrip('\n')) for x in f.readlines()] excludes = [mkExcludePattern(x.rstrip('\n')) for x in f.readlines()]
return list(excludes) return set(excludes)
except IOError as e: except IOError as e:
#traceback.print_exc() #traceback.print_exc()
return [] return set()
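The list-to-set migration works together with the new lru_cache on mkExcludePattern: a repeated pattern string now yields the identical compiled object, so set unions collapse duplicates as the same exclude files are re-read down the directory tree. A small sketch of that interaction, with translate standing in for mkExcludePattern:

    from functools import lru_cache

    @lru_cache(maxsize=128)
    def translate(pattern):
        print('compiling', pattern)                  # fires once per distinct pattern string
        return object()                              # stand-in for a compiled wildmatch pattern

    parent = {translate('*.o'), translate('*.tmp')}
    child = parent.union({translate('*.o')})         # cache hit: identical object, union adds nothing
    print(len(child))                                # 2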
# Load all the excludes we might want # Load all the excludes we might want
def loadExcludes(args): def loadExcludes(args):
global excludeFile global excludeFile, globalExcludes
if not args.ignoreglobalexcludes: if not args.ignoreglobalexcludes:
globalExcludes.extend(loadExcludeFile(globalExcludeFile)) globalExcludes = globalExcludes.union(loadExcludeFile(globalExcludeFile))
if args.cvs: if args.cvs:
globalExcludes.extend(map(mkExcludePattern, cvsExcludes)) globalExcludes = globalExcludes.union(map(mkExcludePattern, cvsExcludes))
if args.excludes: if args.excludes:
globalExcludes.extend(map(mkExcludePattern, args.excludes)) globalExcludes = globalExcludes.union(map(mkExcludePattern, args.excludes))
if args.excludefiles: if args.excludefiles:
for f in args.excludefiles: for f in args.excludefiles:
globalExcludes.extend(loadExcludeFile(f)) globalExcludes = globalExcludes.union(loadExcludeFile(f))
excludeFile = args.excludefilename excludeFile = args.excludefilename
def loadExcludedDirs(args): def loadExcludedDirs(args):
global excludeDirs global excludeDirs
if args.excludedirs is not None: if args.excludedirs is not None:
excludeDirs.extend(list(map(Util.fullPath, args.excludedirs))) excludeDirs.extend(list(map(Util.fullPath, args.excludedirs)))
def sendMessage(message): def sendMessage(message):
if verbosity > 4: if verbosity > 4:
logger.debug("Send: %s", str(message)) logger.debug("Send: %s", str(message))
skipping to change at line 1434 (1.1.5) / line 1511 (1.2.1)
handleResponse(ack, doPush=False, pause=0) handleResponse(ack, doPush=False, pause=0)
currentBatch = None currentBatch = None
else: else:
logger.error("Unexpected response: %s", msgtype) logger.error("Unexpected response: %s", msgtype)
if doPush: if doPush:
pushFiles() pushFiles()
except Exception as e: except Exception as e:
logger.error("Error handling response %s %s: %s", response.get('msgid'), response.get('message'), e) logger.error("Error handling response %s %s: %s", response.get('msgid'), response.get('message'), e)
logger.exception("Exception: ", exc_info=e) logger.exception("Exception: ", exc_info=e)
logger.error(pprint.pformat(response, width=5000, depth=4)) logger.error(pprint.pformat(response, width=150, depth=5, compact=True))
exceptionLogger.log(e) exceptionLogger.log(e)
_nextMsgId = 0 _nextMsgId = 0
def setMessageID(message): def setMessageID(message):
global _nextMsgId global _nextMsgId
#message['sessionid'] = str(sessionid) #message['sessionid'] = str(sessionid)
message['msgid'] = _nextMsgId message['msgid'] = _nextMsgId
_nextMsgId += 1 _nextMsgId += 1
return message['msgid'] return message['msgid']
skipping to change at line 1495 (1.1.5) / line 1572 (1.2.1)
# #
# and send it. # and send it.
batchMessage(message) batchMessage(message)
def splitDirs(x): def splitDirs(x):
root, rest = os.path.split(x) root, rest = os.path.split(x)
if root and rest: if root and rest:
ret = splitDirs(root) ret = splitDirs(root)
ret.append(rest) ret.append(rest)
elif root: elif root:
if root is '/': if root == '/':
ret = [root] ret = [root]
else: else:
ret = splitDirs(root) ret = splitDirs(root)
else: else:
ret = [rest] ret = [rest]
return ret return ret
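splitDirs recurses with os.path.split until the path is consumed; its expected behavior (illustrative):

    assert splitDirs('a/b/c') == ['a', 'b', 'c']
    assert splitDirs('/a/b/c') == ['/', 'a', 'b', 'c']   # absolute paths keep the root element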
def createPrefixPath(root, path): def createPrefixPath(root, path):
""" Create common path directories. Will be empty, except for path elements to the repested directories. """ """ Create common path directories. Will be empty, except for path elements to the repested directories. """
rPath = os.path.relpath(path, root) rPath = os.path.relpath(path, root)
logger.debug("Making prefix path for: %s as %s", path, rPath) logger.debug("Making prefix path for: %s as %s", path, rPath)
pathDirs = splitDirs(rPath) pathDirs = splitDirs(rPath)
parent = 0 parent = 0
parentDev = 0 parentDev = 0
current = root current = root
for d in pathDirs: for d in pathDirs:
dirPath = os.path.join(current, d) dirPath = os.path.join(current, d)
st = os.lstat(dirPath) st = os.lstat(dirPath)
f = mkFileInfo(current, d) f = mkFileInfo(FakeDirEntry(current, d))
if crypt.encrypting(): if crypt.encrypting():
f['name'] = crypt.encryptFilename(f['name']) f['name'] = crypt.encryptFilename(f['name'])
if dirPath not in processedDirs: if dirPath not in processedDirs:
logger.debug("Sending dir entry for: %s", dirPath) logger.debug("Sending dir entry for: %s", dirPath)
sendDirEntry(parent, parentDev, [f]) sendDirEntry(parent, parentDev, [f])
processedDirs.add(dirPath) processedDirs.add(dirPath)
parent = st.st_ino parent = st.st_ino
parentDev = st.st_dev parentDev = st.st_dev
current = dirPath current = dirPath
def runServer(cmd, tempfile):
server_cmd = shlex.split(cmd) + ['--single', '--local', tempfile]
logger.debug("Invoking server: " + str(server_cmd))
subp = subprocess.Popen(server_cmd)
# Wait until the subprocess has created the domain socket.
# There's got to be a better way to do this. Oy.
for _ in range(0, 20):
if os.path.exists(tempfile):
return subp
if subp.poll():
raise Exception("Subprocess died: %d" % (subp.returncode))
time.sleep(0.5)
logger.error("Unable to locate socket %s from process %d. Killing subproces
s", tempfile, subp.pid)
subp.terminate()
return None
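A hypothetical caller for the new local mode, in the spirit of the 'LocalServerCmd' default added to configDefaults: start tardisd on a throwaway socket path, run the backup over it, then shut the subprocess down:

    import os
    import tempfile
    sockname = os.path.join(tempfile.mkdtemp(), 'tardis.sock')
    server = runServer('tardisd --config ' + local_config, sockname)
    if server:
        try:
            pass                                     # ... connect to sockname and run the backup ...
        finally:
            server.terminate()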
def setCrypto(confirm, chkStrength=False, version=None): def setCrypto(confirm, chkStrength=False, version=None):
global srpUsr, crypt global srpUsr, crypt
password = Util.getPassword(args.password, args.passwordfile, args.passwordprog, "Password for %s:" % (args.client),
                            confirm=confirm, strength=chkStrength, allowNone = False)
srpUsr = srp.User(args.client, password) srpUsr = srp.User(args.client, password)
crypt = TardisCrypto.getCrypto(version, password, args.client) crypt = TardisCrypto.getCrypto(version, password, args.client)
logger.debug("Using %s Crypto scheme", crypt.getCryptoScheme()) logger.debug("Using %s Crypto scheme", crypt.getCryptoScheme())
return password return password
def doSendKeys(password): def doSendKeys(password):
skipping to change at line 1687 (1.1.5) / line 1747 (1.2.1)
if args.keys: if args.keys:
(f, c) = Util.loadKeys(args.keys, clientId) (f, c) = Util.loadKeys(args.keys, clientId)
else: else:
f = filenameKey f = filenameKey
c = contentKey c = contentKey
if not (f and c): if not (f and c):
logger.critical("Unable to load keyfile: %s", args.keys) logger.critical("Unable to load keyfile: %s", args.keys)
sys.exit(1) sys.exit(1)
crypt.setKeys(f, c) crypt.setKeys(f, c)
def getConnection(server, port): def getConnection(server, port, maxBandwidth=None):
#if args.protocol == 'json':
#    conn = Connection.JsonConnection(server, port, name, priority, client, autoname=auto, token=token, force=args.force, timeout=args.timeout, full=args.full)
#    setEncoder("base64")
#elif args.protocol == 'bson':
#    conn = Connection.BsonConnection(server, port, name, priority, client, autoname=auto, token=token, compress=args.compressmsgs, force=args.force, timeout=args.timeout, full=args.full)
#    setEncoder("bin")
#elif args.protocol == 'msgp':
throttler = None
#if maxBandwidth:
# throttler = Throttler(maxBandwidth, blocking=True)
conn = Connection.MsgPackConnection(server, port, compress=args.compressmsgs, timeout=args.timeout)
setEncoder("bin") setEncoder("bin")
return conn return conn
def splitList(line): def splitList(line):
if not line: if not line:
return [] return []
else: else:
return shlex.split(line.strip()) return shlex.split(line.strip())
skipping to change at line 1727 (1.1.5) / line 1790 (1.2.1)
def _d(help): def _d(help):
""" Only print the help message if --debug is specified """ """ Only print the help message if --debug is specified """
return help if args.debug else argparse.SUPPRESS return help if args.debug else argparse.SUPPRESS
_def = 'Default: %(default)s' _def = 'Default: %(default)s'
# Use the custom arg parser, which handles argument files more cleanly # Use the custom arg parser, which handles argument files more cleanly
parser = CustomArgumentParser(description='Tardis Backup Client', fromfile_prefix_chars='@', formatter_class=Util.HelpFormatter, add_help=False,
                              epilog='Options can be specified in files, with the filename specified by an @sign: e.g. "%(prog)s @args.txt" will read arguments from args.txt')
(Tardis-1.1.5)
parser.add_argument('--config', dest='config', default=None, help='Location of the configuration file. ' + _def)
parser.add_argument('--job', dest='job', default='Tardis', help='Job Name within the configuration file. ' + _def)
(Tardis-1.2.1)
parser.add_argument('--config', dest='config', default=Defaults.getDefault('TARDIS_CONFIG'), help='Location of the configuration file. ' + _def)
parser.add_argument('--job', dest='job', default=Defaults.getDefault('TARDIS_JOB'), help='Job Name within the configuration file. ' + _def)
parser.add_argument('--debug', dest='debug', default=False, action='store_true', help=argparse.SUPPRESS) parser.add_argument('--debug', dest='debug', default=False, action='store_true', help=argparse.SUPPRESS)
(args, remaining) = parser.parse_known_args() (args, remaining) = parser.parse_known_args()
t = args.job t = args.job
c = configparser.ConfigParser(configDefaults, allow_no_value=True) c = configparser.RawConfigParser(configDefaults, allow_no_value=True)
if args.config: if args.config:
c.read(args.config) c.read(args.config)
if not c.has_section(t): if not c.has_section(t):
sys.stderr.write("WARNING: No Job named %s listed. Using defaults. Jobs available: %s\n" %(t, str(c.sections()).strip('[]'))) sys.stderr.write("WARNING: No Job named %s listed. Using defaults. Jobs available: %s\n" %(t, str(c.sections()).strip('[]')))
c.add_section(t) # Make it safe for reading other values from. c.add_section(t) # Make it safe for reading other values from.
checkConfig(c, t) checkConfig(c, t)
else: else:
c.add_section(t) # Make it safe for reading other values from. c.add_section(t) # Make it safe for reading other values from.
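The move from ConfigParser to RawConfigParser is likely forced by the new backend defaults: values such as 'Monthly-%Y-%m' contain bare % characters, which the interpolating parser rejects on read. Illustrative:

    import configparser
    defaults = {'Formats': 'Monthly-%Y-%m'}
    c = configparser.ConfigParser(defaults)
    c.add_section('Tardis')
    try:
        c.get('Tardis', 'Formats')
    except configparser.InterpolationSyntaxError as e:
        print('ConfigParser balks:', e)
    c = configparser.RawConfigParser(defaults)
    c.add_section('Tardis')
    print(c.get('Tardis', 'Formats'))                # 'Monthly-%Y-%m', returned verbatim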
parser.add_argument('--server', '-s', dest='server', default=c.get(t, 'Server'), help='Set the destination server. ' + _def) locgroup = parser.add_argument_group("Local Backup options")
parser.add_argument('--port', '-p', dest='port', type=int, default=c.getint(t, 'Port'), help='Set the destination server port. ' + _def) locgroup.add_argument('--database', '-D', dest='database', default=c.get(t, 'BaseDir'), help='Database directory (Default: %(default)s)')
locgroup.add_argument('--dbdir', dest='dbdir', default=c.get(t, 'DBDir'), help='Location of database files (if different from database directory above) (Default: %(default)s)')
locgroup.add_argument('--dbname', '-N', dest='dbname', default=c.get(t, 'DBName'), help='Use the database name (Default: %(default)s)')
locgroup.add_argument('--schema', dest='schema', default=c.get(t, 'Schema'), help='Path to the schema to use (Default: %(default)s)')
remotegroup = parser.add_argument_group("Remote Server options")
remotegroup.add_argument('--server', '-s', dest='server', default=c.get(t, 'Server'), help='Set the destination server. ' + _def)
remotegroup.add_argument('--port', '-p', dest='port', type=int, default=c.getint(t, 'Port'), help='Set the destination server port. ' + _def)
modegroup = parser.add_mutually_exclusive_group()
modegroup.add_argument('--local', dest='local', action='store_true', default=c.get(t, 'Local'), help='Run as a local job')
modegroup.add_argument('--remote', dest='local', action='store_false', default=c.get(t, 'Local'), help='Run against a remote server')
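[Editor's note: the new --local/--remote pair shares a single dest with the empty-string 'Local' config default shown earlier, giving a three-state flag: unset, True, False. A standalone sketch of the pattern, which pickMode() below relies on:]

    import argparse

    parser = argparse.ArgumentParser()
    mode = parser.add_mutually_exclusive_group()
    # Both switches share one dest; the '' default acts as a third,
    # "undecided" state, and argparse rejects giving both at once.
    mode.add_argument('--local',  dest='local', action='store_true',  default='')
    mode.add_argument('--remote', dest='local', action='store_false', default='')

    print(parser.parse_args([]).local)            # '' -> decide from other options
    print(parser.parse_args(['--local']).local)   # True
    print(parser.parse_args(['--remote']).local)  # False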
parser.add_argument('--log', '-l', dest='logfiles', action='append', default=splitList(c.get(t, 'LogFiles')), nargs="?", const=sys.stderr, parser.add_argument('--log', '-l', dest='logfiles', action='append', default=splitList(c.get(t, 'LogFiles')), nargs="?", const=sys.stderr,
help='Send logging output to specified file. Can be repeated for multiple logs. Default: stderr') help='Send logging output to specified file. Can be repeated for multiple logs. Default: stderr')
parser.add_argument('--client', '-C', dest='client', default=c.get(t, 'Client'), help='Set the client name. ' + _def) parser.add_argument('--client', '-C', dest='client', default=c.get(t, 'Client'), help='Set the client name. ' + _def)
parser.add_argument('--force', dest='force', action=Util.StoreBoolean, default=c.getboolean(t, 'Force'), parser.add_argument('--force', dest='force', action=Util.StoreBoolean, default=c.getboolean(t, 'Force'),
help='Force the backup to take place, even if others are currently running. ' + _def) help='Force the backup to take place, even if others are currently running. ' + _def)
parser.add_argument('--full', dest='full', action=Util.StoreBoolean, default=c.getboolean(t, 'Full'), parser.add_argument('--full', dest='full', action=Util.StoreBoolean, default=c.getboolean(t, 'Full'),
help='Perform a full backup, with no delta information. ' + _def) help='Perform a full backup, with no delta information. ' + _def)
parser.add_argument('--name', '-n', dest='name', default=None, help='Set the backup name. Leave blank to assign name automatically') parser.add_argument('--name', '-n', dest='name', default=None, help='Set the backup name. Leave blank to assign name automatically')
parser.add_argument('--create', dest='create', default=False, action=Util.StoreBoolean, help='Create a new client.') parser.add_argument('--create', dest='create', default=False, action=Util.StoreBoolean, help='Create a new client.')
skipping to change at line 1787 skipping to change at line 1862
parser.add_argument('--compress-min', dest='mincompsize', type=int, default=c.getint(t, 'CompressMin'), help='Minimum size to compress. ' + _def) parser.add_argument('--compress-min', dest='mincompsize', type=int, default=c.getint(t, 'CompressMin'), help='Minimum size to compress. ' + _def)
parser.add_argument('--nocompress-types', dest='nocompressfile', default=splitList(c.get(t, 'NoCompressFile')), action='append', parser.add_argument('--nocompress-types', dest='nocompressfile', default=splitList(c.get(t, 'NoCompressFile')), action='append',
help='File containing a list of MIME types to not compress. ' + _def) help='File containing a list of MIME types to not compress. ' + _def)
parser.add_argument('--nocompress', '-z', dest='nocompress', default=splitList(c.get(t, 'NoCompress')), action='append', parser.add_argument('--nocompress', '-z', dest='nocompress', default=splitList(c.get(t, 'NoCompress')), action='append',
help='MIME type to not compress. Can be repeated') help='MIME type to not compress. Can be repeated')
if support_xattr: if support_xattr:
parser.add_argument('--xattr', dest='xattr', default=True, action=Util.StoreBoolean, help='Backup file extended attributes') parser.add_argument('--xattr', dest='xattr', default=True, action=Util.StoreBoolean, help='Backup file extended attributes')
if support_acl: if support_acl:
parser.add_argument('--acl', dest='acl', default=True, action=Util.StoreBoolean, help='Backup file access control lists') parser.add_argument('--acl', dest='acl', default=True, action=Util.StoreBoolean, help='Backup file access control lists')
locgrp = parser.add_argument_group("Arguments for running server locally under tardis")
locgrp.add_argument('--local', dest='local', action=Util.StoreBoolean, default=c.getboolean(t, 'Local'),
                    help='Run server as a local client')
locgrp.add_argument('--local-server-cmd', dest='serverprog', default=c.get(t, 'LocalServerCmd'), help='Local server program to run. ' + _def)
parser.add_argument('--priority', dest='priority', type=int, default=None, help='Set the priority of this backup') parser.add_argument('--priority', dest='priority', type=int, default=None, help='Set the priority of this backup')
parser.add_argument('--maxdepth', '-d', dest='maxdepth', type=int, default=0, help='Maximum depth to search') parser.add_argument('--maxdepth', '-d', dest='maxdepth', type=int, default=0, help='Maximum depth to search')
parser.add_argument('--crossdevice', dest='crossdev', action=Util.StoreBoolean, default=False, help='Cross devices. ' + _def) parser.add_argument('--crossdevice', dest='crossdev', action=Util.StoreBoolean, default=False, help='Cross devices. ' + _def)
parser.add_argument('--basepath', dest='basepath', default='full', choices=['none', 'common', 'full'], help='Select style of root path handling ' + _def) parser.add_argument('--basepath', dest='basepath', default='full', choices=['none', 'common', 'full'], help='Select style of root path handling ' + _def)
excgrp = parser.add_argument_group('Exclusion options', 'Options for handling exclusions') excgrp = parser.add_argument_group('Exclusion options', 'Options for handling exclusions')
excgrp.add_argument('--cvs-ignore', dest='cvs', default=c.getboolean(t, 'IgnoreCVS'), action=Util.StoreBoolean, excgrp.add_argument('--cvs-ignore', dest='cvs', default=c.getboolean(t, 'IgnoreCVS'), action=Util.StoreBoolean,
help='Ignore files like CVS. ' + _def) help='Ignore files like CVS. ' + _def)
excgrp.add_argument('--skip-caches', dest='skipcaches', default=c.getboolean(t, 'SkipCaches'),action=Util.StoreBoolean, excgrp.add_argument('--skip-caches', dest='skipcaches', default=c.getboolean(t, 'SkipCaches'),action=Util.StoreBoolean,
skipping to change at line 1855 skipping to change at line 1925
purgegroup.add_argument('--purge', dest='purge', action=Util.StoreBoolean, default=c.getboolean(t, 'Purge'), help='Purge old backup sets when backup complete. ' + _def) purgegroup.add_argument('--purge', dest='purge', action=Util.StoreBoolean, default=c.getboolean(t, 'Purge'), help='Purge old backup sets when backup complete. ' + _def)
purgegroup.add_argument('--purge-priority', dest='purgeprior', type=int, default=None, help='Delete below this priority (Default: Backup priority)') purgegroup.add_argument('--purge-priority', dest='purgeprior', type=int, default=None, help='Delete below this priority (Default: Backup priority)')
prggroup = purgegroup.add_mutually_exclusive_group() prggroup = purgegroup.add_mutually_exclusive_group()
prggroup.add_argument('--keep-days', dest='purgedays', type=int, default=None, help='Number of days to keep') prggroup.add_argument('--keep-days', dest='purgedays', type=int, default=None, help='Number of days to keep')
prggroup.add_argument('--keep-hours', dest='purgehours', type=int, default=None, help='Number of hours to keep') prggroup.add_argument('--keep-hours', dest='purgehours', type=int, default=None, help='Number of hours to keep')
prggroup.add_argument('--keep-time', dest='purgetime', default=None, help='Purge before this time. Format: YYYY/MM/DD:hh:mm') prggroup.add_argument('--keep-time', dest='purgetime', default=None, help='Purge before this time. Format: YYYY/MM/DD:hh:mm')
parser.add_argument('--stats', action=Util.StoreBoolean, dest='stats', default=c.getboolean(t, 'Stats'), parser.add_argument('--stats', action=Util.StoreBoolean, dest='stats', default=c.getboolean(t, 'Stats'),
help='Print stats about the transfer. Default=%(default)s') help='Print stats about the transfer. Default=%(default)s')
parser.add_argument('--report', dest='report', choices=['all', 'dirs', 'none'], const='all', default='none', nargs='?', parser.add_argument('--report', dest='report', choices=['all', 'dirs', 'none'], const='all', default=c.get(t, 'Report'), nargs='?',
help='Print a report on all files or directories transferred. Default=%(default)s') help='Print a report on all files or directories transferred. ' + _def)
parser.add_argument('--verbose', '-v', dest='verbose', action='count', default=c.getint(t, 'Verbosity'), parser.add_argument('--verbose', '-v', dest='verbose', action='count', default=c.getint(t, 'Verbosity'),
help='Increase the verbosity') help='Increase the verbosity')
parser.add_argument('--progress', dest='progress', action='store_true', help='Show a one-line progress bar.') parser.add_argument('--progress', dest='progress', action='store_true', help='Show a one-line progress bar.')
parser.add_argument('--exclusive', dest='exclusive', action=Util.StoreBoolean, default=True, help='Make sure the client only runs one job at a time. ' + _def) parser.add_argument('--exclusive', dest='exclusive', action=Util.StoreBoolean, default=True, help='Make sure the client only runs one job at a time. ' + _def)
parser.add_argument('--exceptions', dest='exceptions', default=False, action=Util.StoreBoolean, help='Log full exception details') parser.add_argument('--exceptions', dest='exceptions', default=False, action=Util.StoreBoolean, help='Log full exception details')
parser.add_argument('--logtime', dest='logtime', default=False, action=Util.StoreBoolean, help='Log time') parser.add_argument('--logtime', dest='logtime', default=False, action=Util.StoreBoolean, help='Log time')
parser.add_argument('--logcolor', dest='logcolor', default=True, action=Util.StoreBoolean, help='Generate colored logs') parser.add_argument('--logcolor', dest='logcolor', default=True, action=Util.StoreBoolean, help='Generate colored logs')
parser.add_argument('--version', action='version', version='%(prog)s ' + Tardis.__versionstring__, help='Show the version') parser.add_argument('--version', action='version', version='%(prog)s ' + Tardis.__versionstring__, help='Show the version')
parser.add_argument('--help', '-h', action='help') parser.add_argument('--help', '-h', action='help')
Util.addGenCompletions(parser) Util.addGenCompletions(parser)
parser.add_argument('directories', nargs='*', default=splitList(c.get(t, 'Directories')), help="List of directories to sync") parser.add_argument('directories', nargs='*', default=splitList(c.get(t, 'Directories')), help="List of directories to sync")
args = parser.parse_args(remaining) args = parser.parse_args(remaining)
return (args, c) return (args, c, t)
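[Editor's note: processCommandLine() uses a two-phase parse: a tiny parser picks off --config/--job via parse_known_args(), the named config file supplies defaults, then the full parser consumes the remaining argv. A standalone sketch of the idiom (the option names here are illustrative, not Tardis's full set):]

    import argparse

    # Phase 1: pick off --config/--job only; everything else is deferred.
    pre = argparse.ArgumentParser(add_help=False)
    pre.add_argument('--config')
    pre.add_argument('--job', default='Tardis')
    known, remaining = pre.parse_known_args(['--job', 'nightly', '--full'])

    # ... here the real code loads known.config and derives defaults ...

    # Phase 2: the full parser, seeded with those defaults, eats the rest.
    full = argparse.ArgumentParser()
    full.add_argument('--full', action='store_true')
    args = full.parse_args(remaining)
    print(known.job, args.full)    # nightly True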
def parseServerInfo(args): def parseServerInfo(args):
""" Break up the server info passed in into useable chunks """ """ Break up the server info passed in into useable chunks """
if args.local: serverStr = args.server
sServer = 'localhost' #logger.debug("Got server string: %s", serverStr)
sPort = 'local' if not serverStr.startswith('tardis://'):
sClient = args.client serverStr = 'tardis://' + serverStr
else: try:
serverStr = args.server info = urllib.parse.urlparse(serverStr)
#logger.debug("Got server string: %s", serverStr) if info.scheme != 'tardis':
if not serverStr.startswith('tardis://'): raise Exception("Invalid URL scheme: {}".format(info.scheme))
serverStr = 'tardis://' + serverStr
try: sServer = info.hostname
info = urllib.parse.urlparse(serverStr) sPort = info.port
if info.scheme != 'tardis': sClient = info.path.lstrip('/')
raise Exception("Invalid URL scheme: {}".format(info.scheme))
sServer = info.hostname
sPort = info.port
sClient = info.path.lstrip('/')
except Exception as e: except Exception as e:
raise Exception("Invalid URL: {} -- {}".format(args.server, e.messag raise Exception("Invalid URL: {} -- {}".format(args.server, e.message))
e))
server = sServer or args.server server = sServer or args.server
port = sPort or args.port port = sPort or args.port
client = sClient or args.client client = sClient or args.client
return (server, port, client) return (server, port, client)
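[Editor's note: the new parseServerInfo() always goes through urlparse, dropping the old special case for local mode. A standalone sketch of the tardis:// parsing, with a hypothetical default port; note that both versions format e.message in the except clause, which Python 3 exceptions don't define, so str(e) would be the portable spelling there:]

    import urllib.parse

    def parseServer(serverStr, defPort=7420):   # hypothetical default port
        if not serverStr.startswith('tardis://'):
            serverStr = 'tardis://' + serverStr
        info = urllib.parse.urlparse(serverStr)
        if info.scheme != 'tardis':
            raise Exception("Invalid URL scheme: {}".format(info.scheme))
        # hostname and port are None when absent; callers fall back to args
        return (info.hostname, info.port or defPort, info.path.lstrip('/'))

    print(parseServer('backup.example.com:9090/laptop'))  # ('backup.example.com', 9090, 'laptop')
    print(parseServer('backup.example.com'))              # ('backup.example.com', 7420, '')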
def setupLogging(logfiles, verbosity, logExceptions): def setupLogging(logfiles, verbosity, logExceptions):
global logger, exceptionLogger global logger, exceptionLogger
skipping to change at line 2004 skipping to change at line 2069
logger.log(logging.STATS, "Data Sent: Sent: {:} Backed: {:}".format (Util.fmtSize(stats['dataSent']), Util.fmtSize(stats['dataBacked']))) logger.log(logging.STATS, "Data Sent: Sent: {:} Backed: {:}".format (Util.fmtSize(stats['dataSent']), Util.fmtSize(stats['dataBacked'])))
logger.log(logging.STATS, "Messages: Sent: {:,} ({:}) Received: {:,} ({:})".format(connstats['messagesSent'], Util.fmtSize(connstats['bytesSent']), connstats['messagesRecvd'], Util.fmtSize(connstats['bytesRecvd']))) logger.log(logging.STATS, "Messages: Sent: {:,} ({:}) Received: {:,} ({:})".format(connstats['messagesSent'], Util.fmtSize(connstats['bytesSent']), connstats['messagesRecvd'], Util.fmtSize(connstats['bytesRecvd'])))
logger.log(logging.STATS, "Data Sent: {:}".format(Util.fmtSize(stats[ 'dataSent']))) logger.log(logging.STATS, "Data Sent: {:}".format(Util.fmtSize(stats[ 'dataSent'])))
if (stats['denied'] or stats['gone']): if (stats['denied'] or stats['gone']):
logger.log(logging.STATS, "Files Not Sent: Disappeared: {:,} Permissi on Denied: {:,}".format(stats['gone'], stats['denied'])) logger.log(logging.STATS, "Files Not Sent: Disappeared: {:,} Permissi on Denied: {:,}".format(stats['gone'], stats['denied']))
logger.log(logging.STATS, "Wait Times: {:}".format(str(datetime.timedelta( 0, waittime)))) logger.log(logging.STATS, "Wait Times: {:}".format(str(datetime.timedelta( 0, waittime))))
logger.log(logging.STATS, "Sending Time: {:}".format(str(datetime.timedelta( 0, Util._transmissionTime)))) logger.log(logging.STATS, "Sending Time: {:}".format(str(datetime.timedelta( 0, Util._transmissionTime))))
def pickMode():
    if args.local != '' and args.local is not None:
        if args.local is True or args.local == 'True':
            if args.server is None:
                raise Exception("Remote mode specified without a server")
            return True
        elif args.local is False or args.local == 'False':
            if args.database is None:
                raise Exception("Local mode specified without a database")
            return False
    else:
        if args.server is not None and args.database is not None:
            raise Exception("Both database and server specified. Unable to determine mode. Use --local/--remote switches")
        if args.server is not None:
            return False
        elif args.database is not None:
            return True
        else:
            raise Exception("Neither database nor remote server is set. Unable to backup")
def printReport(repFormat): def printReport(repFormat):
lastDir = None lastDir = None
length = 0 length = 0
numFiles = 0 numFiles = 0
deltas = 0 deltas = 0
dataSize = 0 dataSize = 0
logger.log(logging.STATS, "") logger.log(logging.STATS, "")
if report: if report:
length = reduce(max, list(map(len, [x[1] for x in report]))) length = reduce(max, list(map(len, [x[1] for x in report])))
length = max(length, 50) length = max(length, 50)
filefmts = ['','KB','MB','GB', 'TB', 'PB'] filefmts = ['','KB','MB','GB', 'TB', 'PB']
dirfmts = ['B','KB','MB','GB', 'TB', 'PB'] dirfmts = ['B','KB','MB','GB', 'TB', 'PB']
fmt = '%-{}s %-6s %-10s %-10s'.format(length + 4) fmt = '%-{}s %-6s %-10s %-10s'.format(length + 4)
fmt2 = ' %-{}s %-6s %-10s %-10s'.format(length) fmt2 = ' %-{}s %-6s %-10s %-10s'.format(length)
fmt3 = ' %-{}s %-6s %-10s'.format(length) fmt3 = ' %-{}s %-6s %-10s'.format(length)
fmt4 = ' %d files (%d full, %d delta, %s)' fmt4 = ' %d files (%d full, %d delta, %s)'
logger.log(logging.STATS, fmt, "FileName", "Type", "Size", "Sig Size") logger.log(logging.STATS, fmt, "FileName", "Type", "Size", "Sig Size")
logger.log(logging.STATS, fmt, '-' * (length + 4), '-' * 6, '-' * 10, '-' * 10) logger.log(logging.STATS, fmt, '-' * (length + 4), '-' * 6, '-' * 10, '-' * 10)
for i in sorted(report): for i in sorted(report):
r = report[i] r = report[i]
(d, f) = i (d, f) = i
if d != lastDir: if d != lastDir:
if repFormat == 'dirs' and lastDir: if repFormat == 'dirs' and lastDir:
logger.log(logging.STATS, fmt4, numFiles, numFiles - deltas, deltas, Util.fmtSize(dataSize, formats=dirfmts)) logger.log(logging.STATS, fmt4, numFiles, numFiles - deltas, deltas, Util.fmtSize(dataSize, formats=dirfmts))
numFiles = 0 numFiles = 0
deltas = 0 deltas = 0
dataSize = 0 dataSize = 0
logger.log(logging.STATS, "%s:", Util.shortPath(d, 80)) logger.log(logging.STATS, "%s:", Util.shortPath(d, 80))
lastDir = d lastDir = d
numFiles += 1 numFiles += 1
if r['type'] == 'Delta': if r['type'] == 'Delta':
deltas += 1 deltas += 1
dataSize += r['size'] dataSize += r['size']
if repFormat == 'all': if repFormat == 'all' or repFormat is True:
if r['sigsize']: if r['sigsize']:
logger.log(logging.STATS, fmt2, f, r['type'], Util.fmtSize(r['size'], formats=filefmts), Util.fmtSize(r['sigsize'], formats=filefmts)) logger.log(logging.STATS, fmt2, f, r['type'], Util.fmtSize(r['size'], formats=filefmts), Util.fmtSize(r['sigsize'], formats=filefmts))
else: else:
logger.log(logging.STATS, fmt3, f, r['type'], Util.fmtSize(r['size'], formats=filefmts)) logger.log(logging.STATS, fmt3, f, r['type'], Util.fmtSize(r['size'], formats=filefmts))
if repFormat == 'dirs' and lastDir: if repFormat == 'dirs' and lastDir:
logger.log(logging.STATS, fmt4, numFiles, numFiles - deltas, deltas, Util.fmtSize(dataSize, formats=dirfmts)) logger.log(logging.STATS, fmt4, numFiles, numFiles - deltas, deltas, Util.fmtSize(dataSize, formats=dirfmts))
else: else:
logger.log(logging.STATS, "No files backed up") logger.log(logging.STATS, "No files backed up")
def lockRun(server, port, client): def lockRun(server, port, client):
skipping to change at line 2063 skipping to change at line 2150
# Create our own pidfile path. We do this in /tmp rather than /var/run as tardis may not be run by # Create our own pidfile path. We do this in /tmp rather than /var/run as tardis may not be run by
# the superuser (ie, can't write to /var/run) # the superuser (ie, can't write to /var/run)
pidfile = pid.PidFile(piddir=tempfile.gettempdir(), pidname=lockName) pidfile = pid.PidFile(piddir=tempfile.gettempdir(), pidname=lockName)
try: try:
pidfile.create() pidfile.create()
except pid.PidFileError as e: except pid.PidFileError as e:
raise Exception("Tardis already running: %s" % e) raise Exception("Tardis already running: %s" % e)
return pidfile return pidfile
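[Editor's note: lockRun() builds lockName from server/port/client in lines elided by the diff above, then leans on the pid package for single-instance enforcement. The same package also works as a context manager when you don't need to keep the PidFile object around; a sketch with a hypothetical lock name:]

    import tempfile
    import pid

    # A second process using the same pidname gets pid.PidFileError while
    # the first still holds the lock; the pidfile is removed on exit.
    try:
        with pid.PidFile(piddir=tempfile.gettempdir(), pidname='tardis_demo_lock'):
            pass  # ... run the backup ...
    except pid.PidFileError as e:
        raise SystemExit("Tardis already running: %s" % e)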
def mkBackendConfig(jobname):
    bc = Backend.BackendConfig()
    j = jobname
    bc.umask = Util.parseInt(config.get(j, 'Umask'))
    bc.cksContent = config.getint(j, 'CksContent')
    bc.serverSessionID = socket.gethostname() + time.strftime("-%Y-%m-%d::%H:%M:%S%Z", time.gmtime())
    bc.formats = list(map(str.strip, config.get(j, 'Formats').split(',')))
    bc.priorities = list(map(int, config.get(j, 'Priorities').split(',')))
    bc.keep = list(map(int, config.get(j, 'KeepDays').split(',')))
    bc.forceFull = list(map(int, config.get(j, 'ForceFull').split(',')))
    bc.journal = config.get(j, 'JournalFile')
    bc.savefull = config.getboolean(j, 'SaveFull')
    bc.maxChain = config.getint(j, 'MaxDeltaChain')
    bc.deltaPercent = float(config.getint(j, 'MaxChangePercent')) / 100.0        # Convert to a ratio
    bc.autoPurge = config.getboolean(j, 'AutoPurge')
    bc.saveConfig = config.getboolean(j, 'SaveConfig')
    bc.dbbackups = config.getint(j, 'DBBackups')
    bc.user = None
    bc.group = None
    bc.dbname = args.dbname
    bc.basedir = args.database
    bc.allowNew = True
    bc.allowUpgrades = True
    if args.dbdir:
        bc.dbdir = args.dbdir
    else:
        bc.dbdir = bc.basedir
    bc.allowOverrides = True
    bc.linkBasis = config.getboolean(j, 'LinkBasis')
    bc.requirePW = config.getboolean(j, 'RequirePassword')
    bc.sxip = args.skipfile
    bc.exceptions = args.exceptions
    return bc

def runBackend(jobname):
    conn = Connection.DirectConnection(args.timeout)
    beConfig = mkBackendConfig(jobname)
    backend = Backend.Backend(conn.serverMessages, beConfig, logSession=False)
    backendThread = threading.Thread(target=backend.runBackup, name="Backend")
    backendThread.start()
    return conn, backend, backendThread
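[Editor's note: runBackend() is the heart of the new local mode: instead of spawning a subprocess bound to a domain socket (the 1.1.5 approach still visible in main() below), the Backend runs on a thread in the same process and the client talks to it through Connection.DirectConnection. A generic sketch of that shape, using a queue.Queue as a stand-in since DirectConnection's real interface isn't shown in this diff:]

    import queue
    import threading

    to_server = queue.Queue()

    def backendLoop():
        # Consume protocol messages until a sentinel arrives (cf. the
        # Exception("Terminate connection") sent from main()'s finally block).
        while True:
            msg = to_server.get()
            if isinstance(msg, Exception):
                break
            # ... handle one backup message ...

    backendThread = threading.Thread(target=backendLoop, name="Backend")
    backendThread.start()
    to_server.put({'message': 'BACKDIR'})              # hypothetical message
    to_server.put(Exception("Terminate connection"))   # shut the thread down
    backendThread.join()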
def main(): def main():
global starttime, args, config, conn, verbosity, crypt, noCompTypes, srpUsr, statusBar global starttime, args, config, conn, verbosity, crypt, noCompTypes, srpUsr, statusBar
# Read the command line arguments. # Read the command line arguments.
commandLine = ' '.join(sys.argv) + '\n' commandLine = ' '.join(sys.argv) + '\n'
(args, config) = processCommandLine() (args, config, jobname) = processCommandLine()
# Memory debugging. # Memory debugging.
# Enable only if you really need it. # Enable only if you really need it.
#from dowser import launch_memory_usage_server #from dowser import launch_memory_usage_server
#launch_memory_usage_server() #launch_memory_usage_server()
# Set up logging # Set up logging
verbosity=args.verbose if args.verbose else 0 verbosity=args.verbose if args.verbose else 0
setupLogging(args.logfiles, verbosity, args.exceptions) setupLogging(args.logfiles, verbosity, args.exceptions)
skipping to change at line 2172 skipping to change at line 2312
names[x] = i names[x] = i
if errors: if errors:
raise Exception('All paths must have a unique final directory name if basepath is none') raise Exception('All paths must have a unique final directory name if basepath is none')
rootdir = None rootdir = None
logger.debug("Rootdir is: %s", rootdir) logger.debug("Rootdir is: %s", rootdir)
except Exception as e: except Exception as e:
logger.critical("Unable to initialize: %s", (str(e))) logger.critical("Unable to initialize: %s", (str(e)))
exceptionLogger.log(e) exceptionLogger.log(e)
sys.exit(1) sys.exit(1)
# determine mode:
localmode = pickMode()
# Open the connection # Open the connection
# If we're using a local connection, create the domain socket, and start the server running. backend = None
if args.local: backendThread = None
tempsocket = os.path.join(tempfile.gettempdir(), "tardis_local_" + str(os.getpid())) # Create a scheduler thread, if need be
port = tempsocket scheduler = ThreadedScheduler.ThreadedScheduler() if args.progress else None
server = None
subserver = runServer(args.serverprog, tempsocket)
if subserver is None:
logger.critical("Unable to create server")
sys.exit(1)
# Get the connection object # Get the connection object
try: try:
conn = getConnection(server, port) if localmode:
(conn, backend, backendThread) = runBackend(jobname)
else:
conn = getConnection(server, port)
startBackup(name, args.priority, args.client, auto, args.force, args.full, args.create, password) startBackup(name, args.priority, args.client, auto, args.force, args.full, args.create, password)
except Exception as e: except Exception as e:
logger.critical("Unable to start session with %s:%s: %s", server, port, str(e)) logger.critical("Unable to start session with %s:%s: %s", server, port, str(e))
exceptionLogger.log(e) exceptionLogger.log(e)
sys.exit(1) sys.exit(1)
if verbosity or args.stats or args.report: if verbosity or args.stats or args.report != 'none':
logger.log(logging.STATS, "Name: {} Server: {}:{} Session: {}".format(ba ckupName, server, port, sessionid)) logger.log(logging.STATS, "Name: {} Server: {}:{} Session: {}".format(ba ckupName, server, port, sessionid))
# Initialize the progress bar, if requested # Initialize the progress bar, if requested
if args.progress: if args.progress:
statusBar = initProgressBar() statusBar = initProgressBar(scheduler)
if scheduler:
scheduler.start()
# Send a command line # Send a command line
clHash = crypt.getHash() clHash = crypt.getHash()
clHash.update(bytes(commandLine, 'utf8')) clHash.update(bytes(commandLine, 'utf8'))
h = clHash.hexdigest() h = clHash.hexdigest()
encrypt, iv = makeEncryptor() encrypt, iv = makeEncryptor()
if iv is None: if iv is None:
iv = b'' iv = b''
data = iv + encrypt.encrypt(bytes(commandLine, 'utf8')) + encrypt.finish() + encrypt.digest() data = iv + encrypt.encrypt(bytes(commandLine, 'utf8')) + encrypt.finish() + encrypt.digest()
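[Editor's note: the block above records the invocation: the command line is hashed, then shipped encrypted as iv + ciphertext + authentication digest. makeEncryptor() and crypt.getHash() are Tardis internals not shown in this diff; purely to illustrate the same wire layout, here is a sketch using the cryptography package's AES-GCM as a stand-in cipher:]

    import os
    import hashlib
    from cryptography.hazmat.primitives.ciphers.aead import AESGCM

    key = AESGCM.generate_key(bit_length=256)    # stand-in for the session key
    commandLine = 'tardis --client laptop /home'

    h = hashlib.sha256(commandLine.encode('utf8')).hexdigest()   # cf. clHash above
    iv = os.urandom(12)
    # AESGCM appends its 16-byte authentication tag to the ciphertext,
    # giving the same iv + ciphertext + digest layout as the code above.
    data = iv + AESGCM(key).encrypt(iv, commandLine.encode('utf8'), None)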
skipping to change at line 2231 skipping to change at line 2377
if a['password']: if a['password']:
a['password'] = '-- removed --' a['password'] = '-- removed --'
jsonArgs = json.dumps(a, cls=Util.ArgJsonEncoder, sort_keys=True) jsonArgs = json.dumps(a, cls=Util.ArgJsonEncoder, sort_keys=True)
message = { message = {
"message": "CLICONFIG", "message": "CLICONFIG",
"args": jsonArgs "args": jsonArgs
} }
batchMessage(message) batchMessage(message)
# Now, do the actual work here. # Now, do the actual work here.
exc = None
try: try:
# Now, process all the actual directories # Now, process all the actual directories
for directory in directories: for directory in directories:
# skip if already processed. # skip if already processed.
if directory in processedDirs: if directory in processedDirs:
continue continue
# Create the fake directory entry(s) for this. # Create the fake directory entry(s) for this.
if rootdir: if rootdir:
createPrefixPath(rootdir, directory) createPrefixPath(rootdir, directory)
root = rootdir root = rootdir
else: else:
(root, name) = os.path.split(directory) (root, name) = os.path.split(directory)
f = mkFileInfo(root, name) f = mkFileInfo(FakeDirEntry(root, name))
sendDirEntry(0, 0, [f]) sendDirEntry(0, 0, [f])
# And run the directory # And run the directory
recurseTree(directory, root, depth=args.maxdepth, excludes=globalExcludes) recurseTree(directory, root, depth=args.maxdepth, excludes=globalExcludes)
# If any metadata, clone or batch requests still lying around, send them now # If any metadata, clone or batch requests still lying around, send them now
if newmeta: if newmeta:
batchMessage(makeMetaMessage()) batchMessage(makeMetaMessage())
flushClones() flushClones()
while flushBatchMsgs(): while flushBatchMsgs():
pass pass
# Send a purge command, if requested. # Send a purge command, if requested.
if args.purge: if args.purge:
if args.purgetime: if args.purgetime:
sendPurge(False) sendPurge(False)
else: else:
sendPurge(True) sendPurge(True)
conn.close()
except KeyboardInterrupt as e: except KeyboardInterrupt as e:
logger.warning("Backup Interupted") logger.warning("Backup Interupted")
exc = "Backup Interrupted"
#exceptionLogger.log(e) #exceptionLogger.log(e)
except ExitRecursionException as e: except ExitRecursionException as e:
root = e.rootException root = e.rootException
logger.error("Caught exception: %s, %s", root.__class__.__name__, root) logger.error("Caught exception: %s, %s", root.__class__.__name__, root)
exc = str(e)
#exceptionLogger.log(root) #exceptionLogger.log(root)
except Exception as e: except Exception as e:
logger.error("Caught exception: %s, %s", e.__class__.__name__, e) logger.error("Caught exception: %s, %s", e.__class__.__name__, e)
exc = str(e)
exceptionLogger.log(e) exceptionLogger.log(e)
finally:
setProgress("Finishing backup", "")
conn.close(exc)
if localmode:
conn.send(Exception("Terminate connection"))
if args.progress: if localmode:
statusBar.shutdown()
if args.local:
logger.info("Waiting for server to complete") logger.info("Waiting for server to complete")
subserver.wait() # Should I do communicate? backendThread.join() # Should I do communicate?
endtime = datetime.datetime.now() endtime = datetime.datetime.now()
if args.sanity: if args.sanity:
# Sanity checks. Enable for debugging. # Sanity checks. Enable for debugging.
if len(cloneContents) != 0: if len(cloneContents) != 0:
logger.warning("Some cloned directories not processed: %d", len(clon eContents)) logger.warning("Some cloned directories not processed: %d", len(clon eContents))
for key in cloneContents: for key in cloneContents:
(path, files) = cloneContents[key] (path, files) = cloneContents[key]
print("{}:: {}".format(path, len(files))) print("{}:: {}".format(path, len(files)))
# This next one is usually non-zero, for some reason. Enable to debug. # This next one is usually non-zero, for some reason. Enable to debug.
if len(inodeDB) != 0: #if len(inodeDB) != 0:
logger.warning("%d InodeDB entries not processed", len(inodeDB)) #logger.warning("%d InodeDB entries not processed", len(inodeDB))
for key in list(inodeDB.keys()): #for key in list(inodeDB.keys()):
(_, path) = inodeDB[key] #(_, path) = inodeDB[key]
print("{}:: {}".format(key, path)) #print("{}:: {}".format(key, path))
if args.progress:
statusBar.shutdown()
# Print stats and files report # Print stats and files report
if args.stats: if args.stats:
printStats(starttime, endtime) printStats(starttime, endtime)
if args.report != 'none': if args.report != 'none':
printReport(args.report) printReport(args.report)
if args.local:
os.unlink(tempsocket)
print('') print('')
if __name__ == '__main__': if __name__ == '__main__':
sys.exit(main()) sys.exit(main())
End of changes. 109 change blocks. 230 lines changed or deleted, 384 lines changed or added.