"Fossies" - the Fresh Open Source Software Archive  

Source code changes of the file "src/Tardis/Daemon.py" between
Tardis-1.1.5.tar.gz and Tardis-1.2.1.tar.gz

About: Tardis is a system for making encrypted, incremental backups of filesystems.

Daemon.py  (Tardis-1.1.5):Daemon.py  (Tardis-1.2.1)
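
The bulk of this diff is a refactor: nearly all of the per-connection protocol handling in TardisServerHandler (directory, signature, delta, content, clone, purge, and authentication processing) moves into the new Tardis.Backend module, and handle() now just builds a messenger and delegates to a Backend object. A minimal sketch of the new flow follows, assuming the Tardis package (1.2.1 or later) is installed; Backend.Backend(...), runBackup(), and the Messages classes are taken from the diff below, while serve_connection() and its arguments are illustrative stand-ins.

    # Condensed sketch of the 1.2.1 per-connection flow (names marked "+" below).
    # serve_connection(), sock, and server are hypothetical stand-ins for the
    # request handler, the accepted socket, and the TardisServer instance.
    import uuid

    import Tardis.Backend as Backend
    import Tardis.Messages as Messages

    def serve_connection(sock, server):
        sessionid = str(uuid.uuid1())
        # All wire traffic goes through a Messages object (JSON, MSGP, or BSON);
        # mkMessenger() now returns it instead of storing it on the handler.
        messenger = Messages.JsonMessages(sock, compress=False)
        # The protocol state machine now lives in Tardis.Backend, not the handler.
        backend = Backend.Backend(messenger, server, sessionid=sessionid)
        # runBackup() drives the whole session and returns its summary statistics.
        return backend.runBackup()  # (started, completed, endtime, orphansRemoved, orphanSize)
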
skipping to change at line 32 (1.1.5) / line 32 (1.2.1)
 # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 import os
-import types
 import sys
-import string
 import pwd
 import grp
 import argparse
 import uuid
 import logging
 import logging.config
 import configparser
 import socketserver
 import ssl
-import pprint
-import tempfile
-import shutil
 import traceback
 import signal
 import threading
 import json
 import base64
 from datetime import datetime
 # For profiling
 import cProfile
 import io
 import pstats
 import daemonize
 import colorlog
 import Tardis
+import Tardis.Backend as Backend
 import Tardis.ConnIdLogAdapter as ConnIdLogAdapter
 import Tardis.Messages as Messages
-import Tardis.CacheDir as CacheDir
-import Tardis.TardisDB as TardisDB
-import Tardis.Regenerator as Regenerator
 import Tardis.Util as Util
 import Tardis.Defaults as Defaults
 import Tardis.Connection as Connection
-import Tardis.CompressedBuffer as CompressedBuffer
-import Tardis.TardisCrypto as TardisCrypto
-import Tardis.librsync as librsync
 DONE    = 0
 CONTENT = 1
 CKSUM   = 2
 DELTA   = 3
 REFRESH = 4                # Perform a full content update
 LINKED  = 5                # Check if it's already linked
 config = None
 args = None
skipping to change at line 154 (1.1.5) / line 144 (1.2.1)
     'CksContent'           : '65536',
     'AutoPurge'            : str(False),
     'SaveConfig'           : str(True),
     'AllowClientOverrides' : str(True),
     'AllowSchemaUpgrades'  : str(False),
 }
 server = None
 logger = None
-pp = pprint.PrettyPrinter(indent=2, width=1000)
 logging.TRACE = logging.DEBUG - 1
 logging.MSGS  = logging.DEBUG - 2
-def makeDict(row):
-    if row:
-        d = {}
-        for i in list(row.keys()):
-            d[i] = row[i]
-        return d
-    return None
 class InitFailedException(Exception):
     pass
 class ProtocolError(Exception):
     pass
 class TardisServerHandler(socketserver.BaseRequestHandler):
     numfiles = 0
     logger = None
     sessionid = None
skipping to change at line 190 (1.1.5) / line 170 (1.2.1)
     purged = False
     full = False
     statNewFiles = 0
     statUpdFiles = 0
     statDirs = 0
     statBytesReceived = 0
     statPurgedFiles = 0
     statPurgedSets = 0
     statCommands = {}
     address = ''
-    regenerator = None
     basedir = None
     autoPurge = False
     saveConfig = False
-    deltaPercent = 80
     forceFull = False
     saveFull = False
     lastCompleted = None
-    maxChain = 0
-    def checkMessage(self, message, expected):
-        """ Check that a message is of the expected type.  Throw an exception if not """
-        if not message['message'] == expected:
-            logger.critical("Expected {} message, received {}".format(expected, message['message']))
-            raise ProtocolError("Expected {} message, received {}".format(expected, message['message']))
     def setup(self):
-        self.statCommands = {}
-        self.sessionid = str(uuid.uuid1())
-        log = logging.getLogger('Tardis')
-        self.idstr = self.sessionid[0:13]       # Leading portion (ie, timestamp) of the UUID.  Sufficient for logging.
-        self.logger = ConnIdLogAdapter.ConnIdLogAdapter(log, {'connid': self.idstr})
-        self.printMessages = True if self.logger.isEnabledFor(logging.TRACE) else False
         if self.client_address:
             self.address = self.client_address[0]
         else:
             self.address = 'localhost'
-        self.logger.info("Request received from: %s Session: %s", self.address, self.sessionid)
-        self.tempPrefix = self.sessionid + "-"
-        # Not quite sure why I do this here.  But just in case.
-        os.umask(self.server.umask)
+        log = logging.getLogger('Tardis')
+        self.sessionid = str(uuid.uuid1())
+        self.logger = ConnIdLogAdapter.ConnIdLogAdapter(log, {'connid': self.sessionid[0:13]})
+        self.logger.info("Session created from: %s", self.address)
     def finish(self):
         self.logger.info("Ending session %s from %s", self.sessionid, self.address)
-        self.server.rmSession(self.sessionid)
-    def setXattrAcl(self, inode, device, xattr, acl):
-        self.logger.debug("Setting Xattr and ACL info: %d %s %s", inode, xattr, acl)
-        if xattr:
-            self.db.setXattrs(inode, device, xattr)
-        if acl:
-            self.db.setAcl(inode, device, acl)
-    def sendMessage(self, message):
-        if not 'message' in message:
-            self.logger.error("No `message` block in message: %s", message)
-        if self.printMessages:
-            self.logger.log(logging.TRACE, "Sending:\n" + pp.pformat(message))
-        self.messenger.sendMessage(message)
-    def recvMessage(self):
-        message = self.messenger.recvMessage()
-        if self.printMessages:
-            self.logger.log(logging.TRACE, "Received:\n" + pp.pformat(message))
-        return message
-    sizes = set()
-    sizesLoaded = False
-    def checkForSize(self, size):
-        if not self.sizesLoaded:
-            self.logger.debug("Loading sizes")
-            for i in self.db.getFileSizes(self.server.cksContent):
-                self.sizes.add(i[0])
-            self.logger.debug("Size loading complete: %d", len(self.sizes))
-            self.sizesLoaded = True
-        if (size > self.server.cksContent) and (size in self.sizes):
-            return CKSUM
-        else:
-            return CONTENT
-    def checkFile(self, parent, f, dirhash):
-        """
-        Process an individual file.  Check to see if it's different from what's there already
-        """
-        xattr = None
-        acl = None
-        self.logger.debug("Processing file: %s %s", str(f), str(parent))
-        name = f["name"]
-        inode = f["inode"]
-        device = f["dev"]
-        if 'xattr' in f:
-            xattr = f['xattr']
-        if 'acl' in f:
-            acl = f['acl']
-        #self.logger.debug("Processing Inode: %8d %d -- File: %s -- Parent: %s", inode, device, name, str(parent))
-        #self.logger.debug("DirHash: %s", str(dirhash))
-        if name in dirhash:
-            old = dirhash[name]
-        else:
-            old = None
-        fromPartial = False
-        if f["dir"] == 1:
-            #self.logger.debug("Is a directory: %s", name)
-            if old:
-                if (old["inode"] == inode) and (old["device"] == device) and (old["mtime"] == f["mtime"]):
-                    self.db.extendFileInode(parent, (inode, device))
-                else:
-                    self.db.insertFile(f, parent)
-                    self.setXattrAcl(inode, device, xattr, acl)
-            else:
-                self.db.insertFile(f, parent)
-                self.setXattrAcl(inode, device, xattr, acl)
-            retVal = DONE
-        else:       # Not a directory, it's a file
-            # Check to see if there's an updated version.
-            if not self.lastCompleted:
-                # not in this directory, but lets look further in any incomplete sets if there are any
-                #self.logger.debug("Looking up file in partial backup(s): %s (%s)", name, inode)
-                tmp = self.db.getFileFromPartialBackup(f)
-                if tmp:
-                    old = tmp
-                    fromPartial = old['lastset']
-                    self.logger.debug("Found %s in partial backup set: %d", name, old['lastset'])
-            if old:
-                #self.logger.debug("Comparing version: New: %s", str(f))
-                #self.logger.debug("Comparing version: Old: %s", str(makeDict(old)))
-                # Got something.  If the inode, size, and mtime are the same, just keep it
-                fsize = f['size']
-                osize = old['size']
-                if (old["inode"] == inode) and (old['device'] == device) and (osize == fsize) and (old["mtime"] == f["mtime"]):
-                    #self.logger.debug("Main info matches: %s", name)
-                    #if ("checksum" in old.keys()) and not (old["checksum"] is None):
-                    if not old["checksum"] is None:
-                        #self.db.setChecksum(inode, device, old['checksum'])
-                        if (old['mode'] == f['mode']) and (old['ctime'] == f['ctime']) and (old['xattrs'] == xattr) and (old['acl'] == acl):
-                            # nothing has changed, just extend it
-                            #self.logger.debug("Extending %s", name)
-                            self.db.extendFileInode(parent, (inode, device), old=fromPartial)
-                        else:
-                            # Some metadata has changed, so let's insert the new record, and set it's checksum
-                            #self.logger.debug("Inserting new version %s", name)
-                            self.db.insertFile(f, parent)
-                            self.db.setChecksum(inode, device, old['checksum'])
-                            self.setXattrAcl(inode, device, xattr, acl)
-                        if self.full and old['chainlength'] != 0:
-                            retVal = REFRESH
-                        else:
-                            retVal = DONE       # we're done either way
-                    else:
-                        # Otherwise we need a whole new file
-                        #self.logger.debug("No checksum: Get new file %s", name)
-                        self.db.insertFile(f, parent)
-                        self.setXattrAcl(inode, device, xattr, acl)
-                        retVal = CONTENT
-                #elif (osize == fsize) and ("checksum" in old.keys()) and not (old["checksum"] is None):
-                elif (osize == fsize) and (not old["checksum"] is None):
-                    #self.logger.debug("Secondary match, requesting checksum: %s", name)
-                    # Size hasn't changed, but something else has.  Ask for a checksum
-                    self.db.insertFile(f, parent)
-                    self.setXattrAcl(inode, device, xattr, acl)
-                    retVal = CKSUM
-                elif (f["size"] < 4096) or (old["size"] is None) or \
-                     not ((old['size'] * self.deltaPercent) < f['size'] < (old['size'] * (1.0 + self.deltaPercent))) or \
-                     ((old["basis"] is not None) and (old["chainlength"]) >= self.maxChain):
-                    #self.logger.debug("Third case.  Weirdos: %s", name)
-                    # Couple conditions that can cause it to always load
-                    # File is less than 4K
-                    # Old file had now size
-                    # File has changed size by more than a certain amount (typically 50%)
-                    # Chain of delta's is too long.
-                    self.db.insertFile(f, parent)
-                    self.setXattrAcl(inode, device, xattr, acl)
-                    retVal = REFRESH
-                else:
-                    # Otherwise, let's just get the delta
-                    #self.logger.debug("Fourth case.  Should be a delta: %s", name)
-                    self.db.insertFile(f, parent)
-                    self.setXattrAcl(inode, device, xattr, acl)
-                    if self.full:
-                        # Full backup, request the full version anyhow.
-                        retVal = CONTENT
-                    else:
-                        retVal = DELTA
-            else:
-                # Create a new record for this file
-                #self.logger.debug("No file found: %s", name)
-                self.db.insertFile(f, parent)
-                self.setXattrAcl(inode, device, xattr, acl)
-                if f["nlinks"] > 1:
-                    # We're a file, and we have hard links.  Check to see if I've already been handled this inode.
-                    # self.logger.debug('Looking for file with same inode %d: %s', inode, f['name'])
-                    # TODO: Check that the file hasn't changed since it was last written.  If file is in flux,
-                    # it's a problem.
-                    checksum = self.db.getChecksumByInode(inode, device, True)
-                    #self.logger.debug('Checksum for inode %d: %s -- %s', inode, f['name'], checksum)
-                    if checksum:
-                        #self.logger.debug('Setting linked inode %d: %s to checksum %s', inode, f['name'], checksum)
-                        self.db.setChecksum(inode, device, checksum)
-                        retVal = LINKED         # special value, allowing the caller to determine that this was handled as a link
-                    else:
-                        #self.logger.debug('No link data found for inode %d: %s. Requesting new content', inode, f['name'])
-                        retVal = CONTENT
-                else:
-                    #Check to see if it's been moved or copied
-                    #self.logger.debug(u'Looking for similar file: %s (%s)', name, inode)
-                    # BUG: Don't we need to extend or insert the file here?
-                    old = self.db.getFileInfoBySimilar(f)
-                    if old:
-                        if (old["name"] == f["name"]) and (old["parent"] == parent) and (old['device'] == f['parentdev']):
-                            # If the name and parent ID are the same, assume it's the same
-                            #if ("checksum" in old.keys()) and not (old["checksum"] is None):
-                            if old["checksum"] is not None:
-                                self.db.setChecksum(inode, device, old['checksum'])
-                                retVal = DONE
-                            else:
-                                retVal = CONTENT
-                        else:
-                            # otherwise
-                            retVal = CKSUM
-                    else:
-                        # TODO: Lookup based on inode.
-                        #self.logger.debug("No old file.")
-                        retVal = self.checkForSize(f['size'])
-        return retVal
-    lastDirNode = None
-    lastDirHash = {}
-    def processDir(self, data):
-        """ Process a directory message.  Lookup each file in the previous backup set, and determine if it's changed. """
-        #self.logger.debug(u'Processing directory entry: {} : {}'.format(data["path"], str(data["inode"])))
-        # Create some sets that we'll collect the inodes into
-        # Use sets to remove duplicates due to hard links in a directory
-        done = set()
-        cksum = set()
-        content = set()
-        delta = set()
-        refresh = set()
-        attrs = set()
-        # Keep the order
-        queues = [done, content, cksum, delta, refresh]
-        parentInode = tuple(data['inode'])      # Contains both inode and device in message
-        files = data['files']
-        dirhash = {}
-        oldDir = None
-        # Get the old directory info
-        # If we're still in the same directory, use cached info
-        if self.lastDirNode == parentInode:
-            dirhash = self.lastDirHash
-        else:
-            # Lookup the old directory based on the path
-            if 'path' in data and data['path']:
-                oldDir = self.db.getFileInfoByPath(data['path'], current=False)
-            # If found, read that' guys directory
-            if oldDir and oldDir['dir'] == 1:
-                #### TODO: FIXME: Get actual Device
-                dirInode = (oldDir['inode'], oldDir['device'])
-            else:
-                # Otherwise
-                dirInode = parentInode
-            directory = self.db.readDirectory(dirInode)
-            for i in directory:
-                dirhash[i["name"]] = i
-            self.lastDirHash = dirhash
-            self.lastDirNode = parentInode
-            self.logger.debug("Got directory: %s", str(dirhash))
-        for f in files:
-            fileId = (f['inode'], f['dev'])
-            self.logger.debug('Processing file: %s %s', f['name'], str(fileId))
-            res = self.checkFile(parentInode, f, dirhash)
-            # Shortcut for this:
-            #if res == 0: done.append(inode)
-            #elif res == 1: content.append(inode)
-            #elif res == 2: cksum.append(inode)
-            #elif res == 3: delta.append(inode)
-            if res == LINKED:
-                # Determine if this fileid is already in one of the queues
-                if not filter(lambda x: fileId in x, queues):
-                    queues[DONE].add(fileId)
-            else:
-                queues[res].add(fileId)
-            if 'xattr' in f:
-                xattr = f['xattr']
-                # Check to see if we have this checksum
-                info = self.db.getChecksumInfo(xattr)
-                if (not info) or (info['size'] == -1):
-                    attrs.add(xattr)
-        response = {
-            "message"   : "ACKDIR",
-            "status"    : "OK",
-            "path"      : data["path"],
-            "inode"     : data["inode"],
-            "last"      : data["last"],
-            "done"      : list(done),
-            "cksum"     : list(cksum),
-            "content"   : list(content),
-            "delta"     : list(delta),
-            "refresh"   : list(refresh),
-            "xattrs"    : list(attrs)
-        }
-        return (response, True)
-    def processDirHash(self, message):
-        checksum = message['hash']
-        inode = tuple(message['inode'])
-        ckinfo = self.db.getChecksumInfo(checksum)
-        if ckinfo:
-            cksid = ckinfo['checksumid']
-        else:
-            cksid = self.db.insertChecksumFile(checksum, encrypted=False, size=message['size'], isFile=False)
-        self.db.updateDirChecksum(inode, cksid)
-        response = {
-            "message" : "ACKDHSH",
-            "status"  : "OK"
-        }
-        return (response, False)
-    def processManySigsRequest(self, message):
-        inodes = message['inodes']
-        for i in inodes:
-            (inode, dev) = i
-            self.sendSignature(inode, dev)
-        response = {
-            'message': "SIG",
-            'status' : "DONE"
-        }
-        return(response, True)
-    def processSigRequest(self, message):
-        """ Generate and send a signature for a file """
-        #self.logger.debug("Processing signature request message: %s"format(str(message)))
-        (inode, dev) = message["inode"]
-        return self.sendSignature(inode, dev)
-    def sendSignature(self, inode, dev):
-        response = None
-        chksum = None
-        errmsg = None
-        ### TODO: Remove this function.  Clean up.
-        info = self.db.getFileInfoByInode((inode, dev), current=True)
-        if info:
-            chksum = self.db.getChecksumByName(info["name"], (info["parent"], info["parentdev"]))      ### Assumption: Current parent is same as old
-        else:
-            self.logger.error("No Checksum Info available for %s in (%d, %d)", info['name'], info['parent'], info['parentdev'])
-        if chksum:
-            try:
-                sigfile = chksum + ".sig"
-                if self.cache.exists(sigfile):
-                    sigfile = self.cache.open(sigfile, "rb")
-                    sig = sigfile.read()        # TODO: Does this always read the entire file?
-                    sigfile.close()
-                else:
-                    rpipe = self.regenerator.recoverChecksum(chksum)
-                    #pipe = subprocess.Popen(["rdiff", "signature"], stdin=rpipe, stdout=subprocess.PIPE)
-                    #pipe = subprocess.Popen(["rdiff", "signature", self.cache.path(chksum)], stdout=subprocess.PIPE)
-                    #(sig, err) = pipe.communicate()
-                    # Cache the signature for later use.  Just in case.
-                    # TODO: Better logic on this?
-                    if rpipe:
-                        try:
-                            s = librsync.signature(rpipe)
-                            sig = s.read()
-                            outfile = self.cache.open(sigfile, "wb")
-                            outfile.write(sig)
-                            outfile.close()
-                        except (librsync.LibrsyncError, Regenerator.RegenerateException) as e:
-                            self.logger.error("Unable to generate signature for inode: {}, checksum: {}: {}".format(inode, chksum, e))
-                # TODO: Break the signature out of here.
-                response = {
-                    "message": "SIG",
-                    "inode": (inode, dev),
-                    "status": "OK",
-                    "encoding": self.messenger.getEncoding(),
-                    "checksum": chksum,
-                    "size": len(sig) }
-                self.sendMessage(response)
-                sigio = io.BytesIO(sig)
-                Util.sendDataPlain(self.messenger, sigio, compress=None)
-                return (None, False)
-            except Exception as e:
-                self.logger.error("Could not recover data for checksum: %s: %s", chksum, str(e))
-                if args.exceptions:
-                    logger.exception(e)
-                errmsg = str(e)
-        if response is None:
-            response = {
-                "message": "SIG",
-                "inode": inode,
-                "status": "FAIL"
-            }
-            if errmsg:
-                response['errmsg'] = errmsg
-        return (response, False)
-    def processDelta(self, message):
-        """ Receive a delta message. """
-        self.logger.debug("Processing delta message: %s", message)
-        output = None
-        temp = None
-        checksum = message["checksum"]
-        basis = message["basis"]
-        size = message["size"]          # size of the original file, not the content
-        (inode, dev) = message["inode"]
-        deltasize = message['deltasize'] if 'deltasize' in message else None
-        encrypted = message.get('encrypted', False)
-        savefull = self.server.savefull and not encrypted
-        if self.cache.exists(checksum):
-            self.logger.debug("Checksum file %s already exists", checksum)
-            # Abort read
-        else:
-            if not savefull:
-                chainLength = self.db.getChainLength(basis)
-                if chainLength >= self.maxChain:
-                    self.logger.debug("Chain length %d.  Converting %s (%s) to full save", chainLength, basis, inode)
-                    savefull = True
-            if savefull:
-                # Save the full output, rather than just a delta.  Save the delta to a file
-                #output = tempfile.NamedTemporaryFile(dir=self.tempdir, delete=True)
-                output = tempfile.SpooledTemporaryFile(dir=self.tempdir, prefix=self.tempPrefix)
-            else:
-                output = self.cache.open(checksum, "wb")
-        (bytesReceived, status, deltaSize, deltaChecksum, compressed) = Util.receiveData(self.messenger, output)
-        self.logger.debug("Data Received: %d %s %d %s %s", bytesReceived, status, deltaSize, deltaChecksum, compressed)
-        if status != 'OK':
-            self.logger.warning("Received invalid status on data reception")
-        if deltasize is None:
-            # BUG: should actually be the uncompressed size, but hopefully we won't get here.
-            deltasize = bytesReceived
-        self.statBytesReceived += bytesReceived
-        Util.recordMetaData(self.cache, checksum, size, compressed, encrypted, bytesReceived, basis=basis, logger=self.logger)
-        # update the database stats
-        self.db.setStats(self.statNewFiles, self.statUpdFiles, self.statBytesReceived)
-        if output:
-            try:
-                if savefull:
-                    output.seek(0)
-                    if compressed:
-                        delta = CompressedBuffer.UncompressedBufferedReader(output)
-                        # HACK: Monkeypatch the buffer reader object to have a seek function to keep librsync happy.  Never gets called
-                        delta.seek = lambda x, y: 0
-                    else:
-                        delta = output
-                    # Process the delta file into the new file.
-                    #subprocess.call(["rdiff", "patch", self.cache.path(basis), output.name], stdout=self.cache.open(checksum, "wb"))
-                    basisFile = self.regenerator.recoverChecksum(basis)
-                    # Can't use isinstance
-                    if type(basisFile) != types.FileType:
-                        # TODO: Is it possible to get here?  Is this just dead code?
-                        temp = basisFile
-                        basisFile = tempfile.TemporaryFile(dir=self.tempdir, prefix=self.tempPrefix)
-                        shutil.copyfileobj(temp, basisFile)
-                    patched = librsync.patch(basisFile, delta)
-                    shutil.copyfileobj(patched, self.cache.open(checksum, "wb"))
-                    self.db.insertChecksumFile(checksum, encrypted, size=size, disksize=bytesReceived)
-                else:
-                    if self.server.linkBasis:
-                        self.cache.link(basis, checksum + ".basis")
-                    self.db.insertChecksumFile(checksum, encrypted, size=size, deltasize=deltasize, basis=basis, compressed=compressed, disksize=bytesReceived)
-                # Track that we've added a file of this size.
-                self.sizes.add(size)
-                self.statUpdFiles += 1
-                self.logger.debug("Setting checksum for inode %s to %s", inode, checksum)
-                self.db.setChecksum(inode, dev, checksum)
-            except Exception as e:
-                self.logger.error("Could not insert checksum %s: %s", checksum, str(e))
-            output.close()
-        # TODO: This has gotta be wrong.
-        flush = True if size > 1000000 else False
-        return (None, flush)
-    def processSignature(self, message):
-        """ Receive a signature message. """
-        self.logger.debug("Processing signature message: %s", message)
-        output = None
-        checksum = message["checksum"]
-        # If a signature is specified, receive it as well.
-        sigfile = checksum + ".sig"
-        if self.cache.exists(sigfile):
-            self.logger.debug("Signature file %s already exists", sigfile)
-            # Abort read
-        else:
-            output = self.cache.open(sigfile, "wb")
-        # TODO: Record these in stats
-        (bytesReceived, status, size, checksum, compressed) = Util.receiveData(self.messenger, output)
-        if output is not None:
-            output.close()
-        #self.db.setChecksum(inode, device, checksum)
-        return (None, False)
-    def processChecksum(self, message):
-        """ Process a list of checksums """
-        self.logger.debug("Processing checksum message: %s", message)
-        done = []
-        delta = []
-        content = []
-        for f in message["files"]:
-            (inode, dev) = f["inode"]
-            cksum = f["checksum"]
-            # Check to see if the checksum exists
-            # TODO: Is this faster than checking if the file exists?  Probably, but should test.
-            info = self.db.getChecksumInfo(cksum)
-            if info and info['isfile'] and info['size'] >= 0:
-                self.db.setChecksum(inode, dev, cksum)
-                done.append(f['inode'])
-            else:
-                # FIXME: TODO: If no checksum, should we request a delta???
-                #old = self.db.getFileInfoByInode((inode, dev))
-                #if old and old['chainlength'] < self.maxChain:
-                    #delta.append(f['inode'])
-                #else:
-                content.append(f['inode'])
-        message = {
-            "message": "ACKSUM",
-            "status" : "OK",
-            "done"   : done,
-            "content": content,
-            "delta"  : delta
-        }
-        return (message, False)
-    def processMeta(self, message):
-        """ Check metadata messages """
-        metadata = message['metadata']
-        encrypted = message.get('encrypted', False)
-        done = []
-        content = []
-        for cksum in metadata:
-            info = self.db.getChecksumInfo(cksum)
-            if info and info['size'] != -1:
-                done.append(cksum)
-            else:
-                # Insert a placeholder with a negative size
-                # But only if we don't already have one, left over from a previous failing build.
-                if not info:
-                    self.db.insertChecksumFile(cksum, encrypted, -1)
-                content.append(cksum)
-        message = {
-            'message': 'ACKMETA',
-            'content': content,
-            'done': done
-        }
-        return (message, False)
-    def processMetaData(self, message):
-        """ Process a content message, including all the data content chunks """
-        self.logger.debug("Processing metadata message: %s", message)
-        checksum = message['checksum']
-        if self.cache.exists(checksum):
-            self.logger.debug("Checksum file %s already exists", checksum)
-            output = io.BytesIO()       # Accumulate into a throwaway string
-        else:
-            output = self.cache.open(checksum, "wb")
-        encrypted = message.get('encrypted', False)
-        (bytesReceived, status, size, cks, compressed) = Util.receiveData(self.messenger, output)
-        self.logger.debug("Data Received: %d %s %d %s %s", bytesReceived, status, size, checksum, compressed)
-        output.close()
-        self.db.updateChecksumFile(checksum, encrypted, size, compressed=compressed, disksize=bytesReceived)
-        self.statNewFiles += 1
-        self.statBytesReceived += bytesReceived
-        return (None, False)
-    def processPurge(self, message = {}):
-        self.logger.debug("Processing purge message: {}".format(str(message)))
-        prevTime = None
-        if 'time' in message:
-            if message['relative']:
-                prevTime = float(self.db.prevBackupDate) - float(message['time'])
-            else:
-                prevTime = float(message['time'])
-        elif self.serverKeepTime:
-            prevTime = float(self.db.prevBackupDate) - float(self.serverKeepTime)
-        if 'priority' in message:
-            priority = message['priority']
-        else:
-            priority = self.serverPriority
-        # Purge the files
-        if prevTime:
-            (files, sets) = self.db.purgeSets(priority, prevTime)
-            self.statPurgedSets += sets
-            self.statPurgedFiles += files
-            self.logger.info("Purged %d files in %d backup sets", files, sets)
-            if files:
-                self.purged = True
-            return ({"message": "ACKPRG", "status": "OK"}, True)
-        else:
-            return ({"message": "ACKPRG", "status": "FAIL"}, True)
-    def processClone(self, message):
-        """ Clone an entire directory """
-        done = []
-        content = []
-        for d in message['clones']:
-            inode = d['inode']
-            device = d['dev']
-            inoDev = (inode, device)
-            info = self.db.getFileInfoByInode(inoDev, current=False)
-            if not info and not self.lastCompleted:
-                # Check for copies in a partial directory backup, if some exist and we didn't find one here..
-                # This should only happen in rare circumstances, namely if the list of directories to backup
-                # has changed, and a directory which is older than the last completed backup is added to the backup.
-                info = self.db.getFileInfoByInodeFromPartial(inoDev)
-            if info and info['checksum'] is not None:
-                numFiles = self.db.getDirectorySize(inoDev)
-                if numFiles is not None:
-                    #logger.debug("Clone info: %s %s %s %s", info['size'], type(info['size']), info['checksum'], type(info['checksum']))
-                    if (numFiles == d['numfiles']) and (info['checksum'] == d['cksum']):
-                        self.db.cloneDir(inoDev)
-                        if self.full:
-                            numDeltas = self.db.getNumDeltaFilesInDirectory(inoDev, current=False)
-                            if numDeltas > 0:
-                                # Oops, there's a delta file in here on a full backup.
-                                #self.logger.debug("Inode %d contains %d deltas on full backup.  Requesting refresh.", inode, numDeltas)
-                                content.append(inoDev)
-                            else:
-                                # No delta files for full backup.  We're done
-                                done.append(inoDev)
-                        else:
-                            done.append(inoDev)
-                    else:
-                        #self.logger.debug("No match on clone.  Inode: %d Rows: %d %d Checksums: %s %s", inode, int(info['size']), d['numfiles'], info['checksum'], d['cksum'])
-                        content.append(inoDev)
-                else:
-                    #self.logger.debug("Unable to get number of files to process clone (%d %d)", inode, device)
-                    content.append(inoDev)
-            else:
-                #self.logger.debug("No info available to process clone (%d %d)", inode, device)
-                content.append(inoDev)
-        return ({"message" : "ACKCLN", "done" : done, 'content' : content }, True)
-    _sequenceNumber = 0
-    def processContent(self, message):
-        """ Process a content message, including all the data content chunks """
-        self.logger.debug("Processing content message: %s", message)
-        tempName = None
-        checksum = None
-        if "checksum" in message:
-            checksum = message["checksum"]
-            if self.cache.exists(checksum):
-                self.logger.debug("Checksum file %s already exists", checksum)
-            output = self.cache.open(checksum, "w")
-        else:
-            #temp = tempfile.NamedTemporaryFile(dir=self.tempdir, delete=False, prefix=self.tempPrefix)
-            tempName = os.path.join(self.tempdir, self.tempPrefix + str(self._sequenceNumber))
-            self._sequenceNumber += 1
-            self.logger.debug("Sending output to temporary file %s", tempName)
-            output = open(tempName, 'wb')
-        encrypted = message.get('encrypted', False)
-        (bytesReceived, status, size, checksum, compressed) = Util.receiveData(self.messenger, output)
-        self.logger.debug("Data Received: %d %s %d %s %s", bytesReceived, status, size, checksum, compressed)
-        output.close()
-        try:
-            if tempName:
-                if self.cache.exists(checksum):
-                    # Error check.  Sometimes files can get into the cachedir without being recorded.
-                    ckInfo = self.db.getChecksumInfo(checksum)
-                    if ckInfo is None:
-                        self.logger.warning("Checksum file %s exists, but no DB entry.  Reinserting", checksum)
-                        self.cache.insert(checksum, tempName)
-                        self.db.insertChecksumFile(checksum, encrypted, size, compressed=compressed, disksize=bytesReceived)
-                    else:
-                        if self.full:
-                            self.logger.debug("Replacing existing checksum file for %s", checksum)
-                            self.cache.insert(checksum, tempName)
-                            self.db.updateChecksumFile(checksum, encrypted, size, compressed=compressed, disksize=bytesReceived)
-                        else:
-                            # Check to make sure it's recorded in the DB.  If not, reinsert
-                            self.logger.debug("Checksum file %s already exists.  Deleting temporary version", checksum)
-                            os.remove(tempName)
-                else:
-                    self.cache.insert(checksum, tempName)
-                    self.db.insertChecksumFile(checksum, encrypted, size, compressed=compressed, disksize=bytesReceived)
-            else:
-                self.db.insertChecksumFile(checksum, encrypted, size, compressed=compressed, disksize=bytesReceived)
-            (inode, dev) = message['inode']
-            self.logger.debug("Setting checksum for inode %d to %s", inode, checksum)
-            self.db.setChecksum(inode, dev, checksum)
-            self.statNewFiles += 1
-            # Record the metadata.  Do it here after we've inserted the file because on a full backup we could overwrite
-            # a version which had a basis without updating the base file.
-            Util.recordMetaData(self.cache, checksum, size, compressed, encrypted, bytesReceived, logger=self.logger)
-            # Update the database stats
-            self.db.setStats(self.statNewFiles, self.statUpdFiles, self.statBytesReceived)
-        except Exception as e:
-            self.logger.error("Could insert checksum %s info: %s", checksum, str(e))
-            if self.server.exceptions:
-                self.logger.exception(e)
-        self.statBytesReceived += bytesReceived
-        #return {"message" : "OK", "inode": message["inode"]}
-        #flush = True if bytesReceived > 1000000 else False
-        return (None, False)
-    def processBatch(self, message):
-        batch = message['batch']
-        responses = []
-        for mess in batch:
-            (response, _) = self.processMessage(mess, transaction=False)
-            if response:
-                responses.append(response)
-        response = {
-            'message': 'ACKBTCH',
-            'responses': responses
-        }
-        return (response, True)
-    def processSetKeys(self, message):
-        filenameKey = message['filenameKey']
-        contentKey = message['contentKey']
-        srpSalt = message['srpSalt']
-        srpVkey = message['srpVkey']
-        cryptoScheme = message['cryptoScheme']
-        ret = self.db.setKeys(srpSalt, srpVkey, filenameKey, contentKey)
-        self.db.setConfigValue('CryptoScheme', cryptoScheme)
-        response = {
-            'message': 'ACKSETKEYS',
-            'response': 'OK' if ret else 'FAIL'
-        }
-        return (response, True)
-    def processClientConfig(self, message):
-        if self.saveConfig:
-            clientConfig = message['args']
-            self.logger.debug("Received client config: %s", clientConfig)
-            self.db.setClientConfig(clientConfig)
-        response = {
-            'message': 'ACKCLICONFIG',
-            'saved': self.saveConfig
-        }
-        return (response, False)
-    def processCommandLine(self, message):
-        cksum = message['hash']
-        self.logger.debug("Received command line")
-        ckInfo = self.db.getChecksumInfo(cksum)
-        if ckInfo is None:
-            self.logger.debug("Inserting command line file")
-            f = self.cache.open(cksum, 'wb')
-            if type(message['line']) == bytes:
-                f.write(message['line'])
-            else:
-                f.write(bytes(message['line'], 'utf8'))
-            cksid = self.db.insertChecksumFile(cksum, message['encrypted'], size=message['size'], disksize=f.tell())
-            f.close()
-        else:
-            cksid = ckInfo['checksumid']
-        self.logger.debug("Command Line stored as checksum: %s => %d", cksum, cksid)
-        self.db.setCommandLine(cksid)
-        response = {
-            'message': 'ACKCMDLN'
-        }
-        return (response, False)
-    def processMessage(self, message, transaction=True):
-        """ Dispatch a message to the correct handlers """
-        messageType = message['message']
-        # Stats
-        self.statCommands[messageType] = self.statCommands.get(messageType, 0) + 1
-        #if transaction:
-        #    self.db.beginTransaction()
-        if messageType == "DIR":
-            (response, flush) = self.processDir(message)
-        elif messageType == "DHSH":
-            (response, flush) = self.processDirHash(message)
-        elif messageType == "SGR":
-            (response, flush) = self.processSigRequest(message)
-        elif messageType == "SGS":
-            (response, flush) = self.processManySigsRequest(message)
-        elif messageType == "SIG":
-            (response, flush) = self.processSignature(message)
-        elif messageType == "DEL":
-            (response, flush) = self.processDelta(message)
-        elif messageType == "CON":
-            (response, flush) = self.processContent(message)
-        elif messageType == "CKS":
-            (response, flush) = self.processChecksum(message)
-        elif messageType == "CLN":
-            (response, flush) = self.processClone(message)
-        elif messageType == "BATCH":
-            (response, flush) = self.processBatch(message)
-        elif messageType == "PRG":
-            (response, flush) = self.processPurge(message)
-        elif messageType == "CLICONFIG":
-            (response, flush) = self.processClientConfig(message)
-        elif messageType == "COMMANDLINE":
-            (response, flush) = self.processCommandLine(message)
-        elif messageType == "META":
-            (response, flush) = self.processMeta(message)
-        elif messageType == "METADATA":
-            (response, flush) = self.processMetaData(message)
-        elif messageType == "SETKEYS":
-            (response, flush) = self.processSetKeys(message)
-        else:
-            raise Exception("Unknown message type", messageType)
-        if response and 'msgid' in message:
-            response['respid'] = message['msgid']
-        self.db.commit()
-        return (response, flush)
-    def genPaths(self):
-        self.basedir = os.path.join(self.server.basedir, self.client)
-        dbdir = os.path.join(self.server.dbdir, self.client)
-        dbname = self.server.dbname.format({'client': self.client})
-        dbfile = os.path.join(dbdir, dbname)
-        return (dbdir, dbfile)
-    def getCacheDir(self, create):
-        try:
-            self.logger.debug("Using cache dir: %s", self.basedir)
-            return CacheDir.CacheDir(self.basedir, 1, 2,
-                                     create=(self.server.allowNew and create),
-                                     user=self.server.user,
-                                     group=self.server.group,
-                                     skipFile=self.server.skip)
-        except CacheDir.CacheDirDoesNotExist as e:
-            if not self.server.allowNew:
-                raise InitFailedException("Server does not allow new clients")
-            else:
-                raise InitFailedException("Must request new client (--create))")
-    def getDB(self, client, create):
-        script = None
-        ret = "EXISTING"
-        journal = None
-        (dbdir, dbfile) = self.genPaths()
-        if create and os.path.exists(dbfile):
-            raise InitFailedException("Cannot create client %s.  Already exists" % (client))
-        self.cache = self.getCacheDir(create)
-        connid = {'connid': self.idstr }
-        if not os.path.exists(dbfile):
-            if not os.path.exists(dbdir):
-                os.makedirs(dbdir)
-            self.logger.debug("Initializing database for %s with file %s", client, schemaFile)
-            script = schemaFile
-            ret = "NEW"
-        if self.server.journal:
-            journal = os.path.join(dbdir, self.server.journal)
-        self.db = TardisDB.TardisDB(dbfile,
-                                    initialize=script,
-                                    backup=(self.server.dbbackups > 0),
-                                    connid=connid,
-                                    user=self.server.user,
-                                    group=self.server.group,
-                                    numbackups=self.server.dbbackups,
-                                    journal=journal,
-                                    allow_upgrade = self.server.allowUpgrades)
-        self.regenerator = Regenerator.Regenerator(self.cache, self.db)
-        return ret
-    def setConfig(self):
-        self.formats      = self.server.formats
-        self.priorities   = self.server.priorities
-        self.keep         = self.server.keep
-        self.forceFull    = self.server.forceFull
-        self.savefull     = self.server.savefull
-        self.maxChain     = self.server.maxChain
-        self.deltaPercent = self.server.deltaPercent
-        self.autoPurge    = self.server.autoPurge
-        self.saveConfig   = self.server.saveConfig
-        if self.server.allowOverrides:
-            try:
-                formats = self.db.getConfigValue('Formats')
-                priorities = self.db.getConfigValue('Priorities')
-                keepDays = self.db.getConfigValue('KeepDays')
-                forceFull = self.db.getConfigValue('ForceFull')
-                if formats:
-                    self.logger.debug("Overriding global name formats: %s", formats)
-                    self.formats = list(map(string.strip, formats.split(',')))
-                if priorities:
-                    self.logger.debug("Overriding global priorities: %s", priorities)
-                    self.priorities = list(map(int, priorities.split(',')))
-                if keepDays:
-                    self.logger.debug("Overriding global keep days: %s", keepDays)
-                    self.keep = list(map(int, keepDays.split(',')))
-                if forceFull:
-                    self.logger.debug("Overriding global force full: %s", forceFull)
-                    self.forceFull = list(map(int, forceFull.split(',')))
-                numFormats = len(self.formats)
-                if len(self.priorities) != numFormats or len(self.keep) != numFormats or len(self.forceFull) != numFormats:
-                    self.logger.warning("Client %s has different sizes for the lists of formats: Formats: %d Priorities: %d KeepDays: %d ForceFull: %d",
-                                        self.client, len(self.formats), len(self.priorities), len(self.keep), len(self.forceFull))
-                savefull = self.db.getConfigValue('SaveFull')
-                maxChain = self.db.getConfigValue('MaxDeltaChain')
-                deltaPercent = self.db.getConfigValue('MaxChangePercent')
-                autoPurge = self.db.getConfigValue('AutoPurge')
-                saveConfig = self.db.getConfigValue('SaveConfig')
-                if savefull is not None:
-                    self.logger.debug("Overriding global save full: %s", savefull)
-                    self.savefull = bool(savefull)
-                if maxChain is not None:
-                    self.logger.debug("Overriding global max chain length: %s", maxChain)
-                    self.maxChain = int(maxChain)
-                if deltaPercent is not None:
-                    self.logger.debug("Overriding global max change percentage: %s", deltaPercent)
-                    self.deltaPercent = float(deltaPercent) / 100.0
-                if autoPurge is not None:
-                    self.logger.debug("Overriding global autopurge value: %s", bool(autoPurge))
-                    self.autoPurge = bool(autoPurge)
-                if saveConfig is not None:
-                    self.logger.debug("Overriding global saveconfig value: %s", bool(autoPurge))
-                    self.saveconfig = bool(saveconfig)
-            except Exception as e:
-                self.logger.error("Client %s: Unable to override global configuration: %s", self.client, str(e))
-    def startSession(self, name, force):
-        self.name = name
-        # Check if the previous backup session completed.
-        prev = self.db.lastBackupSet(completed=False)
-        if prev['endtime'] is None or self.server.checkSession(prev['session']):
-            if force:
-                self.logger.warning("Staring session %s while previous backup still warning: %s", name, prev['name'])
-            else:
-                if self.server.checkSession(prev['session']):
-                    raise InitFailedException("Previous backup session still running: {}.  Run with --force to force starting the new backup".format(prev['name']))
-                else:
-                    self.logger.warning('Previous session for client %s (%s) did not complete.', self.client, prev['session'])
-        # Mark if the last secssion was completed
-        self.lastCompleted = prev['completed']
-        self.tempdir = os.path.join(self.basedir, "tmp")
-        if not os.path.exists(self.tempdir):
-            os.makedirs(self.tempdir)
-    def endSession(self):
-        try:
-            pass
-            #if (self.tempdir):
-                # Clean out the temp dir
-                #`shutil.rmtree(self.tempdir)
-        except OSError as error:
-            self.logger.warning("Unable to delete temporary directory: %s: %s", self.tempdir, error.strerror)
-    def calcAutoInfo(self, clienttime):
-        """
-        Calculate a name if autoname is passed in.
-        """
-        starttime = datetime.fromtimestamp(clienttime)
-        # Walk the automatic naming formats until we find one that's free
-        for (fmt, prio, keep, full) in zip(self.formats, self.priorities, self.keep, self.forceFull):
-            name = starttime.strftime(fmt)
-            if self.db.checkBackupSetName(name):
-                return (name, prio, keep, full)
-        # Oops, nothing worked.  Create a temporary name
-        name = starttime.strftime("Backup_%Y-%m-%d_%H:%M:%S")
-        return (name, 0, 0, False)
     def mkMessenger(self, sock, encoding, compress):
         """
         Create the messenger object to handle communications with the client
         """
         if encoding == "JSON":
-            self.messenger = Messages.JsonMessages(sock, compress=compress)
+            return Messages.JsonMessages(sock, compress=compress)
         elif encoding == 'MSGP':
-            self.messenger = Messages.MsgPackMessages(sock, compress=compress)
+            return Messages.MsgPackMessages(sock, compress=compress)
         elif encoding == "BSON":
-            self.messenger = Messages.BsonMessages(sock, compress=compress)
+            return Messages.BsonMessages(sock, compress=compress)
         else:
             message = {"status": "FAIL", "error": "Unknown encoding: {}".format(encoding)}
             sock.sendall(bytes(json.dumps(message), 'utf-8'))
             raise InitFailedException("Unknown encoding: ", encoding)
-    def doGetKeys(self):
-        try:
-            message = {"status": "NEEDKEYS"}
-            self.sendMessage(message)
-            resp = self.recvMessage()
-            self.checkMessage(resp, "SETKEYS")
-            filenameKey = resp['filenameKey']
-            contentKey = resp['contentKey']
-            srpSalt = resp['srpSalt']
-            srpVkey = resp['srpVkey']
-            cryptoScheme = resp['cryptoScheme']
-            # ret = self.db.setKeys(srpSalt, srpVkey, filenameKey, contentKey)
-            return(srpSalt, srpVkey, filenameKey, contentKey, cryptoScheme)
-        except KeyError as e:
-            raise InitFailedException(e.message)
-    def doSrpAuthentication(self):
-        """
-        Perform the SPR authentication steps  Start with the name and value A passed in from the
-        connection call.
-        """
-        self.logger.debug("Beginning Authentication")
-        try:
-            cryptoScheme = self.db._getConfigValue('CryptoScheme', '1')
-            message = {"message": "AUTH", "status": "AUTH", 'cryptoScheme': cryptoScheme, "client": self.db.clientId}
-            self.sendMessage(message)
-            autha = self.recvMessage()
-            self.checkMessage(autha, "AUTH1")
-            name = base64.b64decode(autha['srpUname'])
-            srpValueA = base64.b64decode(autha['srpValueA'])
-            srpValueS, srpValueB = self.db.authenticate1(name, srpValueA)
-            if srpValueS is None or srpValueB is None:
-                raise TardisDB.AuthenticationFailed
-            self.logger.debug("Sending Challenge values")
-            message = {
-                'message': 'AUTH1',
-                'status': 'OK',
-                'srpValueS': base64.b64encode(srpValueS),
-                'srpValueB': base64.b64encode(srpValueB)
-            }
-            self.sendMessage(message)
-            resp = self.recvMessage()
-            self.logger.debug("Received challenge response")
-            self.checkMessage(resp, "AUTH2")
-            srpValueM = base64.b64decode(resp['srpValueM'])
-            srpValueHAMK = self.db.authenticate2(srpValueM)
-            message = {
-                'message': 'AUTH2',
-                'status': 'OK',
-                'srpValueHAMK': base64.b64encode(srpValueHAMK)
-            }
-            self.logger.debug("Authenticated")
-            return message
-        except TardisDB.AuthenticationFailed as e:
-            message = {
-                'status': 'AUTHFAIL',
-                'message': str(e)
-            }
-            self.sendMessage(message)
-            raise e
     def handle(self):
         started = False
         completed = False
         starttime = datetime.now()
-        client = ""
         if self.server.profiler:
             self.logger.info("Starting Profiler")
             self.server.profiler.enable()
         try:
             sock = self.request
             sock.settimeout(args.timeout)
             if self.server.ssl:
skipping to change at line 1317 (1.1.5) / line 235 (1.2.1)
             # Should be : { "encoding": "MSGP", "compress": "snappy" }
             message = sock.recv(1024)
             self.logger.debug(message)
             message = str(message, 'utf-8').strip()
             fields = json.loads(message)
             resp = {'status': 'OK'}
             sock.sendall(bytes(json.dumps(resp), 'utf-8'))
-            #self.addSession(self.sessionid, fields['host'])
             # Create the messenger object.  From this point on, ALL communications should
             # go through messenger, not director to the socket
-            self.mkMessenger(sock, fields['encoding'], fields['compress'])
-            try:
-                fields = self.recvMessage()
-                messType = fields['message']
-                if not messType == 'BACKUP':
-                    raise InitFailedException("Unknown message type: {}".format(messType))
-                client     = fields['host']            # TODO: Change at client as well.
-                clienttime = fields['time']
-                version    = fields['version']
-                autoname   = fields.get('autoname', True)
-                name       = fields.get('name', None)
-                full       = fields.get('full', False)
-                priority   = fields.get('priority', 0)
-                force      = fields.get('force', False)
-                create     = fields.get('create', False)
-                self.logger.info("Creating backup for %s: %s (Autoname: %s) %s %s", client, name, str(autoname), version, clienttime)
-            except ValueError as e:
-                raise InitFailedException("Cannot parse JSON field: {}".format(message))
-            except KeyError as e:
-                raise InitFailedException(str(e))
-            self.client = client
-            self.server.addSession(self.sessionid, client)
-            serverName = None
-            serverForceFull = False
-            authResp = {}
-            keys = None
-            try:
-                (_, dbfile) = self.genPaths()
-                if create and os.path.exists(dbfile):
-                    raise Exception("Client %s already exists" % client)
-                elif not create and not os.path.exists(dbfile):
-                    raise Exception("Unknown client: %s" % client)
-                # If we're creating, and we need keys, do the thing.
-                if self.server.requirePW and create and self.server.allowNew:
-                    keys = self.doGetKeys()
-                newBackup = self.getDB(client, create)
-                if self.server.requirePW and not self.db.needsAuthentication():
-                    raise InitFailedException("Passwords required on this server.  Please add a password (sonic setpass) and encrypt the DB if necessary")
-                # Store the Cryptography Info
-                if create:
-                    if keys:
-                        self.logger.debug("Setting keys into new client DB")
-                        (srpSalt, srpVkey, filenameKey, contentKey, cryptoScheme) = keys
-                        ret = self.db.setKeys(srpSalt, srpVkey, filenameKey, contentKey)
-                        self.db.setConfigValue('CryptoScheme', cryptoScheme)
-                        keys = None
-                    else:
-                        self.db.setConfigValue('CryptoScheme', TardisCrypto.noCryptoScheme)
-                self.logger.debug("Ready for authentication")
-                if self.db.needsAuthentication():
-                    authResp = self.doSrpAuthentication()
-                disabled = self.db.getConfigValue('Disabled')
-                if disabled is not None and int(disabled) != 0:
-                    raise InitFailedException("Client %s is currently disabled." % client)
-                self.setConfig()
-                self.startSession(name, force)
-                # Create a name
-                if autoname:
-                    (serverName, serverPriority, serverKeepDays, serverForceFull) = self.calcAutoInfo(clienttime)
-                    self.logger.debug("Setting name, priority, keepdays to %s", (serverName, serverPriority, serverKeepDays))
-                    if serverName:
-                        self.serverKeepTime = serverKeepDays * 3600 * 24
-                        self.serverPriority = serverPriority
-                    else:
-                        self.serverKeepTime = None
-                        self.serverPriority = None
-                else:
-                    self.serverKeepTime = None
-                    self.serverPriority = None
-                # Either the server or the client can specify a full backup.
-                self.full = full or serverForceFull
-                if priority is None:
-                    priority = 0
-                # Create the actual backup set
-                self.db.newBackupSet(name, self.sessionid, priority, clienttime, version, self.address, self.full, self.server.serverSessionID)
-            except Exception as e:
-                message = {"status": "FAIL", "error": str(e)}
-                self.sendMessage(message)
-                if self.server.exceptions:
-                    self.logger.exception(e)
-                raise InitFailedException(str(e))
-            response = {
-                "message": "INIT",
-                "status": "OK",
-                "sessionid": self.sessionid,
-                "prevDate": str(self.db.prevBackupDate),
-                "new": newBackup,
-                "name": serverName if serverName else name,
-                "clientid": str(self.db.clientId)
-            }
-            if authResp:
-                response.update(authResp)
-            filenameKey = self.db.getConfigValue('FilenameKey')
-            contentKey = self.db.getConfigValue('ContentKey')
-            if (filenameKey is None) ^ (contentKey is None):
-                self.logger.warning("Name Key and Data Key are both not in the same state.  FilenameKey: %s ContentKey: %s", filenameKey, contentKey)
-            if filenameKey:
-                response['filenameKey'] = filenameKey
-            if contentKey:
-                response['contentKey'] = contentKey
-            self.sendMessage(response)
-            started = True
-            #sock.sendall("OK {} {} {}".format(str(self.sessionid), str(self.db.prevBackupDate), serverName if serverName else name))
-            done = False
-            while not done:
-                flush = False
-                message = self.recvMessage()
-                if message["message"] == "BYE":
-                    done = True
-                else:
-                    (response, flush) = self.processMessage(message)
-                if response:
-                    self.sendMessage(response)
-                if flush:
-                    self.db.commit()
-            self.db.completeBackup()
-            if autoname and serverName is not None:
-                self.logger.debug("Changing backupset name from %s to %s.  Priority is %s", name, serverName, serverPriority)
-                self.db.setBackupSetName(serverName, serverPriority)
-                #self.db.renameBackupSet(newName, newPriority)
-            completed = True
-        except InitFailedException as e:
-            self.logger.error("Connection initialization failed: %s", e)
-            if self.server.exceptions:
-                self.logger.exception(e)
-        except Exception as e:
-            self.logger.error("Caught exception %s: %s", type(e), e)
-            if self.server.exceptions:
-                self.logger.exception(e)
-        finally:
-            sock.close()
-            if started:
-                self.db.setClientEndTime()
-                # Autopurge if it's set.
-                if self.autoPurge and not self.purged and completed:
-                    self.processPurge()
-                self.endSession()
-                self.db.setStats(self.statNewFiles, self.statUpdFiles, self.statBytesReceived)
+            messenger = self.mkMessenger(sock, fields['encoding'], fields['compress'])
+            # Create a backend, and run it.
+            backend = Backend.Backend(messenger, self.server, sessionid=self.sessionid)
+            (started, completed, endtime, orphansRemoved, orphanSize) = backend.runBackup()
             if self.server.profiler:
                 self.logger.info("Stopping Profiler")
                 self.server.profiler.disable()
                 s = io.StringIO()
                 sortby = 'cumulative'
                 ps = pstats.Stats(self.server.profiler, stream=s).sort_stats(sortby)
                 ps.print_stats()
                 print(s.getvalue())
+        except InitFailedException as e:
+            self.logger.error("Connection initialization failed: %s", e)
+            if self.server.exceptions:
+                self.logger.exception(e)
+        except Exception as e:
+            self.logger.error("Caught exception %s: %s", type(e), e)
+            if self.server.exceptions:
+                self.logger.exception(e)
+        finally:
             if started:
-                (count, size, _) = Util.removeOrphans(self.db, self.cache)
-                endtime = datetime.now()
                 self.logger.info("Connection completed successfully: %s Runtime : %s", str(completed), str(endtime - starttime))
-                self.logger.info("New or replaced files:    %d", self.statNewFiles)
-                self.logger.info("Updated files:            %d", self.statUpdFiles)
-                self.logger.info("Total file data received: %s (%d)", Util.fmtSize(self.statBytesReceived), self.statBytesReceived)
-                self.logger.info("Command breakdown:        %s", self.statCommands)
-                self.logger.info("Purged Sets and File:     %d %d", self.statPurgedSets, self.statPurgedFiles)
-                self.logger.info("Removed Orphans           %d (%s)", count, Util.fmtSize(size))
-                self.logger.debug("Removing orphans")
-                self.db.commit()
-                self.db.compact()
-            if self.db:
-                self.db.close(started)
-            self.logger.info("Session from %s {%s} Ending: %s: %s", client, self.sessionid, str(completed), str(datetime.now() - starttime))
+                self.logger.info("New or replaced files:    %d", backend.statNewFiles)
+                self.logger.info("Updated files:            %d", backend.statUpdFiles)
+                self.logger.info("Total file data received: %s (%d)", Util.fmtSize(backend.statBytesReceived), backend.statBytesReceived)
+                self.logger.info("Command breakdown:        %s", backend.statCommands)
+                self.logger.info("Purged Sets and File:     %d %d", backend.statPurgedSets, backend.statPurgedFiles)
+                self.logger.info("Removed Orphans           %d (%s)", orphansRemoved, Util.fmtSize(orphanSize))
+            self.logger.info("Session from %s {%s} Ending: %s: %s", backend.client, self.sessionid, str(completed), str(datetime.now() - starttime))
 class TardisServer(object):
     # HACK.  Operate on an object, but not in the class.
     # Want to do this in multiple classes.
     def __init__(self):
         self.basedir = args.database
         if args.dbdir:
             self.dbdir = args.dbdir
         else:
             self.dbdir = self.basedir
skipping to change at line 1539 (1.1.5) / line 296 (1.2.1)
         self.deltaPercent   = float(config.getint(configSection, 'MaxChangePercent')) / 100.0        # Convert to a ratio
         self.cksContent     = config.getint(configSection, 'CksContent')
         self.dbname         = args.dbname
         self.allowNew       = args.newhosts
         self.schemaFile     = args.schema
         self.journal        = args.journal
         self.linkBasis      = config.getboolean(configSection, 'LinkBasis')
-        self.timeout        = args.timeout
         self.requirePW      = config.getboolean(configSection, 'RequirePassword')
         self.allowOverrides = config.getboolean(configSection, 'AllowClientOverrides')
         self.allowUpgrades  = config.getboolean(configSection, 'AllowSchemaUpgrades')
         self.formats        = list(map(str.strip, config.get(configSection, 'Formats').split(',')))
         self.priorities     = list(map(int, config.get(configSection, 'Priorities').split(',')))
         self.keep           = list(map(int, config.get(configSection, 'KeepDays').split(',')))
         self.forceFull      = list(map(int, config.get(configSection, 'ForceFull').split(',')))
+        self.timeout        = args.timeout
         numFormats = len(self.formats)
         if len(self.priorities) != numFormats or len(self.keep) != numFormats or len(self.forceFull) != numFormats:
             logger.warning("Different sizes for the lists of formats: Formats: %d Priorities: %d KeepDays: %d ForceFull: %d",
                            len(self.formats), len(self.priorities), len(self.keep), len(self.forceFull))
         self.dbbackups      = config.getint(configSection, 'DBBackups')
         self.exceptions     = args.exceptions
         self.umask          = Util.parseInt(config.get(configSection, 'Umask'))
skipping to change at line 1595 (1.1.5) / line 352 (1.2.1)
         self.keyfile = args.keyfile
         # Create a session ID
         self.serverSessionID = str(uuid.uuid1())
         if args.profile:
             self.profiler = cProfile.Profile()
         else:
             self.profiler = None
-    def addSession(self, sessionId, client):
-        self.sessions[sessionId] = client
-    def rmSession(self, sessionId):
-        del self.sessions[sessionId]
-    def checkSession(self, sessionId):
-        return sessionId in self.sessions
 #class TardisSocketServer(SocketServer.TCPServer):
 class TardisSocketServer(socketserver.ThreadingMixIn, socketserver.TCPServer, TardisServer):
     def __init__(self):
         socketserver.TCPServer.__init__(self, ("", args.port), TardisServerHandler)
         TardisServer.__init__(self)
         logger.info("TCP Server %s Running", Tardis.__versionstring__)
 class TardisSingleThreadedSocketServer(socketserver.TCPServer, TardisServer):
     def __init__(self):
skipping to change at line 1640 (1.1.5) / line 388 (1.2.1)
     logging.raiseExceptions = False
     if args.logcfg:
         logging.config.fileConfig(args.logcfg)
         logger = logging.getLogger('')
     else:
         logger = logging.getLogger('')
         if args.logfile or args.daemon:
             logFormat = logging.Formatter("%(asctime)s %(levelname)s : %(message)s")
         else:
-            logFormat = colorlog.TTYColoredFormatter("%(asctime)s %(log_color)s%(levelname)s%(reset)s : %(message)s", stream=sys.stdout)
+            # Create some default colors
+            colors = {
+                'DEBUG':    'cyan',
+                'INFO':     'green',
+                'WARNING':  'yellow',
+                'ERROR':    'red',
+                'CRITICAL': 'red,bg_white',
+            }
+            logFormat = colorlog.TTYColoredFormatter("%(asctime)s %(log_color)s%(levelname)s%(reset)s : %(message)s", log_colors=colors, stream=sys.stdout)
         verbosity = args.verbose
     if args.local:
         # Always send output to stderr for local connections
         handler = logging.StreamHandler()
     elif args.logfile:
         handler = logging.handlers.WatchedFileHandler(args.logfile)
     elif args.daemon:
         handler = logging.handlers.SysLogHandler()
skipping to change at line 1662 (1.1.5) / line 418 (1.2.1)
         handler = logging.StreamHandler()
     handler.setFormatter(logFormat)
     logger.addHandler(handler)
     loglevel = levels[verbosity] if verbosity < len(levels) else levels[-1]
     logger.setLevel(loglevel)
     return logger
-def run_server():
+def runServer():
     global server
     try:
         if args.reuseaddr:
             # Allow reuse of the address before timeout if requested.
             socketserver.TCPServer.allow_reuse_address = True
         if args.local:
             logger.info("Starting Server. Socket: %s", args.local)
             server = TardisDomainSocketServer()
skipping to change at line 1695 (1.1.5) / line 451 (1.2.1)
         try:
             server.serve_forever()
         except:
             logger.info("Socket server completed")
         logger.info("Ending")
     except Exception as e:
         logger.critical("Unable to run server: {}".format(e))
         if args.exceptions:
             logger.exception(e)
-def stop_server():
+def stopServer():
     logger.info("Stopping server")
     server.shutdown()
-def signal_term_handler(signal, frame):
+def signalTermHandler(signal, frame):
     logger.info("Caught term signal.  Stopping")
     t = threading.Thread(target = shutdownHandler)
     t.start()
     logger.info("Server stopped")
 def shutdownHandler():
-    stop_server()
+    stopServer()
 def processArgs():
     parser = argparse.ArgumentParser(description='Tardis Backup Server', formatter_class=Util.HelpFormatter, add_help=False)
     parser.add_argument('--config', dest='config', default=configName, help="Location of the configuration file (Default: %(default)s)")
     (args, remaining) = parser.parse_known_args()
     t = configSection
     config = configparser.RawConfigParser(configDefaults, default_section='Tardis')
     config.add_section(t)                   # Make it safe for reading other values from.
skipping to change at line 1768 (1.1.5) / line 524 (1.2.1)
     Util.addGenCompletions(parser)
     args = parser.parse_args(remaining)
     return(args, config)
 def main():
     global logger, args, config
     (args, config) = processArgs()
     # Set up a handler
-    signal.signal(signal.SIGTERM, signal_term_handler)
+    signal.signal(signal.SIGTERM, signalTermHandler)
     try:
         logger = setupLogging()
     except Exception as e:
         print("Unable to initialize logging: {}".format(str(e)), file=sys.stderr)
         if args.exceptions:
             traceback.print_exc()
         sys.exit(1)
     if args.daemon and not args.local:
         user = args.user
         group = args.group
         pidfile = args.pidfile
         fds = [h.stream.fileno() for h in logger.handlers if isinstance(h, logging.StreamHandler)]
         logger.info("About to daemonize")
         try:
-            daemon = daemonize.Daemonize(app="tardisd", pid=pidfile, action=run_server, user=user, group=group, keep_fds=fds)
+            daemon = daemonize.Daemonize(app="tardisd", pid=pidfile, action=runServer, user=user, group=group, keep_fds=fds)
             daemon.start()
         except Exception as e:
             logger.critical("Caught Exception on Daemonize call: {}".format(e))
             if args.exceptions:
                 logger.exception(e)
     else:
         try:
-            run_server()
+            runServer()
         except KeyboardInterrupt:
             logger.warning("Killed by Keyboard")
             pass
         except Exception as e:
             logger.critical("Unable to run server: {}".format(e))
             if args.exceptions:
                 logger.exception(e)
 if __name__ == "__main__":
     try:
End of changes. 39 change blocks. 1454 lines changed or deleted, 59 lines changed or added.