"Fossies" - the Fresh Open Source Software Archive

Member "Tardis-1.2.1/src/Tardis/TardisDB.py" (9 Jun 2021, 57651 Bytes) of package /linux/privat/Tardis-1.2.1.tar.gz:


As a special service, "Fossies" has tried to format the requested source page into HTML format using (guessed) Python source code syntax highlighting (style: standard) with prefixed line numbers. Alternatively, you can view or download the uninterpreted source code file here. For more information about "TardisDB.py" see the Fossies "Dox" file reference documentation and the latest Fossies "Diffs" side-by-side code changes report: 1.1.5_vs_1.2.1.

    1 # vim: set et sw=4 sts=4 fileencoding=utf-8:
    2 #
    3 # Tardis: A Backup System
    4 # Copyright 2013-2020, Eric Koldinger, All Rights Reserved.
    5 # kolding@washington.edu
    6 #
    7 # Redistribution and use in source and binary forms, with or without
    8 # modification, are permitted provided that the following conditions are met:
    9 #
   10 #     * Redistributions of source code must retain the above copyright
   11 #       notice, this list of conditions and the following disclaimer.
   12 #     * Redistributions in binary form must reproduce the above copyright
   13 #       notice, this list of conditions and the following disclaimer in the
   14 #       documentation and/or other materials provided with the distribution.
   15 #     * Neither the name of the copyright holder nor the
   16 #       names of its contributors may be used to endorse or promote products
   17 #       derived from this software without specific prior written permission.
   18 #
   19 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   20 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   21 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   22 # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
   23 # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   24 # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   25 # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   26 # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   27 # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   28 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   29 # POSSIBILITY OF SUCH DAMAGE.
   30 
   31 import sqlite3
   32 import logging
   33 import os
   34 import os.path
   35 import time
   36 import sys
   37 import uuid
   38 import srp
   39 import functools
   40 import importlib
   41 import gzip
   42 
   43 from binascii import hexlify, unhexlify
   44 
   45 import Tardis
   46 import Tardis.ConnIdLogAdapter as ConnIdLogAdapter
   47 import Tardis.Rotator as Rotator
   48 
   49 # Exception classes
   50 class AuthenticationException(Exception):
   51     pass
   52 
   53 class AuthenticationFailed(AuthenticationException):
   54     pass
   55 
   56 class NotAuthenticated(AuthenticationException):
   57     pass
   58 
   59 # Utility functions
   60 def authenticate(func):
   61     @functools.wraps(func)
   62     def doit(self, *args, **kwargs):
   63         if self._isAuthenticated():
   64             return func(self, *args, **kwargs)
   65         else:
   66             raise NotAuthenticated("Not authenticated to database.")
   67     return doit
   68 
# Be sure to end all these lists with a space.

# Column list for file-info queries.  C1 is the file's content checksum,
# C2 the extended-attributes blob, C3 the ACL blob (see _fileInfoJoin below).
_fileInfoFields =  "Name AS name, Inode AS inode, Device AS device, Dir AS dir, Link AS link, " \
                   "Parent AS parent, ParentDev AS parentdev, C1.Size AS size, " \
                   "MTime AS mtime, CTime AS ctime, ATime AS atime, Mode AS mode, UID AS uid, GID AS gid, NLinks AS nlinks, " \
                   "FirstSet AS firstset, LastSet AS lastset, C1.Checksum AS checksum, C1.ChainLength AS chainlength, " \
                   "C2.Checksum AS xattrs, C3.Checksum AS acl "

# FROM/JOIN clause matching the aliases used in _fileInfoFields.
_fileInfoJoin =    "FROM Files " \
                   "JOIN Names ON Files.NameId = Names.NameId " \
                   "LEFT OUTER JOIN Checksums AS C1 ON Files.ChecksumId = C1.ChecksumId " \
                   "LEFT OUTER JOIN Checksums AS C2 ON Files.XattrId = C2.ChecksumId " \
                   "LEFT OUTER JOIN Checksums AS C3 ON Files.AclId = C3.ChecksumId "

# Column list for backup-set queries; "Checksum AS commandline" is resolved
# through the join in _backupSetInfoJoin.
_backupSetInfoFields = "BackupSet AS backupset, StartTime AS starttime, EndTime AS endtime, ClientTime AS clienttime, " \
                       "Priority AS priority, Completed AS completed, Session AS session, Name AS name, " \
                       "ClientVersion AS clientversion, ClientIP AS clientip, ServerVersion AS serverversion, Full AS full, " \
                       "FilesFull AS filesfull, FilesDelta AS filesdelta, BytesReceived AS bytesreceived, Checksum AS commandline, "\
                       "Exception AS exception, ErrorMsg AS errormsg "

_backupSetInfoJoin = "FROM Backups LEFT OUTER JOIN Checksums ON Checksums.ChecksumID = Backups.CmdLineId "

# Column list for checksum-record queries.
_checksumInfoFields = "Checksum AS checksum, ChecksumID AS checksumid, Basis AS basis, Encrypted AS encrypted, " \
                      "Size AS size, DeltaSize AS deltasize, DiskSize AS disksize, IsFile AS isfile, Compressed AS compressed, ChainLength AS chainlength "

# Schema version this code expects; databases at other versions must be
# converted/upgraded (see upgradeSchema) before use.
_schemaVersion = 18
   95 
   96 def _addFields(x, y):
   97     """ Add fields to the end of a dict """
   98     return dict(list(y.items()) + x)
   99 
  100 def _splitpath(path):
  101     """ Split a path into chunks, recursively """
  102     (head, tail) = os.path.split(path)
  103     return _splitpath(head) + [ tail ] if head and head != path else [ head or tail ]
  104 
  105 def _fetchEm(cursor):
  106     while True:
  107         batch = cursor.fetchmany(10000)
  108         if not batch:
  109             break
  110         for row in batch:
  111             yield row
  112 
# Cache of already-imported schema conversion modules, keyed by module name.
conversionModules = {}
  114 
  115 # Class TardisDB
  116 
class TardisDB(object):
    """ Main source for all interaction with the Tardis DB """
    # Class-level defaults; most are replaced per-instance in __init__.
    conn    = None          # sqlite3 connection object
    cursor  = None          # shared cursor on conn
    dbName  = None          # filesystem path of the database
    db              = None
    currBackupSet   = None  # rowid of the backup set currently being written
    prevBackupSet   = None  # rowid of the previous (reference) backup set
    dirinodes       = {}
    backup          = False # True when opened for writing a new backup
    clientId        = None  # client UUID read from the config table
    chunksize       = 1000  # rows per fetchmany batch in generator queries
    journal         = None  # open journal file handle, when journaling enabled
    srpSrv          = None  # srp.Verifier used during SRP authentication
    authenticated   = False # set True once authentication succeeds (or is unneeded)
  132 
    def __init__(self, dbname, backup=False, prevSet=None, initialize=None, connid=None, user=-1, group=-1, chunksize=1000, numbackups=2, journal=None, allow_upgrade=False, check_threads=True):
        """ Initialize the connection to a per-machine Tardis Database.

        :param dbname:        path of the sqlite database file
        :param backup:        True when opening to write a new backup
        :param prevSet:       name of the backup set to treat as "previous"
                              (default: the most recent completed set)
        :param initialize:    path of an SQL schema script; when given, the
                              database is created fresh from that script
        :param connid:        optional connection id prefixed to log messages
        :param user:          uid to chown the DB file to (-1/None: unchanged)
        :param group:         gid to chown the DB file to (-1/None: unchanged)
        :param chunksize:     rows per fetchmany batch in generator queries
        :param numbackups:    stored for later use (presumably DB file
                              rotation via Rotator -- confirm against callers)
        :param journal:       journal file path; '.gz' suffix -> gzip append
        :param allow_upgrade: run schema conversion scripts on version mismatch
        :param check_threads: passed to sqlite3.connect as check_same_thread
        """
        self.logger  = logging.getLogger("DB")
        self.logger.debug("Initializing connection to %s", dbname)
        self.dbName = dbname
        self.chunksize = chunksize
        self.prevSet = prevSet
        self.journalName = journal
        self.allow_upgrade = allow_upgrade

        # Normalize ownership args: None means "leave unchanged", same as -1.
        if user  is None: user = -1
        if group is None: group = -1

        self.user = user
        self.group = group

        if connid:
            self.logger = ConnIdLogAdapter.ConnIdLogAdapter(self.logger, connid)

        self.backup = backup
        self.numbackups = numbackups

        conn = sqlite3.connect(self.dbName, check_same_thread=check_threads)
        # Decode TEXT columns as UTF-8, keeping undecodable bytes visible
        # via backslash escapes instead of raising.
        conn.text_factory = lambda x: x.decode('utf-8', 'backslashreplace')
        # Rows act as mappings (row['column']) as well as tuples.
        conn.row_factory= sqlite3.Row

        self.conn = conn
        self.cursor = self.conn.cursor()

        if initialize:
            self.logger.info("Creating database from schema: %s", initialize)
            try:
                with open(initialize, "r") as f:
                    script = f.read()
                    self.conn.executescript(script)
            except IOError as e:
                self.logger.error("Could not read initialization script %s", initialize)
                #self.logger.exception(e)
                raise
            except sqlite3.Error as e:
                self.logger.error("Could not execute initialization script %s", initialize)
                #self.logger.exception(e)
                raise
            # Every fresh database gets a unique client identifier.
            self._setConfigValue('ClientID', str(uuid.uuid1()))
            newDB = True
        else:
            newDB = False

        # A brand-new database, or one with no SRP salt stored, needs no
        # authentication; otherwise init is deferred until authenticate1/2
        # complete and call _completeInit.
        self.logger.debug("Authentication status: %s %s", not newDB, self.needsAuthentication())
        if newDB or not self.needsAuthentication():
            self.logger.debug("Setting authenticated true")
            self.authenticated = True
            self._completeInit()
        else:
            self.logger.debug("Setting authenticated false")
            self.authenticated = False
  190             
  191     
  192     def needsAuthentication(self):
  193         """ Return true if a database needs to be authenticated """
  194         salt, vkey = self.getSrpValues()
  195         if salt:
  196             return True
  197         else:
  198             return False
  199 
  200     def authenticate1(self, uname, srpValueA):
  201         salt, vkey = self.getSrpValues()
  202         if salt is None or vkey is None:
  203             raise AuthenticationFailed("Password doesn't match")
  204         #self.logger.debug("Beginning authentication: %s %s %s %s", hexlify(uname), hexlify(salt), hexlify(vkey), hexlify(srpValueA))
  205         self.srpSrv = srp.Verifier(uname, salt, vkey, srpValueA)
  206         s, B = self.srpSrv.get_challenge()
  207         if s is None or B is None:
  208             raise AuthenticationFailed("Password doesn't match")
  209         #self.logger.debug("Authentication Challenge: %s %s", hexlify(s), hexlify(B))
  210         return s, B
  211 
  212     def authenticate2(self, srpValueM):
  213         self.logger.debug("Authentication 2: Verify Session %s", hexlify(srpValueM))
  214         HAMK = self.srpSrv.verify_session(srpValueM)
  215         if HAMK is None:
  216             raise AuthenticationFailed("Password doesn't match")
  217         self.logger.debug("Authentication HAMK: %s", hexlify(HAMK))
  218         if not self.srpSrv.authenticated():
  219             raise AuthenticationFailed("Password doesn't match")
  220         self.authenticated = True
  221         self._completeInit()
  222         return HAMK
  223 
    def _completeInit(self):
        """ Finish initialization that must wait until after authentication:
            schema version check/upgrade, previous-backup-set discovery,
            pragmas, journal setup, and file ownership. """
        self.logger.debug("Completing DB Init")

        version = self._getConfigValue('SchemaVersion')
        if int(version) != _schemaVersion:
            if self.allow_upgrade:
                self.logger.warning("Schema version mismatch: Upgrading.  Database %s is %d:  Expected %d.", self.dbName, int(version), _schemaVersion)
                self.upgradeSchema(int(version))
            else:
                self.logger.error("Schema version mismatch: Database %s is %d:  Expected %d.   Please convert", self.dbName, int(version), _schemaVersion)
                raise Exception("Schema version mismatch: Database {} is {}:  Expected {}.   Please convert".format(self.dbName, version, _schemaVersion))

        # Determine the "previous" backup set: the explicitly named one if
        # given (silently left unset if that name doesn't exist), otherwise
        # the most recent completed set.
        if self.prevSet:
            f = self.getBackupSetInfo(self.prevSet)
            if f:
                self.prevBackupSet  = f['backupset']
                self.prevBackupDate = f['starttime']
                self.lastClientTime = f['clienttime']
                self.prevBackupName = self.prevSet
            #self.cursor.execute = ("SELECT Name, BackupSet FROM Backups WHERE Name = :backup", {"backup": prevSet})
        else:
            b = self.lastBackupSet()
            self.prevBackupName = b['name']
            self.prevBackupSet  = b['backupset']
            self.prevBackupDate = b['starttime']
            self.lastClientTime = b['clienttime']
            #self.cursor.execute("SELECT Name, BackupSet FROM Backups WHERE Completed = 1 ORDER BY BackupSet DESC LIMIT 1")

        self.clientId = self.getConfigValue('ClientID')

        self.logger.debug("Last Backup Set: %s %d ", self.prevBackupName, self.prevBackupSet)

        self.conn.commit()

        self.conn.execute("PRAGMA synchronous=false")
        # NOTE(review): SQLite spells this pragma "foreign_keys";
        # "foreignkeys" is silently ignored, so foreign-key enforcement is
        # likely NOT actually enabled here -- confirm intent before changing.
        self.conn.execute("PRAGMA foreignkeys=true")

        if self.journalName:
            # A '.gz' journal is appended in compressed text mode.
            if self.journalName.endswith('.gz'):
                self.journal = gzip.open(self.journalName, 'at')
            else:
                self.journal = open(self.journalName, 'a')

        # Make sure the permissions are set the way we want, if that's specified.
        if self.user != -1 or self.group != -1:
            os.chown(self.dbName, self.user, self.group)
  270 
  271     def _bset(self, current):
  272         """ Determine the backupset we're being asked about.
  273             True == current, False = previous, otherwise a number is returned
  274         """
  275         if type(current) is bool:
  276             return self.currBackupSet if current else self.prevBackupSet
  277         else:
  278             return current
  279 
  280     def _getConverter(self, name):
  281         try:
  282             converter = conversionModules[name]
  283         except KeyError:
  284             converter = importlib.import_module('Tardis.Converters.' + name)
  285             conversionModules[name] = converter
  286         return converter
  287 
  288     def upgradeSchema(self, baseVersion):
  289         for i in range(baseVersion, _schemaVersion):
  290             name = 'convert%dto%d' % (i, i + 1)
  291             #from schema import name name
  292             converter = self._getConverter(name)
  293             self.logger.debug("Running conversion script from version %d, %s", i, name)
  294             converter.upgrade(self.conn, self.logger)
  295             self.logger.warning("Upgraded schema to version %d", i + 1)
  296 
  297     @authenticate
  298     def lastBackupSet(self, completed=True):
  299         """ Select the last backup set. """
  300         if completed:
  301             c = self.cursor.execute("SELECT " +
  302                                     _backupSetInfoFields +
  303                                     _backupSetInfoJoin +
  304                                     "WHERE Completed = 1 ORDER BY BackupSet DESC LIMIT 1")
  305         else:
  306             c = self.cursor.execute("SELECT " +
  307                                     _backupSetInfoFields +
  308                                     _backupSetInfoJoin +
  309                                     "ORDER BY BackupSet DESC LIMIT 1")
  310         row = c.fetchone()
  311         return row
  312 
  313     def _execute(self, query, data):
  314         try:
  315             ret = self.conn.execute(query, data)
  316             return ret
  317         except sqlite3.IntegrityError as e:
  318             self.logger.warning("Error processing data: %s %s", data, e)
  319             raise e
  320 
  321     def _executeWithResult(self, query, data):
  322         c = self._execute(query, data)
  323         r = c.fetchone()
  324         return r
  325 
  326     @authenticate
  327     def newBackupSet(self, name, session, priority, clienttime, version=None, ip=None, full=False, serverID=None):
  328         """ Create a new backupset.  Set the current backup set to be that set. """
  329         c = self.cursor
  330         now = time.time()
  331         try:
  332             c.execute("INSERT INTO Backups (Name, Completed, StartTime, Session, Priority, Full, ClientTime, ClientVersion, ServerVersion, SchemaVersion, ClientIP, ServerSession) "
  333                       "             VALUES (:name, 0, :now, :session, :priority, :full, :clienttime, :clientversion, :serverversion, :schemaversion, :clientip, :serversessionid)",
  334                       {"name": name, "now": now, "session": session, "priority": priority, "full": full,
  335                        "clienttime": clienttime, "clientversion": version, "clientip": ip, "schemaversion": _schemaVersion,
  336                        "serversessionid": serverID,
  337                        "serverversion": (Tardis.__buildversion__ or Tardis.__version__)})
  338         except sqlite3.IntegrityError:
  339             raise Exception("Backupset {} already exists".format(name))
  340 
  341         self.currBackupSet = c.lastrowid
  342 
  343         if name == None:
  344             name = "INCOMPLETE-{}".format(self.currBackupSet)
  345             self.setBackupSetName(name, priority)
  346 
  347         self.currBackupName = name
  348         self.conn.commit()
  349         self.logger.info("Created new backup set: %d: %s %s", self.currBackupSet, name, session)
  350         if self.journal:
  351             self.journal.write("===== S: {} {} {} D: {} V:{} {}\n".format(self.currBackupSet, name, session, time.strftime("%Y-%m-%d %H:%M:%S"), version, Tardis.__buildversion__))
  352 
  353         return self.currBackupSet
  354 
  355     @authenticate
  356     def setBackupSetName(self, name, priority, current=True):
  357         """ Change the name of a backupset.  Return True if it can be changed, false otherwise. """
  358         backupset = self._bset(current)
  359         try:
  360             self.conn.execute("UPDATE Backups SET Name = :name, Priority = :priority WHERE BackupSet = :backupset",
  361                               {"name": name, "priority": priority, "backupset": backupset})
  362             return True
  363         except sqlite3.IntegrityError:
  364             return False
  365 
  366     @authenticate
  367     def setClientConfig(self, config, current=True):
  368         """ Store the full client configuration in the database """
  369         backupset = self._bset(current)
  370         r = self._executeWithResult("SELECT ClientConfigID FROM ClientConfig WHERE ClientConfig = :config", {"config": config})
  371         if r is None:
  372             c = self._execute("INSERT INTO ClientConfig (ClientConfig) VALUES (:config)", {"config": config})
  373             clientConfigId = c.lastrowid
  374         else:
  375             clientConfigId = r[0]
  376         self._execute("UPDATE Backups SET ClientConfigID = :configId WHERE BackupSet = :backupset", {"configId": clientConfigId, "backupset": backupset})
  377 
  378     @authenticate
  379     def setCommandLine(self, cksum, current=True):
  380         """ Set a command line variable in the database """
  381         backupset = self._bset(current)
  382         self._execute("UPDATE Backups SET CmdLineID = :cksid WHERE BackupSet = :backupset", {'cksid': cksum, 'backupset': backupset})
  383 
  384     @authenticate
  385     def checkBackupSetName(self, name):
  386         """ Check to see if a backupset by this name exists. Return TRUE if it DOESN'T exist. """
  387         c = self.conn.execute("SELECT COUNT(*) FROM Backups WHERE Name = :name",
  388                               { "name": name })
  389         row = c.fetchone()
  390         return True if row[0] == 0 else False
  391 
  392     @authenticate
  393     def getFileInfoByName(self, name, parent, current=True):
  394         """ Lookup a file in a directory in the previous backup set"""
  395         backupset = self._bset(current)
  396         (inode, device) = parent
  397         self.logger.debug("Looking up file by name {} {} {}".format(name, parent, backupset))
  398         c = self.cursor
  399         c.execute("SELECT " +
  400                   _fileInfoFields +
  401                   #"FROM Files "
  402                   #"JOIN Names ON Files.NameId = Names.NameId "
  403                   #"LEFT OUTER JOIN Checksums ON Files.ChecksumId = Checksums.ChecksumId "
  404                   _fileInfoJoin +
  405                   "WHERE Name = :name AND Parent = :parent AND ParentDev = :parentDev AND "
  406                   ":backup BETWEEN FirstSet AND LastSet",
  407                   {"name": name, "parent": inode, "parentDev": device, "backup": backupset})
  408         return c.fetchone()
  409 
  410     @authenticate
  411     def getFileInfoByPath(self, path, current=False, permchecker=None):
  412         """ Lookup a file by a full path. """
  413         ### TODO: Could be a LOT faster without the repeated calls to getFileInfoByName
  414         backupset = self._bset(current)
  415         self.logger.debug("Looking up file by path {} {}".format(path, backupset))
  416         parent = (0, 0)         # Root directory value
  417         info = None
  418 
  419         #(dirname, name) = os.path.split(path)
  420         # Walk the path
  421         for name in _splitpath(path):
  422             if name == '/':
  423                 continue
  424             info = self.getFileInfoByName(name, parent, backupset)
  425             if info:
  426                 parent = (info["inode"], info["device"])
  427                 if permchecker:
  428                     if not permchecker(info['uid'], info['gid'], info['mode']):
  429                         raise Exception("File permission denied: " + name)
  430             else:
  431                 break
  432         return info
  433 
  434     @authenticate
  435     def getFileInfoByPathForRange(self, path, first, last, permchecker=None):
  436         sets = self._execute('SELECT BackupSet FROM Backups WHERE BackupSet BETWEEN :first AND :last ORDER BY BackupSet ASC', {'first': first, 'last': last})
  437         for row in sets.fetchall():
  438             yield (row[0], self.getFileInfoByPath(path, row[0], permchecker))
  439 
  440     @authenticate
  441     def getFileInfoForPath(self, path, current=False):
  442         """ Return the FileInfo structures for each file along a path """
  443         backupset = self._bset(current)
  444         #self.logger.debug("Looking up file by path {} {}".format(path, backupset))
  445         parent = (0, 0)         # Root directory value
  446         info = None
  447         for name in _splitpath(path):
  448             if name == '/':
  449                 continue
  450             info = self.getFileInfoByName(name, parent, backupset)
  451             if info:
  452                 yield info
  453                 parent = (info["inode"], info["device"])
  454             else:
  455                 break
  456 
  457     @authenticate
  458     def getFileInfoByInode(self, info, current=False):
  459         backupset = self._bset(current)
  460         (inode, device) = info
  461         self.logger.debug("Looking up file by inode (%d %d) %d", inode, device, backupset)
  462         c = self.cursor
  463         c.execute("SELECT " +
  464                   _fileInfoFields + _fileInfoJoin +
  465                   "WHERE Inode = :inode AND Device = :device AND "
  466                   ":backup BETWEEN FirstSet AND LastSet",
  467                   {"inode": inode, "device": device, "backup": backupset})
  468         return c.fetchone()
  469 
  470     @authenticate
  471     def getFileInfoBySimilar(self, fileInfo, current=False):
  472         """ Find a file which is similar, namely the same size, inode, and mtime.  Identifies files which have moved. """
  473         backupset = self._bset(current)
  474         self.logger.debug("Looking up file for similar info: %s", fileInfo)
  475         temp = fileInfo.copy()
  476         temp["backup"] = backupset
  477         c = self.cursor.execute("SELECT " +
  478                                 _fileInfoFields + _fileInfoJoin +
  479                                 "WHERE Inode = :inode AND Device = :dev AND Mtime = :mtime AND C1.Size = :size AND "
  480                                 ":backup BETWEEN Files.FirstSet AND Files.LastSet",
  481                                 temp)
  482         return c.fetchone()
  483 
  484     @authenticate
  485     def getFileInfoByChecksum(self, checksum, current=False):
  486         """ Find a file which is similar, namely the same size, inode, and mtime.  Identifies files which have moved. """
  487         backupset = self._bset(current)
  488         self.logger.debug("Looking up file for similar info: %s", checksum)
  489         c = self.cursor.execute("SELECT " + 
  490                                  _fileInfoFields + _fileInfoJoin +
  491                                  "WHERE C1.Checksum = :cksum AND :backup BETWEEN Files.FirstSet AND Files.LastSet",
  492                                  {'cksum': checksum, 'backup': backupset})
  493         while True:
  494             batch = c.fetchmany(self.chunksize)
  495             if not batch:
  496                 break
  497             for row in batch:
  498                 yield row
  499 
  500     @authenticate
  501     def getFileFromPartialBackup(self, fileInfo):
  502         """ Find a file which is similar, namely the same size, inode, and mtime.  Identifies files which have moved. """
  503         #self.logger.debug("Looking up file for similar info: %s", fileInfo)
  504         temp = fileInfo.copy()
  505         temp["backup"] = self.prevBackupSet         ### Only look for things newer than the last backup set
  506         #self.logger.info("getFileFromPartialBackup: %s", str(fileInfo))
  507         c = self.cursor.execute("SELECT " +
  508                                 _fileInfoFields + _fileInfoJoin +
  509                                 "WHERE Inode = :inode AND Device = :dev AND Mtime = :mtime AND C1.Size = :size AND "
  510                                 "Files.LastSet >= :backup "
  511                                 "ORDER BY Files.LastSet DESC LIMIT 1",
  512                                 temp)
  513         return c.fetchone()
  514 
  515     @authenticate
  516     def getFileInfoByInodeFromPartial(self, inode):
  517         (ino, dev) = inode
  518         c = self.cursor.execute("SELECT " +
  519                                 _fileInfoFields + _fileInfoJoin +
  520                                 "WHERE Inode = :inode AND Device = :device AND "
  521                                 "Files.LastSet >= :backup "
  522                                 "ORDER BY Files.LastSet DESC LIMIT 1",
  523                                 {"inode": ino, "device": dev, "backup": self.prevBackupSet })
  524 
  525         return c.fetchone()
  526 
  527     @authenticate
  528     def setChecksum(self, inode, device, checksum):
  529         self.cursor.execute("UPDATE Files SET ChecksumId = (SELECT ChecksumId FROM CheckSums WHERE CheckSum = :checksum) "
  530                             "WHERE Inode = :inode AND Device = :device AND "
  531                             ":backup BETWEEN FirstSet AND LastSet",
  532                             {"inode": inode, "device": device, "checksum": checksum, "backup": self.currBackupSet})
  533         return self.cursor.rowcount
  534 
  535     @authenticate
  536     def setXattrs(self, inode, device, checksum):
  537         self.cursor.execute("UPDATE Files SET XattrId = (SELECT ChecksumId FROM CheckSums WHERE CheckSum = :checksum) "
  538                             "WHERE Inode = :inode AND Device = :device AND "
  539                             ":backup BETWEEN FirstSet AND LastSet",
  540                             {"inode": inode, "device": device, "checksum": checksum, "backup": self.currBackupSet})
  541         #self.logger.info("Setting XAttr ID for %d to %s, %d rows changed", inode, checksum, self.cursor.rowcount)
  542         return self.cursor.rowcount
  543 
  544     @authenticate
  545     def setAcl(self, inode, device, checksum):
  546         self.cursor.execute("UPDATE Files SET AclId = (SELECT ChecksumId FROM CheckSums WHERE CheckSum = :checksum) "
  547                             "WHERE Inode = :inode AND Device = :device AND "
  548                             ":backup BETWEEN FirstSet AND LastSet",
  549                             {"inode": inode, "device": device, "checksum": checksum, "backup": self.currBackupSet})
  550         #self.logger.info("Setting ACL ID for %d to %s, %d rows changed", inode, checksum, self.cursor.rowcount)
  551         return self.cursor.rowcount
  552 
  553 
  554     @authenticate
  555     def getChecksumByInode(self, inode, device, current=True):
  556         backupset = self._bset(current)
  557         c = self.cursor.execute("SELECT "
  558                                 "CheckSums.Checksum AS checksum "
  559                                 "FROM Files JOIN CheckSums ON Files.ChecksumId = Checksums.ChecksumId "
  560                                 "WHERE Files.INode = :inode AND Device = :device AND "
  561                                 ":backup BETWEEN Files.FirstSet AND Files.LastSet",
  562                                 { "backup" : backupset, "inode" : inode, "device": device })
  563         row = c.fetchone()
  564         return row[0] if row else None
  565         #if row: return row[0] else: return None
  566 
  567     @authenticate
  568     def getChecksumByName(self, name, parent, current=False):
  569         backupset = self._bset(current)
  570         (inode, device) = parent
  571         self.logger.debug("Looking up checksum for file %s (%d %d) in %d", name, inode, device, backupset)
  572         c = self._execute("SELECT CheckSums.CheckSum AS checksum "
  573                           "FROM Files "
  574                           "JOIN Names ON Files.NameID = Names.NameId "
  575                           "JOIN CheckSums ON Files.ChecksumId = CheckSums.ChecksumId "
  576                           "WHERE Names.Name = :name AND Files.Parent = :parent AND ParentDev = :parentDev AND "
  577                           ":backup BETWEEN Files.FirstSet AND Files.LastSet",
  578                           { "name": name, "parent": inode, "parentDev": device, "backup": backupset })
  579         row = c.fetchone()
  580         return row[0] if row else None
  581         #if row: return row[0] else: return None
  582 
  583     @authenticate
  584     def getChecksumByPath(self, name, current=False, permchecker=None):
  585         backupset = self._bset(current)
  586         self.logger.debug("Looking up checksum for path %s %d", name, backupset)
  587         f = self.getFileInfoByPath(name, current, permchecker=permchecker)
  588         if f:
  589             return self.getChecksumByName(f["name"], (f["parent"], f["parentdev"]), current)
  590         else:
  591             return None
  592 
  593     @authenticate
  594     def getChecksumInfoByPath(self, name, current=False, permchecker=None):
  595         backupset = self._bset(current)
  596         cksum = self.getChecksumByPath(name, backupset, permchecker)
  597         if cksum:
  598             return self.getChecksumInfo(cksum)
  599         else:
  600             return None
  601 
  602     @authenticate
  603     def getChecksumInfoChainByPath(self, name, current=False, permchecker=None):
  604         backupset = self._bset(current)
  605         self.logger.debug("Getting Checksum Info for %s", name)
  606         cksum = self.getChecksumByPath(name, backupset, permchecker)
  607         self.logger.debug("Got checksum %s", name)
  608         if cksum:
  609             return self.getChecksumInfoChain(cksum)
  610         else:
  611             return None
  612 
  613     @authenticate
  614     def getFirstBackupSet(self, name, current=False):
  615         backupset = self._bset(current)
  616         self.logger.debug("getFirstBackupSet (%d) %s", backupset, name)
  617         f = self.getFileInfoByPath(name, backupset)
  618         if f:
  619             c = self.conn.execute("SELECT Name FROM Backups WHERE BackupSet >= :first ORDER BY BackupSet ASC LIMIT 1",
  620                                   {"first": f["firstset"]})
  621             row = c.fetchone()
  622             if row:
  623                 return row[0]
  624         # General purpose failure
  625         return None
  626 
  627     @authenticate
  628     def insertFile(self, fileInfo, parent):
  629         self.logger.debug("Inserting file: %s", fileInfo)
  630         (parIno, parDev) = parent
  631         fields = list({"backup": self.currBackupSet, "parent": parIno, "parentDev": parDev}.items())
  632         temp = _addFields(fields, fileInfo)
  633         self.setNameID([temp])
  634         self._execute("INSERT INTO Files "
  635                       "(NameId, FirstSet, LastSet, Inode, Device, Parent, ParentDev, Dir, Link, MTime, CTime, ATime,  Mode, UID, GID, NLinks) "
  636                       "VALUES  "
  637                       "(:nameid, :backup, :backup, :inode, :dev, :parent, :parentDev, :dir, :link, :mtime, :ctime, :atime, :mode, :uid, :gid, :nlinks)",
  638                       temp)
  639 
  640     @authenticate
  641     def updateDirChecksum(self, directory, cksid, current=True):
  642         bset = self._bset(current)
  643         (inode, device) = directory
  644         self._execute("UPDATE FILES "
  645                       "SET ChecksumID = :cksid "
  646                       "WHERE Inode = :inode AND DEVICE = :device AND :bset BETWEEN FirstSet AND LastSet",
  647                       {"inode": inode, "device": device, "cksid": cksid, "bset": bset})
  648 
  649     @authenticate
  650     def extendFile(self, parent, name, old=False, current=True):
  651         old = self._bset(old)
  652         current = self._bset(current)
  653         (parIno, parDev) = parent
  654         cursor = self._execute("UPDATE FILES "
  655                                "SET LastSet = :new "
  656                                "WHERE Parent = :parent AND ParentDev = :parentDev AND NameID = (SELECT NameID FROM Names WHERE Name = :name) AND "
  657                                ":old BETWEEN FirstSet AND LastSet",
  658                                { "parent": parIno, "parentDev": parDev , "name": name, "old": old, "new": current })
  659         return cursor.rowcount
  660 
  661     @authenticate
  662     def extendFileInode(self, parent, inode, old=False, current=True):
  663         old = self._bset(old)
  664         current = self._bset(current)
  665         (parIno, parDev) = parent
  666         (ino, dev) = inode
  667         #self.logger.debug("ExtendFileInode: %s %s %s %s", parent, inode, current, old)
  668         cursor = self._execute("UPDATE FILES "
  669                                "SET LastSet = :new "
  670                                "WHERE Parent = :parent AND ParentDev = :parentDev AND Inode = :inode AND Device = :device AND "
  671                                ":old BETWEEN FirstSet AND LastSet",
  672                                { "parent": parIno, "parentDev": parDev , "inode": ino, "device": dev, "old": old, "new": current })
  673         return cursor.rowcount
  674 
  675     @authenticate
  676     def cloneDir(self, parent, new=True, old=False):
  677         newBSet = self._bset(new)
  678         oldBSet = self._bset(old)
  679         (parIno, parDev) = parent
  680         self.logger.debug("Cloning directory inode %d, %d from %d to %d", parIno, parDev, oldBSet, newBSet)
  681         cursor = self._execute("UPDATE FILES "
  682                                "SET LastSet = :new "
  683                                "WHERE Parent = :parent AND ParentDev = :parentDev AND "
  684                                ":old BETWEEN FirstSet AND LastSet",
  685                                { "new": newBSet, "old": oldBSet, "parent": parIno, "parentDev": parDev })
  686         return cursor.rowcount
  687 
  688     @authenticate
  689     def setNameID(self, files):
  690         for f in files:
  691             c = self.cursor.execute("SELECT NameId FROM Names WHERE Name = :name", f)
  692             row = c.fetchone()
  693             if row:
  694                 f["nameid"] = row[0]
  695             else:
  696                 self.cursor.execute("INSERT INTO Names (Name) VALUES (:name)", f)
  697                 f["nameid"] = self.cursor.lastrowid
  698 
  699     @authenticate
  700     def insertChecksum(self, checksum, encrypted=False, size=0, basis=None, deltasize=None, compressed='None', disksize=None, current=True, isFile=True):
  701         self.logger.debug("Inserting checksum file: %s -- %d bytes, Compressed %s", checksum, size, str(compressed))
  702         added = self._bset(current)
  703         def _xstr(x):
  704             return x if x is not None else ''
  705 
  706         if self.journal:
  707             self.journal.write("{}:{}:{}:{}\n".format(checksum, _xstr(basis), int(encrypted), compressed))
  708 
  709         if basis is None:
  710             chainlength = 0
  711         else:
  712             chainlength = self.getChainLength(basis) + 1
  713 
  714         self.cursor.execute("INSERT INTO CheckSums (CheckSum,  Size,  Basis,  Encrypted,  DeltaSize,  Compressed,  DiskSize,  ChainLength,  Added,  IsFile) "
  715                             "VALUES                (:checksum, :size, :basis, :encrypted, :deltasize, :compressed, :disksize, :chainlength, :added, :isfile)",
  716                             {"checksum": checksum, "size": size, "basis": basis, "encrypted": encrypted, "deltasize": deltasize,
  717                              "compressed": str(compressed), "disksize": disksize, "chainlength": chainlength, "added": added, "isfile": int(isFile)})
  718         return self.cursor.lastrowid
  719 
  720     @authenticate
  721     def updateChecksumFile(self, checksum, encrypted=False, size=0, basis=None, deltasize=None, compressed=False, disksize=None, chainlength=0):
  722         self.logger.debug("Updating checksum file: %s -- %d bytes, Compressed %s", checksum, size, str(compressed))
  723 
  724         self.cursor.execute("UPDATE CheckSums SET "
  725                             "Size = :size, Encrypted = :encrypted, Basis = :basis, DeltaSize = :deltasize, ChainLength = :chainlength, "
  726                             "Compressed = :compressed, DiskSize = :disksize "
  727                             "WHERE Checksum = :checksum",
  728                             {"checksum": checksum, "size": size, "basis": basis, "encrypted": encrypted, "deltasize": deltasize,
  729                              "compressed": str(compressed), "chainlength": chainlength, "disksize": disksize})
  730 
  731     @authenticate
  732     def getChecksumInfo(self, checksum):
  733         self.logger.debug("Getting checksum info on: %s", checksum)
  734         c = self._execute("SELECT " +
  735                           _checksumInfoFields  +
  736                           "FROM Checksums WHERE CheckSum = :checksum",
  737                           {"checksum": checksum})
  738         row = c.fetchone()
  739         if row:
  740             return row
  741         else:
  742             self.logger.debug("No checksum found for %s", checksum)
  743             return None
  744 
  745     @authenticate
  746     def getChecksumInfoChain(self, checksum):
  747         """ Recover a list of all the checksums which need to be used to generate a file """
  748         self.logger.debug("Getting checksum info chain on: %s", checksum)
  749         chain = []
  750         while checksum:
  751             row = self.getChecksumInfo(checksum)
  752             if row:
  753                 chain.append(row)
  754             else:
  755                 return chain
  756             checksum = row['basis']
  757 
  758         return chain
  759 
  760     @authenticate
  761     def getNamesForChecksum(self, checksum):
  762         """ Recover a list of names that represent a checksum """
  763         self.logger.debug("Recovering name(s) for checksum %s", checksum)
  764         c = self._execute('SELECT DISTINCT Name FROM Names JOIN Files ON Names.NameID = Files.NameID JOIN Checksums ON Checksums.ChecksumID = Files.ChecksumID '
  765                           'WHERE Checksums.Checksum = :checksum',
  766                           {'checksum': checksum})
  767         names = []
  768         for row in c.fetchall():
  769             self.logger.debug("Found name %s", row[0])
  770             names.append(row[0])
  771         return names
  772 
  773     @authenticate
  774     def getChainLength(self, checksum):
  775         data = self.getChecksumInfo(checksum)
  776         if data:
  777             return data['chainlength']
  778         else:
  779             return -1
  780         """
  781         Could do this, but not all versions of SQLite3 seem to support "WITH RECURSIVE" statements
  782         c = self._execute("WITH RECURSIVE x(n) AS (VALUES(:checksum) UNION SELECT Basis FROM Checksums, x WHERE x.n=Checksums.Checksum) "
  783                          "SELECT COUNT(*) FROM Checksums WHERE Checksum IN x",
  784                          {"checksum": checksum})
  785         r = c.fetchone()
  786         if r:
  787             return int(r[0])
  788         else:
  789             return -1
  790         """
  791 
  792     @authenticate
  793     def readDirectory(self, dirNode, current=False):
  794         (inode, device) = dirNode
  795         backupset = self._bset(current)
  796         #self.logger.debug("Reading directory values for (%d, %d) %d", inode, device, backupset)
  797 
  798         c = self._execute("SELECT " + _fileInfoFields + ", C1.Basis AS basis, C1.Encrypted AS encrypted " +
  799                           _fileInfoJoin +
  800                           "WHERE Parent = :parent AND ParentDev = :parentDev AND "
  801                           ":backup BETWEEN Files.FirstSet AND Files.LastSet",
  802                           {"parent": inode, "parentDev": device, "backup": backupset})
  803         return _fetchEm(c)
  804         #while True:
  805         #    batch = c.fetchmany(self.chunksize)
  806         #    if not batch:
  807         #        break
  808         #    for row in batch:
  809         #        yield row
  810 
  811     @authenticate
  812     def getNumDeltaFilesInDirectory(self, dirNode, current=False):
  813         (inode, device) = dirNode
  814         backupset = self._bset(current)
  815         row = self._executeWithResult("SELECT COUNT(*) FROM Files " \
  816                                       "JOIN Names ON Files.NameId = Names.NameId " \
  817                                       "LEFT OUTER JOIN Checksums AS C1 ON Files.ChecksumId = C1.ChecksumId " \
  818                                       "WHERE Parent = :parent AND ParentDev = :parentDev AND "
  819                                       ":backup BETWEEN Files.FirstSet AND Files.LastSet AND "
  820                                       "C1.ChainLength != 0",
  821                                       {"parent": inode, "parentDev": device, "backup": backupset})
  822         if row:
  823             return row[0]
  824         else:
  825             return 0
  826 
  827     @authenticate
  828     def getDirectorySize(self, dirNode, current=False):
  829         (inode, device) = dirNode
  830         backupset = self._bset(current)
  831         row = self._executeWithResult("SELECT COUNT(*) FROM Files "
  832                                       "WHERE Parent = :parent AND ParentDev = :parentDev AND "
  833                                       ":backup BETWEEN Files.FirstSet AND Files.LastSet AND "
  834                                       "(Dir = 1 OR ChecksumId IS NOT NULL)",
  835                                       { "parent": inode, "parentDev": device, "backup": backupset })
  836         if row:
  837             return row[0]
  838         else:
  839             return 0
  840 
  841     @authenticate
  842     def readDirectoryForRange(self, dirNode, first, last):
  843         (inode, device) = dirNode
  844         #self.logger.debug("Reading directory values for (%d, %d) in range (%d, %d)", inode, device, first, last)
  845         c = self._execute("SELECT " + _fileInfoFields + ", "
  846                           "C1.Basis AS basis, C1.Encrypted AS encrypted " +
  847                           _fileInfoJoin +
  848                           "WHERE Parent = :parent AND ParentDev = :parentDev AND "
  849                           "Files.LastSet >= :first AND Files.FirstSet <= :last",
  850                           {"parent": inode, "parentDev": device, "first": first, "last": last})
  851         while True:
  852             batch = c.fetchmany(self.chunksize)
  853             if not batch:
  854                 break
  855             for row in batch:
  856                 yield row
  857 
  858     @authenticate
  859     def listBackupSets(self):
  860         #self.logger.debug("list backup sets")
  861         #                 "Name AS name, BackupSet AS backupset "
  862         c = self._execute("SELECT " +
  863                           _backupSetInfoFields +
  864                           _backupSetInfoJoin +
  865                           "ORDER BY backupset ASC", {})
  866         while True:
  867             batch = c.fetchmany(self.chunksize)
  868             if not batch:
  869                 break
  870             for row in batch:
  871                 yield row
  872 
  873     @authenticate
  874     def getBackupSetInfoById(self, bset):
  875         c = self._execute("SELECT " +
  876                           _backupSetInfoFields +
  877                           _backupSetInfoJoin +
  878                           "WHERE BackupSet = :bset",
  879                           { "bset": bset })
  880         row = c.fetchone()
  881         return row
  882 
  883     @authenticate
  884     def getBackupSetInfo(self, name):
  885         c = self._execute("SELECT " +
  886                           _backupSetInfoFields +
  887                           _backupSetInfoJoin +
  888                           "WHERE Name = :name",
  889                           { "name": name })
  890         row = c.fetchone()
  891         return row
  892 
  893     @authenticate
  894     def getBackupSetInfoForTime(self, time):
  895         c = self._execute("SELECT " +
  896                           _backupSetInfoFields +
  897                           _backupSetInfoJoin +
  898                           "WHERE BackupSet = (SELECT MAX(BackupSet) FROM Backups WHERE StartTime <= :time)",
  899                           { "time": time })
  900         row = c.fetchone()
  901         return row
  902 
  903     @authenticate
  904     def getBackupSetDetails(self, bset):
  905         row = self._executeWithResult("SELECT COUNT(*), SUM(Size) FROM Files JOIN Checksums ON Files.ChecksumID = Checksums.ChecksumID WHERE Dir = 0 AND :bset BETWEEN FirstSet AND LastSet", {'bset': bset})
  906         files = row[0]
  907         size = row[1] if row[1] else 0
  908 
  909         row = self._executeWithResult("SELECT COUNT(*) FROM Files WHERE Dir = 1 AND :bset BETWEEN FirstSet AND LastSet", {'bset': bset})
  910         dirs = row[0]
  911 
  912         # Figure out the first set after this one, and the last set before this one
  913         row = self._executeWithResult("SELECT MAX(BackupSet) FROM Backups WHERE BackupSet < :bset", {'bset': bset})
  914         prevSet = row[0] if row else 0
  915 
  916         row = self._executeWithResult("SELECT MIN(BackupSet) FROM Backups WHERE BackupSet > :bset", {'bset': bset})
  917         nextSet = row[0] if row[0] else sys.maxsize
  918 
  919         self.logger.debug("PrevSet: %s, NextSet: %s", prevSet, nextSet)
  920         # Count of files that first appeared in this version.  May be delta's
  921         row = self._executeWithResult("SELECT COUNT(*), SUM(Size), SUM(DiskSize) FROM Files JOIN Checksums ON Files.ChecksumID = Checksums.ChecksumID "
  922                                       "WHERE Dir = 0 AND FirstSet > :prevSet",
  923                                       {'prevSet': prevSet})
  924         newFiles = row[0] if row[0] else 0
  925         newSize  = row[1] if row[1] else 0
  926         newSpace = row[2] if row[2] else 0
  927 
  928         # Count of files that are last seen in this set, and are not part of somebody else's basis
  929         row = self._executeWithResult("SELECT COUNT(*), SUM(Size), SUM(DiskSize) FROM Files JOIN Checksums ON Files.ChecksumID = Checksums.ChecksumID "
  930                                       "WHERE Dir = 0 AND LastSet < :nextSet "
  931                                       "AND Checksum NOT IN (SELECT Basis FROM Checksums WHERE Basis IS NOT NULL)",
  932                                       {'nextSet': nextSet})
  933         endFiles = row[0] if row[0] else 0
  934         endSize  = row[1] if row[1] else 0
  935         endSpace = row[2] if row[2] else 0
  936 
  937         return (files, dirs, size, (newFiles, newSize, newSpace), (endFiles, endSize, endSpace))
  938 
  939     @authenticate
  940     def getNewFiles(self, bSet, other):
  941         if other:
  942             row = self._executeWithResult("SELECT max(BackupSet) FROM Backups WHERE BackupSet < :bset", {'bset': bSet})
  943             pSet = row[0]
  944         else:
  945             pSet = bSet
  946         self.logger.debug("Getting new files for changesets %s -> %s", pSet, bSet)
  947         cursor = self._execute("SELECT " + _fileInfoFields + _fileInfoJoin + 
  948                                "WHERE Files.FirstSet >= :pSet AND Files.LastSet >= :bSet",
  949                                {'bSet': bSet, 'pSet': pSet})
  950         return _fetchEm(cursor)
  951 
  952     @authenticate
  953     def getFileSizes(self, minsize):
  954         cursor = self._execute("SELECT DISTINCT(Size) FROM Checksums WHERE Size > :minsize", {"minsize": minsize })
  955         return _fetchEm(cursor)
  956 
  957     @authenticate
  958     def setStats(self, newFiles, deltaFiles, bytesReceived, current=True):
  959         bset = self._bset(current)
  960         self._execute("UPDATE Backups SET FilesFull = :full, FilesDelta = :delta, BytesReceived = :bytes WHERE BackupSet = :bset",
  961                       {"bset": bset, "full": newFiles, "delta": deltaFiles, "bytes": bytesReceived})
  962 
    @authenticate
    def getConfigValue(self, key, default=None):
        """ Authenticated accessor for a configuration value; see _getConfigValue(). """
        return self._getConfigValue(key, default)
  966 
  967     def _getConfigValue(self, key, default=None):
  968         self.logger.debug("Getting Config Value %s", key)
  969         c = self._execute("SELECT Value FROM Config WHERE Key = :key", {'key': key })
  970         row = c.fetchone()
  971         return row[0] if row else default
  972 
    @authenticate
    def setConfigValue(self, key, value):
        """ Authenticated mutator for a configuration value; see _setConfigValue(). """
        self._setConfigValue(key, value)
  976 
  977     def _setConfigValue(self, key, value):
  978         if value is None:
  979             self._execute("DELETE FROM Config WHERE Key LIKE :key", {'key': key})
  980         else:
  981             self._execute("INSERT OR REPLACE INTO Config (Key, Value) VALUES(:key, :value)", {'key': key, 'value': value})
  982 
  983     @authenticate
  984     def delConfigValue(self, key):
  985         self._execute("DELETE FROM Config WHERE Key = :key", {'key': key})
  986 
  987     @authenticate
  988     def setPriority(self, bSet, priority):
  989         backup = self._bset(bSet)
  990         self.logger.debug("Setting backupset priority to %d for backupset %s", priority, backup)
  991         self._execute("UPDATE Backups SET Priority = :priority WHERE BackupSet = :backup",
  992                       {'priority': priority, 'backup': backup})
  993 
  994 
  995     @authenticate
  996     def setSrpValues(self, salt, vkey):
  997         self.setConfigValue('SRPSalt', hexlify(salt))
  998         self.setConfigValue('SRPVkey', hexlify(vkey))
  999 
 1000     def getSrpValues(self):
 1001         self.logger.debug("Getting SRP Values")
 1002         salt = self._getConfigValue('SRPSalt')
 1003         vkey = self._getConfigValue('SRPVkey')
 1004         if salt:
 1005             salt = unhexlify(salt)
 1006         if vkey:
 1007             vkey = unhexlify(vkey)
 1008         return salt, vkey
 1009 
 1010     def getCryptoScheme(self):
 1011         self.logger.debug("Getting CryptoScheme")
 1012         return self._getConfigValue('CryptoScheme')
 1013 
 1014     @authenticate
 1015     def setKeys(self, salt, vkey, filenameKey, contentKey, backup=True):
 1016         import Tardis.Util as Util      # Import it here, as Util imports TardisDB
 1017         try:
 1018             os.rename
 1019             self.beginTransaction()
 1020             self.setSrpValues(salt, vkey)
 1021             if filenameKey:
 1022                 self.setConfigValue('FilenameKey', filenameKey)
 1023             else:
 1024                 self.delConfigValue('FilenameKey')
 1025             if contentKey:
 1026                 self.setConfigValue('ContentKey', contentKey)
 1027             else:
 1028                 self.delConfigValue('ContentKey')
 1029             if backup:
 1030                 # Attempt to save the keys away
 1031                 backupName = self.dbName + ".keys"
 1032                 r = Rotator.Rotator(rotations=0)
 1033                 r.backup(backupName)
 1034                 Util.saveKeys(backupName, self.clientId, filenameKey, contentKey, salt, vkey)
 1035             self.commit()
 1036             if backup:
 1037                 r.rotate(backupName)
 1038             return True
 1039         except Exception as e:
 1040             self.logger.error("Setkeys failed: %s", e)
 1041             self.logger.exception(e)
 1042             return False
 1043 
 1044     @authenticate
 1045     def getKeys(self):
 1046         return (self.getConfigValue('FilenameKey'), self.getConfigValue('ContentKey'))
 1047 
    @authenticate
    def beginTransaction(self):
        """ Explicitly open a transaction (BEGIN) on the underlying connection. """
        self.cursor.execute("BEGIN")
 1051 
 1052     @authenticate
 1053     def completeBackup(self):
 1054         self._execute("UPDATE Backups SET Completed = 1 WHERE BackupSet = :backup", { "backup": self.currBackupSet })
 1055         self.commit()
 1056 
 1057     def _purgeFiles(self):
 1058         self.cursor.execute("DELETE FROM Files WHERE "
 1059                             "0 = (SELECT COUNT(*) FROM Backups WHERE Backups.BackupSet BETWEEN Files.FirstSet AND Files.LastSet)")
 1060         filesDeleted = self.cursor.rowcount
 1061         return filesDeleted
 1062 
 1063     @authenticate
 1064     def listPurgeSets(self, priority, timestamp, current=False):
 1065         backupset = self._bset(current)
 1066         # Select all sets that are purgeable.
 1067         c = self.cursor.execute("SELECT " +
 1068                                 _backupSetInfoFields + 
 1069                                 _backupSetInfoJoin +
 1070                                 " WHERE Priority <= :priority AND EndTime <= :timestamp AND BackupSet < :backupset",
 1071                                 {"priority": priority, "timestamp": str(timestamp), "backupset": backupset})
 1072         for row in c:
 1073             yield row
 1074 
 1075     @authenticate
 1076     def listPurgeIncomplete(self, priority, timestamp, current=False):
 1077         backupset = self._bset(current)
 1078         # Select all sets that are both purgeable and incomplete
 1079         # Note: For some reason that I don't understand, the timestamp must be cast into a string here, to work with the coalesce operator
 1080         # If it comes from the HTTPInterface as a string, the <= timestamp doesn't seem to work.
 1081         c = self.cursor.execute("SELECT " +
 1082                                 _backupSetInfoFields +
 1083                                 _backupSetInfoJoin +
 1084                                 "WHERE Priority <= :priority AND COALESCE(EndTime, StartTime) <= :timestamp AND BackupSet < :backupset AND Completed = 0",
 1085                                 {"priority": priority, "timestamp": str(timestamp), "backupset": backupset})
 1086         for row in c:
 1087             yield row
 1088 
 1089     @authenticate
 1090     def purgeSets(self, priority, timestamp, current=False):
 1091         """ Purge old files from the database.  Needs to be followed up with calls to remove the orphaned files """
 1092         backupset = self._bset(current)
 1093         self.logger.debug("Purging backupsets below priority %d, before %s, and backupset: %d", priority, timestamp, backupset)
 1094         # First, purge out the backupsets that don't match
 1095         self.cursor.execute("DELETE FROM Backups WHERE Priority <= :priority AND EndTime <= :timestamp AND BackupSet < :backupset",
 1096                             {"priority": priority, "timestamp": str(timestamp), "backupset": backupset})
 1097         setsDeleted = self.cursor.rowcount
 1098         # Then delete the files which are no longer referenced
 1099         filesDeleted = self._purgeFiles()
 1100 
 1101         return (filesDeleted, setsDeleted)
 1102 
 1103     @authenticate
 1104     def purgeIncomplete(self, priority, timestamp, current=False):
 1105         """ Purge old files from the database.  Needs to be followed up with calls to remove the orphaned files """
 1106         backupset = self._bset(current)
 1107         self.logger.debug("Purging incomplete backupsets below priority %d, before %s, and backupset: %d", priority, timestamp, backupset)
 1108         # First, purge out the backupsets that don't match
 1109         self.cursor.execute("DELETE FROM Backups WHERE Priority <= :priority AND COALESCE(EndTime, StartTime) <= :timestamp AND BackupSet < :backupset AND Completed = 0",
 1110                             {"priority": priority, "timestamp": str(timestamp), "backupset": backupset})
 1111         setsDeleted = self.cursor.rowcount
 1112 
 1113         # Then delete the files which are no longer referenced
 1114         filesDeleted = self._purgeFiles()
 1115 
 1116         return (filesDeleted, setsDeleted)
 1117 
 1118     @authenticate
 1119     def deleteBackupSet(self, current=False):
 1120         bset = self._bset(current)
 1121         self.cursor.execute("DELETE FROM Backups WHERE BackupSet = :backupset", {"backupset": bset})
 1122         # TODO: Move this to the removeOrphans phase
 1123         # Then delete the files which are no longer referenced
 1124         filesDeleted = self._purgeFiles()
 1125 
 1126         return filesDeleted
 1127 
 1128     @authenticate
 1129     def listOrphanChecksums(self, isFile):
 1130         c = self.conn.execute("SELECT Checksum FROM Checksums "
 1131                               "WHERE ChecksumID NOT IN (SELECT DISTINCT(ChecksumID) FROM Files WHERE ChecksumID IS NOT NULL) "
 1132                               "AND   ChecksumID NOT IN (SELECT DISTINCT(XattrId) FROM Files WHERE XattrID IS NOT NULL) "
 1133                               "AND   ChecksumID NOT IN (SELECT DISTINCT(AclId) FROM Files WHERE AclId IS NOT NULL) "
 1134                               "AND   ChecksumID NOT IN (SELECT DISTINCT(CmdLineID) FROM Backups WHERE CmdLineID IS NOT NULL) "
 1135                               "AND   Checksum   NOT IN (SELECT DISTINCT(Basis) FROM Checksums WHERE Basis IS NOT NULL) "
 1136                               "AND IsFile = :isfile",
 1137                               { 'isfile': int(isFile)} )
 1138         while True:
 1139             batch = c.fetchmany(self.chunksize)
 1140             if not batch:
 1141                 break
 1142             for row in batch:
 1143                 yield row[0]
 1144 
 1145     @authenticate
 1146     def deleteOrphanChecksums(self, isFile):
 1147         self.cursor.execute("DELETE FROM Checksums "
 1148                             "WHERE ChecksumID NOT IN (SELECT DISTINCT(ChecksumID) FROM Files WHERE ChecksumID IS NOT NULL) "
 1149                             "AND   ChecksumID NOT IN (SELECT DISTINCT(XattrId) FROM Files WHERE XattrID IS NOT NULL) "
 1150                             "AND   ChecksumID NOT IN (SELECT DISTINCT(AclId) FROM Files WHERE AclId IS NOT NULL) "
 1151                             "AND   ChecksumID NOT IN (SELECT DISTINCT(CmdLineID) FROM Backups WHERE CmdLineID IS NOT NULL) "
 1152                             "AND   Checksum   NOT IN (SELECT DISTINCT(Basis) FROM Checksums WHERE Basis IS NOT NULL) "
 1153                             "AND IsFile = :isfile",
 1154                             { 'isfile': int(isFile)} )
 1155         return self.cursor.rowcount
 1156 
 1157     @authenticate
 1158     def compact(self):
 1159         self.logger.debug("Removing unused names")
 1160         # Purge out any unused names
 1161         self.conn.execute("DELETE FROM Names WHERE NameID NOT IN (SELECT NameID FROM Files)")
 1162         vacuumed = False
 1163 
 1164         # Check if we've hit an interval where we want to do a vacuum
 1165         bset = self._bset(True)
 1166         interval = self.getConfigValue("VacuumInterval")
 1167         if interval and (bset % int(interval)) == 0:
 1168             self.logger.debug("Vaccuuming database")
 1169             # And clean up the database
 1170             self.conn.commit()  # Just in case there's a transaction outstanding, for no apparent reason
 1171             self.conn.execute("VACUUM")
 1172             vacuumed = True
 1173         self.conn.execute("UPDATE Backups SET Vacuumed = :vacuumed WHERE BackupSet = :backup", {"backup": self.currBackupSet, "vacuumed": vacuumed})
 1174 
 1175     @authenticate
 1176     def deleteChecksum(self, checksum):
 1177         self.logger.debug("Deleting checksum: %s", checksum)
 1178         self.cursor.execute("DELETE FROM Checksums WHERE Checksum = :checksum", {"checksum": checksum})
 1179         return self.cursor.rowcount
 1180 
    @authenticate
    def commit(self):
        """ Commit the current transaction on the underlying connection. """
        self.conn.commit()
 1184 
 1185     @authenticate
 1186     def setClientEndTime(self):
 1187         if self.currBackupSet:
 1188             self.conn.execute("UPDATE Backups SET ClientEndTime = :now WHERE BackupSet = :backup",
 1189                               { "now": time.time(), "backup": self.currBackupSet })
 1190 
 1191     @authenticate
 1192     def setFailure(self, ex):
 1193         if self.currBackupSet:
 1194             self.conn.execute("UPDATE Backups SET Exception = :ex, ErrorMsg = :msg WHERE BackupSet = :backup",
 1195                               { "ex": type(ex).__name__, "msg": str(ex), "backup": self.currBackupSet})
 1196 
 1197 
 1198     def close(self, completeBackup=False):
 1199         #self.logger.debug("Closing DB: %s", self.dbName)
 1200         # Apparently logger will get shut down if we're executing in __del__, so leave the debugging message out
 1201         if self.currBackupSet:
 1202             self.conn.execute("UPDATE Backups SET EndTime = :now WHERE BackupSet = :backup",
 1203                               { "now": time.time(), "backup": self.currBackupSet })
 1204         self.conn.commit()
 1205         self.conn.close()
 1206         self.conn = None
 1207 
 1208         if self.backup and completeBackup:
 1209             r = Rotator.Rotator(rotations=self.numbackups)
 1210             try:
 1211                 r.backup(self.dbName)
 1212                 r.rotate(self.dbName)
 1213             except Exception as e:
 1214                 self.logger.error("Error detected creating database backup: %s", e)
 1215 
    def __del__(self):
        # Last-ditch cleanup: make sure the connection is closed if the caller didn't.
        if self.conn:
            self.close()
 1219 
 1220     def _isAuthenticated(self):
 1221         if self.authenticated:
 1222             return True
 1223         elif self.srpSrv is not None:
 1224             return self.sprSrv.authenticated()
 1225         else:
 1226             return False
 1227 
 1228 if __name__ == "__main__":
 1229     db = TardisDB(sys.argv[1])
 1230     db.newBackupSet(sys.argv[2], str(uuid.uuid1()))
 1231     rec =  db.getFileInfoByName("File1", 1)
 1232     print(rec)
 1233     print(db.getFileInfoByInode(2))
 1234     info = {
 1235         "name"  : "Dir",
 1236         "inode" : 1,
 1237         "dir"   : 0,
 1238         "size"  : 1,
 1239         "mtime" : 1111,
 1240         "ctime" : 1111,
 1241         "atime" : 1111,
 1242         "mode"  : 666,
 1243         "uid"   : 99,
 1244         "gid"   : 100,
 1245         "cksum" : None
 1246         }
 1247     db.insertFile(info)
 1248     info = {
 1249         "name"  : "File1",
 1250         "inode" : 2,
 1251         "dir"   : 1,
 1252         "size"  : 1,
 1253         "mtime" : 2222,
 1254         "ctime" : 2222,
 1255         "atime" : 2222,
 1256         "mode"  : 444,
 1257         "uid"   : 99,
 1258         "gid"   : 100,
 1259         "cksum" : None
 1260         }
 1261     db.insertFile(info)
 1262     db.completeBackup()
 1263     db.commit()