"Fossies" - the Fresh Open Source Software Archive

Member "roundup-2.0.0/roundup/backends/back_sqlite.py" (29 Feb 2020, 17919 Bytes) of package /linux/www/roundup-2.0.0.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) Python source code syntax highlighting (style: standard) with prefixed line numbers. Alternatively, you can view or download the uninterpreted source code file here. See also the latest Fossies "Diffs" side-by-side code-changes report for "back_sqlite.py": 1.6.1_vs_2.0.0.

    1 """Implements a backend for SQLite.
    2 
    3 See https://pysqlite.sourceforge.net/ for pysqlite info
    4 
    5 
    6 NOTE: we use the rdbms_common table creation methods which define datatypes
    7 for the columns, but sqlite IGNORES these specifications.
    8 """
    9 __docformat__ = 'restructuredtext'
   10 
   11 import os, marshal, shutil, time, logging
   12 
   13 from roundup import hyperdb, date, password
   14 from roundup.backends import rdbms_common
   15 from roundup.backends.sessions_dbm import Sessions, OneTimeKeys
   16 from roundup.anypy.strings import uany2s
   17 
# Work out which SQLite binding is available, preferring the newest.
# ``sqlite_version`` records which binding was imported (3, 2 or 1) so
# the rest of this backend can adapt (parameter style, row factory, ...).
sqlite_version = None
try:
    # standard-library binding (preferred)
    import sqlite3 as sqlite
    sqlite_version = 3
except ImportError:
    try:
        # standalone pysqlite2 binding; 2.1.0 is the minimum we accept
        from pysqlite2 import dbapi2 as sqlite
        if sqlite.version_info < (2, 1, 0):
            raise ValueError('pysqlite2 minimum version is 2.1.0+ '
                             '- %s found' % sqlite.version)
        sqlite_version = 2
    except ImportError:
        # ancient pysqlite 1.x binding as a last resort
        import sqlite
        sqlite_version = 1
   32 
   33 
   34 def db_exists(config):
   35     return os.path.exists(os.path.join(config.DATABASE, 'db'))
   36 
   37 
   38 def db_nuke(config):
   39     shutil.rmtree(config.DATABASE)
   40 
   41 
class Database(rdbms_common.Database):
    """Sqlite DB backend implementation

    attributes:
      dbtype:
        holds the value for the type of db. It is used by indexer to
        identify the database type so it can import the correct indexer
        module when using native text search mode.
    """

    # char to use for positional arguments
    if sqlite_version in (2, 3):
        arg = '?'
    else:
        arg = '%s'

    dbtype = "sqlite"

    # used by some code to switch styles of query
    implements_intersect = 1

    # used in generic backend to determine if db supports
    # 'DOUBLE PRECISION' for floating point numbers. Note that sqlite
    # already has double precision as its standard 'REAL' type. So this
    # is set to False here.

    implements_double_precision = False

    # SQL column type used for each hyperdb property type when tables
    # are created (note: per the module docstring, sqlite ignores these
    # declared types)
    hyperdb_to_sql_datatypes = {
        hyperdb.String    : 'VARCHAR(255)',
        hyperdb.Date      : 'VARCHAR(30)',
        hyperdb.Link      : 'INTEGER',
        hyperdb.Interval  : 'VARCHAR(255)',
        hyperdb.Password  : 'VARCHAR(255)',
        hyperdb.Boolean   : 'BOOLEAN',
        hyperdb.Number    : 'REAL',
        hyperdb.Integer   : 'INTEGER',
    }
    # converters applied to hyperdb values before they are written to
    # the database
    hyperdb_to_sql_value = {
        hyperdb.String    : str,
        hyperdb.Date      : lambda x: x.serialise(),
        hyperdb.Link      : int,
        hyperdb.Interval  : str,
        hyperdb.Password  : str,
        hyperdb.Boolean   : int,
        hyperdb.Integer   : int,
        hyperdb.Number    : lambda x: x,
        hyperdb.Multilink : lambda x: x,    # used in journal marshalling
    }
    # converters applied to raw database values to rebuild hyperdb
    # values when reading
    sql_to_hyperdb_value = {
        hyperdb.String    : uany2s,
        hyperdb.Date      : lambda x: date.Date(str(x)),
        hyperdb.Link      : str,  # XXX numeric ids
        hyperdb.Interval  : date.Interval,
        hyperdb.Password  : lambda x: password.Password(encrypted=x),
        hyperdb.Boolean   : int,
        hyperdb.Integer   : int,
        hyperdb.Number    : rdbms_common._num_cvt,
        hyperdb.Multilink : lambda x: x,    # used in journal marshalling
    }
  102 
  103     # We're using DBM for managing session info and one-time keys:
  104     # For SQL database storage of this info we would need two concurrent
  105     # connections to the same database which SQLite doesn't support
  106     def getSessionManager(self):
  107         if not self.Session:
  108             self.Session = Sessions(self)
  109         return self.Session
  110 
  111     def getOTKManager(self):
  112         if not self.Otk:
  113             self.Otk = OneTimeKeys(self)
  114         return self.Otk
  115 
  116     def sqlite_busy_handler(self, data, table, count):
  117         """invoked whenever SQLite tries to access a database that is locked"""
  118         now = time.time()
  119         if count == 1:
  120             # Timeout for handling locked database (default 30s)
  121             self._busy_handler_endtime = now + self.config.RDBMS_SQLITE_TIMEOUT
  122         elif now > self._busy_handler_endtime:
  123             # timeout expired - no more retries
  124             return 0
  125         # sleep adaptively as retry count grows,
  126         # starting from about half a second
  127         time_to_sleep = 0.01 * (2 << min(5, count))
  128         time.sleep(time_to_sleep)
  129         return 1
  130 
    def sql_open_connection(self):
        """Open a standard, non-autocommitting connection.

        pysqlite will automatically BEGIN TRANSACTION for us.

        Returns a ``(connection, cursor)`` pair.
        """
        # make sure the database directory exists
        # database itself will be created by sqlite if needed
        if not os.path.isdir(self.config.DATABASE):
            os.makedirs(self.config.DATABASE)

        db = os.path.join(self.config.DATABASE, 'db')
        logging.getLogger('roundup.hyperdb').info('open database %r' % db)
        # set timeout (30 second default is extraordinarily generous)
        # for handling locked database
        if sqlite_version == 1:
            # pysqlite 1.x: install our own busy handler to retry on lock
            conn = sqlite.connect(db=db)
            conn.db.sqlite_busy_handler(self.sqlite_busy_handler)
        else:
            # pysqlite2/sqlite3 handle lock retries via the timeout arg;
            # Row gives us index- and name-based column access
            conn = sqlite.connect(db, timeout=self.config.RDBMS_SQLITE_TIMEOUT)
            conn.row_factory = sqlite.Row

        # pysqlite2 / sqlite3 want us to store Unicode in the db but
        # that's not what's been done historically and it's definitely
        # not what the other backends do, so we'll stick with UTF-8
        if sqlite_version in (2, 3):
            conn.text_factory = str

        cursor = conn.cursor()
        return (conn, cursor)
  160 
    def open_connection(self):
        """Open the connection and bootstrap the roundup schema tables
        if this is a fresh database.
        """
        # ensure files are group readable and writable
        os.umask(self.config.UMASK)

        (self.conn, self.cursor) = self.sql_open_connection()

        try:
            self.load_dbschema()
        except sqlite.DatabaseError as error:
            # only a missing 'schema' table means "fresh database";
            # anything else is a real error and is re-raised
            if str(error) != 'no such table: schema':
                raise
            self.init_dbschema()
            self.sql('create table schema (schema varchar)')
            self.sql('create table ids (name varchar, num integer)')
            self.sql('create index ids_name_idx on ids(name)')
            self.create_version_2_tables()
  177 
    def create_version_2_tables(self):
        """Create the version-2 auxiliary tables: one-time keys,
        sessions and the full-text indexing store.
        """
        # one-time keys, indexed by key
        self.sql('create table otks (otk_key varchar, '
                 'otk_value varchar, otk_time integer)')
        self.sql('create index otks_key_idx on otks(otk_key)')
        # session data, indexed by session key
        self.sql('create table sessions (session_key varchar, '
                 'session_time integer, session_value varchar)')
        self.sql('create index sessions_key_idx on '
                 'sessions(session_key)')

        # full-text indexing store
        self.sql('CREATE TABLE __textids (_class varchar, '
                 '_itemid varchar, _prop varchar, _textid'
                 ' integer primary key) ')
        self.sql('CREATE TABLE __words (_word varchar, '
                 '_textid integer)')
        self.sql('CREATE INDEX words_word_ids ON __words(_word)')
        self.sql('CREATE INDEX words_by_id ON __words (_textid)')
        self.sql('CREATE UNIQUE INDEX __textids_by_props ON '
                 '__textids (_class, _itemid, _prop)')
        # seed the id counter for the __textids table
        sql = 'insert into ids (name, num) values (%s,%s)' % (
            self.arg, self.arg)
        self.sql(sql, ('__textids', 1))
  200 
  201     def add_new_columns_v2(self):
  202         # update existing tables to have the new actor column
  203         tables = self.database_schema['tables']
  204         for classname, spec in self.classes.items():
  205             if classname in tables:
  206                 dbspec = tables[classname]
  207                 self.update_class(spec, dbspec, force=1, adding_v2=1)
  208                 # we've updated - don't try again
  209                 tables[classname] = spec.schema()
  210 
  211     def fix_version_3_tables(self):
  212         # NOOP - no restriction on column length here
  213         pass
  214 
  215     def update_class(self, spec, old_spec, force=0, adding_v2=0):
  216         """ Determine the differences between the current spec and the
  217             database version of the spec, and update where necessary.
  218 
  219             If 'force' is true, update the database anyway.
  220 
  221             SQLite doesn't have ALTER TABLE, so we have to copy and
  222             regenerate the tables with the new schema.
  223         """
  224         new_spec = spec.schema()
  225         new_spec[1].sort()
  226         old_spec[1].sort()
  227         if not force and new_spec == old_spec:
  228             # no changes
  229             return 0
  230 
  231         logging.getLogger('roundup.hyperdb').info(
  232             'update_class %s' % spec.classname)
  233 
  234         # detect multilinks that have been removed, and drop their table
  235         old_has = {}
  236         for name, prop in old_spec[1]:
  237             old_has[name] = 1
  238             if name in spec.properties or not \
  239                isinstance(prop, hyperdb.Multilink):
  240                 continue
  241             # it's a multilink, and it's been removed - drop the old
  242             # table. First drop indexes.
  243             self.drop_multilink_table_indexes(spec.classname, name)
  244             sql = 'drop table %s_%s' % (spec.classname, prop)
  245             self.sql(sql)
  246 
  247         # now figure how we populate the new table
  248         if adding_v2:
  249             fetch = ['_activity', '_creation', '_creator']
  250         else:
  251             fetch = ['_actor', '_activity', '_creation', '_creator']
  252         properties = spec.getprops()
  253         for propname, _x in new_spec[1]:
  254             prop = properties[propname]
  255             if isinstance(prop, hyperdb.Multilink):
  256                 if propname not in old_has:
  257                     # we need to create the new table
  258                     self.create_multilink_table(spec, propname)
  259                 elif force:
  260                     tn = '%s_%s' % (spec.classname, propname)
  261                     # grabe the current values
  262                     sql = 'select linkid, nodeid from %s' % tn
  263                     self.sql(sql)
  264                     rows = self.cursor.fetchall()
  265 
  266                     # drop the old table
  267                     self.drop_multilink_table_indexes(spec.classname, propname)
  268                     sql = 'drop table %s' % tn
  269                     self.sql(sql)
  270 
  271                     # re-create and populate the new table
  272                     self.create_multilink_table(spec, propname)
  273                     sql = """insert into %s (linkid, nodeid) values
  274                         (%s, %s)""" % (tn, self.arg, self.arg)
  275                     for linkid, nodeid in rows:
  276                         self.sql(sql, (int(linkid), int(nodeid)))
  277             elif propname in old_has:
  278                 # we copy this col over from the old table
  279                 fetch.append('_'+propname)
  280 
  281         # select the data out of the old table
  282         fetch.append('id')
  283         fetch.append('__retired__')
  284         fetchcols = ','.join(fetch)
  285         cn = spec.classname
  286         sql = 'select %s from _%s' % (fetchcols, cn)
  287         self.sql(sql)
  288         olddata = self.cursor.fetchall()
  289 
  290         # TODO: update all the other index dropping code
  291         self.drop_class_table_indexes(cn, old_spec[0])
  292 
  293         # drop the old table
  294         self.sql('drop table _%s' % cn)
  295 
  296         # create the new table
  297         self.create_class_table(spec)
  298 
  299         if olddata:
  300             inscols = ['id', '_actor', '_activity', '_creation',
  301                        '_creator', '__retired__']
  302             for propname, _x in new_spec[1]:
  303                 prop = properties[propname]
  304                 if isinstance(prop, hyperdb.Multilink):
  305                     continue
  306                 elif isinstance(prop, hyperdb.Interval):
  307                     inscols.append('_'+propname)
  308                     inscols.append('__'+propname+'_int__')
  309                 elif propname in old_has:
  310                     # we copy this col over from the old table
  311                     inscols.append('_'+propname)
  312 
  313             # do the insert of the old data - the new columns will have
  314             # NULL values
  315             args = ','.join([self.arg for x in inscols])
  316             cols = ','.join(inscols)
  317             sql = 'insert into _%s (%s) values (%s)' % (cn, cols, args)
  318             for entry in olddata:
  319                 d = []
  320                 retired_id = None
  321                 for name in inscols:
  322                     # generate the new value for the Interval int column
  323                     if name.endswith('_int__'):
  324                         name = name[2:-6]
  325                         if sqlite_version in (2, 3):
  326                             try:
  327                                 v = hyperdb.Interval(entry[name]).as_seconds()
  328                             except IndexError:
  329                                 v = None
  330                         elif name in entry:
  331                             v = hyperdb.Interval(entry[name]).as_seconds()
  332                         else:
  333                             v = None
  334                     elif sqlite_version in (2, 3):
  335                         try:
  336                             v = entry[name]
  337                         except IndexError:
  338                             v = None
  339                     elif (sqlite_version == 1 and name in entry):
  340                         v = entry[name]
  341                     else:
  342                         v = None
  343                     if name == 'id':
  344                         retired_id = v
  345                     elif name == '__retired__' and retired_id and \
  346                          v not in ['0', 0]:
  347                         v = retired_id
  348                     d.append(v)
  349                 self.sql(sql, tuple(d))
  350 
  351         return 1
  352 
  353     def sql_close(self):
  354         """ Squash any error caused by us already having closed the
  355             connection.
  356         """
  357         try:
  358             self.conn.close()
  359         except sqlite.ProgrammingError as value:
  360             if str(value) != 'close failed - Connection is closed.':
  361                 raise
  362 
  363     def sql_rollback(self):
  364         """ Squash any error caused by us having closed the connection (and
  365             therefore not having anything to roll back)
  366         """
  367         try:
  368             self.conn.rollback()
  369         except sqlite.ProgrammingError as value:
  370             if str(value) != 'rollback failed - Connection is closed.':
  371                 raise
  372 
  373     def __repr__(self):
  374         return '<roundlite 0x%x>' % id(self)
  375 
  376     def sql_commit(self):
  377         """ Actually commit to the database.
  378 
  379             Ignore errors if there's nothing to commit.
  380         """
  381         try:
  382             self.conn.commit()
  383         except sqlite.DatabaseError as error:
  384             if str(error) != 'cannot commit - no transaction is active':
  385                 raise
  386         # open a new cursor for subsequent work
  387         self.cursor = self.conn.cursor()
  388 
  389     def sql_index_exists(self, table_name, index_name):
  390         self.sql('pragma index_list(%s)' % table_name)
  391         for entry in self.cursor.fetchall():
  392             if entry[1] == index_name:
  393                 return 1
  394         return 0
  395 
  396     # old-skool id generation
    def newid(self, classname):
        """ Generate a new id for the given class

        Returns the new id as a string.
        """

        # Prevent other processes from reading while we increment.
        # Otherwise multiple processes can end up with the same
        # new id and hilarity results.
        #
        # Defeat pysqlite's attempts to do locking by setting
        # isolation_level to None. Pysqlite can commit
        # on it's own even if we don't want it to end the transaction.
        # If we rewrite to use another sqlite library like apsw we
        # don't have to deal with this autocommit/autotransact foolishness.
        self.conn.isolation_level = None
        # Manage the transaction locks manually.
        self.sql("BEGIN IMMEDIATE")

        # get the next ID
        sql = 'select num from ids where name=%s' % self.arg
        self.sql(sql, (classname, ))
        newid = int(self.cursor.fetchone()[0])

        # leave the next larger number as the next newid
        sql = 'update ids set num=num+1 where name=%s' % self.arg
        vals = (classname,)
        self.sql(sql, vals)

        # reset pysqlite's auto transact stuff to default since the
        # rest of the code expects it.
        self.conn.isolation_level = ''
        # commit writing the data, clearing locks for other processes
        # and create a new cursor to the database.
        self.sql_commit()

        # return as string
        return str(newid)
  433 
  434     def setid(self, classname, setid):
  435         """ Set the id counter: used during import of database
  436 
  437         We add one to make it behave like the sequences in postgres.
  438         """
  439         sql = 'update ids set num=%s where name=%s' % (self.arg, self.arg)
  440         vals = (int(setid)+1, classname)
  441         self.sql(sql, vals)
  442 
  443     def clear(self):
  444         rdbms_common.Database.clear(self)
  445         # set the id counters to 0 (setid adds one) so we start at 1
  446         for cn in self.classes.keys():
  447             self.setid(cn, 0)
  448 
  449     def create_class(self, spec):
  450         rdbms_common.Database.create_class(self, spec)
  451         sql = 'insert into ids (name, num) values (%s, %s)' %(
  452             self.arg, self.arg)
  453         vals = (spec.classname, 1)
  454         self.sql(sql, vals)
  455 
  456     if sqlite_version in (2, 3):
  457         def load_journal(self, classname, cols, nodeid):
  458             """We need to turn the sqlite3.Row into a tuple so it can be
  459             unpacked"""
  460             l = rdbms_common.Database.load_journal(self,
  461                                                    classname, cols, nodeid)
  462             cols = range(5)
  463             return [[row[col] for col in cols] for row in l]
  464 
  465 
  466 class sqliteClass:
  467     def filter(self, *args, **kw):
  468         """ If there's NO matches to a fetch, sqlite returns NULL
  469             instead of nothing
  470         """
  471         return [f for f in rdbms_common.Class.filter(self, *args, **kw) if f]
  472 
  473 
# plain hyperdb class with the sqlite-specific filter() behaviour mixed in
class Class(sqliteClass, rdbms_common.Class):
    pass
  476 
  477 
# issue class with the sqlite-specific filter() behaviour mixed in
class IssueClass(sqliteClass, rdbms_common.IssueClass):
    pass
  480 
  481 
# file-storing class with the sqlite-specific filter() behaviour mixed in
class FileClass(sqliteClass, rdbms_common.FileClass):
    pass
  484 
  485 # vim: set et sts=4 sw=4 :