"Fossies" - the Fresh Open Source Software Archive

Member "keystone-17.0.0/keystone/common/sql/upgrades.py" (13 May 2020, 12206 Bytes) of package /linux/misc/openstack/keystone-17.0.0.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) Python source code syntax highlighting (style: standard) with prefixed line numbers. Alternatively you can here view or download the uninterpreted source code file. For more information about "upgrades.py" see the Fossies "Dox" file reference documentation and the latest Fossies "Diffs" side-by-side code changes report: 16.0.1_vs_17.0.0.

# Copyright 2013 OpenStack Foundation
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import os

import migrate
from migrate import exceptions
from migrate.versioning import api as versioning_api
from oslo_db import exception as db_exception
from oslo_db.sqlalchemy import migration
import sqlalchemy

from keystone.common import sql
from keystone import exception
from keystone.i18n import _


USE_TRIGGERS = True

LEGACY_REPO = 'migrate_repo'
EXPAND_REPO = 'expand_repo'
DATA_MIGRATION_REPO = 'data_migration_repo'
CONTRACT_REPO = 'contract_repo'


class Repository(object):
    def __init__(self, engine, repo_name):
        self.repo_name = repo_name

        self.repo_path = find_repo(self.repo_name)
        self.min_version = (
            get_init_version(abs_path=self.repo_path))
        self.schema_ = versioning_api.ControlledSchema.create(
            engine, self.repo_path, self.min_version)
        self.max_version = self.schema_.repository.version().version

    def upgrade(self, version=None, current_schema=None):
        version = version or self.max_version
        err = ''
        upgrade = True
        version = versioning_api._migrate_version(
            self.schema_, version, upgrade, err)
        validate_upgrade_order(self.repo_name, target_repo_version=version)
        if not current_schema:
            current_schema = self.schema_
        changeset = current_schema.changeset(version)
        for ver, change in changeset:
            self.schema_.runchange(ver, change, changeset.step)

        if self.schema_.version != version:
            raise Exception(
                'Actual version (%s) of %s does not equal expected '
                'version (%s)' % (
                    self.schema_.version, self.repo_name, version))

    @property
    def version(self):
        with sql.session_for_read() as session:
            return migration.db_version(
                session.get_bind(), self.repo_path, self.min_version)

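# A hedged usage sketch (not part of the original module): the Repository
# helper above can be driven directly, assuming an engine that is already
# bound to the keystone database.
#
#     with sql.session_for_write() as session:
#         repo = Repository(session.get_bind(), EXPAND_REPO)
#         repo.upgrade()          # apply all pending expand migrations
#         print(repo.version)     # version currently recorded in the DB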

#  Different RDBMSs use different schemes for naming the Foreign Key
#  Constraints.  SQLAlchemy does not yet attempt to determine the name
#  for the constraint, and instead attempts to deduce it from the column.
#  This fails on MySQL.
def get_constraints_names(table, column_name):
    fkeys = [fk.name for fk in table.constraints
             if (isinstance(fk, sqlalchemy.ForeignKeyConstraint) and
                 column_name in fk.columns)]
    return fkeys


#  remove_constraints and add_constraints both accept a list of dictionaries
#  that contain:
#  {'table': a sqlalchemy table. The constraint is added to or dropped from
#            this table.
#   'fk_column': the name of a column on the above table. The constraint
#                is added to or dropped from this column.
#   'ref_column': a sqlalchemy column object. This is the reference column
#                 for the constraint.}
def remove_constraints(constraints):
    for constraint_def in constraints:
        constraint_names = get_constraints_names(constraint_def['table'],
                                                 constraint_def['fk_column'])
        for constraint_name in constraint_names:
            migrate.ForeignKeyConstraint(
                columns=[getattr(constraint_def['table'].c,
                                 constraint_def['fk_column'])],
                refcolumns=[constraint_def['ref_column']],
                name=constraint_name).drop()


def add_constraints(constraints):
    for constraint_def in constraints:

        if constraint_def['table'].kwargs.get('mysql_engine') == 'MyISAM':
            # Don't try to create constraint when using MyISAM because it's
            # not supported.
            continue

        ref_col = constraint_def['ref_column']
        ref_engine = ref_col.table.kwargs.get('mysql_engine')
        if ref_engine == 'MyISAM':
            # Don't try to create constraint when using MyISAM because it's
            # not supported.
            continue

        migrate.ForeignKeyConstraint(
            columns=[getattr(constraint_def['table'].c,
                             constraint_def['fk_column'])],
            refcolumns=[constraint_def['ref_column']]).create()

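# A hedged usage sketch (the table and column names below are hypothetical,
# not taken from the keystone schema): constraint definitions are plain
# dicts, so a migration can drop a foreign key, alter columns, and re-add it.
#
#     fk_defs = [{
#         'table': child_table,                 # sqlalchemy Table object
#         'fk_column': 'parent_id',             # column name on child_table
#         'ref_column': parent_table.c.id,      # referenced Column object
#     }]
#     remove_constraints(fk_defs)
#     # ... alter the columns here ...
#     add_constraints(fk_defs)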

def find_repo(repo_name):
    """Return the absolute path to the named repository."""
    path = os.path.abspath(os.path.join(
        os.path.dirname(sql.__file__), repo_name))

    if not os.path.isdir(path):
        raise exception.MigrationNotProvided(sql.__name__, path)

    return path


def _sync_common_repo(version):
    abs_path = find_repo(LEGACY_REPO)
    init_version = get_init_version()
    with sql.session_for_write() as session:
        engine = session.get_bind()
        _assert_not_schema_downgrade(version=version)
        migration.db_sync(engine, abs_path, version=version,
                          init_version=init_version, sanity_check=False)


def _sync_repo(repo_name):
    abs_path = find_repo(repo_name)
    with sql.session_for_write() as session:
        engine = session.get_bind()
        # Register the repo with the version control API.
        # If it already knows about the repo, it will throw
        # an exception that we can safely ignore.
        try:
            migration.db_version_control(engine, abs_path)
        except (migration.exception.DBMigrationError,
                exceptions.DatabaseAlreadyControlledError):  # nosec
            pass
        init_version = get_init_version(abs_path=abs_path)
        migration.db_sync(engine, abs_path,
                          init_version=init_version, sanity_check=False)


def get_init_version(abs_path=None):
    """Get the initial version of a migrate repository.

    :param abs_path: Absolute path to migrate repository.
    :return:         initial version number, or None if the DB is empty.
    """
    if abs_path is None:
        abs_path = find_repo(LEGACY_REPO)

    repo = migrate.versioning.repository.Repository(abs_path)

    # Sadly, Repository has a `latest` but not an `oldest`.
    # The value is a VerNum object which needs to be converted into an int.
    oldest = int(min(repo.versions.versions))

    if oldest < 1:
        return None

    # The initial version is one less than the oldest migration script.
    return oldest - 1

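# A hedged illustration (the version number is hypothetical): if the oldest
# script in a repo's versions/ directory is 067_xxx.py, get_init_version()
# returns 66, i.e. the version the database is assumed to start from.
#
#     get_init_version(abs_path=find_repo(EXPAND_REPO))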

def _assert_not_schema_downgrade(version=None):
    if version is not None:
        try:
            current_ver = int(str(get_db_version()))
            if int(version) < current_ver:
                raise migration.exception.DBMigrationError(
                    _("Unable to downgrade schema"))
        except exceptions.DatabaseNotControlledError:  # nosec
            # NOTE(morganfainberg): The database is not controlled, so this
            # action cannot be a downgrade.
            pass


def offline_sync_database_to_version(version=None):
    """Perform an off-line sync of the database.

    Migrate the database up to the latest version, doing the equivalent of
    the cycle of --expand, --migrate and --contract, for when an offline
    upgrade is being performed.

    If a version is specified then only migrate the database up to that
    version. Downgrading is not supported. If a version is specified, then
    only the main database migration is carried out - and the expand,
    migration and contract phases will NOT be run.

    """
    global USE_TRIGGERS

    # This flag lets us bypass trigger setup & teardown for non-rolling
    # upgrades. We set this as a global variable immediately before handing
    # off to sqlalchemy-migrate, because we can't pass arguments directly to
    # migrations that depend on it. We could also register this as a CONF
    # option, but the idea here is that we aren't exposing a new API.
    USE_TRIGGERS = False

    if version:
        _sync_common_repo(version)
    else:
        expand_schema()
        migrate_data()
        contract_schema()

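# A hedged usage note: a plain offline `keystone-manage db_sync` (without the
# --expand/--migrate/--contract flags) ends up in this function.
#
#     offline_sync_database_to_version()     # full offline upgrade
#     offline_sync_database_to_version(100)  # legacy repo only, up to a
#                                            # hypothetical version 100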

def get_db_version(repo=LEGACY_REPO):
    with sql.session_for_read() as session:
        repo = find_repo(repo)
        return migration.db_version(
            session.get_bind(), repo, get_init_version(repo))

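# A hedged usage sketch: query the version recorded for each repository.
#
#     get_db_version()                        # legacy migrate_repo version
#     get_db_version(repo=EXPAND_REPO)        # expand repo version
#     get_db_version(repo=CONTRACT_REPO)      # contract repo version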

def validate_upgrade_order(repo_name, target_repo_version=None):
    """Validate the state of the migration repositories.

    This is run before allowing the db_sync command to execute. Ensure the
    upgrade step and version specified by the operator remain consistent with
    the upgrade process, i.e. expand's version is greater than or equal to
    migrate's, and migrate's version is greater than or equal to contract's.

    :param repo_name: The name of the repository that the user is trying to
                      upgrade.
    :param target_repo_version: The version to upgrade the repo to. If not
                                specified, the repo will be upgraded to the
                                latest version available.
    """
    # Map each repo to the repo that must be upgraded before it.
    db_sync_order = {DATA_MIGRATION_REPO: EXPAND_REPO,
                     CONTRACT_REPO: DATA_MIGRATION_REPO}

    if repo_name == LEGACY_REPO:
        return
    # If expand is being run, we validate that the legacy repo is at the
    # maximum version before running the additional schema expansions.
    elif repo_name == EXPAND_REPO:
        abs_path = find_repo(LEGACY_REPO)
        repo = migrate.versioning.repository.Repository(abs_path)
        if int(repo.latest) != get_db_version():
            raise db_exception.DBMigrationError(
                'Your Legacy repo version is not up to date. Please refer to '
                'https://docs.openstack.org/keystone/latest/admin/'
                'identity-upgrading.html '
                'to see the proper steps for rolling upgrades.')
        return

    # Find the latest version that the current command will upgrade to if no
    # version was specified for the upgrade.
    if not target_repo_version:
        abs_path = find_repo(repo_name)
        repo = migrate.versioning.repository.Repository(abs_path)
        target_repo_version = int(repo.latest)

    # Get the current version of the repo that must be upgraded before the
    # current one.
    dependency_repo_version = get_db_version(repo=db_sync_order[repo_name])

    if dependency_repo_version < target_repo_version:
        raise db_exception.DBMigrationError(
            'You are attempting to upgrade %s ahead of %s. Please refer to '
            'https://docs.openstack.org/keystone/latest/admin/'
            'identity-upgrading.html '
            'to see the proper steps for rolling upgrades.' % (
                repo_name, db_sync_order[repo_name]))

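# A hedged illustration (the version numbers are hypothetical): if the
# contract repo's latest available script is version 80 (the implied target)
# but the data migration repo recorded in the database is still at 75, then
# validate_upgrade_order(CONTRACT_REPO) raises DBMigrationError, because the
# contract phase may not move ahead of the data migration phase.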

def expand_schema():
    """Expand the database schema ahead of data migration.

    This is run manually by the keystone-manage command before the first
    keystone node is migrated to the latest release.

    """
    # Make sure all the legacy migrations are run before we run any new
    # expand migrations.
    _sync_common_repo(version=None)
    validate_upgrade_order(EXPAND_REPO)
    _sync_repo(repo_name=EXPAND_REPO)


def migrate_data():
    """Migrate data to match the new schema.

    This is run manually by the keystone-manage command once the keystone
    schema has been expanded for the new release.

    """
    validate_upgrade_order(DATA_MIGRATION_REPO)
    _sync_repo(repo_name=DATA_MIGRATION_REPO)


def contract_schema():
    """Contract the database.

    This is run manually by the keystone-manage command once the keystone
    nodes have been upgraded to the latest release and will remove any old
    tables/columns that are no longer required.

    """
    validate_upgrade_order(CONTRACT_REPO)
    _sync_repo(repo_name=CONTRACT_REPO)
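

# A hedged end-to-end sketch of a rolling upgrade: keystone-manage drives the
# three phases above in order, roughly equivalent to calling them directly.
#
#     expand_schema()      # keystone-manage db_sync --expand
#     migrate_data()       # keystone-manage db_sync --migrate
#     contract_schema()    # keystone-manage db_sync --contract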