"Fossies" - the Fresh Open Source Software Archive

Member "keystone-18.0.0/keystone/cmd/cli.py" (14 Oct 2020, 56306 Bytes) of package /linux/misc/openstack/keystone-18.0.0.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) Python source code syntax highlighting (style: standard) with prefixed line numbers. Alternatively you can here view or download the uninterpreted source code file. For more information about "cli.py" see the Fossies "Dox" file reference documentation and the latest Fossies "Diffs" side-by-side code changes report: 17.0.0_vs_18.0.0.

    1 # Copyright 2012 OpenStack Foundation
    2 #
    3 # Licensed under the Apache License, Version 2.0 (the "License"); you may
    4 # not use this file except in compliance with the License. You may obtain
    5 # a copy of the License at
    6 #
    7 #      http://www.apache.org/licenses/LICENSE-2.0
    8 #
    9 # Unless required by applicable law or agreed to in writing, software
   10 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
   11 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
   12 # License for the specific language governing permissions and limitations
   13 # under the License.
   14 
   15 import argparse
   16 import datetime
   17 import os
   18 import sys
   19 import uuid
   20 
   21 import migrate
   22 from oslo_config import cfg
   23 from oslo_db.sqlalchemy import migration
   24 from oslo_log import log
   25 from oslo_serialization import jsonutils
   26 import pbr.version
   27 
   28 from keystone.cmd import bootstrap
   29 from keystone.cmd import doctor
   30 from keystone.common import driver_hints
   31 from keystone.common import fernet_utils
   32 from keystone.common import jwt_utils
   33 from keystone.common import sql
   34 from keystone.common.sql import upgrades
   35 from keystone.common import utils
   36 import keystone.conf
   37 from keystone.credential.providers import fernet as credential_fernet
   38 from keystone import exception
   39 from keystone.federation import idp
   40 from keystone.federation import utils as mapping_engine
   41 from keystone.i18n import _
   42 from keystone.server import backends
   43 
   44 CONF = keystone.conf.CONF
   45 LOG = log.getLogger(__name__)
   46 
   47 
   48 class BaseApp(object):
   49 
   50     name = None
   51 
   52     @classmethod
   53     def add_argument_parser(cls, subparsers):
   54         parser = subparsers.add_parser(cls.name, help=cls.__doc__)
   55         parser.set_defaults(cmd_class=cls)
   56         return parser
   57 
   58 
   59 class BootStrap(BaseApp):
   60     """Perform the basic bootstrap process."""
   61 
   62     name = "bootstrap"
   63 
   64     def __init__(self):
   65         self.bootstrapper = bootstrap.Bootstrapper()
   66 
   67     @classmethod
   68     def add_argument_parser(cls, subparsers):
   69         parser = super(BootStrap, cls).add_argument_parser(subparsers)
   70         parser.add_argument('--bootstrap-username', default='admin',
   71                             metavar='OS_BOOTSTRAP_USERNAME',
   72                             help=('The username of the initial keystone '
   73                                   'user during the bootstrap process.'))
   74         # NOTE(morganfainberg): See below for ENV Variable that can be used
   75         # in lieu of the command-line arguments.
   76         parser.add_argument('--bootstrap-password', default=None,
   77                             metavar='OS_BOOTSTRAP_PASSWORD',
   78                             help='The bootstrap user password')
   79         parser.add_argument('--bootstrap-project-name', default='admin',
   80                             metavar='OS_BOOTSTRAP_PROJECT_NAME',
   81                             help=('The initial project created during the '
   82                                   'keystone bootstrap process.'))
   83         parser.add_argument('--bootstrap-role-name', default='admin',
   84                             metavar='OS_BOOTSTRAP_ROLE_NAME',
   85                             help=('The initial role-name created during the '
   86                                   'keystone bootstrap process.'))
   87         parser.add_argument('--bootstrap-service-name', default='keystone',
   88                             metavar='OS_BOOTSTRAP_SERVICE_NAME',
   89                             help=('The name of the initial identity '
   90                                   'service created during the keystone '
   91                                   'bootstrap process.'))
   92         parser.add_argument('--bootstrap-admin-url',
   93                             metavar='OS_BOOTSTRAP_ADMIN_URL',
   94                             help=('The initial identity admin url created '
   95                                   'during the keystone bootstrap process. '
   96                                   'e.g. http://127.0.0.1:5000/v3'))
   97         parser.add_argument('--bootstrap-public-url',
   98                             metavar='OS_BOOTSTRAP_PUBLIC_URL',
   99                             help=('The initial identity public url created '
  100                                   'during the keystone bootstrap process. '
  101                                   'e.g. http://127.0.0.1:5000/v3'))
  102         parser.add_argument('--bootstrap-internal-url',
  103                             metavar='OS_BOOTSTRAP_INTERNAL_URL',
  104                             help=('The initial identity internal url created '
  105                                   'during the keystone bootstrap process. '
  106                                   'e.g. http://127.0.0.1:5000/v3'))
  107         parser.add_argument('--bootstrap-region-id',
  108                             metavar='OS_BOOTSTRAP_REGION_ID',
  109                             help=('The initial region_id endpoints will be '
  110                                   'placed in during the keystone bootstrap '
  111                                   'process.'))
  112         parser.add_argument('--immutable-roles',
  113                             default=True,
  114                             action='store_true',
  115                             help=('Whether default roles (admin, member, and '
  116                                   'reader) should be immutable. This is the '
  117                                   'default.'))
  118         parser.add_argument('--no-immutable-roles',
  119                             default=False,
  120                             action='store_true',
  121                             help=('Whether default roles (admin, member, and '
  122                                   'reader) should be immutable. Immutable '
  123                                   'default roles are the default; use this '
  124                                   'flag to opt out of immutable default '
  125                                   'roles.'))
  126         return parser
  127 
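          # A minimal invocation sketch (the values are illustrative, not
          # defaults from this file). Any --bootstrap-* option may instead be
          # supplied through the matching OS_BOOTSTRAP_* environment variable
          # read in do_bootstrap() below:
          #
          #   keystone-manage bootstrap \
          #       --bootstrap-password s3cr3t \
          #       --bootstrap-admin-url http://127.0.0.1:5000/v3 \
          #       --bootstrap-internal-url http://127.0.0.1:5000/v3 \
          #       --bootstrap-public-url http://127.0.0.1:5000/v3 \
          #       --bootstrap-region-id RegionOne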
  128     def do_bootstrap(self):
  129         """Perform the bootstrap actions.
  130 
  131         Create bootstrap user, project, and role so that CMS, humans, or
  132         scripts can continue to perform initial setup (domains, projects,
  133         services, endpoints, etc) of Keystone when standing up a new
  134         deployment.
  135         """
  136         self.username = (
  137             os.environ.get('OS_BOOTSTRAP_USERNAME') or
  138             CONF.command.bootstrap_username)
  139         self.project_name = (
  140             os.environ.get('OS_BOOTSTRAP_PROJECT_NAME') or
  141             CONF.command.bootstrap_project_name)
  142         self.role_name = (
  143             os.environ.get('OS_BOOTSTRAP_ROLE_NAME') or
  144             CONF.command.bootstrap_role_name)
  145         self.password = (
  146             os.environ.get('OS_BOOTSTRAP_PASSWORD') or
  147             CONF.command.bootstrap_password)
  148         self.service_name = (
  149             os.environ.get('OS_BOOTSTRAP_SERVICE_NAME') or
  150             CONF.command.bootstrap_service_name)
  151         self.admin_url = (
  152             os.environ.get('OS_BOOTSTRAP_ADMIN_URL') or
  153             CONF.command.bootstrap_admin_url)
  154         self.public_url = (
  155             os.environ.get('OS_BOOTSTRAP_PUBLIC_URL') or
  156             CONF.command.bootstrap_public_url)
  157         self.internal_url = (
  158             os.environ.get('OS_BOOTSTRAP_INTERNAL_URL') or
  159             CONF.command.bootstrap_internal_url)
  160         self.region_id = (
  161             os.environ.get('OS_BOOTSTRAP_REGION_ID') or
  162             CONF.command.bootstrap_region_id)
  163         self.service_id = None
  164         self.endpoints = None
  165 
  166         if self.password is None:
  167             print(_('ERROR: Either --bootstrap-password argument or '
  168                     'OS_BOOTSTRAP_PASSWORD must be set.'))
  169             sys.exit(1)
  170 
  171         self.bootstrapper.admin_password = self.password
  172         self.bootstrapper.admin_username = self.username
  173         self.bootstrapper.project_name = self.project_name
  174         self.bootstrapper.admin_role_name = self.role_name
  175         self.bootstrapper.service_name = self.service_name
  176         self.bootstrapper.service_id = self.service_id
  177         self.bootstrapper.admin_url = self.admin_url
  178         self.bootstrapper.public_url = self.public_url
  179         self.bootstrapper.internal_url = self.internal_url
  180         self.bootstrapper.region_id = self.region_id
  181         if CONF.command.no_immutable_roles:
  182             self.bootstrapper.immutable_roles = False
  183         else:
  184             self.bootstrapper.immutable_roles = True
  185 
  186         self.bootstrapper.bootstrap()
  187         self.reader_role_id = self.bootstrapper.reader_role_id
  188         self.member_role_id = self.bootstrapper.member_role_id
  189         self.role_id = self.bootstrapper.admin_role_id
  190         self.project_id = self.bootstrapper.project_id
  191 
  192     @classmethod
  193     def main(cls):
  194         klass = cls()
  195         klass.do_bootstrap()
  196 
  197 
  198 class Doctor(BaseApp):
  199     """Diagnose common problems with keystone deployments."""
  200 
  201     name = 'doctor'
  202 
  203     @classmethod
  204     def add_argument_parser(cls, subparsers):
  205         parser = super(Doctor, cls).add_argument_parser(subparsers)
  206         return parser
  207 
  208     @staticmethod
  209     def main():
  210         # Return a non-zero exit code if we detect any symptoms.
  211         raise SystemExit(doctor.diagnose())
  212 
  213 
  214 def assert_not_extension(extension):
  215     if extension:
  216         print(_("All extensions have been moved into keystone core and as "
  217                 "such their migrations are maintained by the main keystone "
  218                 "database control. Use the command: keystone-manage "
  219                 "db_sync"))
  220         raise RuntimeError
  221 
  222 
  223 class DbSync(BaseApp):
  224     """Sync the database."""
  225 
  226     name = 'db_sync'
  227 
  228     @classmethod
  229     def add_argument_parser(cls, subparsers):
  230         parser = super(DbSync, cls).add_argument_parser(subparsers)
  231         parser.add_argument('version', default=None, nargs='?',
  232                             help=('Migrate the database up to a specified '
  233                                   'version. If not provided, db_sync will '
  234                                   'migrate the database to the latest known '
  235                                   'version. Schema downgrades are not '
  236                                   'supported.'))
  237         parser.add_argument('--extension', default=None,
  238                             help=('This is a deprecated option to migrate a '
  239                                   'specified extension. Since extensions are '
  240                                   'now part of the main repository, '
  241                                   'specifying db_sync without this option '
  242                                   'will cause all extensions to be migrated.'))
  243         group = parser.add_mutually_exclusive_group()
  244         group.add_argument('--expand', default=False, action='store_true',
  245                            help=('Expand the database schema in preparation '
  246                                  'for data migration.'))
  247         group.add_argument('--migrate', default=False,
  248                            action='store_true',
  249                            help=('Copy all data that needs to be migrated '
  250                                  'within the database ahead of starting the '
  251                                  'first keystone node upgraded to the new '
  252                                  'release. This command should be run '
  253                                  'after the --expand command. Once the '
  254                                  '--migrate command has completed, you can '
  255                                  'upgrade all your keystone nodes to the new '
  256                                  'release and restart them.'))
  257 
  258         group.add_argument('--contract', default=False, action='store_true',
  259                            help=('Remove any database tables and columns '
  260                                  'that are no longer required. This command '
  261                                  'should be run after all keystone nodes are '
  262                                  'running the new release.'))
  263 
  264         group.add_argument('--check', default=False, action='store_true',
  265                            help=('Check for outstanding database actions that '
  266                                  'still need to be executed. This command can '
  267                                  'be used to verify the condition of the '
  268                                  'current database state.'))
  269         return parser
  270 
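          # Typical rolling-upgrade sequence, pieced together from the help
          # text above (a sketch; --check can be run at any point to report
          # which phase is still outstanding):
          #
          #   keystone-manage db_sync --expand
          #   keystone-manage db_sync --migrate
          #   ... upgrade and restart every keystone node ...
          #   keystone-manage db_sync --contract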
  271     @classmethod
  272     def check_db_sync_status(cls):
  273         status = 0
  274         try:
  275             expand_version = upgrades.get_db_version(repo='expand_repo')
  276         except migration.exception.DBMigrationError:
  277             LOG.info('Your database is not currently under version '
  278                      'control or the database is already controlled. Your '
  279                      'first step is to run `keystone-manage db_sync '
  280                      '--expand`.')
  281             return 2
  282         try:
  283             migrate_version = upgrades.get_db_version(
  284                 repo='data_migration_repo')
  285         except migration.exception.DBMigrationError:
  286             migrate_version = 0
  287         try:
  288             contract_version = upgrades.get_db_version(repo='contract_repo')
  289         except migration.exception.DBMigrationError:
  290             contract_version = 0
  291 
  292         repo = migrate.versioning.repository.Repository(
  293             upgrades.find_repo('expand_repo'))
  294         migration_script_version = int(max(repo.versions.versions))
  295 
  296         if (contract_version > migrate_version or migrate_version >
  297                 expand_version):
  298             LOG.info('Your database is out of sync. For more information '
  299                      'refer to https://docs.openstack.org/keystone/'
  300                      'latest/admin/identity-upgrading.html')
  301             status = 1
  302         elif migration_script_version > expand_version:
  303             LOG.info('Your database is not up to date. Your first step is '
  304                      'to run `keystone-manage db_sync --expand`.')
  305             status = 2
  306         elif expand_version > migrate_version:
  307             LOG.info('Expand version is ahead of migrate. Your next step '
  308                      'is to run `keystone-manage db_sync --migrate`.')
  309             status = 3
  310         elif migrate_version > contract_version:
  311             LOG.info('Migrate version is ahead of contract. Your next '
  312                      'step is to run `keystone-manage db_sync --contract`.')
  313             status = 4
  314         elif (migration_script_version == expand_version == migrate_version ==
  315                 contract_version):
  316             LOG.info('All db_sync commands are upgraded to the same '
  317                      'version and up-to-date.')
  318         LOG.info('The latest installed migration script version is: '
  319                  '%(script)d.\nCurrent repository versions:\nExpand: '
  320                  '%(expand)d \nMigrate: %(migrate)d\nContract: '
  321                  '%(contract)d', {'script': migration_script_version,
  322                                   'expand': expand_version,
  323                                   'migrate': migrate_version,
  324                                   'contract': contract_version})
  325         return status
  326 
  327     @staticmethod
  328     def main():
  329         assert_not_extension(CONF.command.extension)
  330         # It is possible to run expand and migrate at the same time,
  331         # expand needs to run first however.
  332         if CONF.command.check:
  333             sys.exit(DbSync.check_db_sync_status())
  334         elif CONF.command.expand and CONF.command.migrate:
  335             upgrades.expand_schema()
  336             upgrades.migrate_data()
  337         elif CONF.command.expand:
  338             upgrades.expand_schema()
  339         elif CONF.command.migrate:
  340             upgrades.migrate_data()
  341         elif CONF.command.contract:
  342             upgrades.contract_schema()
  343         else:
  344             upgrades.offline_sync_database_to_version(
  345                 CONF.command.version)
  346 
  347 
  348 class DbVersion(BaseApp):
  349     """Print the current migration version of the database."""
  350 
  351     name = 'db_version'
  352 
  353     @classmethod
  354     def add_argument_parser(cls, subparsers):
  355         parser = super(DbVersion, cls).add_argument_parser(subparsers)
  356         parser.add_argument('--extension', default=None,
  357                             help=('This is a deprecated option to print the '
  358                                   'version of a specified extension. Since '
  359                                   'extensions are now part of the main '
  360                                   'repository, the version of an extension is '
  361                                   'implicit in the version of the main '
  362                                   'repository.'))
  363 
  364     @staticmethod
  365     def main():
  366         assert_not_extension(CONF.command.extension)
  367         print(upgrades.get_db_version())
  368 
  369 
  370 class BasePermissionsSetup(BaseApp):
  371     """Common user/group setup for file permissions."""
  372 
  373     @classmethod
  374     def add_argument_parser(cls, subparsers):
  375         parser = super(BasePermissionsSetup,
  376                        cls).add_argument_parser(subparsers)
  377         running_as_root = (os.geteuid() == 0)
  378         parser.add_argument('--keystone-user', required=running_as_root)
  379         parser.add_argument('--keystone-group', required=running_as_root)
  380         return parser
  381 
  382     @staticmethod
  383     def get_user_group():
  384         keystone_user_id = None
  385         keystone_group_id = None
  386 
  387         try:
  388             a = CONF.command.keystone_user
  389             if a:
  390                 keystone_user_id = utils.get_unix_user(a)[0]
  391         except KeyError:
  392             raise ValueError("Unknown user '%s' in --keystone-user" % a)
  393 
  394         try:
  395             a = CONF.command.keystone_group
  396             if a:
  397                 keystone_group_id = utils.get_unix_group(a)[0]
  398         except KeyError:
  399             raise ValueError("Unknown group '%s' in --keystone-group" % a)
  400 
  401         return keystone_user_id, keystone_group_id
  402 
  403     @classmethod
  404     def initialize_fernet_repository(
  405             cls, keystone_user_id, keystone_group_id, config_group=None):
  406         conf_group = getattr(CONF, config_group)
  407         futils = fernet_utils.FernetUtils(
  408             conf_group.key_repository,
  409             conf_group.max_active_keys,
  410             config_group
  411         )
  412 
  413         futils.create_key_directory(keystone_user_id, keystone_group_id)
  414         if futils.validate_key_repository(requires_write=True):
  415             futils.initialize_key_repository(
  416                 keystone_user_id, keystone_group_id)
  417 
  418     @classmethod
  419     def rotate_fernet_repository(
  420             cls, keystone_user_id, keystone_group_id, config_group=None):
  421         conf_group = getattr(CONF, config_group)
  422         futils = fernet_utils.FernetUtils(
  423             conf_group.key_repository,
  424             conf_group.max_active_keys,
  425             config_group
  426         )
  427         if futils.validate_key_repository(requires_write=True):
  428             futils.rotate_keys(keystone_user_id, keystone_group_id)
  429 
  430 
  431 class FernetSetup(BasePermissionsSetup):
  432     """Set up key repositories for Fernet tokens and auth receipts.
  433 
  434     This also creates a primary key used for both creating and validating
  435     Fernet tokens and auth receipts. To improve security, you should rotate
  436     your keys (using keystone-manage fernet_rotate, for example).
  437 
  438     """
  439 
  440     name = 'fernet_setup'
  441 
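          # Example invocation (a sketch; the user and group names are
          # illustrative and the flags are only required when running as
          # root, per BasePermissionsSetup.add_argument_parser above):
          #
          #   keystone-manage fernet_setup \
          #       --keystone-user keystone --keystone-group keystone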
  442     @classmethod
  443     def main(cls):
  444         keystone_user_id, keystone_group_id = cls.get_user_group()
  445         cls.initialize_fernet_repository(
  446             keystone_user_id, keystone_group_id, 'fernet_tokens')
  447 
  448         if (os.path.abspath(CONF.fernet_tokens.key_repository) !=
  449                 os.path.abspath(CONF.fernet_receipts.key_repository)):
  450             cls.initialize_fernet_repository(
  451                 keystone_user_id, keystone_group_id, 'fernet_receipts')
  452         elif (CONF.fernet_tokens.max_active_keys !=
  453                 CONF.fernet_receipts.max_active_keys):
  454             # WARNING(adriant): If the directories are the same,
  455             # 'max_active_keys' is ignored from fernet_receipts in favor of
  456             # fernet_tokens to avoid a potential mismatch. Only if the
  457             # directories are different do we create a different one for
  458             # receipts, and then respect 'max_active_keys' for receipts.
  459             LOG.warning(
  460                 "Receipt and Token fernet key directories are the same "
  461                 "but `max_active_keys` is different. Receipt "
  462                 "`max_active_keys` will be ignored in favor of Token "
  463                 "`max_active_keys`."
  464             )
  465 
  466 
  467 class FernetRotate(BasePermissionsSetup):
  468     """Rotate Fernet encryption keys.
  469 
  470     This assumes you have already run keystone-manage fernet_setup.
  471 
  472     A new primary key is placed into rotation, which is used for new tokens.
  473     The old primary key is demoted to secondary, which can then still be used
  474     for validating tokens. Excess secondary keys (beyond [fernet_tokens]
  475     max_active_keys) are revoked. Revoked keys are permanently deleted. A new
  476     staged key will be created and used to validate tokens. The next time key
  477     rotation takes place, the staged key will be put into rotation as the
  478     primary key.
  479 
  480     Rotating keys too frequently, or with [fernet_tokens] max_active_keys set
  481     too low, will cause tokens to become invalid prior to their expiration.
  482 
  483     """
  484 
  485     name = 'fernet_rotate'
  486 
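          # Example invocation (a sketch; typically run on a schedule, with
          # the same ownership flags used for fernet_setup):
          #
          #   keystone-manage fernet_rotate \
          #       --keystone-user keystone --keystone-group keystone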
  487     @classmethod
  488     def main(cls):
  489         keystone_user_id, keystone_group_id = cls.get_user_group()
  490         cls.rotate_fernet_repository(
  491             keystone_user_id, keystone_group_id, 'fernet_tokens')
  492         if (os.path.abspath(CONF.fernet_tokens.key_repository) !=
  493                 os.path.abspath(CONF.fernet_receipts.key_repository)):
  494             cls.rotate_fernet_repository(
  495                 keystone_user_id, keystone_group_id, 'fernet_receipts')
  496 
  497 
  498 class CreateJWSKeyPair(BasePermissionsSetup):
  499     """Create a key pair for signing and validating JWS tokens.
  500 
  501     This command creates a public and private key pair to use for signing and
  502     validating JWS token signatures. The key pair is written to the directory
  503     where the command is invoked.
  504 
  505     """
  506 
  507     name = 'create_jws_keypair'
  508 
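          # Example invocation (a sketch; the target directory is
          # illustrative -- main() below writes private.pem and public.pem
          # to whatever directory the command is invoked from):
          #
          #   cd /etc/keystone/jws-keys && keystone-manage create_jws_keypair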
  509     @classmethod
  510     def add_argument_parser(cls, subparsers):
  511         parser = super(CreateJWSKeyPair, cls).add_argument_parser(subparsers)
  512 
  513         parser.add_argument(
  514             '--force', action='store_true',
  515             help=('Forcibly overwrite keys if they already exist')
  516         )
  517         return parser
  518 
  519     @classmethod
  520     def main(cls):
  521         current_directory = os.getcwd()
  522         private_key_path = os.path.join(current_directory, 'private.pem')
  523         public_key_path = os.path.join(current_directory, 'public.pem')
  524 
  525         if os.path.isfile(private_key_path) and not CONF.command.force:
  526             raise SystemExit(_('Private key %(path)s already exists')
  527                              % {'path': private_key_path})
  528         if os.path.isfile(public_key_path) and not CONF.command.force:
  529             raise SystemExit(_('Public key %(path)s already exists')
  530                              % {'path': public_key_path})
  531 
  532         jwt_utils.create_jws_keypair(private_key_path, public_key_path)
  533 
  534 
  535 class TokenSetup(BasePermissionsSetup):
  536     """Set up a key repository for tokens.
  537 
  538     This also creates a primary key used for both creating and validating
  539     tokens. To improve security, you should rotate your keys (using
  540     keystone-manage token_rotate, for example).
  541 
  542     """
  543 
  544     name = 'token_setup'
  545 
  546     @classmethod
  547     def main(cls):
  548         keystone_user_id, keystone_group_id = cls.get_user_group()
  549         cls.initialize_fernet_repository(
  550             keystone_user_id, keystone_group_id, 'fernet_tokens')
  551 
  552 
  553 class TokenRotate(BasePermissionsSetup):
  554     """Rotate token encryption keys.
  555 
  556     This assumes you have already run keystone-manage token_setup.
  557 
  558     A new primary key is placed into rotation, which is used for new tokens.
  559     The old primary key is demoted to secondary, which can then still be used
  560     for validating tokens. Excess secondary keys (beyond [token]
  561     max_active_keys) are revoked. Revoked keys are permanently deleted. A new
  562     staged key will be created and used to validate tokens. The next time key
  563     rotation takes place, the staged key will be put into rotation as the
  564     primary key.
  565 
  566     Rotating keys too frequently, or with [token] max_active_keys set
  567     too low, will cause tokens to become invalid prior to their expiration.
  568 
  569     """
  570 
  571     name = 'token_rotate'
  572 
  573     @classmethod
  574     def main(cls):
  575         keystone_user_id, keystone_group_id = cls.get_user_group()
  576         cls.rotate_fernet_repository(
  577             keystone_user_id, keystone_group_id, 'fernet_tokens')
  578 
  579 
  580 class ReceiptSetup(BasePermissionsSetup):
  581     """Set up a key repository for auth receipts.
  582 
  583     This also creates a primary key used for both creating and validating
  584     receipts. To improve security, you should rotate your keys (using
  585     keystone-manage receipt_rotate, for example).
  586 
  587     """
  588 
  589     name = 'receipt_setup'
  590 
  591     @classmethod
  592     def main(cls):
  593         keystone_user_id, keystone_group_id = cls.get_user_group()
  594         cls.initialize_fernet_repository(
  595             keystone_user_id, keystone_group_id, 'fernet_receipts')
  596 
  597 
  598 class ReceiptRotate(BasePermissionsSetup):
  599     """Rotate auth receipts encryption keys.
  600 
  601     This assumes you have already run keystone-manage receipt_setup.
  602 
  603     A new primary key is placed into rotation, which is used for new receipts.
  604     The old primary key is demoted to secondary, which can then still be used
  605     for validating receipts. Excess secondary keys (beyond [receipt]
  606     max_active_keys) are revoked. Revoked keys are permanently deleted. A new
  607     staged key will be created and used to validate receipts. The next time key
  608     rotation takes place, the staged key will be put into rotation as the
  609     primary key.
  610 
  611     Rotating keys too frequently, or with [receipt] max_active_keys set
  612     too low, will cause receipts to become invalid prior to their expiration.
  613 
  614     """
  615 
  616     name = 'receipt_rotate'
  617 
  618     @classmethod
  619     def main(cls):
  620         keystone_user_id, keystone_group_id = cls.get_user_group()
  621         cls.rotate_fernet_repository(
  622             keystone_user_id, keystone_group_id, 'fernet_receipts')
  623 
  624 
  625 class CredentialSetup(BasePermissionsSetup):
  626     """Set up a Fernet key repository for credential encryption.
  627 
  628     The purpose of this command is very similar to `keystone-manage
  629     fernet_setup`, except that the keys in this repository are used for
  630     encrypting and decrypting credential secrets instead of token payloads.
  631     Keys can be rotated using `keystone-manage credential_rotate`.
  632     """
  633 
  634     name = 'credential_setup'
  635 
  636     @classmethod
  637     def main(cls):
  638         futils = fernet_utils.FernetUtils(
  639             CONF.credential.key_repository,
  640             credential_fernet.MAX_ACTIVE_KEYS,
  641             'credential'
  642         )
  643 
  644         keystone_user_id, keystone_group_id = cls.get_user_group()
  645         futils.create_key_directory(keystone_user_id, keystone_group_id)
  646         if futils.validate_key_repository(requires_write=True):
  647             futils.initialize_key_repository(
  648                 keystone_user_id,
  649                 keystone_group_id
  650             )
  651 
  652 
  653 class CredentialRotate(BasePermissionsSetup):
  654     """Rotate Fernet encryption keys for credential encryption.
  655 
  656     This assumes you have already run `keystone-manage credential_setup`.
  657 
  658     A new primary key is placed into rotation only if all credentials are
  659     encrypted with the current primary key. If any credentials are encrypted
  660     with a secondary key the rotation will abort. This protects against
  661     removing a key that is still required to decrypt credentials. Once a key is
  662     removed from the repository, it is impossible to recover the original data
  663     without restoring from a backup external to keystone (more on backups
  664     below). To make sure all credentials are encrypted with the latest primary
  665     key, please see the `keystone-manage credential_migrate` command. Since the
  666     maximum number of keys in the credential repository is 3, once all
  667     credentials are encrypted with the latest primary key we can safely
  668     introduce a new primary key. All credentials will still be decryptable
  669     since they are all encrypted with the only secondary key in the repository.
  670 
  671     It is imperative to understand the importance of backing up keys used to
  672     encrypt credentials. In the event keys are over-rotated, applying a key
  673     repository from backup can help recover otherwise useless credentials.
  674     Persisting snapshots of the key repository in secure and encrypted source
  675     control, or a dedicated key management system are good examples of
  676     encryption key backups.
  677 
  678     The `keystone-manage credential_rotate` and `keystone-manage
  679     credential_migrate` commands are intended to be done in sequence. After
  680     performing a rotation, a migration must be done before performing another
  681     rotation. This ensures we don't over-rotate encryption keys.
  682 
  683     """
  684 
  685     name = 'credential_rotate'
  686 
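          # Intended sequence, per the docstring above (a sketch):
          #
          #   keystone-manage credential_rotate    # add a new primary key
          #   keystone-manage credential_migrate   # re-encrypt with that key
          #   keystone-manage credential_rotate    # now safe to rotate again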
  687     def __init__(self):
  688         drivers = backends.load_backends()
  689         self.credential_provider_api = drivers['credential_provider_api']
  690         self.credential_api = drivers['credential_api']
  691 
  692     def validate_primary_key(self):
  693         crypto, keys = credential_fernet.get_multi_fernet_keys()
  694         primary_key_hash = credential_fernet.primary_key_hash(keys)
  695 
  696         credentials = self.credential_api.driver.list_credentials(
  697             driver_hints.Hints()
  698         )
  699         for credential in credentials:
  700             if credential['key_hash'] != primary_key_hash:
  701                 msg = _('Unable to rotate credential keys because not all '
  702                         'credentials are encrypted with the primary key. '
  703                         'Please make sure all credentials have been encrypted '
  704                         'with the primary key using `keystone-manage '
  705                         'credential_migrate`.')
  706                 raise SystemExit(msg)
  707 
  708     @classmethod
  709     def main(cls):
  710         futils = fernet_utils.FernetUtils(
  711             CONF.credential.key_repository,
  712             credential_fernet.MAX_ACTIVE_KEYS,
  713             'credential'
  714         )
  715 
  716         keystone_user_id, keystone_group_id = cls.get_user_group()
  717         if futils.validate_key_repository(requires_write=True):
  718             klass = cls()
  719             klass.validate_primary_key()
  720             futils.rotate_keys(keystone_user_id, keystone_group_id)
  721 
  722 
  723 class CredentialMigrate(BasePermissionsSetup):
  724     """Provides the ability to encrypt credentials using a new primary key.
  725 
  726     This assumes that there is already a credential key repository in place and
  727     that the database backend has been upgraded to at least the Newton schema.
  728     If the credential repository doesn't exist yet, you can use
  729     ``keystone-manage credential_setup`` to create one.
  730 
  731     """
  732 
  733     name = 'credential_migrate'
  734 
  735     def __init__(self):
  736         drivers = backends.load_backends()
  737         self.credential_provider_api = drivers['credential_provider_api']
  738         self.credential_api = drivers['credential_api']
  739 
  740     def migrate_credentials(self):
  741         crypto, keys = credential_fernet.get_multi_fernet_keys()
  742         primary_key_hash = credential_fernet.primary_key_hash(keys)
  743 
  744         # FIXME(lbragstad): We *should* be able to use Hints() to ask only for
  745         # credentials that have a key_hash equal to a secondary key hash or
  746         # None, but Hints() doesn't seem to honor None values. See
  747         # https://bugs.launchpad.net/keystone/+bug/1614154.  As a workaround -
  748         # we have to ask for *all* credentials and filter them ourselves.
  749         credentials = self.credential_api.driver.list_credentials(
  750             driver_hints.Hints()
  751         )
  752         for credential in credentials:
  753             if credential['key_hash'] != primary_key_hash:
  754                 # If the key_hash isn't None but doesn't match the
  755                 # primary_key_hash, then we know the credential was encrypted
  756                 # with a secondary key. Let's decrypt it, and send it through
  757                 # the update path to re-encrypt it with the new primary key.
  758                 decrypted_blob = self.credential_provider_api.decrypt(
  759                     credential['encrypted_blob']
  760                 )
  761                 cred = {'blob': decrypted_blob}
  762                 self.credential_api.update_credential(
  763                     credential['id'],
  764                     cred
  765                 )
  766 
  767     @classmethod
  768     def main(cls):
  769         # Check to make sure we have a repository that works...
  770         futils = fernet_utils.FernetUtils(
  771             CONF.credential.key_repository,
  772             credential_fernet.MAX_ACTIVE_KEYS,
  773             'credential'
  774         )
  775         futils.validate_key_repository(requires_write=True)
  776         klass = cls()
  777         klass.migrate_credentials()
  778 
  779 
  780 class TrustFlush(BaseApp):
  781     """Flush expired and non-expired soft deleted trusts from the backend."""
  782 
  783     name = 'trust_flush'
  784 
  785     @classmethod
  786     def add_argument_parser(cls, subparsers):
  787         parser = super(TrustFlush, cls).add_argument_parser(subparsers)
  788 
  789         parser.add_argument('--project-id', default=None,
  790                             help=('The id of the project whose expired '
  791                                   'or non-expired soft-deleted trusts '
  792                                   'are to be purged'))
  793         parser.add_argument('--trustor-user-id', default=None,
  794                             help=('The id of the trustor whose expired '
  795                                   'or non-expired soft-deleted trusts '
  796                                   'are to be purged'))
  797         parser.add_argument('--trustee-user-id', default=None,
  798                             help=('The id of the trustee whose expired '
  799                                   'or non-expired soft-deleted trusts '
  800                                   'are to be purged'))
  801         parser.add_argument('--date', default=datetime.datetime.utcnow(),
  802                             help=('Purge expired or non-expired '
  803                                   'soft-deleted trusts that are older '
  804                                   'than this date. The date format must '
  805                                   'be "DD-MM-YYYY". If no date is '
  806                                   'supplied, keystone-manage will use '
  807                                   'the system clock time at runtime.'))
  808         return parser
  809 
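          # Example invocation (a sketch; <project-id> is a placeholder and
          # the date must use the DD-MM-YYYY format described above):
          #
          #   keystone-manage trust_flush \
          #       --project-id <project-id> --date 01-06-2020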
  810     @classmethod
  811     def main(cls):
  812         drivers = backends.load_backends()
  813         trust_manager = drivers['trust_api']
  814         if CONF.command.date:
  815             if not isinstance(CONF.command.date, datetime.datetime):
  816                 try:
  817                     CONF.command.date = datetime.datetime.strptime(
  818                         CONF.command.date, '%d-%m-%Y')
  819                 except ValueError:
  820                     raise ValueError("Invalid input for date '%s', should "
  821                                      "be DD-MM-YYYY" % CONF.command.date)
  822             else:
  823                 LOG.info("No date was supplied; keystone-manage will use "
  824                          "the system clock time at runtime")
  825 
  826         trust_manager.flush_expired_and_soft_deleted_trusts(
  827             project_id=CONF.command.project_id,
  828             trustor_user_id=CONF.command.trustor_user_id,
  829             trustee_user_id=CONF.command.trustee_user_id,
  830             date=CONF.command.date
  831         )
  832 
  833 
  834 class MappingPurge(BaseApp):
  835     """Purge the mapping table."""
  836 
  837     name = 'mapping_purge'
  838 
  839     @classmethod
  840     def add_argument_parser(cls, subparsers):
  841         parser = super(MappingPurge, cls).add_argument_parser(subparsers)
  842         parser.add_argument('--all', default=False, action='store_true',
  843                             help=('Purge all mappings.'))
  844         parser.add_argument('--domain-name', default=None,
  845                             help=('Purge any mappings for the domain '
  846                                   'specified.'))
  847         parser.add_argument('--public-id', default=None,
  848                             help=('Purge the mapping for the Public ID '
  849                                   'specified.'))
  850         parser.add_argument('--local-id', default=None,
  851                             help=('Purge the mappings for the Local ID '
  852                                   'specified.'))
  853         parser.add_argument('--type', default=None, choices=['user', 'group'],
  854                             help=('Purge any mappings for the type '
  855                                   'specified.'))
  856         return parser
  857 
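          # Example invocations (a sketch; the domain name is illustrative):
          #
          #   keystone-manage mapping_purge --domain-name Default
          #   keystone-manage mapping_purge --all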
  858     @staticmethod
  859     def main():
  860         def validate_options():
  861             # NOTE(henry-nash): It would be nice to use the argparse automated
  862             # checking for this validation, but the only way I can see doing
  863             # that is to make the default (i.e. if no optional parameters
  864             # are specified) to purge all mappings - and that sounds too
  865             # dangerous as a default.  So we use it in a slightly
  866             # unconventional way, where all parameters are optional, but you
  867             # must specify at least one.
  868             if (CONF.command.all is False and
  869                 CONF.command.domain_name is None and
  870                 CONF.command.public_id is None and
  871                 CONF.command.local_id is None and
  872                     CONF.command.type is None):
  873                 raise ValueError(_('At least one option must be provided'))
  874 
  875             if (CONF.command.all is True and
  876                 (CONF.command.domain_name is not None or
  877                  CONF.command.public_id is not None or
  878                  CONF.command.local_id is not None or
  879                  CONF.command.type is not None)):
  880                 raise ValueError(_('--all option cannot be mixed with '
  881                                    'other options'))
  882 
  883         def get_domain_id(name):
  884             try:
  885                 return resource_manager.get_domain_by_name(name)['id']
  886             except KeyError:
  887                 raise ValueError(_("Unknown domain '%(name)s' specified by "
  888                                    "--domain-name") % {'name': name})
  889 
  890         validate_options()
  891         drivers = backends.load_backends()
  892         resource_manager = drivers['resource_api']
  893         mapping_manager = drivers['id_mapping_api']
  894 
  895         # Now that we have validated the options, we know that at least one
  896         # option has been specified, and if it was the --all option then this
  897         # was the only option specified.
  898         #
  899         # The mapping dict is used to filter which mappings are purged, so
  900         # leaving it empty means purge them all
  901         mapping = {}
  902         if CONF.command.domain_name is not None:
  903             mapping['domain_id'] = get_domain_id(CONF.command.domain_name)
  904         if CONF.command.public_id is not None:
  905             mapping['public_id'] = CONF.command.public_id
  906         if CONF.command.local_id is not None:
  907             mapping['local_id'] = CONF.command.local_id
  908         if CONF.command.type is not None:
  909             mapping['entity_type'] = CONF.command.type
  910 
  911         mapping_manager.purge_mappings(mapping)
  912 
  913 
  914 DOMAIN_CONF_FHEAD = 'keystone.'
  915 DOMAIN_CONF_FTAIL = '.conf'
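      # Illustrative example of the naming convention enforced below: a file
      # named 'keystone.mydomain.conf' in [identity] domain_config_dir maps
      # to the domain called 'mydomain'.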
  916 
  917 
  918 def _domain_config_finder(conf_dir):
  919     """Return a generator of all domain config files found in a directory.
  920 
  921     Domain configs match the filename pattern of
  922     'keystone.<domain_name>.conf'.
  923 
  924     :returns: generator yielding (filename, domain_name) tuples
  925     """
  926     LOG.info('Scanning %r for domain config files', conf_dir)
  927     for r, d, f in os.walk(conf_dir):
  928         for fname in f:
  929             if (fname.startswith(DOMAIN_CONF_FHEAD) and
  930                     fname.endswith(DOMAIN_CONF_FTAIL)):
  931                 if fname.count('.') >= 2:
  932                     domain_name = fname[len(DOMAIN_CONF_FHEAD):
  933                                         -len(DOMAIN_CONF_FTAIL)]
  934                     yield (os.path.join(r, fname), domain_name)
  935                     continue
  936 
  937             LOG.warning('Ignoring file (%s) while scanning '
  938                         'domain config directory', fname)
  939 
  940 
  941 class DomainConfigUploadFiles(object):
  942 
  943     def __init__(self, domain_config_finder=_domain_config_finder):
  944         super(DomainConfigUploadFiles, self).__init__()
  945         self.load_backends()
  946         self._domain_config_finder = domain_config_finder
  947 
  948     def load_backends(self):
  949         drivers = backends.load_backends()
  950         self.resource_manager = drivers['resource_api']
  951         self.domain_config_manager = drivers['domain_config_api']
  952 
  953     def valid_options(self):
  954         """Validate the options, returning True if they are indeed valid.
  955 
  956         It would be nice to use the argparse automated checking for this
  957         validation, but the only way I can see doing that is to make the
  958         default (i.e. if no optional parameters are specified) to upload
  959         all configuration files - and that sounds too dangerous as a
  960         default. So we use it in a slightly unconventional way, where all
  961         parameters are optional, but you must specify at least one.
  962 
  963         """
  964         if (CONF.command.all is False and
  965                 CONF.command.domain_name is None):
  966             print(_('At least one option must be provided, use either '
  967                     '--all or --domain-name'))
  968             return False
  969 
  970         if (CONF.command.all is True and
  971                 CONF.command.domain_name is not None):
  972             print(_('The --all option cannot be used with '
  973                     'the --domain-name option'))
  974             return False
  975 
  976         return True
  977 
  978     def _upload_config_to_database(self, file_name, domain_name):
  979         """Upload a single config file to the database.
  980 
  981         :param file_name: the file containing the config options
  982         :param domain_name: the domain name
  983         :returns: a boolean indicating if the upload succeeded
  984 
  985         """
  986         try:
  987             domain_ref = (
  988                 self.resource_manager.get_domain_by_name(domain_name))
  989         except exception.DomainNotFound:
  990             print(_('Invalid domain name: %(domain)s found in config file '
  991                     'name: %(file)s - ignoring this file.') % {
  992                         'domain': domain_name,
  993                         'file': file_name})
  994             return False
  995 
  996         if self.domain_config_manager.get_config_with_sensitive_info(
  997                 domain_ref['id']):
  998             print(_('Domain: %(domain)s already has a configuration '
  999                     'defined - ignoring file: %(file)s.') % {
 1000                         'domain': domain_name,
 1001                         'file': file_name})
 1002             return False
 1003 
 1004         sections = {}
 1005         try:
 1006             parser = cfg.ConfigParser(file_name, sections)
 1007             parser.parse()
 1008         except Exception:
 1009             # We explicitly don't try and differentiate the error cases, in
 1010             # order to keep the code in this tool more robust as oslo.config
 1011             # changes.
 1012             print(_('Error parsing configuration file for domain: %(domain)s, '
 1013                     'file: %(file)s.') % {
 1014                         'domain': domain_name,
 1015                         'file': file_name})
 1016             return False
 1017 
 1018         try:
 1019             for group in sections:
 1020                 for option in sections[group]:
 1021                     sections[group][option] = sections[group][option][0]
 1022             self.domain_config_manager.create_config(domain_ref['id'],
 1023                                                      sections)
 1024             return True
 1025         except Exception as e:
 1026             msg = ('Error processing config file for domain: '
 1027                    '%(domain_name)s, file: %(filename)s, error: %(error)s')
 1028             LOG.error(msg,
 1029                       {'domain_name': domain_name,
 1030                        'filename': file_name,
 1031                        'error': e},
 1032                       exc_info=True)
 1033             return False
 1034 
 1035     def read_domain_configs_from_files(self):
 1036         """Read configs from file(s) and load into database.
 1037 
 1038         The command line parameters have already been parsed and the CONF
 1039         command option will have been set. It is either set to the name of an
 1040         explicit domain, or it's None to indicate that we want all domain
 1041         config files.
 1042 
 1043         """
 1044         domain_name = CONF.command.domain_name
 1045         conf_dir = CONF.identity.domain_config_dir
 1046         if not os.path.exists(conf_dir):
 1047             print(_('Unable to locate domain config directory: %s') % conf_dir)
 1048             raise ValueError
 1049 
 1050         if domain_name:
 1051             # Request is to upload the configs for just one domain
 1052             fname = DOMAIN_CONF_FHEAD + domain_name + DOMAIN_CONF_FTAIL
 1053             if not self._upload_config_to_database(
 1054                     os.path.join(conf_dir, fname), domain_name):
 1055                 return False
 1056             return True
 1057 
 1058         success_cnt = 0
 1059         failure_cnt = 0
 1060         for filename, domain_name in self._domain_config_finder(conf_dir):
 1061             if self._upload_config_to_database(filename, domain_name):
 1062                 success_cnt += 1
 1063                 LOG.info('Successfully uploaded domain config %r',
 1064                          filename)
 1065             else:
 1066                 failure_cnt += 1
 1067 
 1068         if success_cnt == 0:
 1069             LOG.warning('No domain configs uploaded from %r', conf_dir)
 1070 
 1071         if failure_cnt:
 1072             return False
 1073         return True
 1074 
 1075     def run(self):
 1076         # First off, let's just check we can talk to the domain database
 1077         try:
 1078             self.resource_manager.list_domains(driver_hints.Hints())
 1079         except Exception:
 1080             # It is likely that there is some SQL or other backend error
 1081             # related to set up
 1082             print(_('Unable to access the keystone database, please check it '
 1083                     'is configured correctly.'))
 1084             raise
 1085 
 1086         if not self.valid_options():
 1087             return 1
 1088 
 1089         if not self.read_domain_configs_from_files():
 1090             return 1
 1091 
 1092 
 1093 class DomainConfigUpload(BaseApp):
 1094     """Upload the domain specific configuration files to the database."""
 1095 
 1096     name = 'domain_config_upload'
 1097 
 1098     @classmethod
 1099     def add_argument_parser(cls, subparsers):
 1100         parser = super(DomainConfigUpload, cls).add_argument_parser(subparsers)
 1101         parser.add_argument('--all', default=False, action='store_true',
 1102                             help='Upload contents of all domain specific '
 1103                                  'configuration files. Either use this option '
 1104                                  'or use the --domain-name option to choose a '
 1105                                  'specific domain.')
 1106         parser.add_argument('--domain-name', default=None,
 1107                             help='Upload contents of the specific '
 1108                                  'configuration file for the given domain. '
 1109                                  'Either use this option or use the --all '
 1110                                  'option to upload contents for all domains.')
 1111         return parser
 1112 
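          # Example invocations (a sketch; 'mydomain' must match a
          # keystone.<domain_name>.conf file in the domain config directory):
          #
          #   keystone-manage domain_config_upload --domain-name mydomain
          #   keystone-manage domain_config_upload --all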
 1113     @staticmethod
 1114     def main():
 1115         dcu = DomainConfigUploadFiles()
 1116         status = dcu.run()
 1117         if status is not None:
 1118             sys.exit(status)
 1119 
 1120 
 1121 class SamlIdentityProviderMetadata(BaseApp):
 1122     """Generate Identity Provider metadata."""
 1123 
 1124     name = 'saml_idp_metadata'
 1125 
 1126     @staticmethod
 1127     def main():
 1128         metadata = idp.MetadataGenerator().generate_metadata()
 1129         print(metadata)
 1130 
 1131 
 1132 class MappingEngineTester(BaseApp):
 1133     """Execute mapping engine locally."""
 1134 
 1135     name = 'mapping_engine'
 1136 
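          # Example invocation (a sketch; the file names are placeholders --
          # see the --rules and --input help text in add_argument_parser
          # below for the expected file formats):
          #
          #   keystone-manage mapping_engine --rules rules.json \
          #       --input assertion.txt --prefix ASDF_ --engine-debug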
 1137     def __init__(self):
 1138         super(MappingEngineTester, self).__init__()
 1139         self.mapping_id = uuid.uuid4().hex
 1140         self.rules_pathname = None
 1141         self.rules = None
 1142         self.assertion_pathname = None
 1143         self.assertion = None
 1144 
 1145     def read_rules(self, path):
 1146         self.rules_pathname = path
 1147         try:
 1148             with open(path, "rb") as file:
 1149                 self.rules = jsonutils.load(file)
 1150         except ValueError as e:
 1151             raise SystemExit(_('Error while parsing rules '
 1152                                '%(path)s: %(err)s') % {'path': path, 'err': e})
 1153 
 1154     def read_assertion(self, path):
 1155         self.assertion_pathname = path
 1156         try:
 1157             with open(path) as file:
 1158                 self.assertion = file.read().strip()
 1159         except IOError as e:
 1160             raise SystemExit(_("Error while opening file "
 1161                                "%(path)s: %(err)s") % {'path': path, 'err': e})
 1162 
 1163     def normalize_assertion(self):
 1164         def split(line, line_num):
 1165             try:
 1166                 k, v = line.split(':', 1)
 1167                 return k.strip(), v.strip()
 1168             except ValueError:
 1169                 msg = _("assertion file %(pathname)s at line %(line_num)d "
 1170                         "expected 'key: value' but found '%(line)s'; "
 1171                         "see the help text for the expected file format")
 1172                 raise SystemExit(msg % {'pathname': self.assertion_pathname,
 1173                                         'line_num': line_num,
 1174                                         'line': line})
 1175         assertion = self.assertion.splitlines()
 1176         assertion_dict = {}
 1177         prefix = CONF.command.prefix
 1178         for line_num, line in enumerate(assertion, 1):
 1179             line = line.strip()
 1180             if line == '':
 1181                 continue
 1182             k, v = split(line, line_num)
 1183             if prefix:
 1184                 if k.startswith(prefix):
 1185                     assertion_dict[k] = v
 1186             else:
 1187                 assertion_dict[k] = v
 1188         self.assertion = assertion_dict
 1189 
 1190     def normalize_rules(self):
 1191         if isinstance(self.rules, list):
 1192             self.rules = {'rules': self.rules}
 1193 
 1194     @classmethod
 1195     def main(cls):
 1196         if CONF.command.engine_debug:
 1197             mapping_engine.LOG.logger.setLevel('DEBUG')
 1198         else:
 1199             mapping_engine.LOG.logger.setLevel('WARN')
 1200 
 1201         tester = cls()
 1202 
 1203         tester.read_rules(CONF.command.rules)
 1204         tester.normalize_rules()
 1205         mapping_engine.validate_mapping_structure(tester.rules)
 1206 
 1207         tester.read_assertion(CONF.command.input)
 1208         tester.normalize_assertion()
 1209 
 1210         if CONF.command.engine_debug:
 1211             print("Using Rules:\n%s" % (
 1212                 jsonutils.dumps(tester.rules, indent=2)))
 1213             print("Using Assertion:\n%s" % (
 1214                 jsonutils.dumps(tester.assertion, indent=2)))
 1215 
 1216         rp = mapping_engine.RuleProcessor(tester.mapping_id,
 1217                                           tester.rules['rules'])
 1218         mapped = rp.process(tester.assertion)
 1219         print(jsonutils.dumps(mapped, indent=2))
 1220 
 1221     @classmethod
 1222     def add_argument_parser(cls, subparsers):
 1223         parser = super(MappingEngineTester,
 1224                        cls).add_argument_parser(subparsers)
 1225 
 1226         parser.formatter_class = argparse.RawTextHelpFormatter
 1227         parser.add_argument('--rules', default=None, required=True,
 1228                             help=("Path to the file with "
 1229                                   "rules to be executed. "
 1230                                   "Content must be\na proper JSON structure, "
 1231                                   "with a top-level key 'rules' whose\n"
 1232                                   "corresponding value is a list."))
 1233         parser.add_argument('--input', default=None, required=True,
 1234                             help=("Path to the file with input attributes. "
 1235                                   "The content\nconsists of ':' separated "
 1236                                   "parameter names and their values.\nThere "
 1237                                   "is only one key-value pair per line. "
 1238                                   "A ';' in the\nvalue acts as a separator, "
 1239                                   "and the value is then treated as a list.\n"
 1240                                   "Example:\n"
 1241                                   "\tEMAIL: me@example.com\n"
 1242                                   "\tLOGIN: me\n"
 1243                                   "\tGROUPS: group1;group2;group3"))
 1244         parser.add_argument('--prefix', default=None,
 1245                             help=("A prefix used for each environment "
 1246                                   "variable in the\nassertion. For example, "
 1247                                   "all environment variables may have\nthe "
 1248                                   "prefix ASDF_."))
 1249         parser.add_argument('--engine-debug',
 1250                             default=False, action="store_true",
 1251                             help=("Enable debug messages from the mapping "
 1252                                   "engine."))
 1253 
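      # Usage sketch with hypothetical file names: given a mapping file
      # 'rules.json' (a JSON document holding the list of rules) and an
      # assertion file 'assertion.txt' containing, for example:
      #
      #     ASDF_EMAIL: me@example.com
      #     ASDF_LOGIN: me
      #     ASDF_GROUPS: group1;group2;group3
      #
      # the mapping can be exercised locally with:
      #
      #     keystone-manage mapping_engine --rules rules.json \
      #         --input assertion.txt --prefix ASDF_ --engine-debug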
 1254 
 1255 class MappingPopulate(BaseApp):
 1256     """Pre-populate entries from domain-specific backends.
 1257 
 1258     Running this command is not required. It should only be run right after
 1259     LDAP has been configured, after many new users have been added, or after
 1260     "mapping_purge" has been run.
 1261 
 1262     This command may take a while to run. It is perfectly fine for it to take
 1263     several minutes or more.
 1264     """
 1265 
 1266     name = "mapping_populate"
 1267 
 1268     @classmethod
 1269     def load_backends(cls):
 1270         drivers = backends.load_backends()
 1271         cls.identity_api = drivers['identity_api']
 1272         cls.resource_api = drivers['resource_api']
 1273 
 1274     @classmethod
 1275     def add_argument_parser(cls, subparsers):
 1276         parser = super(MappingPopulate, cls).add_argument_parser(
 1277             subparsers)
 1278 
 1279         parser.add_argument('--domain-name', default=None, required=True,
 1280                             help=("Name of the domain configured to use "
 1281                                   "a domain-specific backend."))
 1282         return parser
 1283 
 1284     @classmethod
 1285     def main(cls):
 1286         """Process entries for id_mapping_api."""
 1287         cls.load_backends()
 1288         domain_name = CONF.command.domain_name
 1289         try:
 1290             domain_id = cls.resource_api.get_domain_by_name(domain_name)['id']
 1291         except exception.DomainNotFound:
 1292             print(_('Invalid domain name: %(domain)s') % {
 1293                 'domain': domain_name})
 1294             return False
 1295         # We don't need to call the id_mapping_api directly to populate its
 1296         # entries, because list_users creates the mappings as a side effect.
 1297         # Simply making the call below is enough.
 1298         cls.identity_api.list_users(domain_scope=domain_id)
 1299 
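      # Usage sketch: for a hypothetical LDAP-backed domain named
      # 'users_ldap', the ID mappings can be pre-populated with:
      #
      #     keystone-manage mapping_populate --domain-name users_ldap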
 1300 
 1301 CMDS = [
 1302     BootStrap,
 1303     CredentialMigrate,
 1304     CredentialRotate,
 1305     CredentialSetup,
 1306     DbSync,
 1307     DbVersion,
 1308     Doctor,
 1309     DomainConfigUpload,
 1310     FernetRotate,
 1311     FernetSetup,
 1312     CreateJWSKeyPair,
 1313     MappingPopulate,
 1314     MappingPurge,
 1315     MappingEngineTester,
 1316     ReceiptRotate,
 1317     ReceiptSetup,
 1318     SamlIdentityProviderMetadata,
 1319     TokenRotate,
 1320     TokenSetup,
 1321     TrustFlush
 1322 ]
 1323 
 1324 
 1325 def add_command_parsers(subparsers):
 1326     for cmd in CMDS:
 1327         cmd.add_argument_parser(subparsers)
 1328 
 1329 
 1330 command_opt = cfg.SubCommandOpt('command',
 1331                                 title='Commands',
 1332                                 help='Available commands',
 1333                                 handler=add_command_parsers)
 1334 
 1335 
 1336 def main(argv=None, developer_config_file=None):
 1337     """Main entry point into the keystone-manage CLI utility.
 1338 
 1339     :param argv: Arguments supplied via the command line using the ``sys``
 1340                  standard library.
 1341     :type argv: list
 1342     :param developer_config_file: The location of a configuration file normally
 1343                                   found in development environments.
 1344     :type developer_config_file: string
 1345 
 1346     """
 1347     CONF.register_cli_opt(command_opt)
 1348 
 1349     keystone.conf.configure()
 1350     sql.initialize()
 1351     keystone.conf.set_default_for_default_log_levels()
 1352 
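          # Record whether the caller passed --config-file explicitly; note
          # that only the space-separated '--config-file <path>' form is
          # matched by the equality check below.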
 1353     user_supplied_config_file = False
 1354     if argv:
 1355         for argument in argv:
 1356             if argument == '--config-file':
 1357                 user_supplied_config_file = True
 1358 
 1359     if developer_config_file:
 1360         developer_config_file = [developer_config_file]
 1361 
 1362     # NOTE(lbragstad): At this point in processing, the first element of argv
 1363     # is the binary location of keystone-manage, which oslo.config doesn't need
 1364     # and is keystone-specific. Only pass a list of arguments so that
 1365     # oslo.config can determine configuration file locations based on user
 1366     # provided arguments, if present.
 1367     CONF(args=argv[1:],
 1368          project='keystone',
 1369          version=pbr.version.VersionInfo('keystone').version_string(),
 1370          usage='%(prog)s [' + '|'.join([cmd.name for cmd in CMDS]) + ']',
 1371          default_config_files=developer_config_file)
 1372 
 1373     if not CONF.default_config_files and not user_supplied_config_file:
 1374         LOG.warning('Config file not found, using default configs.')
 1375     keystone.conf.setup_logging()
 1376     CONF.command.cmd_class.main()
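      # Usage sketch: main() is the keystone-manage console-script entry
      # point, so a typical invocation looks like (the path and subcommand
      # are examples only):
      #
      #     keystone-manage --config-file /etc/keystone/keystone.conf db_sync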