"Fossies" - the Fresh Open Source Software Archive

Member "magnum-8.1.0/magnum/conductor/scale_manager.py" (1 Oct 2019, 3366 Bytes) of package /linux/misc/openstack/magnum-8.1.0.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) Python source code syntax highlighting (style: standard) with prefixed line numbers. Alternatively you can here view or download the uninterpreted source code file. For more information about "scale_manager.py" see the Fossies "Dox" file reference documentation.

# Copyright 2015 Huawei Technologies Co.,LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc
from oslo_log import log as logging

from magnum.common import exception
from magnum.drivers.common.driver import Driver
from magnum.i18n import _
from magnum import objects


LOG = logging.getLogger(__name__)


def get_scale_manager(context, osclient, cluster):
    """Return the driver-provided scale manager for the cluster, if any."""
    cluster_driver = Driver.get_driver_for_cluster(context, cluster)
    manager = cluster_driver.get_scale_manager(context, osclient, cluster)
    if not manager:
        LOG.warning(
            "Currently only kubernetes and mesos cluster scale managers "
            "are available")

    return manager


class ScaleManager(object):

    def __init__(self, context, osclient, cluster):
        self.context = context
        self.osclient = osclient
        # Snapshot of the cluster as currently stored (pre-scale) versus the
        # requested post-scale state passed in by the caller.
        self.old_cluster = objects.Cluster.get_by_uuid(context, cluster.uuid)
        self.new_cluster = cluster

    def get_removal_nodes(self, hosts_output):
        if not self._is_scale_down():
            return list()

        cluster = self.new_cluster
        stack = self.osclient.heat().stacks.get(cluster.stack_id)
        hosts = hosts_output.get_output_value(stack)
        if hosts is None:
            raise exception.MagnumException(_(
                "Output key '%(output_key)s' is missing from stack "
                "%(stack_id)s") % {'output_key': hosts_output.heat_output,
                                   'stack_id': stack.id})

        hosts_with_container = self._get_hosts_with_container(self.context,
                                                              cluster)
        hosts_no_container = list(set(hosts) - hosts_with_container)
        LOG.debug('List of hosts that have no container: %s',
                  str(hosts_no_container))

        num_of_removal = self._get_num_of_removal()
        if len(hosts_no_container) < num_of_removal:
            LOG.warning(
                "About to remove %(num_removal)d nodes, which is larger than "
                "the number of empty nodes (%(num_empty)d). %(num_non_empty)d "
                "non-empty nodes will be removed.", {
                    'num_removal': num_of_removal,
                    'num_empty': len(hosts_no_container),
                    'num_non_empty': num_of_removal - len(hosts_no_container)})

        hosts_to_remove = hosts_no_container[0:num_of_removal]
        LOG.info('Require removal of hosts: %s', hosts_to_remove)

        return hosts_to_remove

    def _is_scale_down(self):
        return self.new_cluster.node_count < self.old_cluster.node_count

    def _get_num_of_removal(self):
        return self.old_cluster.node_count - self.new_cluster.node_count

    @abc.abstractmethod
    def _get_hosts_with_container(self, context, cluster):
        """Return the hosts with containers running on them."""
        pass
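
As an illustration only, the sketch below shows how a driver-specific subclass might supply the missing _get_hosts_with_container piece. The class FakeInventoryScaleManager and its static inventory mapping are hypothetical stand-ins for a real orchestrator query (a real driver, such as the kubernetes or mesos ones mentioned in the warning above, would ask its orchestrator which hosts are busy); everything else follows the contract defined by ScaleManager.

# Hypothetical example, not part of Magnum: a ScaleManager subclass whose
# "container inventory" is a hard-coded dict instead of an orchestrator API.
from magnum.conductor.scale_manager import ScaleManager


class FakeInventoryScaleManager(ScaleManager):
    """Toy subclass illustrating the _get_hosts_with_container contract."""

    # Hypothetical data: host name -> number of containers running on it.
    inventory = {'node-1': 3, 'node-2': 0, 'node-3': 1}

    def _get_hosts_with_container(self, context, cluster):
        # get_removal_nodes() subtracts this set from the stack's host list,
        # so only hosts absent from it become removal candidates.
        return {host for host, count in self.inventory.items() if count > 0}

Assuming the Heat stack output reported the same three hosts and the cluster were scaled down by one node, get_removal_nodes() would return ['node-2'], the only host without containers; scaling down by two would additionally trigger the warning above, since only one empty host is available.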