"Fossies" - the Fresh Open Source Software Archive

Member "netxms-3.8.166/src/agent/subagents/xen/cpu.cpp" (23 Feb 2021, 6416 Bytes) of package /linux/misc/netxms-3.8.166.tar.gz:



/*
** NetXMS XEN hypervisor subagent
** Copyright (C) 2017-2020 Raden Solutions
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with this program; if not, write to the Free Software
** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
**
** File: cpu.cpp
**
**/

#include "xen.h"

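// 900 slots, filled at nominally one sample per second by the collector thread, i.e. 15 minutes of usage history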
#define MAX_CPU_USAGE_SLOTS   900

/**
 * CPU usage data for a single domain (or for the host as a whole). Keeps a ring buffer of
 * per-second usage samples expressed in tenths of a percent of total host CPU capacity.
 */
class CpuUsageData
{
public:
   uint64_t prevTime;
   INT32 usage[MAX_CPU_USAGE_SLOTS];
   int currPos;

   CpuUsageData()
   {
      prevTime = 0;
      memset(usage, 0, sizeof(usage));
      currPos = 0;
   }

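   /*
    * Worked example (illustrative figures): a domain that consumed 2.0 s of CPU time during a
    * 1.0 s collection interval on a host with 8 online CPUs is recorded as
    * (2e9 * 1000 / 1e9) / 8 = 250, which the parameter handlers below format as "25.0"
    * (percent of total host capacity).
    */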
   void update(uint64_t timePeriod, uint64_t cpuTime, int cpuCount)
   {
      if (prevTime != 0)
      {
         // cpuTime is cumulative CPU time in nanoseconds; timePeriod is elapsed wall-clock time
         // in nanoseconds, so the stored sample is usage in tenths of a percent of total host
         // capacity (hence the division by the number of online CPUs)
         usage[currPos++] = (INT32)((cpuTime - prevTime) * 1000 / timePeriod) / cpuCount;
         if (currPos == MAX_CPU_USAGE_SLOTS)
            currPos = 0;
      }
      prevTime = cpuTime;
   }

   UINT32 getCurrentUsage()
   {
      return usage[currPos > 0 ? currPos - 1 : MAX_CPU_USAGE_SLOTS - 1];   // most recently written slot
   }

   UINT32 getAverageUsage(int samples)
   {
      // Average over the last "samples" one-second slots, walking the ring buffer backwards
      UINT32 sum = 0;
      for(int i = 0, pos = currPos; i < samples; i++)
      {
         pos--;
         if (pos < 0)
            pos = MAX_CPU_USAGE_SLOTS - 1;
         sum += usage[pos];
      }
      return sum / samples;
   }
};

/**
 * Collected data (s_dataLock guards both the per-domain map and the host totals)
 */
static HashMap<uint32_t, CpuUsageData> s_vmCpuUsage(Ownership::True);
static CpuUsageData s_hostCpuUsage;
static Mutex s_dataLock;

/**
 * Query CPU usage for domain
 */
bool XenQueryDomainCpuUsage(uint32_t domId, INT32 *curr, INT32 *avg1min)
{
   s_dataLock.lock();
   CpuUsageData *data = s_vmCpuUsage.get(domId);
   if (data != NULL)
   {
      *curr = data->getCurrentUsage();
      *avg1min = data->getAverageUsage(60);
   }
   s_dataLock.unlock();
   return data != NULL;
}

/**
 * Collect CPU usage data
 */
static bool CollectData(libxl_ctx *ctx, struct timespec *prevClock)
{
   bool success = false;
   uint64_t totalTime = 0;

   int count;
   libxl_dominfo *domains = libxl_list_domain(ctx, &count);
   if (domains != NULL)
   {
      struct timespec currClock;
      clock_gettime(CLOCK_MONOTONIC_RAW, &currClock);
      // Elapsed wall-clock time since the previous collection, in nanoseconds
      uint64_t tdiff = (currClock.tv_sec - prevClock->tv_sec) * _ULL(1000000000) + (currClock.tv_nsec - prevClock->tv_nsec);

      int cpuCount = libxl_get_online_cpus(ctx);

      s_dataLock.lock();
      for(int i = 0; i < count; i++)
      {
         CpuUsageData *u = s_vmCpuUsage.get(domains[i].domid);
         if (u == NULL)
         {
            u = new CpuUsageData();
            s_vmCpuUsage.set(domains[i].domid, u);
         }
         u->update(tdiff, domains[i].cpu_time, cpuCount);
         totalTime += domains[i].cpu_time;
      }
      libxl_dominfo_list_free(domains, count);

      // Host usage is derived from the cumulative CPU time of all domains combined
      s_hostCpuUsage.update(tdiff, totalTime, cpuCount);
      s_dataLock.unlock();

      memcpy(prevClock, &currClock, sizeof(struct timespec));
      success = true;
   }
   else
   {
      nxlog_debug(4, _T("XEN: call to libxl_list_domain failed"));
   }
   return success;
}

/**
 * CPU collector thread
 */
static THREAD_RESULT THREAD_CALL CollectorThread(void *arg)
{
   nxlog_debug(1, _T("XEN: CPU collector thread started"));

   libxl_ctx *ctx = NULL;
   bool connected = false;

   struct timespec currClock;
   clock_gettime(CLOCK_MONOTONIC_RAW, &currClock);   // initialize so the first collection has a valid reference point
   while(!AgentSleepAndCheckForShutdown(1000))
   {
      if (!connected)
      {
         connected = (libxl_ctx_alloc(&ctx, LIBXL_VERSION, 0, &g_xenLogger) == 0);
         if (connected)
            CollectData(ctx, &currClock);
         continue;
      }
      CollectData(ctx, &currClock);
   }
   if (ctx != NULL)
      libxl_ctx_free(ctx);
   nxlog_debug(1, _T("XEN: CPU collector thread stopped"));
   return THREAD_OK;
}

/**
 * CPU collector thread handle
 */
static THREAD s_cpuCollectorThread = INVALID_THREAD_HANDLE;

/**
 * Start CPU collector
 */
void XenStartCPUCollector()
{
   s_cpuCollectorThread = ThreadCreateEx(CollectorThread, 0, NULL);
}

/**
 * Stop CPU collector
 */
void XenStopCPUCollector()
{
   ThreadJoin(s_cpuCollectorThread);
}

/**
 * Handler for XEN.Host.CPU.Usage parameters
 */
LONG H_XenHostCPUUsage(const TCHAR *param, const TCHAR *arg, TCHAR *value, AbstractCommSession *session)
{
   UINT32 usage = 0;
   s_dataLock.lock();
   switch(*arg)   // '0' = current, '1' = 1 minute average, '5' = 5 minute average, 'F' = 15 minute average
   {
      case '0':
         usage = s_hostCpuUsage.getCurrentUsage();
         break;
      case '1':
         usage = s_hostCpuUsage.getAverageUsage(60);
         break;
      case '5':
         usage = s_hostCpuUsage.getAverageUsage(300);
         break;
      case 'F':
         usage = s_hostCpuUsage.getAverageUsage(900);
         break;
   }
   s_dataLock.unlock();
   // Samples are stored in tenths of a percent, so format as a percentage with one decimal digit
   _sntprintf(value, MAX_RESULT_LENGTH, _T("%d.%d"), usage / 10, usage % 10);
   return SYSINFO_RC_SUCCESS;
}

/**
 * Handler for XEN.Domain.CPU.Usage parameters
 */
LONG H_XenDomainCPUUsage(const TCHAR *param, const TCHAR *arg, TCHAR *value, AbstractCommSession *session)
{
   char domName[256];
   if (!AgentGetParameterArgA(param, 1, domName, 256))
      return SYSINFO_RC_UNSUPPORTED;

   // The argument may be either a numeric domain ID or a domain name
   char *eptr;
   uint32_t domId = strtoul(domName, &eptr, 0);
   if (*eptr != 0)
   {
      LONG rc = XenResolveDomainName(domName, &domId);
      if (rc != SYSINFO_RC_SUCCESS)
         return rc;
   }

   LONG rc = SYSINFO_RC_SUCCESS;
   UINT32 usage = 0;
   s_dataLock.lock();
   CpuUsageData *data = s_vmCpuUsage.get(domId);
   if (data != NULL)
   {
      switch(*arg)   // '0' = current, '1' = 1 minute average, '5' = 5 minute average, 'F' = 15 minute average
      {
         case '0':
            usage = data->getCurrentUsage();
            break;
         case '1':
            usage = data->getAverageUsage(60);
            break;
         case '5':
            usage = data->getAverageUsage(300);
            break;
         case 'F':
            usage = data->getAverageUsage(900);
            break;
      }
      _sntprintf(value, MAX_RESULT_LENGTH, _T("%d.%d"), usage / 10, usage % 10);
   }
   else
   {
      rc = SYSINFO_RC_ERROR;
   }
   s_dataLock.unlock();
   return rc;
}
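
/*
 * Note: the "XEN.Host.CPU.Usage" and "XEN.Domain.CPU.Usage" parameter families referenced in the
 * handler comments above are bound to H_XenHostCPUUsage/H_XenDomainCPUUsage in the subagent's
 * parameter table, which is defined outside this file. Illustrative request forms, with a
 * hypothetical domain name:
 *   XEN.Host.CPU.Usage            - current host CPU usage, one decimal digit (e.g. "25.0")
 *   XEN.Domain.CPU.Usage(web01)   - current usage for domain "web01" (a name or a numeric domain ID is accepted)
 */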