"Fossies" - the Fresh Open Source Software Archive

Member "honggfuzz-2.2/linux/perf.c" (23 Apr 2020, 14754 Bytes) of package /linux/privat/honggfuzz-2.2.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and code folding option. Alternatively you can here view or download the uninterpreted source code file. For more information about "perf.c" see the Fossies "Dox" file reference documentation and the latest Fossies "Diffs" side-by-side code changes report: 2.1_vs_2.2.

/*
 *
 * honggfuzz - architecture dependent code (LINUX/PERF)
 * -----------------------------------------
 *
 * Author: Robert Swiecki <swiecki@google.com>
 *
 * Copyright 2010-2018 by Google Inc. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License. You may obtain
 * a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied. See the License for the specific language governing
 * permissions and limitations under the License.
 *
 */

#include "perf.h"

#include <asm/mman.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/sysctl.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/poll.h>
#include <sys/ptrace.h>
#include <sys/syscall.h>
#include <unistd.h>

#include "libhfcommon/common.h"
#include "libhfcommon/files.h"
#include "libhfcommon/log.h"
#include "libhfcommon/util.h"
#include "pt.h"

#define _HF_PERF_MAP_SZ (1024 * 512)
#define _HF_PERF_AUX_SZ (1024 * 1024)
/* PERF_TYPE values for Intel PT/BTS, as read from sysfs; -1 if not available */
static int32_t perfIntelPtPerfType = -1;
static int32_t perfIntelBtsPerfType = -1;

#if defined(PERF_ATTR_SIZE_VER5)
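/*
 * Walk the BTS branch records accumulated in the perf AUX area: hash each
 * (from, to) pair into the coverage bitmap and count previously unseen pairs
 * as newly discovered basic blocks (run->hwCnts.newBBCnt).
 */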
__attribute__((hot)) static inline void arch_perfBtsCount(run_t* run) {
    struct perf_event_mmap_page* pem = (struct perf_event_mmap_page*)run->arch_linux.perfMmapBuf;
    struct bts_branch {
        uint64_t from;
        uint64_t to;
        uint64_t misc;
    };

    uint64_t aux_head = ATOMIC_GET(pem->aux_head);
    struct bts_branch* br = (struct bts_branch*)run->arch_linux.perfMmapAux;
    for (; br < ((struct bts_branch*)(run->arch_linux.perfMmapAux + aux_head)); br++) {
        /*
         * The kernel sometimes reports branches originating in the kernel (e.g. iret);
         * we are not interested in those, as they make unique branch counting less
         * predictable
         */
        if (run->global->arch_linux.kernelOnly == false &&
            (__builtin_expect(br->from > 0xFFFFFFFF00000000, false) ||
                __builtin_expect(br->to > 0xFFFFFFFF00000000, false))) {
            LOG_D("Skipping kernel-space branch %#018" PRIx64 " - %#018" PRIx64, br->from, br->to);
            continue;
        }
        if (br->from >= run->global->arch_linux.dynamicCutOffAddr ||
            br->to >= run->global->arch_linux.dynamicCutOffAddr) {
            continue;
        }

        register size_t pos = ((br->from << 12) ^ (br->to & 0xFFF));
        pos &= _HF_PERF_BITMAP_BITSZ_MASK;

        register bool prev = ATOMIC_BITMAP_SET(run->global->feedback.covFeedbackMap->bbMapPc, pos);
        if (!prev) {
            run->hwCnts.newBBCnt++;
        }
    }
}
#endif /* defined(PERF_ATTR_SIZE_VER5) */

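/*
 * Consume trace data gathered in the AUX area since the last reset: dispatch
 * to the BTS branch counter or to the Intel PT decoder (arch_ptAnalyze),
 * depending on the enabled feedback method. Aborts if the AUX data has been
 * overwritten, i.e. the AUX buffer turned out to be too small.
 */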
static inline void arch_perfMmapParse(run_t* run HF_ATTR_UNUSED) {
#if defined(PERF_ATTR_SIZE_VER5)
    struct perf_event_mmap_page* pem = (struct perf_event_mmap_page*)run->arch_linux.perfMmapBuf;
    if (pem->aux_head == pem->aux_tail) {
        return;
    }
    if (pem->aux_head < pem->aux_tail) {
        LOG_F("The PERF AUX data has been overwritten. The AUX buffer is too small");
    }
    if (run->global->feedback.dynFileMethod & _HF_DYNFILE_BTS_EDGE) {
        arch_perfBtsCount(run);
    }
    if (run->global->feedback.dynFileMethod & _HF_DYNFILE_IPT_BLOCK) {
        arch_ptAnalyze(run);
    }
#endif /* defined(PERF_ATTR_SIZE_VER5) */
}

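/* Thin wrapper around the perf_event_open(2) syscall, for which glibc provides no wrapper */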
static long perf_event_open(
    struct perf_event_attr* hw_event, pid_t pid, int cpu, int group_fd, unsigned long flags) {
    return syscall(__NR_perf_event_open, hw_event, (uintptr_t)pid, (uintptr_t)cpu,
        (uintptr_t)group_fd, (uintptr_t)flags);
}

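/*
 * Set up a single perf event fd for the requested feedback method. The plain
 * hardware counters (instructions/branches) only need the fd; Intel BTS/PT
 * additionally require mmap()ing the perf ring buffer and the AUX trace area.
 */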
static bool arch_perfCreate(run_t* run, pid_t pid, dynFileMethod_t method, int* perfFd) {
    LOG_D("Enabling PERF for pid=%d method=%x", pid, method);

    if (*perfFd != -1) {
        LOG_F("The PERF FD is already initialized, possibly conflicting perf types enabled");
    }

    if ((method & _HF_DYNFILE_BTS_EDGE) && perfIntelBtsPerfType == -1) {
        LOG_F("Intel BTS events (new type) are not supported on this platform");
    }
    if ((method & _HF_DYNFILE_IPT_BLOCK) && perfIntelPtPerfType == -1) {
        LOG_F("Intel PT events are not supported on this platform");
    }

    struct perf_event_attr pe;
    memset(&pe, 0, sizeof(struct perf_event_attr));
    pe.size = sizeof(struct perf_event_attr);
    if (run->global->arch_linux.kernelOnly) {
        pe.exclude_user = 1;
    } else {
        pe.exclude_kernel = 1;
    }
    pe.disabled = 1;
    if (!run->global->exe.persistent) {
        pe.enable_on_exec = 1;
    }
    pe.exclude_hv = 1;
    pe.type = PERF_TYPE_HARDWARE;

    switch (method) {
        case _HF_DYNFILE_INSTR_COUNT:
            LOG_D("Using: PERF_COUNT_HW_INSTRUCTIONS for pid=%d", (int)pid);
            pe.config = PERF_COUNT_HW_INSTRUCTIONS;
            pe.inherit = 1;
            break;
        case _HF_DYNFILE_BRANCH_COUNT:
            LOG_D("Using: PERF_COUNT_HW_BRANCH_INSTRUCTIONS for pid=%d", (int)pid);
            pe.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
            pe.inherit = 1;
            break;
        case _HF_DYNFILE_BTS_EDGE:
            LOG_D("Using: (Intel BTS) type=%" PRIu32 " for pid=%d", perfIntelBtsPerfType, (int)pid);
            pe.type = perfIntelBtsPerfType;
            break;
        case _HF_DYNFILE_IPT_BLOCK:
            LOG_D("Using: (Intel PT) type=%" PRIu32 " for pid=%d", perfIntelPtPerfType, (int)pid);
            pe.type = perfIntelPtPerfType;
            pe.config = RTIT_CTL_DISRETC;
            break;
        default:
            LOG_E("Unknown perf mode: '%d' for pid=%d", method, (int)pid);
            return false;
            break;
    }

#if !defined(PERF_FLAG_FD_CLOEXEC)
#define PERF_FLAG_FD_CLOEXEC 0
#endif
    *perfFd = perf_event_open(&pe, pid, -1, -1, PERF_FLAG_FD_CLOEXEC);
    if (*perfFd == -1) {
        PLOG_E("perf_event_open() failed");
        return false;
    }

    if (method != _HF_DYNFILE_BTS_EDGE && method != _HF_DYNFILE_IPT_BLOCK) {
        return true;
    }
#if defined(PERF_ATTR_SIZE_VER5)
    if ((run->arch_linux.perfMmapBuf = mmap(NULL, _HF_PERF_MAP_SZ + getpagesize(),
             PROT_READ | PROT_WRITE, MAP_SHARED, *perfFd, 0)) == MAP_FAILED) {
        run->arch_linux.perfMmapBuf = NULL;
        PLOG_W("mmap(mmapBuf) failed, sz=%zu, try increasing the kernel.perf_event_mlock_kb sysctl "
               "(up to even 300000000)",
            (size_t)_HF_PERF_MAP_SZ + getpagesize());
        close(*perfFd);
        *perfFd = -1;
        return false;
    }

    struct perf_event_mmap_page* pem = (struct perf_event_mmap_page*)run->arch_linux.perfMmapBuf;
    pem->aux_offset = pem->data_offset + pem->data_size;
    pem->aux_size = _HF_PERF_AUX_SZ;
    if ((run->arch_linux.perfMmapAux = mmap(
             NULL, pem->aux_size, PROT_READ, MAP_SHARED, *perfFd, pem->aux_offset)) == MAP_FAILED) {
        munmap(run->arch_linux.perfMmapBuf, _HF_PERF_MAP_SZ + getpagesize());
        run->arch_linux.perfMmapBuf = NULL;
        run->arch_linux.perfMmapAux = NULL;
        PLOG_W(
            "mmap(mmapAuxBuf) failed, try increasing the kernel.perf_event_mlock_kb sysctl (up to "
            "even 300000000)");
        close(*perfFd);
        *perfFd = -1;
        return false;
    }
#else  /* defined(PERF_ATTR_SIZE_VER5) */
    LOG_F("Your <linux/perf_event.h> includes are too old to support Intel PT/BTS");
#endif /* defined(PERF_ATTR_SIZE_VER5) */

    return true;
}

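/*
 * Open perf fds for every feedback method enabled in dynFileMethod; on any
 * failure, close the per-run perf fds and return false.
 */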
bool arch_perfOpen(run_t* run) {
    if (run->global->feedback.dynFileMethod == _HF_DYNFILE_NONE) {
        return true;
    }

    if (run->global->feedback.dynFileMethod & _HF_DYNFILE_INSTR_COUNT) {
        if (!arch_perfCreate(run, run->pid, _HF_DYNFILE_INSTR_COUNT, &run->arch_linux.cpuInstrFd)) {
            LOG_E("Cannot set up perf for pid=%d (_HF_DYNFILE_INSTR_COUNT)", (int)run->pid);
            goto out;
        }
    }
    if (run->global->feedback.dynFileMethod & _HF_DYNFILE_BRANCH_COUNT) {
        if (!arch_perfCreate(
                run, run->pid, _HF_DYNFILE_BRANCH_COUNT, &run->arch_linux.cpuBranchFd)) {
            LOG_E("Cannot set up perf for pid=%d (_HF_DYNFILE_BRANCH_COUNT)", (int)run->pid);
            goto out;
        }
    }
    if (run->global->feedback.dynFileMethod & _HF_DYNFILE_BTS_EDGE) {
        if (!arch_perfCreate(run, run->pid, _HF_DYNFILE_BTS_EDGE, &run->arch_linux.cpuIptBtsFd)) {
            LOG_E("Cannot set up perf for pid=%d (_HF_DYNFILE_BTS_EDGE)", (int)run->pid);
            goto out;
        }
    }
    if (run->global->feedback.dynFileMethod & _HF_DYNFILE_IPT_BLOCK) {
        if (!arch_perfCreate(run, run->pid, _HF_DYNFILE_IPT_BLOCK, &run->arch_linux.cpuIptBtsFd)) {
            LOG_E("Cannot set up perf for pid=%d (_HF_DYNFILE_IPT_BLOCK)", (int)run->pid);
            goto out;
        }
    }

    return true;

out:
    close(run->arch_linux.cpuInstrFd);
    run->arch_linux.cpuInstrFd = -1;
    close(run->arch_linux.cpuBranchFd);
    run->arch_linux.cpuBranchFd = -1;
    close(run->arch_linux.cpuIptBtsFd);
    run->arch_linux.cpuIptBtsFd = -1;

    return false;
}

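/* Undo arch_perfOpen(): unmap the perf buffers and close the per-method event fds */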
void arch_perfClose(run_t* run) {
    if (run->global->feedback.dynFileMethod == _HF_DYNFILE_NONE) {
        return;
    }

    if (run->arch_linux.perfMmapAux != NULL) {
        munmap(run->arch_linux.perfMmapAux, _HF_PERF_AUX_SZ);
        run->arch_linux.perfMmapAux = NULL;
    }
    if (run->arch_linux.perfMmapBuf != NULL) {
        munmap(run->arch_linux.perfMmapBuf, _HF_PERF_MAP_SZ + getpagesize());
        run->arch_linux.perfMmapBuf = NULL;
    }

    if (run->global->feedback.dynFileMethod & _HF_DYNFILE_INSTR_COUNT) {
        close(run->arch_linux.cpuInstrFd);
        run->arch_linux.cpuInstrFd = -1;
    }
    if (run->global->feedback.dynFileMethod & _HF_DYNFILE_BRANCH_COUNT) {
        close(run->arch_linux.cpuBranchFd);
        run->arch_linux.cpuBranchFd = -1;
    }
    if (run->global->feedback.dynFileMethod & _HF_DYNFILE_BTS_EDGE) {
        close(run->arch_linux.cpuIptBtsFd);
        run->arch_linux.cpuIptBtsFd = -1;
    }
    if (run->global->feedback.dynFileMethod & _HF_DYNFILE_IPT_BLOCK) {
        close(run->arch_linux.cpuIptBtsFd);
        run->arch_linux.cpuIptBtsFd = -1;
    }
}

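/* Start the perf events for persistent targets; non-persistent ones are enabled on exec() */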
bool arch_perfEnable(run_t* run) {
    if (run->global->feedback.dynFileMethod == _HF_DYNFILE_NONE) {
        return true;
    }
    /* For non-persistent targets the event is enabled on exec() (pe.enable_on_exec) */
    if (!run->global->exe.persistent) {
        return true;
    }

    if (run->global->feedback.dynFileMethod & _HF_DYNFILE_INSTR_COUNT) {
        ioctl(run->arch_linux.cpuInstrFd, PERF_EVENT_IOC_ENABLE, 0);
    }
    if (run->global->feedback.dynFileMethod & _HF_DYNFILE_BRANCH_COUNT) {
        ioctl(run->arch_linux.cpuBranchFd, PERF_EVENT_IOC_ENABLE, 0);
    }
    if (run->global->feedback.dynFileMethod & _HF_DYNFILE_BTS_EDGE) {
        ioctl(run->arch_linux.cpuIptBtsFd, PERF_EVENT_IOC_ENABLE, 0);
    }
    if (run->global->feedback.dynFileMethod & _HF_DYNFILE_IPT_BLOCK) {
        ioctl(run->arch_linux.cpuIptBtsFd, PERF_EVENT_IOC_ENABLE, 0);
    }

    return true;
}

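/* Rewind the perf data and AUX ring buffers so the next iteration starts with an empty trace */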
static void arch_perfMmapReset(run_t* run) {
    /* smp_mb() required as per /usr/include/linux/perf_event.h */
    wmb();

    struct perf_event_mmap_page* pem = (struct perf_event_mmap_page*)run->arch_linux.perfMmapBuf;
    ATOMIC_SET(pem->data_head, 0);
    ATOMIC_SET(pem->data_tail, 0);
#if defined(PERF_ATTR_SIZE_VER5)
    ATOMIC_SET(pem->aux_head, 0);
    ATOMIC_SET(pem->aux_tail, 0);
#endif /* defined(PERF_ATTR_SIZE_VER5) */
}

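/*
 * Per-iteration feedback collection: stop the events, read back the
 * instruction/branch counters, parse any BTS/PT trace data, and reset the
 * events and buffers for the next run.
 */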
void arch_perfAnalyze(run_t* run) {
    if (run->global->feedback.dynFileMethod == _HF_DYNFILE_NONE) {
        return;
    }

    uint64_t instrCount = 0;
    if ((run->global->feedback.dynFileMethod & _HF_DYNFILE_INSTR_COUNT) &&
        run->arch_linux.cpuInstrFd != -1) {
        ioctl(run->arch_linux.cpuInstrFd, PERF_EVENT_IOC_DISABLE, 0);
        if (files_readFromFd(run->arch_linux.cpuInstrFd, (uint8_t*)&instrCount,
                sizeof(instrCount)) != sizeof(instrCount)) {
            PLOG_E("read(perfFd='%d') failed", run->arch_linux.cpuInstrFd);
        }
        ioctl(run->arch_linux.cpuInstrFd, PERF_EVENT_IOC_RESET, 0);
    }

    uint64_t branchCount = 0;
    if ((run->global->feedback.dynFileMethod & _HF_DYNFILE_BRANCH_COUNT) &&
        run->arch_linux.cpuBranchFd != -1) {
        ioctl(run->arch_linux.cpuBranchFd, PERF_EVENT_IOC_DISABLE, 0);
        if (files_readFromFd(run->arch_linux.cpuBranchFd, (uint8_t*)&branchCount,
                sizeof(branchCount)) != sizeof(branchCount)) {
            PLOG_E("read(perfFd='%d') failed", run->arch_linux.cpuBranchFd);
        }
        ioctl(run->arch_linux.cpuBranchFd, PERF_EVENT_IOC_RESET, 0);
    }

    if ((run->global->feedback.dynFileMethod & _HF_DYNFILE_BTS_EDGE) &&
        run->arch_linux.cpuIptBtsFd != -1) {
        ioctl(run->arch_linux.cpuIptBtsFd, PERF_EVENT_IOC_DISABLE, 0);
        arch_perfMmapParse(run);
        arch_perfMmapReset(run);
        ioctl(run->arch_linux.cpuIptBtsFd, PERF_EVENT_IOC_RESET, 0);
    }
    if ((run->global->feedback.dynFileMethod & _HF_DYNFILE_IPT_BLOCK) &&
        run->arch_linux.cpuIptBtsFd != -1) {
        ioctl(run->arch_linux.cpuIptBtsFd, PERF_EVENT_IOC_DISABLE, 0);
        arch_perfMmapParse(run);
        arch_perfMmapReset(run);
        ioctl(run->arch_linux.cpuIptBtsFd, PERF_EVENT_IOC_RESET, 0);
    }

    run->hwCnts.cpuInstrCnt = instrCount;
    run->hwCnts.cpuBranchCnt = branchCount;
}

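/*
 * One-time initialization: read the dynamic PERF_TYPE values for Intel PT and
 * Intel BTS from sysfs (if present) and initialize the Intel PT decoder.
 */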
bool arch_perfInit(honggfuzz_t* hfuzz HF_ATTR_UNUSED) {
    static char const intel_pt_path[] = "/sys/bus/event_source/devices/intel_pt/type";
    static char const intel_bts_path[] = "/sys/bus/event_source/devices/intel_bts/type";

    if (files_exists(intel_pt_path)) {
        uint8_t buf[256];
        ssize_t sz = files_readFileToBufMax(intel_pt_path, buf, sizeof(buf) - 1);
        if (sz > 0) {
            buf[sz] = '\0';
            perfIntelPtPerfType = (int32_t)strtoul((char*)buf, NULL, 10);
            LOG_D("perfIntelPtPerfType = %" PRIu32, perfIntelPtPerfType);
        }
    }

    if (files_exists(intel_bts_path)) {
        uint8_t buf[256];
        ssize_t sz = files_readFileToBufMax(intel_bts_path, buf, sizeof(buf) - 1);
        if (sz > 0) {
            buf[sz] = '\0';
            perfIntelBtsPerfType = (int32_t)strtoul((char*)buf, NULL, 10);
            LOG_D("perfIntelBtsPerfType = %" PRIu32, perfIntelBtsPerfType);
        }
    }

    perf_ptInit();

    return true;
}
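
A note on the hardware-counter path above: for _HF_DYNFILE_INSTR_COUNT, arch_perfCreate(), arch_perfEnable() and arch_perfAnalyze() together amount to a small perf_event_open(2) pattern: fill a struct perf_event_attr, open an fd, enable the event around the code under test, then read one 64-bit counter back. The following self-contained sketch is not honggfuzz code and all names in it are illustrative; it shows that pattern for the calling process, assuming a Linux host where perf events are permitted (see the kernel.perf_event_paranoid sysctl).

#include <inttypes.h>
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

/* Same kind of wrapper as in perf.c: glibc provides no perf_event_open() */
static long my_perf_event_open(
    struct perf_event_attr* attr, pid_t pid, int cpu, int group_fd, unsigned long flags) {
    return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void) {
    struct perf_event_attr pe;
    memset(&pe, 0, sizeof(pe));
    pe.size = sizeof(pe);
    pe.type = PERF_TYPE_HARDWARE;
    pe.config = PERF_COUNT_HW_INSTRUCTIONS;
    pe.disabled = 1;       /* start stopped; enabled explicitly below */
    pe.exclude_kernel = 1; /* user-space only, as in the !kernelOnly case above */
    pe.exclude_hv = 1;

    /* pid=0: this process, cpu=-1: any CPU */
    int fd = (int)my_perf_event_open(&pe, 0, -1, -1, 0);
    if (fd == -1) {
        perror("perf_event_open");
        return 1;
    }

    ioctl(fd, PERF_EVENT_IOC_RESET, 0);
    ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

    /* The code region being measured */
    volatile uint64_t sum = 0;
    for (uint64_t i = 0; i < 1000000; i++) {
        sum += i;
    }

    ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

    uint64_t instructions = 0;
    if (read(fd, &instructions, sizeof(instructions)) != (ssize_t)sizeof(instructions)) {
        perror("read");
    }
    printf("instructions retired: %" PRIu64 " (sum=%" PRIu64 ")\n", instructions, (uint64_t)sum);

    close(fd);
    return 0;
}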