"Fossies" - the Fresh Open Source Software Archive

Member "dmd2/src/druntime/import/core/sys/linux/sched.d" (20 Nov 2020, 3988 Bytes) of package /linux/misc/dmd.2.094.2.linux.tar.xz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) D source code syntax highlighting (style: standard) with prefixed line numbers and a code folding option. Alternatively, you can view or download the uninterpreted source code file here.

    1 /*******************************************************************************
    2 
    3     D binding for Linux specific scheduler control and thread spawning
    4     methods.
    5 
    6     Defines functions sched_setaffinity and sched_getaffinity and the data
    7     types they operate on, as well as clone and unshare and their related
    8     constants.
    9 
   10     Copyright:  Copyright (c) 2016 Sociomantic Labs. All rights reserved.
   11     License:    $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
   12     Authors:    Nemanja Boric
   13 
   14 *******************************************************************************/
   15 
   16 
   17 module core.sys.linux.sched;
   18 
   19 import core.bitop : popcnt;
   20 import core.stdc.stdlib : malloc, free;
   21 import core.sys.posix.sched;
   22 import core.sys.posix.config;
   23 import core.sys.posix.sys.types;
   24 
   25 version (linux):
   26 extern (C):
   27 @nogc:
   28 nothrow:
   29 @system:
   30 
   31 
   32 private // helpers
   33 {
   34 
   35     /* Size definition for CPU sets.  */
   36     enum
   37     {
   38         __CPU_SETSIZE = 1024,
   39         __NCPUBITS  = 8 * cpu_mask.sizeof,
   40     }
   41 
   42     /* Macros */
   43 
   44     /* Basic access functions.  */
   45     size_t __CPUELT(size_t cpu) pure
   46     {
   47         return cpu / __NCPUBITS;
   48     }
   49     cpu_mask __CPUMASK(size_t cpu) pure
   50     {
   51         return 1UL << (cpu % __NCPUBITS);
   52     }
   53 
   54     cpu_set_t* __CPU_ALLOC(size_t count)
   55     {
   56         return cast(cpu_set_t*) malloc(__CPU_ALLOC_SIZE(count));
   57     }
   58 
   59     size_t __CPU_ALLOC_SIZE(size_t count) pure
   60     {
   61         return ((count + __NCPUBITS - 1) / __NCPUBITS) * cpu_mask.sizeof;
   62     }
   63 
   64     void __CPU_FREE(cpu_set_t* set)
   65     {
   66         free(cast(void*) set);
   67     }
   68 
   69     cpu_mask __CPU_SET_S(size_t cpu, size_t setsize, cpu_set_t* cpusetp) pure
   70     {
   71         if (cpu < 8 * setsize)
   72         {
   73             cpusetp.__bits[__CPUELT(cpu)] |= __CPUMASK(cpu);
   74             return __CPUMASK(cpu);
   75         }
   76 
   77         return 0;
   78     }
   79 
   80     bool __CPU_ISSET_S(size_t cpu, size_t setsize, cpu_set_t* cpusetp) pure
   81     {
   82         if (cpu < 8 * setsize)
   83             return (cpusetp.__bits[__CPUELT(cpu)] & __CPUMASK(cpu)) != 0;
   84         return false;
   85     }
   86 
   87     int __CPU_COUNT_S(size_t setsize, cpu_set_t* cpusetp) pure
   88     {
   89         int s = 0;
   90         foreach (i; cpusetp.__bits[0 .. (setsize / cpu_mask.sizeof)])
   91             s += popcnt(i);
   92         return s;
   93     }
   94 }
   95 
   96 /// Type for array elements in 'cpu_set_t'.
   97 alias c_ulong cpu_mask;
   98 
   99 /// Data structure to describe CPU mask.
  100 struct cpu_set_t
  101 {
  102     cpu_mask[__CPU_SETSIZE / __NCPUBITS] __bits;
  103 }
  104 
/// Access macros for 'cpu_set' (only a subset of the glibc macros is provided)
  106 
/// Allocates a `cpu_set_t` able to hold at least `count` CPUs
/// (glibc `CPU_ALLOC`). Release with `CPU_FREE`; may return null on
/// allocation failure (plain `malloc` underneath).
cpu_set_t* CPU_ALLOC(size_t count)
{
    return __CPU_ALLOC(count);
}
  111 
/// Returns the size in bytes needed for a dynamically allocated set
/// holding `count` CPUs (glibc `CPU_ALLOC_SIZE`): `count` bits rounded
/// up to a whole number of `cpu_mask` words.
size_t CPU_ALLOC_SIZE(size_t count) pure
{
    return __CPU_ALLOC_SIZE(count);
}
  116 
/// Releases a set previously obtained from `CPU_ALLOC` (glibc `CPU_FREE`).
void CPU_FREE(cpu_set_t* set)
{
    __CPU_FREE(set);
}
  121 
/// Adds `cpu` to the fixed-size set (glibc `CPU_SET`). Returns the bit
/// mask for `cpu`, or 0 when `cpu` does not fit inside `cpu_set_t`.
cpu_mask CPU_SET(size_t cpu, cpu_set_t* cpusetp) pure
{
    return __CPU_SET_S(cpu, cpu_set_t.sizeof, cpusetp);
}
  126 
/// Tests whether `cpu` is a member of the fixed-size set
/// (glibc `CPU_ISSET`); false for CPUs beyond the capacity of `cpu_set_t`.
bool CPU_ISSET(size_t cpu, cpu_set_t* cpusetp) pure
{
    return __CPU_ISSET_S(cpu, cpu_set_t.sizeof, cpusetp);
}
  131 
/// Number of CPUs contained in the fixed-size set (glibc `CPU_COUNT`).
int CPU_COUNT(cpu_set_t* cpusetp) pure
{
    return __CPU_COUNT_S(cpu_set_t.sizeof, cpusetp);
}
  136 
/// Number of CPUs contained in a dynamically sized set of `setsize`
/// bytes (glibc `CPU_COUNT_S`); `setsize` is typically the value
/// returned by `CPU_ALLOC_SIZE`.
int CPU_COUNT_S(size_t setsize, cpu_set_t* cpusetp) pure
{
    return __CPU_COUNT_S(setsize, cpusetp);
}
  141 
/* Scheduler control functions */

/// Sets the CPU affinity mask of the task identified by `pid`
/// (0 = calling thread) from the first `cpusetsize` bytes of `*mask`.
/// Returns 0 on success, -1 with errno set on failure; see sched_setaffinity(2).
int sched_setaffinity(pid_t pid, size_t cpusetsize, cpu_set_t *mask);
/// Retrieves the CPU affinity mask of `pid` into `*mask` (`cpusetsize`
/// bytes). Returns 0 on success, -1 with errno set on failure;
/// see sched_getaffinity(2).
int sched_getaffinity(pid_t pid, size_t cpusetsize, cpu_set_t *mask);
  145 
/* Clone and related functions and constants */

/// Creates a child task executing `fn(arg)` on the stack `child_stack`,
/// sharing execution context with the parent according to the CLONE_*
/// bits in `flags`. Returns the child's thread ID, or -1 with errno set
/// on failure; see clone(2).
int clone(int function(void*), void* child_stack, int flags, void* arg, ...);
/// Disassociates the parts of the calling task's execution context
/// selected by the CLONE_* bits in `flags`. Returns 0 on success, -1
/// with errno set on failure; see unshare(2).
int unshare(int flags) @trusted;
  149 
version (CRuntime_Glibc)
{
    /* Determine CPU on which the calling thread is running.
       Returns the CPU number, or -1 with errno set on failure;
       see sched_getcpu(3). Glibc-only extension. */
    int sched_getcpu();
}
  155 
  156 enum CLONE_FILES = 0x400;
  157 enum CLONE_FS = 0x200;
  158 enum CLONE_NEWCGROUP = 0x2000000;
  159 enum CLONE_NEWIPC = 0x8000000;
  160 enum CLONE_NEWNET = 0x40000000;
  161 enum CLONE_NEWNS = 0x20000;
  162 enum CLONE_NEWPID = 0x20000000;
  163 enum CLONE_NEWUSER = 0x10000000;
  164 enum CLONE_NEWUTS = 0x4000000;
  165 enum CLONE_SIGHAND = 0x800;
  166 enum CLONE_SYSVSEM = 0x40000;
  167 enum CLONE_THREAD = 0x10000;
  168 enum CLONE_VFORK = 0x4000;
  169 enum CLONE_VM = 0x100;