"Fossies" - the Fresh Open Source Software Archive

redis-5.0.6/deps/jemalloc/include/jemalloc/internal/mutex.h

#ifndef JEMALLOC_INTERNAL_MUTEX_H
#define JEMALLOC_INTERNAL_MUTEX_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/witness.h"

typedef enum {
    /* Can only acquire one mutex of a given witness rank at a time. */
    malloc_mutex_rank_exclusive,
    /*
     * Can acquire multiple mutexes of the same witness rank, but in
     * address-ascending order only.
     */
    malloc_mutex_address_ordered
} malloc_mutex_lock_order_t;
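
/*
 * Illustrative sketch only (not part of the original header): how the
 * address-ordered rank is intended to be used.  Two mutexes created with
 * malloc_mutex_address_ordered and the same witness rank may be held
 * simultaneously, provided they are acquired in ascending address order.
 * The mutex names m1 and m2 are hypothetical.
 *
 *   malloc_mutex_t *lo = (m1 < m2) ? m1 : m2;
 *   malloc_mutex_t *hi = (m1 < m2) ? m2 : m1;
 *   malloc_mutex_lock(tsdn, lo);
 *   malloc_mutex_lock(tsdn, hi);
 *   ...critical section touching both...
 *   malloc_mutex_unlock(tsdn, hi);
 *   malloc_mutex_unlock(tsdn, lo);
 */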

typedef struct malloc_mutex_s malloc_mutex_t;
struct malloc_mutex_s {
    union {
        struct {
            /*
             * prof_data is defined first to reduce cacheline
             * bouncing: the data is not touched by the mutex holder
             * during unlocking, while it might be modified by
             * contenders.  Having it before the mutex itself could
             * avoid prefetching a modified cacheline (for the
             * unlocking thread).
             */
            mutex_prof_data_t   prof_data;
#ifdef _WIN32
#  if _WIN32_WINNT >= 0x0600
            SRWLOCK             lock;
#  else
            CRITICAL_SECTION    lock;
#  endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
            os_unfair_lock      lock;
#elif (defined(JEMALLOC_OSSPIN))
            OSSpinLock      lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
            pthread_mutex_t     lock;
            malloc_mutex_t      *postponed_next;
#else
            pthread_mutex_t     lock;
#endif
        };
        /*
         * We only touch witness when configured w/ debug.  However we
         * keep the field in a union when !debug so that we don't have
         * to pollute the code base with #ifdefs, while avoiding the
         * memory cost.
         */
#if !defined(JEMALLOC_DEBUG)
        witness_t           witness;
        malloc_mutex_lock_order_t   lock_order;
#endif
    };

#if defined(JEMALLOC_DEBUG)
    witness_t           witness;
    malloc_mutex_lock_order_t   lock_order;
#endif
};

/*
 * Based on benchmark results, a fixed spin with this number of retries works
 * well for our critical sections.
 */
#define MALLOC_MUTEX_MAX_SPIN 250

#ifdef _WIN32
#  if _WIN32_WINNT >= 0x0600
#    define MALLOC_MUTEX_LOCK(m)    AcquireSRWLockExclusive(&(m)->lock)
#    define MALLOC_MUTEX_UNLOCK(m)  ReleaseSRWLockExclusive(&(m)->lock)
#    define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock))
#  else
#    define MALLOC_MUTEX_LOCK(m)    EnterCriticalSection(&(m)->lock)
#    define MALLOC_MUTEX_UNLOCK(m)  LeaveCriticalSection(&(m)->lock)
#    define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock))
#  endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
#    define MALLOC_MUTEX_LOCK(m)    os_unfair_lock_lock(&(m)->lock)
#    define MALLOC_MUTEX_UNLOCK(m)  os_unfair_lock_unlock(&(m)->lock)
#    define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock))
#elif (defined(JEMALLOC_OSSPIN))
#    define MALLOC_MUTEX_LOCK(m)    OSSpinLockLock(&(m)->lock)
#    define MALLOC_MUTEX_UNLOCK(m)  OSSpinLockUnlock(&(m)->lock)
#    define MALLOC_MUTEX_TRYLOCK(m) (!OSSpinLockTry(&(m)->lock))
#else
#    define MALLOC_MUTEX_LOCK(m)    pthread_mutex_lock(&(m)->lock)
#    define MALLOC_MUTEX_UNLOCK(m)  pthread_mutex_unlock(&(m)->lock)
#    define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0)
#endif
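
/*
 * Illustrative sketch only (not part of the original header): the macros
 * above give a uniform lock / unlock / trylock interface across platforms,
 * with the trylock macro returning true on *failure*.  A hypothetical
 * spin-then-block acquisition built from them (not necessarily how mutex.c
 * implements malloc_mutex_lock_slow) could look like:
 *
 *   static void
 *   example_lock_with_spin(malloc_mutex_t *m) {
 *       for (int i = 0; i < MALLOC_MUTEX_MAX_SPIN; i++) {
 *           if (!MALLOC_MUTEX_TRYLOCK(m)) {
 *               return;              // Trylock succeeded (macro is false).
 *           }
 *       }
 *       MALLOC_MUTEX_LOCK(m);        // Give up spinning and block.
 *   }
 */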

#define LOCK_PROF_DATA_INITIALIZER                  \
    {NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0,     \
        ATOMIC_INIT(0), 0, NULL, 0}

#ifdef _WIN32
#  define MALLOC_MUTEX_INITIALIZER
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
#  define MALLOC_MUTEX_INITIALIZER                  \
     {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT}},      \
      WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_OSSPIN))
#  define MALLOC_MUTEX_INITIALIZER                  \
     {{{LOCK_PROF_DATA_INITIALIZER, 0}},                \
      WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
#  define MALLOC_MUTEX_INITIALIZER                  \
     {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL}},  \
      WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#else
#    define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
#    define MALLOC_MUTEX_INITIALIZER                    \
       {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER}},  \
        WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#endif
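
/*
 * Illustrative sketch only (not part of the original header): on the
 * non-Windows configurations above a mutex can be statically initialized;
 * on _WIN32 the initializer expands to nothing, so such a mutex presumably
 * still needs malloc_mutex_init() at runtime.  The variable name is
 * hypothetical.
 *
 *   static malloc_mutex_t example_mtx = MALLOC_MUTEX_INITIALIZER;
 */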

#ifdef JEMALLOC_LAZY_LOCK
extern bool isthreaded;
#else
#  undef isthreaded /* Undo private_namespace.h definition. */
#  define isthreaded true
#endif

bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
    witness_rank_t rank, malloc_mutex_lock_order_t lock_order);
void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
bool malloc_mutex_boot(void);
void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex);
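
/*
 * Illustrative sketch only (not part of the original header): the prefork /
 * postfork functions are meant to bracket fork(2) so that no mutex is held
 * across the fork.  A hedged sketch of the intended call order, with a
 * hypothetical mutex name:
 *
 *   malloc_mutex_prefork(tsdn, &example_mtx);            // before fork()
 *   pid_t pid = fork();
 *   if (pid == 0) {
 *       malloc_mutex_postfork_child(tsdn, &example_mtx);
 *   } else {
 *       malloc_mutex_postfork_parent(tsdn, &example_mtx);
 *   }
 */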

void malloc_mutex_lock_slow(malloc_mutex_t *mutex);

static inline void
malloc_mutex_lock_final(malloc_mutex_t *mutex) {
    MALLOC_MUTEX_LOCK(mutex);
}

static inline bool
malloc_mutex_trylock_final(malloc_mutex_t *mutex) {
    return MALLOC_MUTEX_TRYLOCK(mutex);
}

static inline void
mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) {
    if (config_stats) {
        mutex_prof_data_t *data = &mutex->prof_data;
        data->n_lock_ops++;
        if (data->prev_owner != tsdn) {
            data->prev_owner = tsdn;
            data->n_owner_switches++;
        }
    }
}

/* Trylock: return false if the lock is successfully acquired. */
static inline bool
malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
    witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
    if (isthreaded) {
        if (malloc_mutex_trylock_final(mutex)) {
            return true;
        }
        mutex_owner_stats_update(tsdn, mutex);
    }
    witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);

    return false;
}
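
/*
 * Illustrative sketch only (not part of the original header): note the
 * inverted return convention above -- malloc_mutex_trylock() returns false
 * on success.  A hypothetical caller therefore tests it like this:
 *
 *   if (malloc_mutex_trylock(tsdn, &example_mtx)) {
 *       // Lock not acquired; fall back to other work or block later.
 *   } else {
 *       // Lock held; do the critical section, then release.
 *       malloc_mutex_unlock(tsdn, &example_mtx);
 *   }
 */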

/* Aggregate lock prof data. */
static inline void
malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) {
    nstime_add(&sum->tot_wait_time, &data->tot_wait_time);
    if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) {
        nstime_copy(&sum->max_wait_time, &data->max_wait_time);
    }

    sum->n_wait_times += data->n_wait_times;
    sum->n_spin_acquired += data->n_spin_acquired;

    if (sum->max_n_thds < data->max_n_thds) {
        sum->max_n_thds = data->max_n_thds;
    }
    uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds,
        ATOMIC_RELAXED);
    uint32_t new_n_waiting_thds = cur_n_waiting_thds + atomic_load_u32(
        &data->n_waiting_thds, ATOMIC_RELAXED);
    atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds,
        ATOMIC_RELAXED);
    sum->n_owner_switches += data->n_owner_switches;
    sum->n_lock_ops += data->n_lock_ops;
}

static inline void
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
    witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
    if (isthreaded) {
        if (malloc_mutex_trylock_final(mutex)) {
            malloc_mutex_lock_slow(mutex);
        }
        mutex_owner_stats_update(tsdn, mutex);
    }
    witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}

static inline void
malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
    witness_unlock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
    if (isthreaded) {
        MALLOC_MUTEX_UNLOCK(mutex);
    }
}
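
/*
 * Illustrative sketch only (not part of the original header): typical
 * caller-side usage of the lock/unlock pair above.  The mutex name and
 * witness name are hypothetical; the init/lock/unlock functions and
 * WITNESS_RANK_OMIT are the ones used in this header.
 *
 *   static malloc_mutex_t example_mtx;
 *
 *   if (malloc_mutex_init(&example_mtx, "example", WITNESS_RANK_OMIT,
 *       malloc_mutex_rank_exclusive)) {
 *       // Initialization failed (returns true on error).
 *   }
 *   malloc_mutex_lock(tsdn, &example_mtx);
 *   // ...critical section...
 *   malloc_mutex_unlock(tsdn, &example_mtx);
 */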

static inline void
malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
    witness_assert_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}

static inline void
malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
    witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}

/* Copy the prof data from mutex for processing. */
static inline void
malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
    malloc_mutex_t *mutex) {
    mutex_prof_data_t *source = &mutex->prof_data;
    /* Can only read holding the mutex. */
    malloc_mutex_assert_owner(tsdn, mutex);

    /*
     * Not *really* allowed (we shouldn't be doing non-atomic loads of
     * atomic data), but the mutex protection makes this safe, and writing
     * a member-for-member copy is tedious for this situation.
     */
    *data = *source;
    /* n_waiting_thds is not reported (modified w/o locking). */
    atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
}
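
/*
 * Illustrative sketch only (not part of the original header): reading and
 * aggregating profiling data from several mutexes.  Each mutex must be held
 * while its data is copied out, per the assertion above; the mutex and
 * variable names are hypothetical.
 *
 *   mutex_prof_data_t sum, snap;
 *   memset(&sum, 0, sizeof(sum));
 *
 *   malloc_mutex_lock(tsdn, &mtx_a);
 *   malloc_mutex_prof_read(tsdn, &snap, &mtx_a);
 *   malloc_mutex_unlock(tsdn, &mtx_a);
 *   malloc_mutex_prof_merge(&sum, &snap);
 *
 *   malloc_mutex_lock(tsdn, &mtx_b);
 *   malloc_mutex_prof_read(tsdn, &snap, &mtx_b);
 *   malloc_mutex_unlock(tsdn, &mtx_b);
 *   malloc_mutex_prof_merge(&sum, &snap);
 */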

#endif /* JEMALLOC_INTERNAL_MUTEX_H */