"Fossies" - the Fresh Open Source Software Archive

Member "lttng-modules-2.11.0-rc5/lttng-context-callstack-legacy-impl.h" (11 Jun 2019, 6110 Bytes) of package /linux/misc/lttng-modules-2.11.0-rc5.tar.bz2:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and code folding option. Alternatively you can here view or download the uninterpreted source code file. For more information about "lttng-context-callstack-legacy-impl.h" see the Fossies "Dox" file reference documentation.

/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
 *
 * lttng-context-callstack-legacy-impl.h
 *
 * LTTng callstack event context, legacy implementation. Targets
 * kernels and architectures not yet using the stacktrace common
 * infrastructure introduced in the upstream Linux kernel by commit
 * 214d8ca6ee "stacktrace: Provide common infrastructure" (merged in
 * Linux 5.2, then gradually introduced within architectures).
 *
 * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2014 Francis Giraldeau <francis.giraldeau@gmail.com>
 */

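/* Maximum number of stack entries captured for a single call stack. */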
#define MAX_ENTRIES 128

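/*
 * Callstack context modes. The enum values index the cs_types[]
 * descriptor array below.
 */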
enum lttng_cs_ctx_modes {
	CALLSTACK_KERNEL = 0,
	CALLSTACK_USER = 1,
	NR_CALLSTACK_MODES,
};

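/*
 * Per-CPU call stack storage: one stack_trace and its backing
 * entries[] array per ring buffer nesting level.
 */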
struct lttng_cs_dispatch {
	struct stack_trace stack_trace;
	unsigned long entries[MAX_ENTRIES];
};

struct lttng_cs {
	struct lttng_cs_dispatch dispatch[RING_BUFFER_MAX_NESTING];
};

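/*
 * Private data of a callstack context field: the per-CPU call stack
 * storage and the mode (kernel or user) of this field.
 */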
struct field_data {
	struct lttng_cs __percpu *cs_percpu;
	enum lttng_cs_ctx_modes mode;
};

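/*
 * Per-mode descriptor: context field name and the kernel function
 * used to save the stack, resolved at runtime by init_type().
 */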
struct lttng_cs_type {
	const char *name;
	const char *save_func_name;
	void (*save_func)(struct stack_trace *trace);
};

static struct lttng_cs_type cs_types[] = {
	{
		.name		= "callstack_kernel",
		.save_func_name	= "save_stack_trace",
		.save_func	= NULL,
	},
	{
		.name		= "callstack_user",
		.save_func_name	= "save_stack_trace_user",
		.save_func	= NULL,
	},
};

static
const char *lttng_cs_ctx_mode_name(enum lttng_cs_ctx_modes mode)
{
	return cs_types[mode].name;
}

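/*
 * Resolve the stack-saving function for the given mode through a
 * kallsyms lookup the first time it is needed.
 */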
static
int init_type(enum lttng_cs_ctx_modes mode)
{
	unsigned long func;

	if (cs_types[mode].save_func)
		return 0;
	func = kallsyms_lookup_funcptr(cs_types[mode].save_func_name);
	if (!func) {
		printk(KERN_WARNING "LTTng: symbol lookup failed: %s\n",
				cs_types[mode].save_func_name);
		return -EINVAL;
	}
	cs_types[mode].save_func = (void *) func;
	return 0;
}

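/*
 * Point each per-CPU, per-nesting-level stack_trace at its backing
 * entries[] array and set its capacity.
 */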
static
void lttng_cs_set_init(struct lttng_cs __percpu *cs_set)
{
	int cpu, i;

	for_each_possible_cpu(cpu) {
		struct lttng_cs *cs;

		cs = per_cpu_ptr(cs_set, cpu);
		for (i = 0; i < RING_BUFFER_MAX_NESTING; i++) {
			struct lttng_cs_dispatch *dispatch;

			dispatch = &cs->dispatch[i];
			dispatch->stack_trace.entries = dispatch->entries;
			dispatch->stack_trace.max_entries = MAX_ENTRIES;
		}
	}
}

/* Keep track of nesting inside userspace callstack context code */
DEFINE_PER_CPU(int, callstack_user_nesting);

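/*
 * Return the stack_trace slot for the current CPU and ring buffer
 * nesting level, or NULL when the call stack must not be recorded.
 */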
static
struct stack_trace *stack_trace_context(struct lttng_ctx_field *field,
					struct lib_ring_buffer_ctx *ctx)
{
	int buffer_nesting, cs_user_nesting;
	struct lttng_cs *cs;
	struct field_data *fdata = field->priv;

	/*
	 * Do not gather the userspace callstack context when the event was
	 * triggered by the userspace callstack context saving mechanism.
	 */
	cs_user_nesting = per_cpu(callstack_user_nesting, ctx->cpu);

	if (fdata->mode == CALLSTACK_USER && cs_user_nesting >= 1)
		return NULL;

	/*
	 * get_cpu() is not required, preemption is already
	 * disabled while event is written.
	 *
	 * max nesting is checked in lib_ring_buffer_get_cpu().
	 * Check it again as a safety net.
	 */
	cs = per_cpu_ptr(fdata->cs_percpu, ctx->cpu);
	buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
	if (buffer_nesting >= RING_BUFFER_MAX_NESTING)
		return NULL;

	return &cs->dispatch[buffer_nesting].stack_trace;
}

/*
 * In order to reserve the correct size, the callstack is computed. The
 * resulting callstack is saved to be accessed in the record step.
 */
static
size_t lttng_callstack_get_size(size_t offset, struct lttng_ctx_field *field,
				struct lib_ring_buffer_ctx *ctx,
				struct lttng_channel *chan)
{
	struct stack_trace *trace;
	struct field_data *fdata = field->priv;
	size_t orig_offset = offset;

	/* do not write data if no space is available */
	trace = stack_trace_context(field, ctx);
	if (unlikely(!trace)) {
		offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
		offset += sizeof(unsigned int);
		offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
		return offset - orig_offset;
	}

	/* reset stack trace, no need to clear memory */
	trace->nr_entries = 0;

	if (fdata->mode == CALLSTACK_USER)
		++per_cpu(callstack_user_nesting, ctx->cpu);

	/* do the real work and reserve space */
	cs_types[fdata->mode].save_func(trace);

	if (fdata->mode == CALLSTACK_USER)
		per_cpu(callstack_user_nesting, ctx->cpu)--;

	/*
	 * Remove final ULONG_MAX delimiter. If we cannot find it, add
	 * our own marker to show that the stack is incomplete. This is
	 * more compact for a trace.
	 */
	if (trace->nr_entries > 0
			&& trace->entries[trace->nr_entries - 1] == ULONG_MAX) {
		trace->nr_entries--;
	}
	offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
	offset += sizeof(unsigned int);
	offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
	offset += sizeof(unsigned long) * trace->nr_entries;
	/* Add our own ULONG_MAX delimiter to show incomplete stack. */
	if (trace->nr_entries == trace->max_entries)
		offset += sizeof(unsigned long);
	return offset - orig_offset;
}

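/*
 * Write out the call stack saved by lttng_callstack_get_size(): an
 * unsigned int entry count, alignment padding, then the entries
 * array, with a ULONG_MAX delimiter appended when the stack was
 * truncated at max_entries.
 */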
static
void lttng_callstack_record(struct lttng_ctx_field *field,
			struct lib_ring_buffer_ctx *ctx,
			struct lttng_channel *chan)
{
	struct stack_trace *trace = stack_trace_context(field, ctx);
	unsigned int nr_seq_entries;

	if (unlikely(!trace)) {
		nr_seq_entries = 0;
		lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
		chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
		lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
		return;
	}
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
	nr_seq_entries = trace->nr_entries;
	if (trace->nr_entries == trace->max_entries)
		nr_seq_entries++;
	chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
	chan->ops->event_write(ctx, trace->entries,
			sizeof(unsigned long) * trace->nr_entries);
	/* Add our own ULONG_MAX delimiter to show incomplete stack. */
	if (trace->nr_entries == trace->max_entries) {
		unsigned long delim = ULONG_MAX;

		chan->ops->event_write(ctx, &delim, sizeof(unsigned long));
	}
}