"Fossies" - the Fresh Open Source Software Archive  

Source code changes of the file "src/lib/ringbuffer/ring_buffer_frontend.c" between
lttng-modules-2.13.2.tar.bz2 and lttng-modules-2.13.3.tar.bz2

About: LTTng-modules (Linux Trace Toolkit Next Generation) is a system software package for correlated tracing of the Linux kernel, applications, and libraries. This package contains the Linux kernel tracing modules.
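
What changed between 2.13.2 and 2.13.3 in this file is essentially one plumbing change: the sub-buffer switch and delivery helpers no longer receive a bare "u64 tsc" timestamp, but a pointer to the ring-buffer context (struct lttng_kernel_ring_buffer_ctx), whose private part carries the timestamp plus the records-lost counters snapshotted before a sub-buffer switch. The sketch below only illustrates the shape of that change; it uses simplified stand-in types and hypothetical helper names, not the real LTTng-modules structures (only field names visible in the diff below are kept).

#include <stdint.h>
#include <stdio.h>

struct rb_ctx_private {                  /* stand-in for the ctx "priv" part */
        uint64_t tsc;                    /* timestamp read at reserve/switch time */
        unsigned long records_lost_full; /* counters snapshotted before a switch */
        unsigned long records_lost_wrap;
        unsigned long records_lost_big;
};

struct rb_ctx {                          /* stand-in for lttng_kernel_ring_buffer_ctx */
        struct rb_ctx_private priv;
};

/* 2.13.2 style: the callee only ever sees the timestamp. */
static void buffer_begin_old(uint64_t tsc, unsigned long idx)
{
        printf("begin subbuf %lu at tsc %llu\n", idx, (unsigned long long)tsc);
}

/* 2.13.3 style: the callee can also read the lost-records counters. */
static void buffer_begin_new(const struct rb_ctx *ctx, unsigned long idx)
{
        printf("begin subbuf %lu at tsc %llu (lost: full=%lu wrap=%lu big=%lu)\n",
               idx, (unsigned long long)ctx->priv.tsc,
               ctx->priv.records_lost_full, ctx->priv.records_lost_wrap,
               ctx->priv.records_lost_big);
}

int main(void)
{
        struct rb_ctx ctx = { .priv = { .tsc = 12345, .records_lost_full = 1 } };

        buffer_begin_old(ctx.priv.tsc, 0);
        buffer_begin_new(&ctx, 0);
        return 0;
}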

ring_buffer_frontend.c (lttng-modules-2.13.2.tar.bz2) vs. ring_buffer_frontend.c (lttng-modules-2.13.3.tar.bz2)
skipping to change at line 1585 (2.13.2) / line 1585 (2.13.3)

 /*
  * lib_ring_buffer_switch_old_start: Populate old subbuffer header.
  *
  * Only executed when the buffer is finalized, in SWITCH_FLUSH.
  */
 static
 void lib_ring_buffer_switch_old_start(struct lttng_kernel_ring_buffer *buf,
                 struct lttng_kernel_ring_buffer_channel *chan,
                 struct switch_offsets *offsets,
-                u64 tsc)
+                const struct lttng_kernel_ring_buffer_ctx *ctx)
 {
         const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
         unsigned long oldidx = subbuf_index(offsets->old, chan);
         unsigned long commit_count;
         struct commit_counters_hot *cc_hot;
-        config->cb.buffer_begin(buf, tsc, oldidx);
+        config->cb.buffer_begin(buf, ctx->priv.tsc, oldidx);
         /*
          * Order all writes to buffer before the commit count update that will
          * determine that the subbuffer is full.
          */
         if (config->ipi == RING_BUFFER_IPI_BARRIER) {
                 /*
                  * Must write slot data before incrementing commit count. This
                  * compiler barrier is upgraded into a smp_mb() by the IPI sent
                  * by get_subbuf().
                  */
                 barrier();
         } else
                 smp_wmb();
         cc_hot = &buf->commit_hot[oldidx];
         v_add(config, config->cb.subbuffer_header_size(), &cc_hot->cc);
         commit_count = v_read(config, &cc_hot->cc);
         /* Check if the written buffer has to be delivered */
         lib_ring_buffer_check_deliver(config, buf, chan, offsets->old,
-                        commit_count, oldidx, tsc);
+                        commit_count, oldidx, ctx);
         lib_ring_buffer_write_commit_counter(config, buf, chan,
                         offsets->old + config->cb.subbuffer_header_size(),
                         commit_count, cc_hot);
 }

 /*
  * lib_ring_buffer_switch_old_end: switch old subbuffer
  *
  * Note : offset_old should never be 0 here. It is ok, because we never perform
  * buffer switch on an empty subbuffer in SWITCH_ACTIVE mode. The caller
  * increments the offset_old value when doing a SWITCH_FLUSH on an empty
  * subbuffer.
  */
 static
 void lib_ring_buffer_switch_old_end(struct lttng_kernel_ring_buffer *buf,
                 struct lttng_kernel_ring_buffer_channel *chan,
                 struct switch_offsets *offsets,
-                u64 tsc)
+                const struct lttng_kernel_ring_buffer_ctx *ctx)
 {
         const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
         unsigned long oldidx = subbuf_index(offsets->old - 1, chan);
         unsigned long commit_count, padding_size, data_size;
         struct commit_counters_hot *cc_hot;
         u64 *ts_end;
         data_size = subbuf_offset(offsets->old - 1, chan) + 1;
         padding_size = chan->backend.subbuf_size - data_size;
         subbuffer_set_data_size(config, &buf->backend, oldidx, data_size);
         ts_end = &buf->ts_end[oldidx];
         /*
          * This is the last space reservation in that sub-buffer before
          * it gets delivered. This provides exclusive access to write to
          * this sub-buffer's ts_end. There are also no concurrent
          * readers of that ts_end because delivery of that sub-buffer is
          * postponed until the commit counter is incremented for the
          * current space reservation.
          */
-        *ts_end = tsc;
+        *ts_end = ctx->priv.tsc;
         /*
          * Order all writes to buffer and store to ts_end before the commit
          * count update that will determine that the subbuffer is full.
          */
         if (config->ipi == RING_BUFFER_IPI_BARRIER) {
                 /*
                  * Must write slot data before incrementing commit count. This
                  * compiler barrier is upgraded into a smp_mb() by the IPI sent
                  * by get_subbuf().
                  */
                 barrier();
         } else
                 smp_wmb();
         cc_hot = &buf->commit_hot[oldidx];
         v_add(config, padding_size, &cc_hot->cc);
         commit_count = v_read(config, &cc_hot->cc);
         lib_ring_buffer_check_deliver(config, buf, chan, offsets->old - 1,
-                        commit_count, oldidx, tsc);
+                        commit_count, oldidx, ctx);
         lib_ring_buffer_write_commit_counter(config, buf, chan,
                         offsets->old + padding_size, commit_count,
                         cc_hot);
 }

 /*
  * lib_ring_buffer_switch_new_start: Populate new subbuffer.
  *
  * This code can be executed unordered : writers may already have written to the
  * sub-buffer before this code gets executed, caution. The commit makes sure
  * that this code is executed before the deliver of this sub-buffer.
  */
 static
 void lib_ring_buffer_switch_new_start(struct lttng_kernel_ring_buffer *buf,
                 struct lttng_kernel_ring_buffer_channel *chan,
                 struct switch_offsets *offsets,
-                u64 tsc)
+                const struct lttng_kernel_ring_buffer_ctx *ctx)
 {
         const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
         unsigned long beginidx = subbuf_index(offsets->begin, chan);
         unsigned long commit_count;
         struct commit_counters_hot *cc_hot;
-        config->cb.buffer_begin(buf, tsc, beginidx);
+        config->cb.buffer_begin(buf, ctx->priv.tsc, beginidx);
         /*
          * Order all writes to buffer before the commit count update that will
          * determine that the subbuffer is full.
          */
         if (config->ipi == RING_BUFFER_IPI_BARRIER) {
                 /*
                  * Must write slot data before incrementing commit count. This
                  * compiler barrier is upgraded into a smp_mb() by the IPI sent
                  * by get_subbuf().
                  */
                 barrier();
         } else
                 smp_wmb();
         cc_hot = &buf->commit_hot[beginidx];
         v_add(config, config->cb.subbuffer_header_size(), &cc_hot->cc);
         commit_count = v_read(config, &cc_hot->cc);
         /* Check if the written buffer has to be delivered */
         lib_ring_buffer_check_deliver(config, buf, chan, offsets->begin,
-                        commit_count, beginidx, tsc);
+                        commit_count, beginidx, ctx);
         lib_ring_buffer_write_commit_counter(config, buf, chan,
                         offsets->begin + config->cb.subbuffer_header_size(),
                         commit_count, cc_hot);
 }

 /*
  * lib_ring_buffer_switch_new_end: finish switching current subbuffer
  *
  * Calls subbuffer_set_data_size() to set the data size of the current
  * sub-buffer. We do not need to perform check_deliver nor commit here,
  * since this task will be done by the "commit" of the event for which
  * we are currently doing the space reservation.
  */
 static
 void lib_ring_buffer_switch_new_end(struct lttng_kernel_ring_buffer *buf,
                 struct lttng_kernel_ring_buffer_channel *chan,
                 struct switch_offsets *offsets,
-                u64 tsc)
+                const struct lttng_kernel_ring_buffer_ctx *ctx)
 {
         const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
         unsigned long endidx, data_size;
         u64 *ts_end;
         endidx = subbuf_index(offsets->end - 1, chan);
         data_size = subbuf_offset(offsets->end - 1, chan) + 1;
         subbuffer_set_data_size(config, &buf->backend, endidx, data_size);
         ts_end = &buf->ts_end[endidx];
         /*
          * This is the last space reservation in that sub-buffer before
          * it gets delivered. This provides exclusive access to write to
          * this sub-buffer's ts_end. There are also no concurrent
          * readers of that ts_end because delivery of that sub-buffer is
          * postponed until the commit counter is incremented for the
          * current space reservation.
          */
-        *ts_end = tsc;
+        *ts_end = ctx->priv.tsc;
 }

 /*
  * Returns :
  * 0 if ok
  * !0 if execution must be aborted.
  */
 static
 int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
                 struct lttng_kernel_ring_buffer *buf,
                 struct lttng_kernel_ring_buffer_channel *chan,
                 struct switch_offsets *offsets,
-                u64 *tsc)
+                struct lttng_kernel_ring_buffer_ctx *ctx)
 {
         const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
         unsigned long off, reserve_commit_diff;
         offsets->begin = v_read(config, &buf->offset);
         offsets->old = offsets->begin;
         offsets->switch_old_start = 0;
         off = subbuf_offset(offsets->begin, chan);
-        *tsc = config->cb.ring_buffer_clock_read(chan);
+        ctx->priv.tsc = config->cb.ring_buffer_clock_read(chan);
         /*
          * Ensure we flush the header of an empty subbuffer when doing the
          * finalize (SWITCH_FLUSH). This ensures that we end up knowing the
          * total data gathering duration even if there were no records saved
          * after the last buffer switch.
          * In SWITCH_ACTIVE mode, switch the buffer when it contains events.
          * SWITCH_ACTIVE only flushes the current subbuffer, dealing with end of
          * subbuffer header as appropriate.
          * The next record that reserves space will be responsible for
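
All four switch helpers in the hunk above follow the same publication pattern that their comments describe: write the sub-buffer header (and, for the old-end case, ts_end), order those writes before the commit-count update with barrier()/smp_wmb() depending on config->ipi, then add to the hot commit counter and run the deliver check. A minimal user-space reduction of that pattern is sketched below; it replaces the kernel primitives with C11 atomics and uses hypothetical names, so it is an illustration of the ordering idea, not LTTng code.

#include <stdatomic.h>
#include <stdint.h>

struct subbuf {
        uint64_t header_tsc;       /* slot data written before publication */
        atomic_ulong commit_count; /* readers trust the data once this advances */
};

static void publish_header(struct subbuf *sb, uint64_t tsc, unsigned long header_size)
{
        sb->header_tsc = tsc;      /* 1) write the slot data */

        /*
         * 2) order the data writes before the commit-count update
         *    (the kernel path uses smp_wmb(), or barrier() upgraded to
         *    smp_mb() by the IPI sent by get_subbuf()).
         */
        atomic_thread_fence(memory_order_release);

        /*
         * 3) publish: bump the commit counter; a reader observing the new
         *    count may now deliver the sub-buffer.
         */
        atomic_fetch_add_explicit(&sb->commit_count, header_size,
                                  memory_order_relaxed);
}

int main(void)
{
        struct subbuf sb = { .header_tsc = 0, .commit_count = 0 };

        publish_header(&sb, 42, 64);
        return atomic_load(&sb.commit_count) == 64 ? 0 : 1;
}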
skipping to change at line 1855 (2.13.2) / line 1855 (2.13.3)

                 }
                 /*
                  * Need to write the subbuffer start header on finalize.
                  */
                 offsets->switch_old_start = 1;
         }
         offsets->begin = subbuf_align(offsets->begin, chan);
         /* Note: old points to the next subbuf at offset 0 */
         offsets->end = offsets->begin;
+        /*
+         * Populate the records lost counters prior to performing a
+         * sub-buffer switch.
+         */
+        ctx->priv.records_lost_full = v_read(config, &buf->records_lost_full);
+        ctx->priv.records_lost_wrap = v_read(config, &buf->records_lost_wrap);
+        ctx->priv.records_lost_big = v_read(config, &buf->records_lost_big);
         return 0;
 }

 /*
  * Force a sub-buffer switch. This operation is completely reentrant : can be
  * called while tracing is active with absolutely no lock held.
  *
  * Note, however, that as a v_cmpxchg is used for some atomic
  * operations, this function must be called from the CPU which owns the buffer
  * for a ACTIVE flush.
  */
 void lib_ring_buffer_switch_slow(struct lttng_kernel_ring_buffer *buf, enum switch_mode mode)
 {
         struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
         const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
+        struct lttng_kernel_ring_buffer_ctx ctx;
         struct switch_offsets offsets;
         unsigned long oldidx;
-        u64 tsc;
         offsets.size = 0;
         /*
          * Perform retryable operations.
          */
         do {
                 if (lib_ring_buffer_try_switch_slow(mode, buf, chan, &offsets,
-                                &tsc))
+                                &ctx))
                         return; /* Switch not needed */
         } while (v_cmpxchg(config, &buf->offset, offsets.old, offsets.end)
                  != offsets.old);
         /*
          * Atomically update last_tsc. This update races against concurrent
          * atomic updates, but the race will always cause supplementary full TSC
          * records, never the opposite (missing a full TSC record when it would
          * be needed).
          */
-        save_last_tsc(config, buf, tsc);
+        save_last_tsc(config, buf, ctx.priv.tsc);
         /*
          * Push the reader if necessary
          */
         lib_ring_buffer_reserve_push_reader(buf, chan, offsets.old);
         oldidx = subbuf_index(offsets.old, chan);
         lib_ring_buffer_clear_noref(config, &buf->backend, oldidx);
         /*
          * May need to populate header start on SWITCH_FLUSH.
          */
         if (offsets.switch_old_start) {
-                lib_ring_buffer_switch_old_start(buf, chan, &offsets, tsc);
+                lib_ring_buffer_switch_old_start(buf, chan, &offsets, &ctx);
                 offsets.old += config->cb.subbuffer_header_size();
         }
         /*
          * Switch old subbuffer.
          */
-        lib_ring_buffer_switch_old_end(buf, chan, &offsets, tsc);
+        lib_ring_buffer_switch_old_end(buf, chan, &offsets, &ctx);
 }
 EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_slow);

 struct switch_param {
         struct lttng_kernel_ring_buffer *buf;
         enum switch_mode mode;
 };

 static void remote_switch(void *info)
 {
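
On the caller side (the lib_ring_buffer_switch_slow() shown above), the local "u64 tsc" is gone: the function now keeps a lttng_kernel_ring_buffer_ctx on its stack, lets lib_ring_buffer_try_switch_slow() fill in priv.tsc and the records-lost counters, and then hands &ctx to the old_start/old_end helpers. The sketch below compresses that flow using stand-in types and stub helpers; it is not the real kernel API, only the calling pattern.

#include <stdint.h>

struct rb_ctx {                    /* stand-in for lttng_kernel_ring_buffer_ctx */
        struct {
                uint64_t tsc;
                unsigned long records_lost_full, records_lost_wrap, records_lost_big;
        } priv;
};

static uint64_t clock_read(void)   /* stand-in for the clock-read callback */
{
        return 1000;
}

static int try_switch(struct rb_ctx *ctx)
{
        ctx->priv.tsc = clock_read();        /* was "*tsc = ..." in 2.13.2 */
        ctx->priv.records_lost_full = 0;     /* snapshot the lost-records counters */
        ctx->priv.records_lost_wrap = 0;
        ctx->priv.records_lost_big = 0;
        return 0;                            /* 0: a switch is needed */
}

static void switch_old_end(const struct rb_ctx *ctx)
{
        (void)ctx->priv.tsc;                 /* helper reads the timestamp from ctx */
}

int main(void)
{
        struct rb_ctx ctx;                   /* replaces the old "u64 tsc;" local */

        if (try_switch(&ctx))
                return 0;                    /* switch not needed */
        switch_old_end(&ctx);                /* was switch_old_end(..., tsc) */
        return 0;
}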
skipping to change at line 2146 (2.13.2) / line 2153 (2.13.3)

         }
         offsets->end = offsets->begin + offsets->size;
         if (unlikely(subbuf_offset(offsets->end, chan) == 0)) {
                 /*
                  * The offset_end will fall at the very beginning of the next
                  * subbuffer.
                  */
                 offsets->switch_new_end = 1;    /* For offsets->begin */
         }
+        /*
+         * Populate the records lost counters when the space reservation
+         * may cause a sub-buffer switch.
+         */
+        if (offsets->switch_new_end || offsets->switch_old_end) {
+                ctx->priv.records_lost_full = v_read(config, &buf->records_lost_full);
+                ctx->priv.records_lost_wrap = v_read(config, &buf->records_lost_wrap);
+                ctx->priv.records_lost_big = v_read(config, &buf->records_lost_big);
+        }
         return 0;
 }

 static struct lttng_kernel_ring_buffer *get_current_buf(struct lttng_kernel_ring_buffer_channel *chan, int cpu)
 {
         const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
         if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
                 return per_cpu_ptr(chan->backend.buf, cpu);
         else
skipping to change at line 2222 (2.13.2) / line 2238 (2.13.3)

          */
         lib_ring_buffer_clear_noref(config, &buf->backend,
                         subbuf_index(offsets.end - 1, chan));
         /*
          * Switch old subbuffer if needed.
          */
         if (unlikely(offsets.switch_old_end)) {
                 lib_ring_buffer_clear_noref(config, &buf->backend,
                                 subbuf_index(offsets.old - 1, chan));
-                lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx->priv.tsc);
+                lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx);
         }
         /*
          * Populate new subbuffer.
          */
         if (unlikely(offsets.switch_new_start))
-                lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->priv.tsc);
+                lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx);
         if (unlikely(offsets.switch_new_end))
-                lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->priv.tsc);
+                lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx);
         ctx->priv.slot_size = offsets.size;
         ctx->priv.pre_offset = offsets.begin;
         ctx->priv.buf_offset = offsets.begin + offsets.pre_header_padding;
         return 0;
 }
 EXPORT_SYMBOL_GPL(lib_ring_buffer_reserve_slow);

 static
 void lib_ring_buffer_vmcore_check_deliver(const struct lttng_kernel_ring_buffer_config *config,
skipping to change at line 2283 (2.13.2) / line 2299 (2.13.3)

 {
 }
 #endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */

 void lib_ring_buffer_check_deliver_slow(const struct lttng_kernel_ring_buffer_config *config,
                 struct lttng_kernel_ring_buffer *buf,
                 struct lttng_kernel_ring_buffer_channel *chan,
                 unsigned long offset,
                 unsigned long commit_count,
                 unsigned long idx,
-                u64 tsc)
+                const struct lttng_kernel_ring_buffer_ctx *ctx)
 {
         unsigned long old_commit_count = commit_count
                         - chan->backend.subbuf_size;
         /*
          * If we succeeded at updating cc_sb below, we are the subbuffer
          * writer delivering the subbuffer. Deals with concurrent
          * updates of the "cc" value without adding a add_return atomic
          * operation to the fast path.
          *
skipping to change at line 2343 (2.13.2) / line 2359 (2.13.3)

                  * reservation for the current sub-buffer.
                  *
                  * Order increment of commit counter before reading ts_end.
                  */
                 smp_mb();
                 ts_end = &buf->ts_end[idx];
                 deliver_count_events(config, buf, idx);
                 config->cb.buffer_end(buf, *ts_end, idx,
                                 lib_ring_buffer_get_data_size(config,
                                                 buf,
-                                                idx));
+                                                idx), ctx);
                 /*
                  * Increment the packet counter while we have exclusive
                  * access.
                  */
                 subbuffer_inc_packet_count(config, &buf->backend, idx);
                 /*
                  * Set noref flag and offset for this subbuffer id.
                  * Contains a memory barrier that ensures counter stores
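
The last two hunks show where the threaded context pays off: lib_ring_buffer_check_deliver_slow() now takes ctx and forwards it to the client's buffer_end() callback, so the code that writes the end-of-packet header can see the records-lost counters snapshotted at reserve/switch time. The callback below is a hypothetical consumer written against stand-in types; the real buffer_end() implementation lives in the LTTng-modules client code and is not part of this diff.

#include <stdint.h>
#include <stdio.h>

struct rb_ctx {                              /* stand-in context, as in the sketches above */
        struct {
                uint64_t tsc;
                unsigned long records_lost_full, records_lost_wrap, records_lost_big;
        } priv;
};

struct packet_end_header {                   /* hypothetical packet trailer */
        uint64_t timestamp_end;
        unsigned long events_discarded;
};

static void buffer_end_cb(struct packet_end_header *hdr, uint64_t ts_end,
                          const struct rb_ctx *ctx)
{
        hdr->timestamp_end = ts_end;
        /* Fold the per-cause lost counters into one "discarded" figure. */
        hdr->events_discarded = ctx->priv.records_lost_full +
                                ctx->priv.records_lost_wrap +
                                ctx->priv.records_lost_big;
}

int main(void)
{
        struct rb_ctx ctx = { .priv = { .tsc = 7, .records_lost_full = 2,
                                        .records_lost_wrap = 1, .records_lost_big = 0 } };
        struct packet_end_header hdr;

        buffer_end_cb(&hdr, ctx.priv.tsc, &ctx);
        printf("discarded=%lu\n", hdr.events_discarded);
        return 0;
}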
End of changes. 26 change blocks. 23 lines changed or deleted, 42 lines changed or added.
