glusterfs  8.2
About: GlusterFS is a network/cluster filesystem. The storage server (or each in a cluster) runs glusterfsd and the clients use mount command or glusterfs client to mount the exported filesystem. Release series 8.x (latest version).
  Fossies Dox: glusterfs-8.2.tar.gz  ("unofficial" and yet experimental doxygen-generated source code documentation)  

stack.h
Go to the documentation of this file.
1 /*
2  Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
3  This file is part of GlusterFS.
4 
5  This file is licensed to you under your choice of the GNU Lesser
6  General Public License, version 3 or any later version (LGPLv3 or
7  later), or the GNU General Public License, version 2 (GPLv2), in all
8  cases as published by the Free Software Foundation.
9 */
10 
11 /*
12  This file defines MACROS and static inlines used to emulate a function
13  call over asynchronous communication with remote server
14 */
15 
16 #ifndef _STACK_H
17 #define _STACK_H
18 
19 struct _call_stack;
20 typedef struct _call_stack call_stack_t;
21 struct _call_frame;
22 typedef struct _call_frame call_frame_t;
23 struct call_pool;
24 typedef struct call_pool call_pool_t;
25 
26 #include <sys/time.h>
27 
28 #include "glusterfs/xlator.h"
29 #include "glusterfs/dict.h"
30 #include "glusterfs/list.h"
31 #include "glusterfs/common-utils.h"
32 #include "glusterfs/lkowner.h"
33 #include "glusterfs/client_t.h"
35 #include "glusterfs/timespec.h"
36 
#define NFS_PID 1
#define LOW_PRIO_PROC_PID -1

/* Name of the xlator that last recorded an error on this stack ("-" if none;
 * see err_xl handling in STACK_UNWIND_STRICT). */
#define STACK_ERR_XL_NAME(stack) (stack->err_xl ? stack->err_xl->name : "-")
/* UID string of the client this stack belongs to, or "-" if no client. */
#define STACK_CLIENT_NAME(stack) \
 (stack->client ? stack->client->client_uid : "-")

/* Callback invoked when a wound call unwinds back into a frame:
 * op_ret/op_errno carry the fop result; trailing varargs carry the
 * fop-specific reply payload. */
typedef int32_t (*ret_fn_t)(call_frame_t *frame, call_frame_t *prev_frame,
 xlator_t *this, int32_t op_ret, int32_t op_errno,
 ...);
47 
/* Process-wide registry of live call stacks plus the memory pools that
 * frames and stacks are allocated from (used by statedump to walk all
 * pending frames).
 * NOTE(review): this extracted listing is missing several members that the
 * file's own cross-reference index shows (next_call/prev_call, all_stacks,
 * lock, frame_mem_pool, stack_mem_pool, total_count) -- confirm against the
 * original stack.h before relying on this layout. */
struct call_pool {
 union {
 struct list_head all_frames; /* head of the list of all live stacks */
 struct {
 };
 int64_t cnt; /* number of stacks currently in the pool */
};
62 
/* One frame of a call stack: records which xlator a fop was wound to and
 * where the reply must unwind, emulating a function-call stack frame over
 * asynchronous messaging.
 * NOTE(review): the cross-reference index lists additional members (lock,
 * complete, op) that this extracted listing does not show -- confirm
 * against the original stack.h. */
struct _call_frame {
 call_stack_t *root; /* stack root */
 call_frame_t *parent; /* previous BP */
 struct list_head frames; /* link on root->myframes */
 void *local; /* local variables */
 xlator_t *this; /* implicit object */
 ret_fn_t ret; /* op_return address */
 int32_t ref_count;
 void *cookie; /* unique cookie */

 struct timespec begin; /* when this frame was created */
 struct timespec end; /* when this frame completed */
 const char *wind_from; /* __FUNCTION__ that wound this frame */
 const char *wind_to; /* stringified fop wound to (#fn) */
 const char *unwind_from; /* __FUNCTION__ that unwound this frame */
 const char *unwind_to; /* stringified callback name (#rfn) */
};
83 
/* Result of a namespace lookup performed for a request. */
struct _ns_info {
 uint32_t hash; /* Hash of the namespace from SuperFastHash */
 gf_boolean_t found; /* Set to true if we found a namespace */
};

typedef struct _ns_info ns_info_t;

/* Auxiliary group lists up to this size fit in the stack-embedded
 * groups_small array; larger lists are heap-allocated into groups_large
 * (see call_stack_alloc_groups). */
#define SMALL_GROUP_COUNT 128
/* Per-request call stack: the caller's identity plus the list of frames
 * wound on the request's behalf.
 * NOTE(review): the cross-reference index lists additional members
 * (next_call/prev_call, pool, stack_lock, client, groups_small, identifier,
 * err_xl, lk_owner, ctx, ns_info) that this extracted listing does not
 * show -- confirm against the original stack.h. */
struct _call_stack {
 union {
 struct list_head all_frames; /* link on pool's all_frames list */
 struct {
 };
 };
 uint64_t unique; /* copied across copy_frame; presumably a
                     per-request id -- TODO confirm */
 void *state; /* pointer to request state */
 uid_t uid; /* caller's credentials for this request */
 gid_t gid;
 pid_t pid;
 uint16_t ngrps; /* number of valid entries in 'groups' */
 uint32_t *groups_large; /* heap buffer when ngrps > SMALL_GROUP_COUNT */
 uint32_t *groups; /* active buffer: groups_small or groups_large */

 struct list_head myframes; /* List of call_frame_t that go
 to make the call stack */

 int32_t op;
 int8_t type;
 struct timespec tv;
 int32_t error; /* last op_errno recorded by STACK_UNWIND_STRICT */

 uint32_t flags; /* use it wisely, think of it as a mechanism to
 send information over the wire too */
 struct timespec ctime; /* timestamp, most probably set at
 creation of stack. */

};

/* call_stack 'flags' field users: metadata timestamp bits */
#define MDATA_CTIME (1 << 0)
#define MDATA_MTIME (1 << 1)
#define MDATA_ATIME (1 << 2)
#define MDATA_PAR_CTIME (1 << 3)
#define MDATA_PAR_MTIME (1 << 4)
#define MDATA_PAR_ATIME (1 << 5)
141 
/* Set the effective uid/gid on the stack that 'frm' belongs to and clear
 * its auxiliary group count.  Safe no-op when 'frm' is NULL.
 * Fixes vs. original: the trailing ';' after while (0) (which broke use
 * inside an unbraced if/else) is removed, and the 'u'/'g' arguments are
 * parenthesized against operator-precedence surprises. */
#define frame_set_uid_gid(frm, u, g)                                           \
    do {                                                                       \
        if (frm) {                                                             \
            (frm)->root->uid = (u);                                            \
            (frm)->root->gid = (g);                                            \
            (frm)->root->ngrps = 0;                                            \
        }                                                                      \
    } while (0)
150 
151 struct xlator_fops;
152 void
154 
155 static inline void
157 {
158  void *local = NULL;
159 
160  if (frame->root->ctx->measure_latency)
161  gf_update_latency(frame);
162 
163  list_del_init(&frame->frames);
164  if (frame->local) {
165  local = frame->local;
166  frame->local = NULL;
167  }
168 
169  LOCK_DESTROY(&frame->lock);
170  mem_put(frame);
171 
172  if (local)
173  mem_put(local);
174 }
175 
176 static inline void
178 {
179  call_frame_t *frame = NULL;
180  call_frame_t *tmp = NULL;
181 
182  LOCK(&stack->pool->lock);
183  {
184  list_del_init(&stack->all_frames);
185  stack->pool->cnt--;
186  }
187  UNLOCK(&stack->pool->lock);
188 
189  LOCK_DESTROY(&stack->stack_lock);
190 
191  list_for_each_entry_safe(frame, tmp, &stack->myframes, frames)
192  {
193  FRAME_DESTROY(frame);
194  }
195 
196  GF_FREE(stack->groups_large);
197 
198  mem_put(stack);
199 }
200 
201 static inline void
203 {
204  call_frame_t *frame = NULL;
205  call_frame_t *tmp = NULL;
206  call_frame_t *last = NULL;
207  struct list_head toreset = {0};
208 
209  INIT_LIST_HEAD(&toreset);
210 
211  /* We acquire call_pool->lock only to remove the frames from this stack
212  * to preserve atomicity. This synchronizes across concurrent requests
213  * like statedump, STACK_DESTROY etc. */
214 
215  LOCK(&stack->pool->lock);
216  {
217  last = list_last_entry(&stack->myframes, call_frame_t, frames);
218  list_del_init(&last->frames);
219  list_splice_init(&stack->myframes, &toreset);
220  list_add(&last->frames, &stack->myframes);
221  }
222  UNLOCK(&stack->pool->lock);
223 
224  list_for_each_entry_safe(frame, tmp, &toreset, frames)
225  {
226  FRAME_DESTROY(frame);
227  }
228 }
229 
/* Temporarily become superuser for this frame: stash the caller's
 * uid/gid/pid into the fop-local struct (local_type must have uid, gid and
 * pid members) and switch the stack to root with the no-root-squash pid.
 * Pair with FRAME_SU_UNDO to restore.
 * Fixes vs. original: trailing ';' after while (0) removed (broke unbraced
 * if/else use) and 'frm' parenthesized consistently. */
#define FRAME_SU_DO(frm, local_type)                                           \
    do {                                                                       \
        local_type *__local = (frm)->local;                                    \
        __local->uid = (frm)->root->uid;                                       \
        __local->gid = (frm)->root->gid;                                       \
        __local->pid = (frm)->root->pid;                                       \
        (frm)->root->uid = 0;                                                  \
        (frm)->root->gid = 0;                                                  \
        (frm)->root->pid = GF_CLIENT_PID_NO_ROOT_SQUASH;                       \
    } while (0)
240 
/* Restore the caller's uid/gid/pid previously saved by FRAME_SU_DO from the
 * fop-local struct back onto the stack.
 * Fixes vs. original: trailing ';' after while (0) removed (broke unbraced
 * if/else use) and 'frm' parenthesized consistently. */
#define FRAME_SU_UNDO(frm, local_type)                                         \
    do {                                                                       \
        local_type *__local = (frm)->local;                                    \
        (frm)->root->uid = __local->uid;                                       \
        (frm)->root->gid = __local->gid;                                       \
        (frm)->root->pid = __local->pid;                                       \
    } while (0)
248 
/* NOTE: make sure to keep this as a macro, mainly because we need the 'fn'
   argument here to be the proper fn ptr, so that its address is a valid
   entry in the 'xlator_fops' struct.
   To understand this, check `xlator.h:struct xlator_fops`, and then
   see a STACK_WIND call, which generally calls `subvol->fops->fop`, so
   the address offset should give the index */
255 
/* 1-based index of fop 'fn' within (xl)->fops, computed as its distance in
   pointer-sized slots from the first member, 'stat'.
   +1 is required as 0 means NULL fop, and we don't have a variable for it */
#define get_fop_index_from_fn(xl, fn) \
 (1 + (((long)&(fn) - (long)&((xl)->fops->stat)) / sizeof(void *)))
259 
260 /* NOTE: the above reason holds good here too. But notice that we are getting
261  the base address of the 'stat' fop, which is the first entry in the fop
262  structure. All we need to do is move as much as 'idx' fields, and get the
263  actual pointer from that field. */
264 
/* Inverse of get_fop_index_from_fn(): given the address of the first fop
 * member ('stat') of a fops table and a 1-based fop index, return the
 * function pointer stored in that slot (may be NULL for an unimplemented
 * fop).
 *
 * base_fop: address of the fops struct's first ('stat') member.
 * fop_idx:  1-based index as produced by get_fop_index_from_fn().
 *
 * Fix vs. original: the byte offset is applied through a char * --
 * arithmetic on void * is a GCC extension, not standard C. */
static inline void *
get_the_pt_fop(void *base_fop, int fop_idx)
{
    char *target_addr = (char *)base_fop + ((fop_idx - 1) * sizeof(void *));
    /* all below type casting is for not getting warning. */
    return (void *)*(unsigned long *)target_addr;
}
272 
/* make a call without switching frames: the current frame is re-targeted at
 * 'obj' and 'fn' is invoked directly, so the reply will unwind past this
 * xlator without a callback of its own */
#define STACK_WIND_TAIL(frame, obj, fn, params...)                             \
    do {                                                                       \
        xlator_t *old_THIS = NULL;                                             \
        xlator_t *next_xl = obj;                                               \
        typeof(fn) next_xl_fn = fn;                                            \
        int opn = get_fop_index_from_fn((next_xl), (fn));                      \
                                                                               \
        frame->this = next_xl;                                                 \
        frame->wind_to = #fn;                                                  \
        old_THIS = THIS;                                                       \
        THIS = next_xl;                                                        \
        gf_msg_trace("stack-trace", 0,                                         \
                     "stack-address: %p, "                                     \
                     "winding from %s to %s",                                  \
                     frame->root, old_THIS->name, THIS->name);                 \
        /* Need to capture counts at leaf node */                              \
        if (!next_xl->pass_through && !next_xl->children) {                    \
            GF_ATOMIC_INC(next_xl->stats.total.metrics[opn].fop);              \
            GF_ATOMIC_INC(next_xl->stats.interval.metrics[opn].fop);           \
            GF_ATOMIC_INC(next_xl->stats.total.count);                         \
            GF_ATOMIC_INC(next_xl->stats.interval.count);                      \
        }                                                                      \
                                                                               \
        if (next_xl->pass_through) {                                           \
            next_xl_fn = get_the_pt_fop(&next_xl->pass_through_fops->stat,     \
                                        opn);                                  \
        }                                                                      \
        next_xl_fn(frame, next_xl, params);                                    \
        THIS = old_THIS;                                                       \
    } while (0)
304 
/* make a call: wind 'fn' on xlator 'obj' from 'frame', with 'rfn' invoked
 * when the call unwinds */
#define STACK_WIND(frame, rfn, obj, fn, params...)                             \
    STACK_WIND_COMMON(frame, rfn, 0, NULL, obj, fn, params)

/* make a call with a cookie that is handed back to 'rfn' on unwind */
#define STACK_WIND_COOKIE(frame, rfn, cky, obj, fn, params...)                 \
    STACK_WIND_COMMON(frame, rfn, 1, cky, obj, fn, params)

/* Cookie passed as the argument can be NULL (ptr) or 0 (int). Hence we
   have to have a mechanism to separate out the two STACK_WIND formats.
   Needed a common macro, as other than for cookie, all the other code
   is common across.
 */
/* Implementation behind STACK_WIND/STACK_WIND_COOKIE: allocate a child
 * frame, link it into the stack under stack_lock, record trace/latency/
 * metrics bookkeeping, then invoke the target fop (or its pass-through
 * variant when the xlator is in pass-through mode). */
#define STACK_WIND_COMMON(frame, rfn, has_cookie, cky, obj, fn, params...)     \
    do {                                                                       \
        call_frame_t *_new = NULL;                                             \
        xlator_t *old_THIS = NULL;                                             \
        typeof(fn) next_xl_fn = fn;                                            \
                                                                               \
        _new = mem_get0(frame->root->pool->frame_mem_pool);                    \
        if (!_new) {                                                           \
            break;                                                             \
        }                                                                      \
        typeof(fn##_cbk) tmp_cbk = rfn;                                        \
        _new->root = frame->root;                                              \
        _new->this = obj;                                                      \
        _new->ret = (ret_fn_t)tmp_cbk;                                         \
        _new->parent = frame;                                                  \
        /* (void *) is required for avoiding gcc warning */                    \
        _new->cookie = ((has_cookie == 1) ? (void *)(cky) : (void *)_new);     \
        _new->wind_from = __FUNCTION__;                                        \
        _new->wind_to = #fn;                                                   \
        _new->unwind_to = #rfn;                                                \
        LOCK_INIT(&_new->lock);                                                \
        LOCK(&frame->root->stack_lock);                                        \
        {                                                                      \
            list_add(&_new->frames, &frame->root->myframes);                   \
            frame->ref_count++;                                                \
        }                                                                      \
        UNLOCK(&frame->root->stack_lock);                                      \
        fn##_cbk = rfn;                                                        \
        old_THIS = THIS;                                                       \
        THIS = obj;                                                            \
        gf_msg_trace("stack-trace", 0,                                         \
                     "stack-address: %p, "                                     \
                     "winding from %s to %s",                                  \
                     frame->root, old_THIS->name, THIS->name);                 \
        if (obj->ctx->measure_latency)                                         \
            timespec_now(&_new->begin);                                        \
        _new->op = get_fop_index_from_fn((_new->this), (fn));                  \
        if (!obj->pass_through) {                                              \
            GF_ATOMIC_INC(obj->stats.total.metrics[_new->op].fop);             \
            GF_ATOMIC_INC(obj->stats.interval.metrics[_new->op].fop);          \
            GF_ATOMIC_INC(obj->stats.total.count);                             \
            GF_ATOMIC_INC(obj->stats.interval.count);                          \
        } else {                                                               \
            /* we want to get to the actual fop to call */                     \
            next_xl_fn = get_the_pt_fop(&obj->pass_through_fops->stat,         \
                                        _new->op);                             \
        }                                                                      \
        next_xl_fn(_new, obj, params);                                         \
        THIS = old_THIS;                                                       \
    } while (0)
368 
#define STACK_UNWIND STACK_UNWIND_STRICT

/* return from function in type-safe way: 'fop' names the operation so the
 * parent's callback pointer can be cast to the matching fop_<fop>_cbk_t
 * before being invoked with op_ret/op_errno and the fop-specific reply
 * params; also updates the stack's error bookkeeping and latency/metrics */
#define STACK_UNWIND_STRICT(fop, frame, op_ret, op_errno, params...)           \
    do {                                                                       \
        fop_##fop##_cbk_t fn = NULL;                                           \
        call_frame_t *_parent = NULL;                                          \
        xlator_t *old_THIS = NULL;                                             \
                                                                               \
        if (!frame) {                                                          \
            gf_msg("stack", GF_LOG_CRITICAL, 0, LG_MSG_FRAME_ERROR, "!frame"); \
            break;                                                             \
        }                                                                      \
        if ((op_ret) < 0) {                                                    \
            gf_msg_debug("stack-trace", op_errno,                              \
                         "stack-address: %p, "                                 \
                         "%s returned %d error: %s",                           \
                         frame->root, THIS->name, (int32_t)(op_ret),           \
                         strerror(op_errno));                                  \
        } else {                                                               \
            gf_msg_trace("stack-trace", 0,                                     \
                         "stack-address: %p, "                                 \
                         "%s returned %d",                                     \
                         frame->root, THIS->name, (int32_t)(op_ret));          \
        }                                                                      \
        fn = (fop_##fop##_cbk_t)frame->ret;                                    \
        _parent = frame->parent;                                               \
        LOCK(&frame->root->stack_lock);                                        \
        {                                                                      \
            _parent->ref_count--;                                              \
            /* remember the failing xlator/errno; clear on clean success */    \
            if ((op_ret) < 0 && (op_errno) != frame->root->error) {            \
                frame->root->err_xl = frame->this;                             \
                frame->root->error = (op_errno);                               \
            } else if ((op_ret) == 0) {                                        \
                frame->root->err_xl = NULL;                                    \
                frame->root->error = 0;                                        \
            }                                                                  \
        }                                                                      \
        UNLOCK(&frame->root->stack_lock);                                      \
        old_THIS = THIS;                                                       \
        THIS = _parent->this;                                                  \
        frame->complete = _gf_true;                                            \
        frame->unwind_from = __FUNCTION__;                                     \
        if (frame->this->ctx->measure_latency) {                               \
            timespec_now(&frame->end);                                         \
            /* required for top most xlator */                                 \
            if (_parent->ret == NULL)                                          \
                timespec_now(&_parent->end);                                   \
        }                                                                      \
        if (op_ret < 0) {                                                      \
            GF_ATOMIC_INC(THIS->stats.total.metrics[frame->op].cbk);           \
            GF_ATOMIC_INC(THIS->stats.interval.metrics[frame->op].cbk);        \
        }                                                                      \
        fn(_parent, frame->cookie, _parent->this, op_ret, op_errno, params);   \
        THIS = old_THIS;                                                       \
    } while (0)
425 
426 static inline int
428 {
429  if (ngrps <= SMALL_GROUP_COUNT) {
430  stack->groups = stack->groups_small;
431  } else {
432  stack->groups_large = GF_CALLOC(ngrps, sizeof(gid_t),
434  if (!stack->groups_large)
435  return -1;
436  stack->groups = stack->groups_large;
437  }
438 
439  stack->ngrps = ngrps;
440 
441  return 0;
442 }
443 
444 static inline int
446 {
447  call_frame_t *pos;
448  int32_t count = 0;
449 
450  if (!call_stack)
451  return count;
452 
453  list_for_each_entry(pos, &call_stack->myframes, frames) count++;
454 
455  return count;
456 }
457 
458 static inline call_frame_t *
460 {
461  call_stack_t *newstack = NULL;
462  call_stack_t *oldstack = NULL;
463  call_frame_t *newframe = NULL;
464 
465  if (!frame) {
466  return NULL;
467  }
468 
469  newstack = mem_get0(frame->root->pool->stack_mem_pool);
470  if (newstack == NULL) {
471  return NULL;
472  }
473 
474  INIT_LIST_HEAD(&newstack->myframes);
475 
476  newframe = mem_get0(frame->root->pool->frame_mem_pool);
477  if (!newframe) {
478  mem_put(newstack);
479  return NULL;
480  }
481 
482  newframe->this = frame->this;
483  newframe->root = newstack;
484  INIT_LIST_HEAD(&newframe->frames);
485  list_add(&newframe->frames, &newstack->myframes);
486 
487  oldstack = frame->root;
488 
489  newstack->uid = oldstack->uid;
490  newstack->gid = oldstack->gid;
491  newstack->pid = oldstack->pid;
492  newstack->op = oldstack->op;
493  newstack->type = oldstack->type;
494  newstack->ctime = oldstack->ctime;
495  newstack->flags = oldstack->flags;
496  if (call_stack_alloc_groups(newstack, oldstack->ngrps) != 0) {
497  mem_put(newstack);
498  return NULL;
499  }
500  if (!oldstack->groups) {
501  gf_msg_debug("stack", EINVAL, "groups is null (ngrps: %d)",
502  oldstack->ngrps);
503  /* Considering 'groups' is NULL, set ngrps to 0 */
504  oldstack->ngrps = 0;
505 
506  if (oldstack->groups_large)
507  oldstack->groups = oldstack->groups_large;
508  else
509  oldstack->groups = oldstack->groups_small;
510  }
511  newstack->ngrps = oldstack->ngrps;
512  memcpy(newstack->groups, oldstack->groups, sizeof(gid_t) * oldstack->ngrps);
513  newstack->unique = oldstack->unique;
514  newstack->pool = oldstack->pool;
515  newstack->lk_owner = oldstack->lk_owner;
516  newstack->ctx = oldstack->ctx;
517 
518  if (newstack->ctx->measure_latency) {
519  timespec_now(&newstack->tv);
520  memcpy(&newframe->begin, &newstack->tv, sizeof(newstack->tv));
521  }
522 
523  LOCK_INIT(&newframe->lock);
524  LOCK_INIT(&newstack->stack_lock);
525 
526  LOCK(&oldstack->pool->lock);
527  {
528  list_add(&newstack->all_frames, &oldstack->all_frames);
529  newstack->pool->cnt++;
530  }
531  UNLOCK(&oldstack->pool->lock);
532  GF_ATOMIC_INC(newstack->pool->total_count);
533 
534  return newframe;
535 }
536 
537 void
538 call_stack_set_groups(call_stack_t *stack, int ngrps, gid_t **groupbuf_p);
539 void
541 void
543 call_frame_t *
544 create_frame(xlator_t *xl, call_pool_t *pool);
547 #endif /* _STACK_H */
xlator.h
call_pool::all_stacks
struct call_pool::@31::@33 all_stacks
gf_lkowner_t
Definition: glusterfs-fops.h:190
_call_stack::error
int32_t error
Definition: stack.h:124
call_stack_alloc_groups
static int call_stack_alloc_groups(call_stack_t *stack, int ngrps)
Definition: stack.h:427
_call_frame::ref_count
int32_t ref_count
Definition: stack.h:70
_call_stack::ngrps
uint16_t ngrps
Definition: stack.h:110
call_pool::next_call
call_stack_t * next_call
Definition: stack.h:52
_call_stack::pool
call_pool_t * pool
Definition: stack.h:101
_glusterfs_ctx
Definition: glusterfs.h:618
mem_get0
void * mem_get0(struct mem_pool *pool)
Definition: mem-pool.c:703
gf_update_latency
void gf_update_latency(call_frame_t *frame)
Definition: latency.c:20
mem_pool
Definition: mem-pool.h:207
_call_frame::wind_to
const char * wind_to
Definition: stack.h:79
_call_frame::end
struct timespec end
Definition: stack.h:77
_call_stack::uid
uid_t uid
Definition: stack.h:106
copy_frame
static call_frame_t * copy_frame(call_frame_t *frame)
Definition: stack.h:459
_call_frame::cookie
void * cookie
Definition: stack.h:72
_call_frame::this
xlator_t * this
Definition: stack.h:68
_call_stack::pid
pid_t pid
Definition: stack.h:108
_ns_info::hash
uint32_t hash
Definition: stack.h:85
GF_FREE
#define GF_FREE(free_ptr)
Definition: mem-pool.h:159
call_frames_count
static int call_frames_count(call_stack_t *call_stack)
Definition: stack.h:445
_call_frame::op
glusterfs_fop_t op
Definition: stack.h:75
gf_lock_t
pthread_mutex_t gf_lock_t
Definition: locking.h:74
list_del_init
static void list_del_init(struct list_head *old)
Definition: list.h:80
_call_frame::unwind_to
const char * unwind_to
Definition: stack.h:81
_call_frame
Definition: stack.h:63
_call_stack::tv
struct timespec tv
Definition: stack.h:122
lkowner.h
_ns_info::found
bool found
Definition: stack.h:86
_call_stack::unique
uint64_t unique
Definition: stack.h:104
xlator_fops
Definition: xlator.h:561
_call_stack::prev_call
call_stack_t * prev_call
Definition: stack.h:98
_call_frame::ret
ret_fn_t ret
Definition: stack.h:69
STACK_RESET
static void STACK_RESET(call_stack_t *stack)
Definition: stack.h:202
SMALL_GROUP_COUNT
#define SMALL_GROUP_COUNT
Definition: stack.h:91
create_frame
call_frame_t * create_frame(xlator_t *xl, call_pool_t *pool)
Definition: stack.c:16
call_pool::cnt
int64_t cnt
Definition: stack.h:56
INIT_LIST_HEAD
#define INIT_LIST_HEAD(head)
Definition: list.h:19
call_pool::lock
gf_lock_t lock
Definition: stack.h:58
get_the_pt_fop
static void * get_the_pt_fop(void *base_fop, int fop_idx)
Definition: stack.h:266
gf_boolean_t
#define gf_boolean_t
Definition: glusterfs.h:368
_call_frame::frames
struct list_head frames
Definition: stack.h:66
_call_stack::stack_lock
gf_lock_t stack_lock
Definition: stack.h:102
_call_stack::ctime
struct timespec ctime
Definition: stack.h:128
UNLOCK
#define UNLOCK(x)
Definition: locking.h:79
_call_stack::groups_small
uint32_t groups_small[128]
Definition: stack.h:111
glusterfs_fop_t
glusterfs_fop_t
Definition: glusterfs-fops.h:16
_call_frame::lock
gf_lock_t lock
Definition: stack.h:71
_call_stack
Definition: stack.h:93
_call_frame::local
void * local
Definition: stack.h:67
gf_msg_debug
#define gf_msg_debug(dom, errnum, fmt...)
Definition: logging.h:270
_call_stack::gid
gid_t gid
Definition: stack.h:107
list_head
Definition: list.h:14
_call_stack::err_xl
xlator_t * err_xl
Definition: stack.h:123
__is_fuse_call
bool __is_fuse_call(call_frame_t *frame)
Definition: stack.c:443
_ns_info
Definition: stack.h:84
timespec_now
void timespec_now(struct timespec *ts)
Definition: timespec.c:27
_call_frame::unwind_from
const char * unwind_from
Definition: stack.h:80
_call_stack::myframes
struct list_head myframes
Definition: stack.h:117
_call_frame::begin
struct timespec begin
Definition: stack.h:76
call_pool
Definition: stack.h:48
_xlator
Definition: xlator.h:770
_call_stack::ns_info
ns_info_t ns_info
Definition: stack.h:131
mem_put
void mem_put(void *ptr)
Definition: mem-pool.c:849
call_pool::stack_mem_pool
struct mem_pool * stack_mem_pool
Definition: stack.h:60
call_pool::frame_mem_pool
struct mem_pool * frame_mem_pool
Definition: stack.h:59
_call_stack::client
client_t * client
Definition: stack.h:103
_glusterfs_ctx::measure_latency
unsigned char measure_latency
Definition: glusterfs.h:648
gf_proc_dump_pending_frames_to_dict
void gf_proc_dump_pending_frames_to_dict(call_pool_t *call_pool, dict_t *dict)
Definition: stack.c:405
_call_stack::all_frames
struct list_head all_frames
Definition: stack.h:95
LOCK_DESTROY
#define LOCK_DESTROY(x)
Definition: locking.h:80
_call_stack::next_call
call_stack_t * next_call
Definition: stack.h:97
_client
Definition: client_t.h:34
_call_stack::lk_owner
gf_lkowner_t lk_owner
Definition: stack.h:114
_call_frame::root
call_stack_t * root
Definition: stack.h:64
_call_stack::groups_large
uint32_t * groups_large
Definition: stack.h:112
call_pool::total_count
gf_atomic_int64_t total_count
Definition: stack.h:57
list_for_each_entry
#define list_for_each_entry(pos, head, member)
Definition: list.h:235
GF_ATOMIC_INC
#define GF_ATOMIC_INC(_atomic)
Definition: atomic.h:454
LOCK_INIT
#define LOCK_INIT(x)
Definition: locking.h:76
call_pool::prev_call
call_stack_t * prev_call
Definition: stack.h:53
FRAME_DESTROY
static void FRAME_DESTROY(call_frame_t *frame)
Definition: stack.h:156
ret_fn_t
int32_t(* ret_fn_t)(call_frame_t *frame, call_frame_t *prev_frame, xlator_t *this, int32_t op_ret, int32_t op_errno,...)
Definition: stack.h:44
dict.h
LOCK
#define LOCK(x)
Definition: locking.h:77
gf_proc_dump_pending_frames
void gf_proc_dump_pending_frames(call_pool_t *call_pool)
Definition: stack.c:212
call_pool::all_frames
struct list_head all_frames
Definition: stack.h:50
list_splice_init
static void list_splice_init(struct list_head *list, struct list_head *head)
Definition: list.h:130
list_add
static void list_add(struct list_head *new, struct list_head *head)
Definition: list.h:25
_call_stack::groups
uint32_t * groups
Definition: stack.h:113
_call_frame::complete
bool complete
Definition: stack.h:73
client_t.h
libglusterfs-messages.h
_call_stack::ctx
glusterfs_ctx_t * ctx
Definition: stack.h:115
_call_frame::parent
call_frame_t * parent
Definition: stack.h:65
list_for_each_entry_safe
#define list_for_each_entry_safe(pos, n, head, member)
Definition: list.h:240
_call_stack::op
int32_t op
Definition: stack.h:120
_dict
Definition: dict.h:114
GF_CALLOC
#define GF_CALLOC(nmemb, size, type)
Definition: mem-pool.h:153
common-utils.h
list.h
STACK_DESTROY
static void STACK_DESTROY(call_stack_t *stack)
Definition: stack.h:177
_call_stack::state
void * state
Definition: stack.h:105
gf_atomic_t
#define gf_atomic_t
Definition: atomic.h:111
_call_stack::flags
uint32_t flags
Definition: stack.h:126
timespec.h
list_last_entry
#define list_last_entry(ptr, type, member)
Definition: list.h:224
UNIX_PATH_MAX
#define UNIX_PATH_MAX
Definition: rdd.c:26
_call_stack::identifier
char identifier[UNIX_PATH_MAX]
Definition: stack.h:109
_call_frame::wind_from
const char * wind_from
Definition: stack.h:78
call_stack_set_groups
void call_stack_set_groups(call_stack_t *stack, int ngrps, gid_t **groupbuf_p)
Definition: stack.c:67
gf_common_mt_groups_t
@ gf_common_mt_groups_t
Definition: mem-types.h:83
_call_stack::type
int8_t type
Definition: stack.h:121