"Fossies" - the Fresh Open Source Software Archive

Member "glusterfs-8.2/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c" (16 Sep 2020, 71376 Bytes) of package /linux/misc/glusterfs-8.2.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and code folding option. Alternatively you can here view or download the uninterpreted source code file. For more information about "glusterd-rpc-ops.c" see the Fossies "Dox" file reference documentation.

/*
   Copyright (c) 2010-2012 Red Hat, Inc. <http://www.redhat.com>
   This file is part of GlusterFS.

   This file is licensed to you under your choice of the GNU Lesser
   General Public License, version 3 or any later version (LGPLv3 or
   later), or the GNU General Public License, version 2 (GPLv2), in all
   cases as published by the Free Software Foundation.
*/

#include "rpc-clnt.h"
#include "glusterd1-xdr.h"
#include "cli1-xdr.h"

#include "xdr-generic.h"

#include <glusterfs/compat-errno.h>
#include "glusterd-op-sm.h"
#include "glusterd-sm.h"
#include "glusterd.h"
#include "protocol-common.h"
#include "glusterd-utils.h"
#include <glusterfs/common-utils.h>
#include "glusterd-messages.h"
#include "glusterd-snapshot-utils.h"
#include <sys/uio.h>

#define SERVER_PATH_MAX (16 * 1024)

#define GLUSTERD_STACK_DESTROY(frame)                                          \
    do {                                                                       \
        frame->local = NULL;                                                   \
        STACK_DESTROY(frame->root);                                            \
    } while (0)
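
/* Usage sketch (illustrative): the RPC callbacks in this file own their
 * call frame and release it once the reply has been fully processed:
 *
 *     call_frame_t *frame = myframe;
 *     ... decode and handle the response ...
 *     GLUSTERD_STACK_DESTROY(frame);   <-- clears frame->local, then
 *                                          destroys the stack rooted at
 *                                          frame->root
 */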

extern glusterd_op_info_t opinfo;
extern uuid_t global_txn_id;
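
/* Build a gf_cli_rsp for the CLI: pick an op-specific error string where
 * one is available, serialize the op context dictionary into the response,
 * and send it back through glusterd_to_cli(). */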

int32_t
glusterd_op_send_cli_response(glusterd_op_t op, int32_t op_ret,
                              int32_t op_errno, rpcsvc_request_t *req,
                              void *op_ctx, char *op_errstr)
{
    int32_t ret = -1;
    void *cli_rsp = NULL;
    dict_t *ctx = NULL;
    char *free_ptr = NULL;
    glusterd_conf_t *conf = NULL;
    xdrproc_t xdrproc = NULL;
    char *errstr = NULL;
    int32_t status = 0;
    int32_t count = 0;
    gf_cli_rsp rsp = {
        0,
    };
    xlator_t *this = NULL;

    this = THIS;
    GF_ASSERT(this);
    conf = this->private;

    GF_ASSERT(conf);

    ctx = op_ctx;

    switch (op) {
        case GD_OP_REMOVE_BRICK: {
            if (ctx)
                ret = dict_get_strn(ctx, "errstr", SLEN("errstr"), &errstr);
            break;
        }
        case GD_OP_RESET_VOLUME: {
            if (op_ret && !op_errstr)
                errstr = "Error while resetting options";
            break;
        }
        case GD_OP_REBALANCE:
        case GD_OP_DEFRAG_BRICK_VOLUME: {
            if (ctx) {
                ret = dict_get_int32n(ctx, "status", SLEN("status"), &status);
                if (ret) {
                    gf_msg_trace(this->name, 0, "failed to get status");
                }
            }
            break;
        }
        case GD_OP_GSYNC_CREATE:
        case GD_OP_GSYNC_SET: {
            if (ctx) {
                ret = dict_get_strn(ctx, "errstr", SLEN("errstr"), &errstr);
                ret = dict_set_strn(ctx, "glusterd_workdir",
                                    SLEN("glusterd_workdir"), conf->workdir);
                /* swallow error here, that will be re-triggered in cli */
            }
            break;
        }
        case GD_OP_PROFILE_VOLUME: {
            if (ctx && dict_get_int32n(ctx, "count", SLEN("count"), &count)) {
                ret = dict_set_int32n(ctx, "count", SLEN("count"), 0);
                if (ret) {
                    gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                           "failed to set count in dictionary");
                }
            }
            break;
        }
        case GD_OP_START_BRICK:
        case GD_OP_STOP_BRICK: {
            gf_msg_debug(this->name, 0, "op '%s' not supported",
                         gd_op_list[op]);
            break;
        }
        case GD_OP_NONE:
        case GD_OP_MAX: {
            gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_OP_UNSUPPORTED,
                   "invalid operation");
            break;
        }
        case GD_OP_CREATE_VOLUME:
        case GD_OP_START_VOLUME:
        case GD_OP_STOP_VOLUME:
        case GD_OP_DELETE_VOLUME:
        case GD_OP_DEFRAG_VOLUME:
        case GD_OP_ADD_BRICK:
        case GD_OP_LOG_ROTATE:
        case GD_OP_SYNC_VOLUME:
        case GD_OP_STATEDUMP_VOLUME:
        case GD_OP_REPLACE_BRICK:
        case GD_OP_STATUS_VOLUME:
        case GD_OP_SET_VOLUME:
        case GD_OP_LIST_VOLUME:
        case GD_OP_CLEARLOCKS_VOLUME:
        case GD_OP_HEAL_VOLUME:
        case GD_OP_QUOTA:
        case GD_OP_SNAP:
        case GD_OP_BARRIER:
        case GD_OP_BITROT:
        case GD_OP_SCRUB_STATUS:
        case GD_OP_SCRUB_ONDEMAND:
        case GD_OP_RESET_BRICK:
        case GD_OP_MAX_OPVERSION:
        case GD_OP_DETACH_NOT_STARTED:
        case GD_OP_GANESHA:
        case GD_OP_DETACH_TIER:
        case GD_OP_TIER_MIGRATE:
        case GD_OP_TIER_START_STOP:
        case GD_OP_TIER_STATUS:
        case GD_OP_DETACH_TIER_STATUS:
        case GD_OP_REMOVE_TIER_BRICK:
        case GD_OP_ADD_TIER_BRICK:
        {
            /* nothing specific to be done */
            break;
        }
        case GD_OP_COPY_FILE: {
            if (ctx)
                ret = dict_get_strn(ctx, "errstr", SLEN("errstr"), &errstr);
            break;
        }
        case GD_OP_SYS_EXEC: {
            if (ctx) {
                ret = dict_get_strn(ctx, "errstr", SLEN("errstr"), &errstr);
                ret = dict_set_strn(ctx, "glusterd_workdir",
                                    SLEN("glusterd_workdir"), conf->workdir);
            }
            break;
        }
    }

    rsp.op_ret = op_ret;
    rsp.op_errno = op_errno;

    if (errstr)
        rsp.op_errstr = errstr;
    else if (op_errstr)
        rsp.op_errstr = op_errstr;

    if (!rsp.op_errstr)
        rsp.op_errstr = "";

    if (ctx) {
        ret = dict_allocate_and_serialize(ctx, &rsp.dict.dict_val,
                                          &rsp.dict.dict_len);
        if (ret < 0)
            gf_msg(this->name, GF_LOG_ERROR, 0,
                   GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
                   "failed to "
                   "serialize buffer");
        else
            free_ptr = rsp.dict.dict_val;
    }

    /* needed by 'rebalance status' */
    if (status)
        rsp.op_errno = status;

    cli_rsp = &rsp;
    xdrproc = (xdrproc_t)xdr_gf_cli_rsp;

    glusterd_to_cli(req, cli_rsp, NULL, 0, NULL, xdrproc, ctx);
    ret = 0;

    GF_FREE(free_ptr);
    gf_msg_debug(this->name, 0, "Returning %d", ret);
    return ret;
}
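
/* All response handling in this file is serialized under conf->big_lock:
 * each public *_cbk is a thin wrapper that runs its __-prefixed handler
 * through this helper, for example:
 *
 *     int glusterd_probe_cbk(struct rpc_req *req, struct iovec *iov,
 *                            int count, void *myframe)
 *     {
 *         return glusterd_big_locked_cbk(req, iov, count, myframe,
 *                                        __glusterd_probe_cbk);
 *     }
 */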

int
glusterd_big_locked_cbk(struct rpc_req *req, struct iovec *iov, int count,
                        void *myframe, fop_cbk_fn_t fn)
{
    glusterd_conf_t *priv = THIS->private;
    int ret = -1;

    synclock_lock(&priv->big_lock);
    ret = fn(req, iov, count, myframe);
    synclock_unlock(&priv->big_lock);

    return ret;
}

int
__glusterd_probe_cbk(struct rpc_req *req, struct iovec *iov, int count,
                     void *myframe)
{
    gd1_mgmt_probe_rsp rsp = {
        {0},
    };
    int ret = 0;
    glusterd_peerinfo_t *peerinfo = NULL;
    glusterd_friend_sm_event_t *event = NULL;
    glusterd_probe_ctx_t *ctx = NULL;
    xlator_t *this = NULL;
    glusterd_conf_t *conf = NULL;

    if (-1 == req->rpc_status) {
        goto out;
    }

    this = THIS;
    GF_ASSERT(this != NULL);
    conf = this->private;
    GF_VALIDATE_OR_GOTO(this->name, (conf != NULL), out);

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_probe_rsp);
    if (ret < 0) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RES_DECODE_FAIL, "error");
        // rsp.op_ret   = -1;
        // rsp.op_errno = EINVAL;
        goto out;
    }

    gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_PROBE_REQ_RESP_RCVD,
           "Received probe resp from uuid: %s, host: %s", uuid_utoa(rsp.uuid),
           rsp.hostname);
    if (rsp.op_ret != 0) {
        ctx = ((call_frame_t *)myframe)->local;
        ((call_frame_t *)myframe)->local = NULL;

        GF_ASSERT(ctx);

        if (ctx->req) {
            glusterd_xfer_cli_probe_resp(ctx->req, rsp.op_ret, rsp.op_errno,
                                         rsp.op_errstr, ctx->hostname,
                                         ctx->port, ctx->dict);
        }

        glusterd_destroy_probe_ctx(ctx);
        (void)glusterd_friend_remove(rsp.uuid, rsp.hostname);
        ret = rsp.op_ret;
        goto out;
    }

    RCU_READ_LOCK;
    peerinfo = glusterd_peerinfo_find(rsp.uuid, rsp.hostname);
    if (peerinfo == NULL) {
        RCU_READ_UNLOCK;
        ret = -1;
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEER_NOT_FOUND,
               "Could not find peer %s(%s)", rsp.hostname,
               uuid_utoa(rsp.uuid));
        goto out;
    }

    /*
     * In the case of a fresh probe, rsp.uuid and peerinfo->uuid will not
     * match, as peerinfo->uuid will be NULL.
     *
     * In the case of a peer probe being done to add a new network to a
     * peer, rsp.uuid will match an existing peerinfo->uuid. If we have
     * reached this stage it means that the current address/hostname being
     * used isn't present in the found peerinfo. If it were, we would have
     * found out earlier in the probe process and wouldn't even reach here.
     * So, we need to add the new hostname to the peer.
     *
     * This addition should only be done for cluster op-version >=
     * GD_OP_VERSION_3_6_0 as address lists are only supported from then on.
     * Also, this update should only be done when an explicit CLI probe
     * command was used to begin the probe process.
     */
    if ((conf->op_version >= GD_OP_VERSION_3_6_0) &&
        (gf_uuid_compare(rsp.uuid, peerinfo->uuid) == 0)) {
        ctx = ((call_frame_t *)myframe)->local;
        /* Presence of ctx->req implies this probe was started by a cli
         * probe command
         */
        if (ctx->req == NULL)
            goto cont;

        gf_msg_debug(this->name, 0,
                     "Adding address '%s' to "
                     "existing peer %s",
                     rsp.hostname, uuid_utoa(rsp.uuid));

        ret = glusterd_friend_remove(NULL, rsp.hostname);
        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0,
                   GD_MSG_STALE_PEERINFO_REMOVE_FAIL,
                   "Could not remove "
                   "stale peerinfo with name %s",
                   rsp.hostname);
            goto reply;
        }

        ret = gd_add_address_to_peer(peerinfo, rsp.hostname);
        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0,
                   GD_MSG_HOSTNAME_ADD_TO_PEERLIST_FAIL,
                   "Couldn't add hostname to peer list");
            goto reply;
        }

        /* Injecting EVENT_NEW_NAME to send update */
        ret = glusterd_friend_sm_new_event(GD_FRIEND_EVENT_NEW_NAME, &event);
        if (!ret) {
            event->peername = gf_strdup(peerinfo->hostname);
            gf_uuid_copy(event->peerid, peerinfo->uuid);

            ret = glusterd_friend_sm_inject_event(event);
        }
        rsp.op_errno = GF_PROBE_FRIEND;

    reply:
        ctx = ((call_frame_t *)myframe)->local;
        ((call_frame_t *)myframe)->local = NULL;

        if (!ctx) {
            ret = -1;
            goto unlock;
        }

        if (ctx->req) {
            glusterd_xfer_cli_probe_resp(ctx->req, ret, rsp.op_errno,
                                         rsp.op_errstr, ctx->hostname,
                                         ctx->port, ctx->dict);
        }

        glusterd_destroy_probe_ctx(ctx);

        goto unlock;

    } else if (strncasecmp(rsp.hostname, peerinfo->hostname, 1024)) {
        gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_HOST_PRESENT_ALREADY,
               "Host: %s  with uuid: %s "
               "already present in cluster with alias hostname: %s",
               rsp.hostname, uuid_utoa(rsp.uuid), peerinfo->hostname);

        ctx = ((call_frame_t *)myframe)->local;
        ((call_frame_t *)myframe)->local = NULL;

        if (!ctx) {
            ret = -1;
            goto unlock;
        }

        rsp.op_errno = GF_PROBE_FRIEND;
        if (ctx->req) {
            glusterd_xfer_cli_probe_resp(ctx->req, rsp.op_ret, rsp.op_errno,
                                         rsp.op_errstr, ctx->hostname,
                                         ctx->port, ctx->dict);
        }

        glusterd_destroy_probe_ctx(ctx);
        (void)glusterd_friend_remove(NULL, rsp.hostname);
        ret = rsp.op_ret;

        goto unlock;
    }

cont:
    gf_uuid_copy(peerinfo->uuid, rsp.uuid);

    ret = glusterd_friend_sm_new_event(GD_FRIEND_EVENT_INIT_FRIEND_REQ, &event);

    if (ret) {
        RCU_READ_UNLOCK;
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_NEW_FRIEND_SM_EVENT_GET_FAIL,
               "Unable to get event");
        goto out;
    }

    event->peername = gf_strdup(peerinfo->hostname);
    gf_uuid_copy(event->peerid, peerinfo->uuid);

    event->ctx = ((call_frame_t *)myframe)->local;
    ((call_frame_t *)myframe)->local = NULL;
    ret = glusterd_friend_sm_inject_event(event);

    gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_PROBE_REQ_RESP_RCVD,
           "Received resp to probe req");

unlock:
    RCU_READ_UNLOCK;

out:
    free(rsp.hostname);  // malloced by xdr
    GLUSTERD_STACK_DESTROY(((call_frame_t *)myframe));

    /* Attempt to start the state machine. Needed as no state machine could
     * be running at the time this RPC reply was received
     */
    if (!ret) {
        glusterd_friend_sm();
        glusterd_op_sm();
    }

    return ret;
}

int
glusterd_probe_cbk(struct rpc_req *req, struct iovec *iov, int count,
                   void *myframe)
{
    return glusterd_big_locked_cbk(req, iov, count, myframe,
                                   __glusterd_probe_cbk);
}

int
__glusterd_friend_add_cbk(struct rpc_req *req, struct iovec *iov, int count,
                          void *myframe)
{
    gd1_mgmt_friend_rsp rsp = {
        {0},
    };
    int ret = -1;
    glusterd_friend_sm_event_t *event = NULL;
    glusterd_friend_sm_event_type_t event_type = GD_FRIEND_EVENT_NONE;
    glusterd_peerinfo_t *peerinfo = NULL;
    int32_t op_ret = -1;
    int32_t op_errno = EINVAL;
    glusterd_probe_ctx_t *ctx = NULL;
    glusterd_friend_update_ctx_t *ev_ctx = NULL;

    if (-1 == req->rpc_status) {
        rsp.op_ret = -1;
        rsp.op_errno = EINVAL;
        goto out;
    }

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_friend_rsp);
    if (ret < 0) {
        gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_RES_DECODE_FAIL,
               "error");
        rsp.op_ret = -1;
        rsp.op_errno = EINVAL;
        goto out;
    }

    op_ret = rsp.op_ret;
    op_errno = rsp.op_errno;

    gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_RESPONSE_INFO,
           "Received %s from uuid: %s, host: %s, port: %d",
           (op_ret) ? "RJT" : "ACC", uuid_utoa(rsp.uuid), rsp.hostname,
           rsp.port);

    RCU_READ_LOCK;

    peerinfo = glusterd_peerinfo_find(rsp.uuid, rsp.hostname);
    if (peerinfo == NULL) {
        RCU_READ_UNLOCK;
        ret = -1;
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_RESP_FROM_UNKNOWN_PEER,
               "received friend add response from"
               " unknown peer uuid: %s",
               uuid_utoa(rsp.uuid));
        goto out;
    }

    if (op_ret)
        event_type = GD_FRIEND_EVENT_RCVD_RJT;
    else
        event_type = GD_FRIEND_EVENT_RCVD_ACC;

    ret = glusterd_friend_sm_new_event(event_type, &event);

    if (ret) {
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_NEW_GET_FAIL,
               "Unable to get event");
        goto unlock;
    }

    ev_ctx = GF_CALLOC(1, sizeof(*ev_ctx), gf_gld_mt_friend_update_ctx_t);
    if (!ev_ctx) {
        ret = -1;
        goto unlock;
    }

    gf_uuid_copy(ev_ctx->uuid, rsp.uuid);
    ev_ctx->hostname = gf_strdup(rsp.hostname);

    event->peername = gf_strdup(peerinfo->hostname);
    gf_uuid_copy(event->peerid, peerinfo->uuid);
    event->ctx = ev_ctx;
    ret = glusterd_friend_sm_inject_event(event);

unlock:
    RCU_READ_UNLOCK;
out:
    ctx = ((call_frame_t *)myframe)->local;
    ((call_frame_t *)myframe)->local = NULL;

    if (ctx && ctx->req) {
        /* reverse probe doesn't have req */
        ret = glusterd_xfer_cli_probe_resp(ctx->req, op_ret, op_errno, NULL,
                                           ctx->hostname, ctx->port, ctx->dict);
    }
    if (!ret) {
        glusterd_friend_sm();
        glusterd_op_sm();
    }

    if (ctx)
        glusterd_destroy_probe_ctx(ctx);
    free(rsp.hostname);  // malloced by xdr
    GLUSTERD_STACK_DESTROY(((call_frame_t *)myframe));
    return ret;
}

int
glusterd_friend_add_cbk(struct rpc_req *req, struct iovec *iov, int count,
                        void *myframe)
{
    return glusterd_big_locked_cbk(req, iov, count, myframe,
                                   __glusterd_friend_add_cbk);
}

int
__glusterd_friend_remove_cbk(struct rpc_req *req, struct iovec *iov, int count,
                             void *myframe)
{
    gd1_mgmt_friend_rsp rsp = {
        {0},
    };
    glusterd_conf_t *conf = NULL;
    int ret = -1;
    glusterd_friend_sm_event_t *event = NULL;
    glusterd_friend_sm_event_type_t event_type = GD_FRIEND_EVENT_NONE;
    glusterd_peerinfo_t *peerinfo = NULL;
    int32_t op_ret = -1;
    int32_t op_errno = 0;
    glusterd_probe_ctx_t *ctx = NULL;
    gf_boolean_t move_sm_now = _gf_true;

    conf = THIS->private;
    GF_ASSERT(conf);

    ctx = ((call_frame_t *)myframe)->local;
    ((call_frame_t *)myframe)->local = NULL;
    if (!ctx) {
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_EVENT_NEW_GET_FAIL,
               "Unable to get glusterd probe context");
        goto out;
    }
    if (-1 == req->rpc_status) {
        rsp.op_ret = -1;
        rsp.op_errno = EINVAL;
        move_sm_now = _gf_false;
        goto inject;
    }

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_friend_rsp);
    if (ret < 0) {
        gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_RES_DECODE_FAIL,
               "error");
        rsp.op_ret = -1;
        rsp.op_errno = EINVAL;
        goto respond;
    }

    op_ret = rsp.op_ret;
    op_errno = rsp.op_errno;

    gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_RESPONSE_INFO,
           "Received %s from uuid: %s, host: %s, port: %d",
           (op_ret) ? "RJT" : "ACC", uuid_utoa(rsp.uuid), rsp.hostname,
           rsp.port);

inject:
    RCU_READ_LOCK;

    peerinfo = glusterd_peerinfo_find(rsp.uuid, ctx->hostname);
    if (peerinfo == NULL) {
        // can happen as part of rpc clnt connection cleanup
        // when the frame timeout happens after 30 minutes
        goto unlock;
    }

    event_type = GD_FRIEND_EVENT_REMOVE_FRIEND;

    ret = glusterd_friend_sm_new_event(event_type, &event);

    if (ret) {
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_NEW_GET_FAIL,
               "Unable to get event");
        goto unlock;
    }
    event->peername = gf_strdup(peerinfo->hostname);
    gf_uuid_copy(event->peerid, peerinfo->uuid);

    ret = glusterd_friend_sm_inject_event(event);

    if (ret)
        goto unlock;

    /* friend_sm would be moved on CLNT_DISCONNECT, consequently
       cleaning up peerinfo. Else, we run the risk of triggering
       a clnt_destroy within saved_frames_unwind.
    */
    op_ret = 0;

unlock:
    RCU_READ_UNLOCK;

respond:
    ret = glusterd_xfer_cli_deprobe_resp(ctx->req, op_ret, op_errno, NULL,
                                         ctx->hostname, ctx->dict);
    if (!ret && move_sm_now) {
        glusterd_friend_sm();
        glusterd_op_sm();
    }

    glusterd_broadcast_friend_delete(ctx->hostname, NULL);
    glusterd_destroy_probe_ctx(ctx);
out:
    free(rsp.hostname);  // malloced by xdr
    GLUSTERD_STACK_DESTROY(((call_frame_t *)myframe));
    return ret;
}

int
glusterd_friend_remove_cbk(struct rpc_req *req, struct iovec *iov, int count,
                           void *myframe)
{
    return glusterd_big_locked_cbk(req, iov, count, myframe,
                                   __glusterd_friend_remove_cbk);
}

int32_t
__glusterd_friend_update_cbk(struct rpc_req *req, struct iovec *iov, int count,
                             void *myframe)
{
    int ret = -1;
    gd1_mgmt_friend_update_rsp rsp = {
        {0},
    };
    xlator_t *this = NULL;

    GF_ASSERT(req);
    this = THIS;

    if (-1 == req->rpc_status) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RPC_FAILURE, "RPC Error");
        goto out;
    }

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_friend_update_rsp);
    if (ret < 0) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RES_DECODE_FAIL,
               "Failed to decode friend"
               " update response");
        goto out;
    }

    ret = 0;
out:
    gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_RESPONSE_INFO,
           "Received %s from uuid: %s", (ret) ? "RJT" : "ACC",
           uuid_utoa(rsp.uuid));

    GLUSTERD_STACK_DESTROY(((call_frame_t *)myframe));
    return ret;
}

int
glusterd_friend_update_cbk(struct rpc_req *req, struct iovec *iov, int count,
                           void *myframe)
{
    return glusterd_big_locked_cbk(req, iov, count, myframe,
                                   __glusterd_friend_update_cbk);
}

int32_t
__glusterd_cluster_lock_cbk(struct rpc_req *req, struct iovec *iov, int count,
                            void *myframe)
{
    gd1_mgmt_cluster_lock_rsp rsp = {
        {0},
    };
    int ret = -1;
    int32_t op_ret = -1;
    glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
    xlator_t *this = NULL;
    uuid_t *txn_id = NULL;
    glusterd_conf_t *priv = NULL;
    char *err_str = NULL;

    this = THIS;
    GF_ASSERT(this);
    priv = this->private;
    GF_ASSERT(priv);
    GF_ASSERT(req);

    txn_id = &priv->global_txn_id;

    if (-1 == req->rpc_status) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_LOCK_RESP_FROM_PEER,
               "Lock response is not "
               "received from one of the peers");
        err_str = "Lock response is not received from one of the peers";
        glusterd_set_opinfo(err_str, ENETRESET, -1);
        event_type = GD_OP_EVENT_RCVD_RJT;
        goto out;
    }

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_cluster_lock_rsp);
    if (ret < 0) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RES_DECODE_FAIL,
               "Failed to decode "
               "cluster lock response received from peer");
        err_str =
            "Failed to decode cluster lock response received from"
            " peer";
        glusterd_set_opinfo(err_str, EINVAL, -1);
        event_type = GD_OP_EVENT_RCVD_RJT;
        goto out;
    }

    op_ret = rsp.op_ret;

    if (op_ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_LOCK_FROM_UUID_REJCT,
               "Received lock RJT from uuid: %s", uuid_utoa(rsp.uuid));
    } else {
        gf_msg_debug(this->name, 0, "Received lock ACC from uuid: %s",
                     uuid_utoa(rsp.uuid));
    }

    RCU_READ_LOCK;
    ret = (glusterd_peerinfo_find(rsp.uuid, NULL) == NULL);
    RCU_READ_UNLOCK;

    if (ret) {
        gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_RESP_FROM_UNKNOWN_PEER,
               "cluster lock response received from unknown peer: %s. "
               "Ignoring response",
               uuid_utoa(rsp.uuid));
        err_str = "cluster lock response received from unknown peer";
        goto out;
    }

    if (op_ret) {
        event_type = GD_OP_EVENT_RCVD_RJT;
        opinfo.op_ret = op_ret;
        opinfo.op_errstr = gf_strdup(
            "Another transaction could be in "
            "progress. Please try again after"
            " some time.");
    } else {
        event_type = GD_OP_EVENT_RCVD_ACC;
    }

out:

    ret = glusterd_set_txn_opinfo(txn_id, &opinfo);
    if (ret)
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set "
               "transaction's opinfo");

    ret = glusterd_op_sm_inject_event(event_type, txn_id, NULL);

    if (!ret) {
        glusterd_friend_sm();
        glusterd_op_sm();
    }

    GLUSTERD_STACK_DESTROY(((call_frame_t *)myframe));
    return ret;
}

int32_t
glusterd_cluster_lock_cbk(struct rpc_req *req, struct iovec *iov, int count,
                          void *myframe)
{
    return glusterd_big_locked_cbk(req, iov, count, myframe,
                                   __glusterd_cluster_lock_cbk);
}

void
glusterd_set_opinfo(char *errstr, int32_t op_errno, int32_t op_ret)
{
    opinfo.op_errstr = gf_strdup(errstr);
    opinfo.op_errno = op_errno;
    opinfo.op_ret = op_ret;
}
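
/* For the mgmt_v3 lock/unlock callbacks below, the transaction id travels
 * in frame->cookie (set when the request was submitted) and is replaced by
 * rsp.txn_id once a response has been decoded successfully. */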

static int32_t
glusterd_mgmt_v3_lock_peers_cbk_fn(struct rpc_req *req, struct iovec *iov,
                                   int count, void *myframe)
{
    gd1_mgmt_v3_lock_rsp rsp = {
        {0},
    };
    int ret = -1;
    int32_t op_ret = -1;
    glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
    xlator_t *this = NULL;
    call_frame_t *frame = NULL;
    uuid_t *txn_id = NULL;
    char *err_str = NULL;

    this = THIS;
    GF_ASSERT(this);
    GF_ASSERT(req);

    frame = myframe;
    txn_id = frame->cookie;
    frame->cookie = NULL;

    if (-1 == req->rpc_status) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_LOCK_RESP_FROM_PEER,
               "Lock response is not "
               "received from one of the peers");
        err_str = "Lock response is not received from one of the peers";
        glusterd_set_opinfo(err_str, ENETRESET, -1);
        event_type = GD_OP_EVENT_RCVD_RJT;
        goto out;
    }

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
    if (ret < 0) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RES_DECODE_FAIL,
               "Failed to decode "
               "mgmt_v3 lock response received from peer");
        err_str =
            "Failed to decode mgmt_v3 lock response received from"
            " peer";
        glusterd_set_opinfo(err_str, EINVAL, -1);
        event_type = GD_OP_EVENT_RCVD_RJT;
        goto out;
    }

    op_ret = rsp.op_ret;

    txn_id = &rsp.txn_id;

    if (op_ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCK_FROM_UUID_REJCT,
               "Received mgmt_v3 lock RJT from uuid: %s", uuid_utoa(rsp.uuid));
    } else {
        gf_msg_debug(this->name, 0, "Received mgmt_v3 lock ACC from uuid: %s",
                     uuid_utoa(rsp.uuid));
    }

    RCU_READ_LOCK;
    ret = (glusterd_peerinfo_find(rsp.uuid, NULL) == NULL);
    RCU_READ_UNLOCK;

    if (ret) {
        gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_RESP_FROM_UNKNOWN_PEER,
               "mgmt_v3 lock response received "
               "from unknown peer: %s. Ignoring response",
               uuid_utoa(rsp.uuid));
        goto out;
    }

    if (op_ret) {
        event_type = GD_OP_EVENT_RCVD_RJT;
        opinfo.op_ret = op_ret;
        opinfo.op_errstr = gf_strdup(
            "Another transaction could be in "
            "progress. Please try again after"
            " some time.");
    } else {
        event_type = GD_OP_EVENT_RCVD_ACC;
    }

out:

    ret = glusterd_set_txn_opinfo(txn_id, &opinfo);
    if (ret)
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set "
               "transaction's opinfo");

    ret = glusterd_op_sm_inject_event(event_type, txn_id, NULL);
    if (!ret) {
        glusterd_friend_sm();
        glusterd_op_sm();
    }

    GF_FREE(frame->cookie);
    GLUSTERD_STACK_DESTROY(frame);
    return ret;
}

int32_t
glusterd_mgmt_v3_lock_peers_cbk(struct rpc_req *req, struct iovec *iov,
                                int count, void *myframe)
{
    return glusterd_big_locked_cbk(req, iov, count, myframe,
                                   glusterd_mgmt_v3_lock_peers_cbk_fn);
}

static int32_t
glusterd_mgmt_v3_unlock_peers_cbk_fn(struct rpc_req *req, struct iovec *iov,
                                     int count, void *myframe)
{
    gd1_mgmt_v3_unlock_rsp rsp = {
        {0},
    };
    int ret = -1;
    int32_t op_ret = -1;
    glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
    xlator_t *this = NULL;
    call_frame_t *frame = NULL;
    uuid_t *txn_id = NULL;
    char *err_str = NULL;

    this = THIS;
    GF_ASSERT(this);
    GF_ASSERT(req);

    frame = myframe;
    txn_id = frame->cookie;
    frame->cookie = NULL;

    if (-1 == req->rpc_status) {
        err_str = "Unlock response not received from one of the peers.";
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CLUSTER_UNLOCK_FAILED,
               "Unlock response is not received from one of the peers");
        glusterd_set_opinfo(err_str, 0, 0);
        event_type = GD_OP_EVENT_RCVD_RJT;
        goto out;
    }

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
    if (ret < 0) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CLUSTER_UNLOCK_FAILED,
               "Failed to decode mgmt_v3 unlock response received from "
               "peer");
        err_str =
            "Failed to decode mgmt_v3 unlock response received "
            "from peer";
        glusterd_set_opinfo(err_str, 0, 0);
        event_type = GD_OP_EVENT_RCVD_RJT;
        goto out;
    }

    op_ret = rsp.op_ret;

    txn_id = &rsp.txn_id;

    if (op_ret) {
        gf_msg(
            this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_UNLOCK_FROM_UUID_REJCT,
            "Received mgmt_v3 unlock RJT from uuid: %s", uuid_utoa(rsp.uuid));
    } else {
        gf_msg_debug(this->name, 0, "Received mgmt_v3 unlock ACC from uuid: %s",
                     uuid_utoa(rsp.uuid));
    }

    RCU_READ_LOCK;
    ret = (glusterd_peerinfo_find(rsp.uuid, NULL) == NULL);
    RCU_READ_UNLOCK;

    if (ret) {
        gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_CLUSTER_UNLOCK_FAILED,
               "mgmt_v3 unlock response received "
               "from unknown peer: %s. Ignoring response",
               uuid_utoa(rsp.uuid));
        goto out;
    }

    if (op_ret) {
        event_type = GD_OP_EVENT_RCVD_RJT;
        opinfo.op_ret = op_ret;
        opinfo.op_errstr = gf_strdup(
            "Another transaction could be in "
            "progress. Please try again after"
            " some time.");
    } else {
        event_type = GD_OP_EVENT_RCVD_ACC;
    }

out:

    ret = glusterd_set_txn_opinfo(txn_id, &opinfo);
    if (ret)
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set "
               "transaction's opinfo");

    ret = glusterd_op_sm_inject_event(event_type, txn_id, NULL);

    if (!ret) {
        glusterd_friend_sm();
        glusterd_op_sm();
    }

    GF_FREE(frame->cookie);
    GLUSTERD_STACK_DESTROY(frame);
    return ret;
}

int32_t
glusterd_mgmt_v3_unlock_peers_cbk(struct rpc_req *req, struct iovec *iov,
                                  int count, void *myframe)
{
    return glusterd_big_locked_cbk(req, iov, count, myframe,
                                   glusterd_mgmt_v3_unlock_peers_cbk_fn);
}

int32_t
__glusterd_cluster_unlock_cbk(struct rpc_req *req, struct iovec *iov, int count,
                              void *myframe)
{
    gd1_mgmt_cluster_lock_rsp rsp = {
        {0},
    };
    int ret = -1;
    int32_t op_ret = -1;
    glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
    xlator_t *this = NULL;
    uuid_t *txn_id = NULL;
    glusterd_conf_t *priv = NULL;
    char *err_str = NULL;

    this = THIS;
    GF_ASSERT(this);
    priv = this->private;
    GF_ASSERT(priv);
    GF_ASSERT(req);

    txn_id = &priv->global_txn_id;

    if (-1 == req->rpc_status) {
        err_str = "Unlock response not received from one of the peers.";
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CLUSTER_UNLOCK_FAILED,
               "Unlock response is not received from one of the peers");
        glusterd_set_opinfo(err_str, 0, 0);
        event_type = GD_OP_EVENT_RCVD_RJT;
        goto out;
    }

    ret = xdr_to_generic(*iov, &rsp,
                         (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_rsp);
    if (ret < 0) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CLUSTER_UNLOCK_FAILED,
               "Failed to decode unlock response received from peer");
        err_str =
            "Failed to decode cluster unlock response received "
            "from peer";
        glusterd_set_opinfo(err_str, 0, 0);
        event_type = GD_OP_EVENT_RCVD_RJT;
        goto out;
    }

    op_ret = rsp.op_ret;

    if (op_ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UNLOCK_FROM_UUID_REJCT,
               "Received unlock RJT from uuid: %s", uuid_utoa(rsp.uuid));
    } else {
        gf_msg_debug(this->name, 0, "Received unlock ACC from uuid: %s",
                     uuid_utoa(rsp.uuid));
    }

    RCU_READ_LOCK;
    ret = (glusterd_peerinfo_find(rsp.uuid, NULL) == NULL);
    RCU_READ_UNLOCK;

    if (ret) {
        gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_CLUSTER_UNLOCK_FAILED,
               "Unlock response received from unknown peer %s",
               uuid_utoa(rsp.uuid));
        goto out;
    }

    if (op_ret) {
        event_type = GD_OP_EVENT_RCVD_RJT;
        opinfo.op_ret = op_ret;
    } else {
        event_type = GD_OP_EVENT_RCVD_ACC;
    }

out:

    ret = glusterd_set_txn_opinfo(txn_id, &opinfo);
    if (ret)
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set "
               "transaction's opinfo");

    ret = glusterd_op_sm_inject_event(event_type, txn_id, NULL);

    if (!ret) {
        glusterd_friend_sm();
        glusterd_op_sm();
    }

    GLUSTERD_STACK_DESTROY(((call_frame_t *)myframe));
    return ret;
}

int32_t
glusterd_cluster_unlock_cbk(struct rpc_req *req, struct iovec *iov, int count,
                            void *myframe)
{
    return glusterd_big_locked_cbk(req, iov, count, myframe,
                                   __glusterd_cluster_unlock_cbk);
}

int32_t
__glusterd_stage_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
                        void *myframe)
{
    gd1_mgmt_stage_op_rsp rsp = {
        {0},
    };
    int ret = -1;
    int32_t op_ret = -1;
    glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
    glusterd_peerinfo_t *peerinfo = NULL;
    dict_t *dict = NULL;
    char *peer_str = NULL;
    xlator_t *this = NULL;
    glusterd_conf_t *priv = NULL;
    uuid_t *txn_id = NULL;
    call_frame_t *frame = NULL;

    this = THIS;
    GF_ASSERT(this);
    GF_ASSERT(req);
    priv = this->private;
    GF_ASSERT(priv);
    GF_ASSERT(myframe);

    frame = myframe;
    txn_id = frame->cookie;

    if (-1 == req->rpc_status) {
        rsp.op_ret = -1;
        rsp.op_errno = EINVAL;
        /* use standard allocation to keep uniformity
           in freeing it */
        rsp.op_errstr = strdup("error");
        goto out;
    }

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_stage_op_rsp);
    if (ret < 0) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RES_DECODE_FAIL,
               "Failed to decode stage "
               "response received from peer");
        rsp.op_ret = -1;
        rsp.op_errno = EINVAL;
        /* use standard allocation to keep uniformity
           in freeing it */
        rsp.op_errstr = strdup(
            "Failed to decode stage response "
            "received from peer.");
        goto out;
    }

    if (rsp.dict.dict_len) {
        /* Unserialize the dictionary */
        dict = dict_new();

        ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
        if (ret < 0) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
                   "failed to "
                   "unserialize rsp-buffer to dictionary");
            event_type = GD_OP_EVENT_RCVD_RJT;
            goto out;
        } else {
            dict->extra_stdfree = rsp.dict.dict_val;
        }
    }

out:
    op_ret = rsp.op_ret;

    if (op_ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STAGE_FROM_UUID_REJCT,
               "Received stage RJT from uuid: %s", uuid_utoa(rsp.uuid));
    } else {
        gf_msg_debug(this->name, 0, "Received stage ACC from uuid: %s",
                     uuid_utoa(rsp.uuid));
    }

    RCU_READ_LOCK;
    peerinfo = glusterd_peerinfo_find(rsp.uuid, NULL);
    if (peerinfo == NULL) {
        gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_RESP_FROM_UNKNOWN_PEER,
               "Stage response received "
               "from unknown peer: %s. Ignoring response.",
               uuid_utoa(rsp.uuid));
    }

    if (op_ret) {
        event_type = GD_OP_EVENT_RCVD_RJT;
        opinfo.op_ret = op_ret;
        if (strcmp("", rsp.op_errstr)) {
            opinfo.op_errstr = gf_strdup(rsp.op_errstr);
        } else {
            if (peerinfo)
                peer_str = peerinfo->hostname;
            else
                peer_str = uuid_utoa(rsp.uuid);
            char err_str[2048];
            snprintf(err_str, sizeof(err_str), OPERRSTR_STAGE_FAIL, peer_str);
            opinfo.op_errstr = gf_strdup(err_str);
        }
    } else {
        event_type = GD_OP_EVENT_RCVD_ACC;
    }

    RCU_READ_UNLOCK;

    ret = glusterd_set_txn_opinfo(txn_id, &opinfo);
    if (ret)
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set "
               "transaction's opinfo");

    ret = glusterd_op_sm_inject_event(event_type, txn_id, NULL);

    if (!ret) {
        glusterd_friend_sm();
        glusterd_op_sm();
    }

    free(rsp.op_errstr);  // malloced by xdr
    if (dict) {
        if (!dict->extra_stdfree && rsp.dict.dict_val)
            free(rsp.dict.dict_val);  // malloced by xdr
        dict_unref(dict);
    } else {
        free(rsp.dict.dict_val);  // malloced by xdr
    }
    GF_FREE(frame->cookie);
    GLUSTERD_STACK_DESTROY(((call_frame_t *)myframe));
    return ret;
}

int32_t
glusterd_stage_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
                      void *myframe)
{
    return glusterd_big_locked_cbk(req, iov, count, myframe,
                                   __glusterd_stage_op_cbk);
}

int32_t
__glusterd_commit_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
                         void *myframe)
{
    gd1_mgmt_commit_op_rsp rsp = {
        {0},
    };
    int ret = -1;
    int32_t op_ret = -1;
    glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
    glusterd_peerinfo_t *peerinfo = NULL;
    dict_t *dict = NULL;
    char *peer_str = NULL;
    xlator_t *this = NULL;
    glusterd_conf_t *priv = NULL;
    uuid_t *txn_id = NULL;
    glusterd_op_info_t txn_op_info = {
        {0},
    };
    call_frame_t *frame = NULL;

    this = THIS;
    GF_ASSERT(this);
    GF_ASSERT(req);
    priv = this->private;
    GF_ASSERT(priv);
    GF_ASSERT(myframe);

    frame = myframe;
    txn_id = frame->cookie;

    if (-1 == req->rpc_status) {
        rsp.op_ret = -1;
        rsp.op_errno = EINVAL;
        /* use standard allocation to keep uniformity
           in freeing it */
        rsp.op_errstr = strdup("error");
        event_type = GD_OP_EVENT_RCVD_RJT;
        goto out;
    }

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_commit_op_rsp);
    if (ret < 0) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RES_DECODE_FAIL,
               "Failed to decode commit "
               "response received from peer");
        rsp.op_ret = -1;
        rsp.op_errno = EINVAL;
        /* use standard allocation to keep uniformity
           in freeing it */
        rsp.op_errstr = strdup(
            "Failed to decode commit response "
            "received from peer.");
        event_type = GD_OP_EVENT_RCVD_RJT;
        goto out;
    }

    if (rsp.dict.dict_len) {
        /* Unserialize the dictionary */
        dict = dict_new();

        ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
        if (ret < 0) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
                   "failed to "
                   "unserialize rsp-buffer to dictionary");
            event_type = GD_OP_EVENT_RCVD_RJT;
            goto out;
        } else {
            dict->extra_stdfree = rsp.dict.dict_val;
        }
    }

    op_ret = rsp.op_ret;

    if (op_ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_FROM_UUID_REJCT,
               "Received commit RJT from uuid: %s", uuid_utoa(rsp.uuid));
    } else {
        gf_msg_debug(this->name, 0, "Received commit ACC from uuid: %s",
                     uuid_utoa(rsp.uuid));
    }

    ret = glusterd_get_txn_opinfo(txn_id, &txn_op_info);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_GET_FAIL,
               "Failed to get txn_op_info "
               "for txn_id = %s",
               uuid_utoa(*txn_id));
    }

    RCU_READ_LOCK;
    peerinfo = glusterd_peerinfo_find(rsp.uuid, NULL);
    if (peerinfo == NULL) {
        gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_RESP_FROM_UNKNOWN_PEER,
               "Commit response for "
               "'Volume %s' received from unknown peer: %s",
               gd_op_list[opinfo.op], uuid_utoa(rsp.uuid));
    }

    if (op_ret) {
        event_type = GD_OP_EVENT_RCVD_RJT;
        opinfo.op_ret = op_ret;
        if (strcmp("", rsp.op_errstr)) {
            opinfo.op_errstr = gf_strdup(rsp.op_errstr);
        } else {
            if (peerinfo)
                peer_str = peerinfo->hostname;
            else
                peer_str = uuid_utoa(rsp.uuid);
            char err_str[2048];
            snprintf(err_str, sizeof(err_str), OPERRSTR_COMMIT_FAIL, peer_str);
            opinfo.op_errstr = gf_strdup(err_str);
        }
        if (!opinfo.op_errstr) {
            goto unlock;
        }
    } else {
        event_type = GD_OP_EVENT_RCVD_ACC;
        GF_ASSERT(rsp.op == txn_op_info.op);

        switch (rsp.op) {
            case GD_OP_PROFILE_VOLUME:
                ret = glusterd_profile_volume_use_rsp_dict(txn_op_info.op_ctx,
                                                           dict);
                if (ret)
                    goto unlock;
                break;

            case GD_OP_REBALANCE:
            case GD_OP_DEFRAG_BRICK_VOLUME:
                ret = glusterd_volume_rebalance_use_rsp_dict(txn_op_info.op_ctx,
                                                             dict);
                if (ret)
                    goto unlock;
                break;

            default:
                break;
        }
    }
unlock:
    RCU_READ_UNLOCK;

out:

    ret = glusterd_set_txn_opinfo(txn_id, &opinfo);
    if (ret)
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set "
               "transaction's opinfo");

    ret = glusterd_op_sm_inject_event(event_type, txn_id, NULL);

    if (!ret) {
        glusterd_friend_sm();
        glusterd_op_sm();
    }

    if (dict)
        dict_unref(dict);
    free(rsp.op_errstr);  // malloced by xdr
    GF_FREE(frame->cookie);
    GLUSTERD_STACK_DESTROY(((call_frame_t *)myframe));
    return ret;
}

int32_t
glusterd_commit_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
                       void *myframe)
{
    return glusterd_big_locked_cbk(req, iov, count, myframe,
                                   __glusterd_commit_op_cbk);
}
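
/* Request-side helpers. Each glusterd_rpc_* function below builds the XDR
 * request from its input dict/event, looks up the peer, and hands the
 * request to glusterd_submit_request() together with the matching *_cbk. */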

int32_t
glusterd_rpc_probe(call_frame_t *frame, xlator_t *this, void *data)
{
    gd1_mgmt_probe_req req = {
        {0},
    };
    int ret = 0;
    int port = 0;
    char *hostname = NULL;
    glusterd_peerinfo_t *peerinfo = NULL;
    glusterd_conf_t *priv = NULL;
    dict_t *dict = NULL;

    if (!frame || !this || !data) {
        ret = -1;
        goto out;
    }

    dict = data;
    priv = this->private;

    GF_ASSERT(priv);
    ret = dict_get_strn(dict, "hostname", SLEN("hostname"), &hostname);
    if (ret)
        goto out;
    ret = dict_get_int32n(dict, "port", SLEN("port"), &port);
    if (ret)
        port = GF_DEFAULT_BASE_PORT;

    ret = dict_get_ptr(dict, "peerinfo", VOID(&peerinfo));
    if (ret)
        goto out;

    gf_uuid_copy(req.uuid, MY_UUID);
    req.hostname = gf_strdup(hostname);
    req.port = port;

    ret = glusterd_submit_request(
        peerinfo->rpc, &req, frame, peerinfo->peer, GLUSTERD_PROBE_QUERY, NULL,
        this, glusterd_probe_cbk, (xdrproc_t)xdr_gd1_mgmt_probe_req);

out:
    GF_FREE(req.hostname);
    gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret);
    return ret;
}
 1499 
 1500 int32_t
 1501 glusterd_rpc_friend_add(call_frame_t *frame, xlator_t *this, void *data)
 1502 {
 1503     gd1_mgmt_friend_req req = {
 1504         {0},
 1505     };
 1506     int ret = 0;
 1507     glusterd_peerinfo_t *peerinfo = NULL;
 1508     glusterd_conf_t *priv = NULL;
 1509     glusterd_friend_sm_event_t *event = NULL;
 1510     dict_t *peer_data = NULL;
 1511 
 1512     if (!frame || !this || !data) {
 1513         ret = -1;
 1514         goto out;
 1515     }
 1516 
 1517     event = data;
 1518     priv = this->private;
 1519 
 1520     GF_ASSERT(priv);
 1521 
 1522     RCU_READ_LOCK;
 1523 
 1524     peerinfo = glusterd_peerinfo_find(event->peerid, event->peername);
 1525     if (!peerinfo) {
 1526         RCU_READ_UNLOCK;
 1527         ret = -1;
 1528         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEER_NOT_FOUND,
 1529                "Could not find peer %s(%s)", event->peername,
 1530                uuid_utoa(event->peerid));
 1531         goto out;
 1532     }
 1533 
 1534     req.hostname = gf_strdup(peerinfo->hostname);
 1535     req.port = peerinfo->port;
 1536 
 1537     RCU_READ_UNLOCK;
 1538 
 1539     gf_uuid_copy(req.uuid, MY_UUID);
 1540 
 1541     peer_data = dict_new();
 1542     if (!peer_data) {
 1543         errno = ENOMEM;
 1544         goto out;
 1545     }
 1546 
 1547     ret = dict_set_dynstr_with_alloc(peer_data, "hostname_in_cluster",
 1548                                      peerinfo->hostname);
 1549     if (ret) {
 1550         gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
 1551                "Unable to add hostname of the peer");
 1552         goto out;
 1553     }
 1554 
 1555     if (priv->op_version >= GD_OP_VERSION_3_6_0) {
 1556         ret = glusterd_add_missed_snaps_to_export_dict(peer_data);
 1557         if (ret) {
 1558             gf_msg(this->name, GF_LOG_ERROR, 0,
 1559                    GD_MSG_MISSED_SNAP_LIST_STORE_FAIL,
 1560                    "Unable to add list of missed snapshots "
 1561                    "in the peer_data dict for handshake");
 1562             goto out;
 1563         }
 1564 
 1565         ret = glusterd_add_snapshots_to_export_dict(peer_data);
 1566         if (ret) {
 1567             gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SNAP_LIST_SET_FAIL,
 1568                    "Unable to add list of snapshots "
 1569                    "in the peer_data dict for handshake");
 1570             goto out;
 1571         }
 1572     }
 1573 
 1574     /* Don't add any key-value in peer_data dictionary after call this function
 1575      */
 1576     ret = glusterd_add_volumes_to_export_dict(peer_data, &req.vols.vols_val,
 1577                                               &req.vols.vols_len);
 1578     if (ret) {
 1579         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
 1580                "Unable to add list of volumes "
 1581                "in the peer_data dict for handshake");
 1582         goto out;
 1583     }
 1584 
 1585     if (!req.vols.vols_len) {
 1586         ret = dict_allocate_and_serialize(peer_data, &req.vols.vols_val,
 1587                                           &req.vols.vols_len);
 1588         if (ret)
 1589             goto out;
 1590     }
 1591 
 1592     ret = glusterd_submit_request(
 1593         peerinfo->rpc, &req, frame, peerinfo->peer, GLUSTERD_FRIEND_ADD, NULL,
 1594         this, glusterd_friend_add_cbk, (xdrproc_t)xdr_gd1_mgmt_friend_req);
 1595 
 1596 out:
 1597     GF_FREE(req.vols.vols_val);
 1598     GF_FREE(req.hostname);
 1599 
 1600     if (peer_data)
 1601         dict_unref(peer_data);
 1602 
 1603     gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret);
 1604     return ret;
 1605 }
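
      /*
       * The FRIEND_ADD payload is a single serialized dict.  The function
       * above assembles it from: "hostname_in_cluster" (the peer's hostname
       * as recorded locally), the missed-snapshot and snapshot lists (only
       * on op-version >= 3.6.0), and the volume definitions added by
       * glusterd_add_volumes_to_export_dict(), which may also serialize the
       * dict itself into req.vols; the dict_allocate_and_serialize() call
       * is only the fallback for when it did not.
       */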
 1606 
 1607 int32_t
 1608 glusterd_rpc_friend_remove(call_frame_t *frame, xlator_t *this, void *data)
 1609 {
 1610     gd1_mgmt_friend_req req = {
 1611         {0},
 1612     };
 1613     int ret = 0;
 1614     glusterd_peerinfo_t *peerinfo = NULL;
 1615     glusterd_conf_t *priv = NULL;
 1616     glusterd_friend_sm_event_t *event = NULL;
 1617 
 1618     if (!frame || !this || !data) {
 1619         ret = -1;
 1620         goto out;
 1621     }
 1622 
 1623     event = data;
 1624     priv = this->private;
 1625 
 1626     GF_ASSERT(priv);
 1627 
 1628     RCU_READ_LOCK;
 1629 
 1630     peerinfo = glusterd_peerinfo_find(event->peerid, event->peername);
 1631     if (!peerinfo) {
 1632         RCU_READ_UNLOCK;
 1633         ret = -1;
 1634         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEER_NOT_FOUND,
 1635                "Could not find peer %s(%s)", event->peername,
 1636                uuid_utoa(event->peerid));
 1637         goto out;
 1638     }
 1639 
 1640     gf_uuid_copy(req.uuid, MY_UUID);
 1641     req.hostname = gf_strdup(peerinfo->hostname);
 1642     req.port = peerinfo->port;
 1643 
 1644     ret = glusterd_submit_request(peerinfo->rpc, &req, frame, peerinfo->peer,
 1645                                   GLUSTERD_FRIEND_REMOVE, NULL, this,
 1646                                   glusterd_friend_remove_cbk,
 1647                                   (xdrproc_t)xdr_gd1_mgmt_friend_req);
 1648 
 1649     RCU_READ_UNLOCK;
 1650 out:
 1651     GF_FREE(req.hostname);
 1652 
 1653     gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret);
 1654     return ret;
 1655 }
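
      /*
       * Note the lock scope above: unlike glusterd_rpc_friend_add(), the
       * RCU read-side lock is held across glusterd_submit_request(), so
       * peerinfo and the rpc handle reached through it stay valid for the
       * whole submission.
       */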
 1656 
 1657 int32_t
 1658 glusterd_rpc_friend_update(call_frame_t *frame, xlator_t *this, void *data)
 1659 {
 1660     gd1_mgmt_friend_update req = {
 1661         {0},
 1662     };
 1663     int ret = 0;
 1664     glusterd_conf_t *priv = NULL;
 1665     dict_t *friends = NULL;
 1666     call_frame_t *dummy_frame = NULL;
 1667     glusterd_peerinfo_t *peerinfo = NULL;
 1668 
 1669     priv = this->private;
 1670     GF_ASSERT(priv);
 1671 
 1672     friends = data;
 1673     if (!friends)
 1674         goto out;
 1675 
 1676     ret = dict_get_ptr(friends, "peerinfo", VOID(&peerinfo));
 1677     if (ret)
 1678         goto out;
 1679     /* Don't want to send the pointer over */
 1680     dict_deln(friends, "peerinfo", SLEN("peerinfo"));
 1681 
 1682     ret = dict_allocate_and_serialize(friends, &req.friends.friends_val,
 1683                                       &req.friends.friends_len);
 1684     if (ret)
 1685         goto out;
 1686 
 1687     gf_uuid_copy(req.uuid, MY_UUID);
 1688 
 1689     dummy_frame = create_frame(this, this->ctx->pool);
          if (!dummy_frame) {
              ret = -1;
              goto out;
          }
 1690     ret = glusterd_submit_request(peerinfo->rpc, &req, dummy_frame,
 1691                                   peerinfo->peer, GLUSTERD_FRIEND_UPDATE, NULL,
 1692                                   this, glusterd_friend_update_cbk,
 1693                                   (xdrproc_t)xdr_gd1_mgmt_friend_update);
 1694 
 1695 out:
 1696     GF_FREE(req.friends.friends_val);
 1697 
 1698     if (ret && dummy_frame)
 1699         STACK_DESTROY(dummy_frame->root);
 1700 
 1701     gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret);
 1702     return ret;
 1703 }
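
      /*
       * Callers hand glusterd_rpc_friend_update() a dict carrying the
       * target peer as a raw pointer under "peerinfo"; it is deleted above
       * before serialization because it is only meaningful inside this
       * process.  A hedged caller-side sketch (variable names
       * hypothetical):
       *
       *   dict_t *friends = dict_new();
       *   ret = dict_set_ptr(friends, "peerinfo", peerinfo);
       *   // ... add the friend entries the update should carry ...
       *   ret = glusterd_rpc_friend_update(NULL, this, friends);
       */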
 1704 
 1705 int32_t
 1706 glusterd_cluster_lock(call_frame_t *frame, xlator_t *this, void *data)
 1707 {
 1708     gd1_mgmt_cluster_lock_req req = {
 1709         {0},
 1710     };
 1711     int ret = -1;
 1712     glusterd_peerinfo_t *peerinfo = NULL;
 1713     glusterd_conf_t *priv = NULL;
 1714     call_frame_t *dummy_frame = NULL;
 1715 
 1716     if (!this)
 1717         goto out;
 1718 
 1719     peerinfo = data;
 1720 
 1721     priv = this->private;
 1722     GF_ASSERT(priv);
 1723 
 1724     glusterd_get_uuid(&req.uuid);
 1725 
 1726     dummy_frame = create_frame(this, this->ctx->pool);
 1727     if (!dummy_frame)
 1728         goto out;
 1729 
 1730     ret = glusterd_submit_request(peerinfo->rpc, &req, dummy_frame,
 1731                                   peerinfo->mgmt, GLUSTERD_MGMT_CLUSTER_LOCK,
 1732                                   NULL, this, glusterd_cluster_lock_cbk,
 1733                                   (xdrproc_t)xdr_gd1_mgmt_cluster_lock_req);
 1734 out:
 1735     gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret);
 1736 
 1737     if (ret && dummy_frame)
 1738         STACK_DESTROY(dummy_frame->root);
 1739     return ret;
 1740 }
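
      /*
       * GLUSTERD_MGMT_CLUSTER_LOCK is the legacy cluster-wide lock, taken
       * once per peer for an entire transaction.  The mgmt_v3 lock/unlock
       * requests below are its finer-grained, transaction-id-aware
       * successors used on newer op-versions.
       */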
 1741 
 1742 int32_t
 1743 glusterd_mgmt_v3_lock_peers(call_frame_t *frame, xlator_t *this, void *data)
 1744 {
 1745     gd1_mgmt_v3_lock_req req = {
 1746         {0},
 1747     };
 1748     int ret = -1;
 1749     glusterd_peerinfo_t *peerinfo = NULL;
 1750     glusterd_conf_t *priv = NULL;
 1751     dict_t *dict = NULL;
 1752     uuid_t *txn_id = NULL;
 1753 
 1754     if (!this)
 1755         goto out;
 1756 
 1757     dict = data;
 1758 
 1759     priv = this->private;
 1760     GF_ASSERT(priv);
 1761 
 1762     ret = dict_get_ptr(dict, "peerinfo", VOID(&peerinfo));
 1763     if (ret)
 1764         goto out;
 1765 
 1766     // peerinfo should not be in payload
 1767     dict_deln(dict, "peerinfo", SLEN("peerinfo"));
 1768 
 1769     glusterd_get_uuid(&req.uuid);
 1770 
 1771     ret = dict_allocate_and_serialize(dict, &req.dict.dict_val,
 1772                                       &req.dict.dict_len);
 1773     if (ret) {
 1774         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
 1775                "Failed to serialize dict "
 1776                "to request buffer");
 1777         goto out;
 1778     }
 1779 
 1780     /* Sending valid transaction ID to peers */
 1781     ret = dict_get_bin(dict, "transaction_id", (void **)&txn_id);
 1782     if (ret) {
 1783         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_ID_GET_FAIL,
 1784                "Failed to get transaction id.");
 1785         goto out;
 1786     } else {
 1787         gf_msg_debug(this->name, 0, "Transaction_id = %s", uuid_utoa(*txn_id));
 1788         gf_uuid_copy(req.txn_id, *txn_id);
 1789     }
 1790 
 1791     if (!frame)
 1792         frame = create_frame(this, this->ctx->pool);
 1793 
 1794     if (!frame) {
 1795         ret = -1;
 1796         goto out;
 1797     }
 1798     frame->cookie = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
 1799     if (!frame->cookie) {
 1800         ret = -1;
 1801         goto out;
 1802     }
 1803     gf_uuid_copy(frame->cookie, req.txn_id);
 1804 
 1805     ret = glusterd_submit_request(peerinfo->rpc, &req, frame, peerinfo->mgmt_v3,
 1806                                   GLUSTERD_MGMT_V3_LOCK, NULL, this,
 1807                                   glusterd_mgmt_v3_lock_peers_cbk,
 1808                                   (xdrproc_t)xdr_gd1_mgmt_v3_lock_req);
 1809 out:
 1810     gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret);
 1811     if (dict)
 1812         dict_unref(dict);
 1813     if (req.dict.dict_val)
 1814         GF_FREE(req.dict.dict_val);
 1815     return ret;
 1816 }
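
      /*
       * The transaction ID travels three ways above: inside the serialized
       * dict, in req.txn_id, and in frame->cookie (heap-allocated so the
       * callback can still read it once this function returns).  A hedged
       * sketch of the caller-side setup (names hypothetical; on success
       * dict_set_bin takes ownership of the allocation):
       *
       *   uuid_t *txn_id = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
       *   gf_uuid_copy(*txn_id, my_txn_uuid);
       *   ret = dict_set_bin(dict, "transaction_id", txn_id,
       *                      sizeof(*txn_id));
       */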
 1817 
 1818 int32_t
 1819 glusterd_mgmt_v3_unlock_peers(call_frame_t *frame, xlator_t *this, void *data)
 1820 {
 1821     gd1_mgmt_v3_unlock_req req = {
 1822         {0},
 1823     };
 1824     int ret = -1;
 1825     glusterd_peerinfo_t *peerinfo = NULL;
 1826     glusterd_conf_t *priv = NULL;
 1827     dict_t *dict = NULL;
 1828     uuid_t *txn_id = NULL;
 1829 
 1830     if (!this)
 1831         goto out;
 1832 
 1833     dict = data;
 1834 
 1835     priv = this->private;
 1836     GF_ASSERT(priv);
 1837 
 1838     ret = dict_get_ptr(dict, "peerinfo", VOID(&peerinfo));
 1839     if (ret)
 1840         goto out;
 1841 
 1842     // peerinfo should not be in payload
 1843     dict_deln(dict, "peerinfo", SLEN("peerinfo"));
 1844 
 1845     glusterd_get_uuid(&req.uuid);
 1846 
 1847     ret = dict_allocate_and_serialize(dict, &req.dict.dict_val,
 1848                                       &req.dict.dict_len);
 1849     if (ret) {
 1850         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
 1851                "Failed to serialize dict "
 1852                "to request buffer");
 1853         goto out;
 1854     }
 1855 
 1856     /* Sending valid transaction ID to peers */
 1857     ret = dict_get_bin(dict, "transaction_id", (void **)&txn_id);
 1858     if (ret) {
 1859         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_ID_GET_FAIL,
 1860                "Failed to get transaction id.");
 1861         goto out;
 1862     } else {
 1863         gf_msg_debug(this->name, 0, "Transaction_id = %s", uuid_utoa(*txn_id));
 1864         gf_uuid_copy(req.txn_id, *txn_id);
 1865     }
 1866 
 1867     if (!frame)
 1868         frame = create_frame(this, this->ctx->pool);
 1869 
 1870     if (!frame) {
 1871         ret = -1;
 1872         goto out;
 1873     }
 1874     frame->cookie = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
 1875     if (!frame->cookie) {
 1876         ret = -1;
 1877         goto out;
 1878     }
 1879     gf_uuid_copy(frame->cookie, req.txn_id);
 1880 
 1881     ret = glusterd_submit_request(peerinfo->rpc, &req, frame, peerinfo->mgmt_v3,
 1882                                   GLUSTERD_MGMT_V3_UNLOCK, NULL, this,
 1883                                   glusterd_mgmt_v3_unlock_peers_cbk,
 1884                                   (xdrproc_t)xdr_gd1_mgmt_v3_unlock_req);
 1885 out:
 1886     gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret);
 1887     if (dict)
 1888         dict_unref(dict);
 1889 
 1890     if (req.dict.dict_val)
 1891         GF_FREE(req.dict.dict_val);
 1892     return ret;
 1893 }
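
      /*
       * glusterd_mgmt_v3_unlock_peers() mirrors glusterd_mgmt_v3_lock_peers()
       * line for line; only the request type, the procedure number
       * (GLUSTERD_MGMT_V3_UNLOCK) and the callback differ.
       */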
 1894 
 1895 int32_t
 1896 glusterd_cluster_unlock(call_frame_t *frame, xlator_t *this, void *data)
 1897 {
 1898     gd1_mgmt_cluster_lock_req req = {
 1899         {0},
 1900     };
 1901     int ret = -1;
 1902     glusterd_peerinfo_t *peerinfo = NULL;
 1903     glusterd_conf_t *priv = NULL;
 1904     call_frame_t *dummy_frame = NULL;
 1905 
 1906     if (!this) {
 1907         ret = -1;
 1908         goto out;
 1909     }
 1910     peerinfo = data;
 1911     priv = this->private;
 1912     GF_ASSERT(priv);
 1913 
 1914     glusterd_get_uuid(&req.uuid);
 1915 
 1916     dummy_frame = create_frame(this, this->ctx->pool);
 1917     if (!dummy_frame)
 1918         goto out;
 1919 
 1920     ret = glusterd_submit_request(peerinfo->rpc, &req, dummy_frame,
 1921                                   peerinfo->mgmt, GLUSTERD_MGMT_CLUSTER_UNLOCK,
 1922                                   NULL, this, glusterd_cluster_unlock_cbk,
 1923                                   (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_req);
 1924 out:
 1925     gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret);
 1926 
 1927     if (ret && dummy_frame)
 1928         STACK_DESTROY(dummy_frame->root);
 1929 
 1930     return ret;
 1931 }
 1932 
 1933 int32_t
 1934 glusterd_stage_op(call_frame_t *frame, xlator_t *this, void *data)
 1935 {
 1936     gd1_mgmt_stage_op_req req = {
 1937         {
 1938             0,
 1939         },
 1940     };
 1941     int ret = -1;
 1942     glusterd_peerinfo_t *peerinfo = NULL;
 1943     glusterd_conf_t *priv = NULL;
 1944     dict_t *dict = NULL;
 1945     uuid_t *txn_id = NULL;
 1946 
 1947     if (!this) {
 1948         goto out;
 1949     }
 1950 
 1951     dict = data;
 1952 
 1953     priv = this->private;
 1954     GF_ASSERT(priv);
 1955 
 1956     ret = dict_get_ptr(dict, "peerinfo", VOID(&peerinfo));
 1957     if (ret)
 1958         goto out;
 1959 
 1960     // peerinfo should not be in payload
 1961     dict_deln(dict, "peerinfo", SLEN("peerinfo"));
 1962 
 1963     glusterd_get_uuid(&req.uuid);
 1964     req.op = glusterd_op_get_op();
 1965 
 1966     ret = dict_allocate_and_serialize(dict, &req.buf.buf_val, &req.buf.buf_len);
 1967     if (ret) {
 1968         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
 1969                "Failed to serialize dict "
 1970                "to request buffer");
 1971         goto out;
 1972     }
 1973     /* Sending valid transaction ID to peers */
 1974     ret = dict_get_bin(dict, "transaction_id", (void **)&txn_id);
 1975     if (ret) {
 1976         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_ID_GET_FAIL,
 1977                "Failed to get transaction id.");
 1978         goto out;
 1979     } else {
 1980         gf_msg_debug(this->name, 0, "Transaction_id = %s", uuid_utoa(*txn_id));
 1981     }
 1982 
 1983     if (!frame)
 1984         frame = create_frame(this, this->ctx->pool);
 1985 
 1986     if (!frame) {
 1987         ret = -1;
 1988         goto out;
 1989     }
 1990     frame->cookie = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
 1991     if (!frame->cookie) {
 1992         ret = -1;
 1993         goto out;
 1994     }
 1995     gf_uuid_copy(frame->cookie, *txn_id);
 1996 
 1997     ret = glusterd_submit_request(peerinfo->rpc, &req, frame, peerinfo->mgmt,
 1998                                   GLUSTERD_MGMT_STAGE_OP, NULL, this,
 1999                                   glusterd_stage_op_cbk,
 2000                                   (xdrproc_t)xdr_gd1_mgmt_stage_op_req);
 2001 
 2002 out:
 2003     if (req.buf.buf_val)
 2004         GF_FREE(req.buf.buf_val);
 2005 
 2006     gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret);
 2007     return ret;
 2008 }
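
      /*
       * glusterd_stage_op() above and glusterd_commit_op() below are the
       * two phases of the v1 transaction framework: the same op dict is
       * first staged (validated) on every peer and, once each peer has
       * answered with GD_OP_EVENT_RCVD_ACC, committed.  Both carry the op
       * code from glusterd_op_get_op() plus the serialized dict in req.buf,
       * and both stash the transaction ID in frame->cookie for their
       * callbacks, just like the mgmt_v3 lock calls above.
       */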
 2009 
 2010 int32_t
 2011 glusterd_commit_op(call_frame_t *frame, xlator_t *this, void *data)
 2012 {
 2013     gd1_mgmt_commit_op_req req = {
 2014         {
 2015             0,
 2016         },
 2017     };
 2018     int ret = -1;
 2019     glusterd_peerinfo_t *peerinfo = NULL;
 2020     glusterd_conf_t *priv = NULL;
 2021     dict_t *dict = NULL;
 2022     uuid_t *txn_id = NULL;
 2023 
 2024     if (!this) {
 2025         goto out;
 2026     }
 2027 
 2028     dict = data;
 2029     priv = this->private;
 2030     GF_ASSERT(priv);
 2031 
 2032     ret = dict_get_ptr(dict, "peerinfo", VOID(&peerinfo));
 2033     if (ret)
 2034         goto out;
 2035 
 2036     // peerinfo should not be in payload
 2037     dict_deln(dict, "peerinfo", SLEN("peerinfo"));
 2038 
 2039     glusterd_get_uuid(&req.uuid);
 2040     req.op = glusterd_op_get_op();
 2041 
 2042     ret = dict_allocate_and_serialize(dict, &req.buf.buf_val, &req.buf.buf_len);
 2043     if (ret) {
 2044         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
 2045                "Failed to serialize dict to "
 2046                "request buffer");
 2047         goto out;
 2048     }
 2049     /* Sending valid transaction ID to peers */
 2050     ret = dict_get_bin(dict, "transaction_id", (void **)&txn_id);
 2051     if (ret) {
 2052         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_ID_GET_FAIL,
 2053                "Failed to get transaction id.");
 2054         goto out;
 2055     } else {
 2056         gf_msg_debug(this->name, 0, "Transaction_id = %s", uuid_utoa(*txn_id));
 2057     }
 2058 
 2059     if (!frame)
 2060         frame = create_frame(this, this->ctx->pool);
 2061 
 2062     if (!frame) {
 2063         ret = -1;
 2064         goto out;
 2065     }
 2066     frame->cookie = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
 2067     if (!frame->cookie) {
 2068         ret = -1;
 2069         goto out;
 2070     }
 2071     gf_uuid_copy(frame->cookie, *txn_id);
 2072 
 2073     ret = glusterd_submit_request(peerinfo->rpc, &req, frame, peerinfo->mgmt,
 2074                                   GLUSTERD_MGMT_COMMIT_OP, NULL, this,
 2075                                   glusterd_commit_op_cbk,
 2076                                   (xdrproc_t)xdr_gd1_mgmt_commit_op_req);
 2077 
 2078 out:
 2079     if (req.buf.buf_val)
 2080         GF_FREE(req.buf.buf_val);
 2081 
 2082     gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret);
 2083     return ret;
 2084 }
 2085 
 2086 int32_t
 2087 __glusterd_brick_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
 2088                         void *myframe)
 2089 {
 2090     gd1_mgmt_brick_op_rsp rsp = {0};
 2091     int ret = -1;
 2092     int32_t op_ret = -1;
 2093     glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
 2094     call_frame_t *frame = NULL;
 2095     glusterd_op_brick_rsp_ctx_t *ev_ctx = NULL;
 2096     dict_t *dict = NULL;
 2097     int index = 0;
 2098     glusterd_req_ctx_t *req_ctx = NULL;
 2099     glusterd_pending_node_t *node = NULL;
 2100     xlator_t *this = NULL;
 2101     uuid_t *txn_id = NULL;
 2102     glusterd_conf_t *priv = NULL;
 2103 
 2104     this = THIS;
 2105     GF_ASSERT(this);
 2106     priv = this->private;
 2107     GF_ASSERT(priv);
 2108     GF_ASSERT(req);
 2109 
 2110     txn_id = &priv->global_txn_id;
 2111     frame = myframe;
 2112     req_ctx = frame->local;
 2113 
 2114     if (-1 == req->rpc_status) {
 2115         rsp.op_ret = -1;
 2116         rsp.op_errno = EINVAL;
 2117         /* use standard allocation (strdup) so this errstr can be
 2118            freed the same way as the xdr-decoded one */
 2119         rsp.op_errstr = strdup("error");
 2120         event_type = GD_OP_EVENT_RCVD_RJT;
 2121         goto out;
 2122     }
 2123 
 2124     ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
 2125     if (ret < 0) {
 2126         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RES_DECODE_FAIL,
 2127                "Failed to decode brick op "
 2128                "response received");
 2129         rsp.op_ret = -1;
 2130         rsp.op_errno = EINVAL;
 2131         rsp.op_errstr = strdup("Unable to decode brick op response");
 2132         event_type = GD_OP_EVENT_RCVD_RJT;
 2133         goto out;
 2134     }
 2135 
 2136     if (rsp.output.output_len) {
 2137         /* Unserialize the dictionary */
 2138         dict = dict_new();
 2139 
 2140         ret = dict_unserialize(rsp.output.output_val, rsp.output.output_len,
 2141                                &dict);
 2142         if (ret < 0) {
 2143             gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
 2144                    "Failed to "
 2145                    "unserialize rsp-buffer to dictionary");
 2146             event_type = GD_OP_EVENT_RCVD_RJT;
 2147             goto out;
 2148         } else {
 2149             dict->extra_stdfree = rsp.output.output_val;
 2150         }
 2151     }
 2152 
 2153     op_ret = rsp.op_ret;
 2154 
 2155     /* Add index to rsp_dict for GD_OP_STATUS_VOLUME */
 2156     if (GD_OP_STATUS_VOLUME == req_ctx->op) {
 2157         node = frame->cookie;
 2158         index = node->index;
 2159         ret = dict_set_int32n(dict, "index", SLEN("index"), index);
 2160         if (ret) {
 2161             gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
 2162                    "Error setting index on brick status rsp dict");
 2163             rsp.op_ret = -1;
 2164             event_type = GD_OP_EVENT_RCVD_RJT;
 2165             goto out;
 2166         }
 2167     }
 2168 out:
 2169 
 2170     if (req_ctx && req_ctx->dict) {
 2171         ret = dict_get_bin(req_ctx->dict, "transaction_id", (void **)&txn_id);
 2172         gf_msg_debug(this->name, -ret, "transaction ID = %s",
 2173                      uuid_utoa(*txn_id));
 2174     }
 2175 
 2176     ev_ctx = GF_CALLOC(1, sizeof(*ev_ctx), gf_gld_mt_brick_rsp_ctx_t);
 2177     if (ev_ctx) {
 2178         if (op_ret) {
 2179             event_type = GD_OP_EVENT_RCVD_RJT;
 2180             ev_ctx->op_ret = op_ret;
 2181             ev_ctx->op_errstr = gf_strdup(rsp.op_errstr);
 2182         } else {
 2183             event_type = GD_OP_EVENT_RCVD_ACC;
 2184         }
 2185         ev_ctx->pending_node = frame->cookie;
 2186         ev_ctx->rsp_dict = dict;
 2187         ev_ctx->commit_ctx = frame->local;
 2188         ret = glusterd_op_sm_inject_event(event_type, txn_id, ev_ctx);
 2189     }
 2190     if (!ret) {
 2191         glusterd_friend_sm();
 2192         glusterd_op_sm();
 2193     }
 2194 
 2195     if (ret) {
 2196         if (dict) {
 2197             dict_unref(dict);
 2198         }
 2199         if (ev_ctx) {
 2200             GF_FREE(ev_ctx->op_errstr);
 2201             GF_FREE(ev_ctx);
 2202         }
 2203     }
 2204     free(rsp.op_errstr);  // malloced by xdr
 2205     GLUSTERD_STACK_DESTROY(frame);
 2206     return ret;
 2207 }
 2208 
 2209 int32_t
 2210 glusterd_brick_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
 2211                       void *myframe)
 2212 {
 2213     return glusterd_big_locked_cbk(req, iov, count, myframe,
 2214                                    __glusterd_brick_op_cbk);
 2215 }
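
      /*
       * As with the other callbacks in this file, the __-prefixed handler
       * runs under glusterd's big lock via glusterd_big_locked_cbk().  A
       * hedged sketch of what that wrapper amounts to (its definition is
       * elsewhere in glusterd):
       *
       *   synclock_lock(&priv->big_lock);
       *   ret = fn(req, iov, count, myframe); // __glusterd_brick_op_cbk
       *   synclock_unlock(&priv->big_lock);
       */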
 2216 
 2217 int32_t
 2218 glusterd_brick_op(call_frame_t *frame, xlator_t *this, void *data)
 2219 {
 2220     gd1_mgmt_brick_op_req *req = NULL;
 2221     int ret = 0;
 2222     int ret1 = 0;
 2223     glusterd_conf_t *priv = NULL;
 2224     call_frame_t *dummy_frame = NULL;
 2225     char *op_errstr = NULL;
 2226     int pending_bricks = 0;
 2227     glusterd_pending_node_t *pending_node = NULL;
 2228     glusterd_req_ctx_t *req_ctx = NULL;
 2229     struct rpc_clnt *rpc = NULL;
 2230     dict_t *op_ctx = NULL;
 2231     uuid_t *txn_id = NULL;
 2232 
 2233     if (!this) {
 2234         ret = -1;
 2235         goto out;
 2236     }
 2237     priv = this->private;
 2238     GF_ASSERT(priv);
 2239 
 2240     txn_id = &priv->global_txn_id;
 2241 
 2242     req_ctx = data;
 2243     GF_ASSERT(req_ctx);
 2244     CDS_INIT_LIST_HEAD(&opinfo.pending_bricks);
 2245 
 2246     ret = dict_get_bin(req_ctx->dict, "transaction_id", (void **)&txn_id);
 2247     if (ret) {
 2248         gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_TRANS_ID_GET_FAIL,
 2249                "Could not get transaction ID from dict, global "
 2250                "transaction ID = %s",
 2251                uuid_utoa(*txn_id));
 2252     } else {
 2253         gf_msg_debug(this->name, 0, "transaction ID = %s", uuid_utoa(*txn_id));
 2254     }
 2255     ret = glusterd_op_bricks_select(req_ctx->op, req_ctx->dict, &op_errstr,
 2256                                     &opinfo.pending_bricks, NULL);
 2257 
 2258     if (ret) {
 2259         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_SELECT_FAIL,
 2260                "Failed to select bricks "
 2261                "while performing brick op during 'Volume %s'",
 2262                gd_op_list[opinfo.op]);
 2263         opinfo.op_errstr = op_errstr;
 2264         goto out;
 2265     }
 2266 
 2267     cds_list_for_each_entry(pending_node, &opinfo.pending_bricks, list)
 2268     {
 2269         dummy_frame = create_frame(this, this->ctx->pool);
 2270         if (!dummy_frame)
 2271             continue;
 2272 
 2273         if ((pending_node->type == GD_NODE_NFS) ||
 2274             (pending_node->type == GD_NODE_QUOTAD) ||
 2275             (pending_node->type == GD_NODE_SNAPD) ||
 2276             (pending_node->type == GD_NODE_SCRUB) ||
 2277             ((pending_node->type == GD_NODE_SHD) &&
 2278              (req_ctx->op == GD_OP_STATUS_VOLUME))) {
 2279             ret = glusterd_node_op_build_payload(
 2280                 req_ctx->op, (gd1_mgmt_brick_op_req **)&req, req_ctx->dict);
 2281         } else {
 2282             ret = glusterd_brick_op_build_payload(
 2283                 req_ctx->op, pending_node->node, (gd1_mgmt_brick_op_req **)&req,
 2284                 req_ctx->dict);
 2285         }
 2286         if (ret || !req) {
 2287             gf_msg(this->name, GF_LOG_ERROR, 0,
 2288                    GD_MSG_BRICK_OP_PAYLOAD_BUILD_FAIL,
 2289                    "Failed to "
 2290                    "build op payload during "
 2291                    "'Volume %s'",
 2292                    gd_op_list[req_ctx->op]);
 2293             goto out;
 2294         }
 2295 
 2296         dummy_frame->local = data;
 2297         dummy_frame->cookie = pending_node;
 2298 
 2299         rpc = glusterd_pending_node_get_rpc(pending_node);
 2300         if (!rpc) {
 2301             if (pending_node->type == GD_NODE_REBALANCE) {
 2302                 opinfo.brick_pending_count = 0;
 2303                 ret = 0;
 2304                 GF_FREE(req->input.input_val);
 2305                 GF_FREE(req);
 2306                 req = NULL;
 2307                 GLUSTERD_STACK_DESTROY(dummy_frame);
 2308 
 2309                 op_ctx = glusterd_op_get_ctx();
 2310                 if (!op_ctx)
 2311                     goto out;
 2312                 glusterd_defrag_volume_node_rsp(req_ctx->dict, NULL, op_ctx);
 2313 
 2314                 goto out;
 2315             }
 2316 
 2317             ret = -1;
 2318             gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RPC_FAILURE,
 2319                    "Brick Op failed "
 2320                    "due to rpc failure.");
 2321             goto out;
 2322         }
 2323 
 2324         ret = glusterd_submit_request(
 2325             rpc, req, dummy_frame, priv->gfs_mgmt, req->op, NULL, this,
 2326             glusterd_brick_op_cbk, (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
 2327         GF_FREE(req->input.input_val);
 2328         GF_FREE(req);
 2329         req = NULL;
 2330 
 2331         if (!ret)
 2332             pending_bricks++;
 2333 
 2334         glusterd_pending_node_put_rpc(pending_node);
 2335     }
 2336 
 2337     gf_msg_trace(this->name, 0,
 2338                  "Sent brick op req for operation "
 2339                  "'Volume %s' to %d bricks",
 2340                  gd_op_list[req_ctx->op], pending_bricks);
 2341     opinfo.brick_pending_count = pending_bricks;
 2342 
 2343 out:
 2344 
 2345     if (ret)
 2346         opinfo.op_ret = ret;
 2347 
 2348     ret1 = glusterd_set_txn_opinfo(txn_id, &opinfo);
 2349     if (ret1)
 2350         gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
 2351                "Unable to set "
 2352                "transaction's opinfo");
 2353 
 2354     if (ret) {
 2355         glusterd_op_sm_inject_event(GD_OP_EVENT_RCVD_RJT, txn_id, data);
 2356         opinfo.op_ret = ret;
 2357     }
 2358 
 2359     gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret);
 2360     return ret;
 2361 }
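
      /*
       * glusterd_brick_op() above fans one request out per selected pending
       * node (brick, nfs, quotad, snapd, scrub, or shd for status).
       * opinfo.brick_pending_count records how many submissions succeeded;
       * __glusterd_brick_op_cbk() injects one state-machine event per
       * reply, which is how that pending count is eventually drained.
       */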
 2362 
 2363 struct rpc_clnt_procedure gd_brick_actors[GLUSTERD_BRICK_MAXVALUE] = {
 2364     [GLUSTERD_BRICK_NULL] = {"NULL", NULL},
 2365     [GLUSTERD_BRICK_OP] = {"BRICK_OP", glusterd_brick_op},
 2366 };
 2367 
 2368 struct rpc_clnt_procedure gd_peer_actors[GLUSTERD_FRIEND_MAXVALUE] = {
 2369     [GLUSTERD_FRIEND_NULL] = {"NULL", NULL},
 2370     [GLUSTERD_PROBE_QUERY] = {"PROBE_QUERY", glusterd_rpc_probe},
 2371     [GLUSTERD_FRIEND_ADD] = {"FRIEND_ADD", glusterd_rpc_friend_add},
 2372     [GLUSTERD_FRIEND_REMOVE] = {"FRIEND_REMOVE", glusterd_rpc_friend_remove},
 2373     [GLUSTERD_FRIEND_UPDATE] = {"FRIEND_UPDATE", glusterd_rpc_friend_update},
 2374 };
 2375 
 2376 struct rpc_clnt_procedure gd_mgmt_actors[GLUSTERD_MGMT_MAXVALUE] = {
 2377     [GLUSTERD_MGMT_NULL] = {"NULL", NULL},
 2378     [GLUSTERD_MGMT_CLUSTER_LOCK] = {"CLUSTER_LOCK", glusterd_cluster_lock},
 2379     [GLUSTERD_MGMT_CLUSTER_UNLOCK] = {"CLUSTER_UNLOCK",
 2380                                       glusterd_cluster_unlock},
 2381     [GLUSTERD_MGMT_STAGE_OP] = {"STAGE_OP", glusterd_stage_op},
 2382     [GLUSTERD_MGMT_COMMIT_OP] = {"COMMIT_OP", glusterd_commit_op},
 2383 };
 2384 
 2385 struct rpc_clnt_procedure gd_mgmt_v3_actors[GLUSTERD_MGMT_V3_MAXVALUE] = {
 2386     [GLUSTERD_MGMT_V3_NULL] = {"NULL", NULL},
 2387     [GLUSTERD_MGMT_V3_LOCK] = {"MGMT_V3_LOCK", glusterd_mgmt_v3_lock_peers},
 2388     [GLUSTERD_MGMT_V3_UNLOCK] = {"MGMT_V3_UNLOCK",
 2389                                  glusterd_mgmt_v3_unlock_peers},
 2390 };
 2391 
 2392 struct rpc_clnt_program gd_mgmt_prog = {
 2393     .progname = "glusterd mgmt",
 2394     .prognum = GD_MGMT_PROGRAM,
 2395     .progver = GD_MGMT_VERSION,
 2396     .proctable = gd_mgmt_actors,
 2397     .numproc = GLUSTERD_MGMT_MAXVALUE,
 2398 };
 2399 
 2400 struct rpc_clnt_program gd_brick_prog = {
 2401     .progname = "brick operations",
 2402     .prognum = GD_BRICK_PROGRAM,
 2403     .progver = GD_BRICK_VERSION,
 2404     .proctable = gd_brick_actors,
 2405     .numproc = GLUSTERD_BRICK_MAXVALUE,
 2406 };
 2407 
 2408 struct rpc_clnt_program gd_peer_prog = {
 2409     .progname = "Peer mgmt",
 2410     .prognum = GD_FRIEND_PROGRAM,
 2411     .progver = GD_FRIEND_VERSION,
 2412     .proctable = gd_peer_actors,
 2413     .numproc = GLUSTERD_FRIEND_MAXVALUE,
 2414 };
 2415 
 2416 struct rpc_clnt_program gd_mgmt_v3_prog = {
 2417     .progname = "glusterd mgmt v3",
 2418     .prognum = GD_MGMT_PROGRAM,
 2419     .progver = GD_MGMT_V3_VERSION,
 2420     .proctable = gd_mgmt_v3_actors,
 2421     .numproc = GLUSTERD_MGMT_V3_MAXVALUE,
 2422 };
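
      /*
       * These program tables are what glusterd_submit_request() indexes
       * with the procedure numbers used throughout this file.  A hedged
       * dispatch sketch (hypothetical caller; roughly how the friend/op
       * state machines invoke these actors):
       *
       *   struct rpc_clnt_procedure *proc =
       *       &gd_peer_prog.proctable[GLUSTERD_FRIEND_ADD];
       *   if (proc->fn)
       *       ret = proc->fn(frame, this, event); // glusterd_rpc_friend_add
       */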