"Fossies" - the Fresh Open Source Software Archive

Member "glusterfs-8.2/xlators/mgmt/glusterd/src/glusterd-mgmt.c" (16 Sep 2020, 80120 Bytes) of package /linux/misc/glusterfs-8.2.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and code folding option. Alternatively, you can view or download the uninterpreted source code file here. For more information about "glusterd-mgmt.c" see the Fossies "Dox" file reference documentation.

    1 /*
    2    Copyright (c) 2013-2014 Red Hat, Inc. <http://www.redhat.com>
    3    This file is part of GlusterFS.
    4 
    5    This file is licensed to you under your choice of the GNU Lesser
    6    General Public License, version 3 or any later version (LGPLv3 or
    7    later), or the GNU General Public License, version 2 (GPLv2), in all
    8    cases as published by the Free Software Foundation.
    9 */
   10 /* rpc related syncops */
   11 #include "rpc-clnt.h"
   12 #include "protocol-common.h"
   13 #include "xdr-generic.h"
   14 #include "glusterd1-xdr.h"
   15 #include "glusterd-syncop.h"
   16 
   17 #include "glusterd.h"
   18 #include "glusterd-utils.h"
   19 #include "glusterd-locks.h"
   20 #include "glusterd-mgmt.h"
   21 #include "glusterd-op-sm.h"
   22 #include "glusterd-server-quorum.h"
   23 #include "glusterd-volgen.h"
   24 #include "glusterd-store.h"
   25 #include "glusterd-snapshot-utils.h"
   26 #include "glusterd-messages.h"
   27 #include "glusterd-errno.h"
   28 #include "glusterd-hooks.h"
   29 
   30 extern struct rpc_clnt_program gd_mgmt_v3_prog;
   31 
/* Record the failure of one peer during one mgmt_v3 phase into @args.
 *
 * When op_ret is non-zero this stores op_ret/op_errno into @args, builds a
 * phase-specific message naming the failing peer (its hostname if the
 * peerinfo is still known, otherwise the printable UUID), appends that
 * message to any error text already accumulated in args->errstr, and logs
 * it.  When op_ret is zero nothing is recorded.
 *
 * @param args      aggregated transaction state shared by all peer callbacks
 * @param op_ret    peer's return status (0 = success, nothing recorded)
 * @param op_errno  peer's errno value
 * @param op_errstr peer-supplied error text; may be NULL or ""
 * @param op_code   mgmt_v3 phase that failed (GLUSTERD_MGMT_V3_*)
 * @param peerid    UUID used to look up the peer's hostname
 * @param uuid      raw UUID bytes used for display when the peer is unknown
 *
 * NOTE(review): this is called from per-peer RPC callbacks; the
 * read-modify-write of args->errstr appears to rely on serialization by
 * glusterd_big_locked_cbk — confirm before calling from elsewhere.
 */
void
gd_mgmt_v3_collate_errors(struct syncargs *args, int op_ret, int op_errno,
                          char *op_errstr, int op_code, uuid_t peerid,
                          u_char *uuid)
{
    char *peer_str = NULL;
    /* Default text used when the peer supplied no error string. */
    char err_str[PATH_MAX] = "Please check log file for details.";
    char op_err[PATH_MAX] = "";
    xlator_t *this = NULL;
    int is_operrstr_blk = 0;
    char *err_string = NULL;
    glusterd_peerinfo_t *peerinfo = NULL;
    int32_t len = 0;

    this = THIS;
    GF_ASSERT(this);
    GF_ASSERT(args);
    GF_ASSERT(uuid);

    if (op_ret) {
        args->op_ret = op_ret;
        args->op_errno = op_errno;

        /* Resolve a human-readable peer name under the RCU read lock;
         * copy it out so it stays valid after the lock is dropped. */
        RCU_READ_LOCK;
        peerinfo = glusterd_peerinfo_find(peerid, NULL);
        if (peerinfo)
            peer_str = gf_strdup(peerinfo->hostname);
        else
            peer_str = gf_strdup(uuid_utoa(uuid));

        RCU_READ_UNLOCK;

        /* Prefer the peer's own error text over the generic fallback. */
        is_operrstr_blk = (op_errstr && strcmp(op_errstr, ""));
        err_string = (is_operrstr_blk) ? op_errstr : err_str;

        switch (op_code) {
            case GLUSTERD_MGMT_V3_LOCK: {
                snprintf(op_err, sizeof(op_err), "Locking failed on %s. %s",
                         peer_str, err_string);
                break;
            }
            case GLUSTERD_MGMT_V3_PRE_VALIDATE: {
                snprintf(op_err, sizeof(op_err),
                         "Pre Validation failed on %s. %s", peer_str,
                         err_string);
                break;
            }
            case GLUSTERD_MGMT_V3_BRICK_OP: {
                snprintf(op_err, sizeof(op_err), "Brick ops failed on %s. %s",
                         peer_str, err_string);
                break;
            }
            case GLUSTERD_MGMT_V3_COMMIT: {
                snprintf(op_err, sizeof(op_err), "Commit failed on %s. %s",
                         peer_str, err_string);
                break;
            }
            case GLUSTERD_MGMT_V3_POST_VALIDATE: {
                snprintf(op_err, sizeof(op_err),
                         "Post Validation failed on %s. %s", peer_str,
                         err_string);
                break;
            }
            case GLUSTERD_MGMT_V3_UNLOCK: {
                snprintf(op_err, sizeof(op_err), "Unlocking failed on %s. %s",
                         peer_str, err_string);
                break;
            }
            default:
                snprintf(op_err, sizeof(op_err), "Unknown error! on %s. %s",
                         peer_str, err_string);
        }

        /* Append this peer's message to the accumulated error string.
         * err_str is reused here as scratch; err_string was only read
         * above, so the default text is no longer needed. */
        if (args->errstr) {
            len = snprintf(err_str, sizeof(err_str), "%s\n%s", args->errstr,
                           op_err);
            if (len < 0) {
                strcpy(err_str, "<error>");
            }
            GF_FREE(args->errstr);
            args->errstr = NULL;
        } else
            snprintf(err_str, sizeof(err_str), "%s", op_err);

        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_OP_FAIL, "%s",
               op_err);
        args->errstr = gf_strdup(err_str);
    }

    GF_FREE(peer_str);

    return;
}
  125 
  126 int32_t
  127 gd_mgmt_v3_pre_validate_fn(glusterd_op_t op, dict_t *dict, char **op_errstr,
  128                            dict_t *rsp_dict, uint32_t *op_errno)
  129 {
  130     int32_t ret = -1;
  131     xlator_t *this = NULL;
  132 
  133     this = THIS;
  134     GF_ASSERT(this);
  135     GF_ASSERT(dict);
  136     GF_ASSERT(op_errstr);
  137     GF_ASSERT(rsp_dict);
  138     GF_VALIDATE_OR_GOTO(this->name, op_errno, out);
  139 
  140     switch (op) {
  141         case GD_OP_SNAP:
  142             ret = glusterd_snapshot_prevalidate(dict, op_errstr, rsp_dict,
  143                                                 op_errno);
  144 
  145             if (ret) {
  146                 gf_msg(this->name, GF_LOG_WARNING, 0,
  147                        GD_MSG_PRE_VALIDATION_FAIL,
  148                        "Snapshot Prevalidate Failed");
  149                 goto out;
  150             }
  151 
  152             break;
  153 
  154         case GD_OP_REPLACE_BRICK:
  155             ret = glusterd_op_stage_replace_brick(dict, op_errstr, rsp_dict);
  156             if (ret) {
  157                 gf_msg(this->name, GF_LOG_WARNING, 0,
  158                        GD_MSG_PRE_VALIDATION_FAIL,
  159                        "Replace-brick prevalidation failed.");
  160                 goto out;
  161             }
  162             break;
  163         case GD_OP_ADD_BRICK:
  164             ret = glusterd_op_stage_add_brick(dict, op_errstr, rsp_dict);
  165             if (ret) {
  166                 gf_msg(this->name, GF_LOG_WARNING, 0,
  167                        GD_MSG_PRE_VALIDATION_FAIL,
  168                        "ADD-brick prevalidation failed.");
  169                 goto out;
  170             }
  171             break;
  172         case GD_OP_START_VOLUME:
  173             ret = glusterd_op_stage_start_volume(dict, op_errstr, rsp_dict);
  174             if (ret) {
  175                 gf_msg(this->name, GF_LOG_WARNING, 0,
  176                        GD_MSG_PRE_VALIDATION_FAIL,
  177                        "Volume start prevalidation failed.");
  178                 goto out;
  179             }
  180             break;
  181         case GD_OP_STOP_VOLUME:
  182             ret = glusterd_op_stage_stop_volume(dict, op_errstr);
  183             if (ret) {
  184                 gf_msg(this->name, GF_LOG_WARNING, 0,
  185                        GD_MSG_PRE_VALIDATION_FAIL,
  186                        "Volume stop prevalidation failed.");
  187                 goto out;
  188             }
  189             break;
  190         case GD_OP_RESET_BRICK:
  191             ret = glusterd_reset_brick_prevalidate(dict, op_errstr, rsp_dict);
  192             if (ret) {
  193                 gf_msg(this->name, GF_LOG_WARNING, 0,
  194                        GD_MSG_PRE_VALIDATION_FAIL,
  195                        "Reset brick prevalidation failed.");
  196                 goto out;
  197             }
  198             break;
  199 
  200         case GD_OP_PROFILE_VOLUME:
  201             ret = glusterd_op_stage_stats_volume(dict, op_errstr);
  202             if (ret) {
  203                 gf_msg(this->name, GF_LOG_WARNING, 0,
  204                        GD_MSG_PRE_VALIDATION_FAIL,
  205                        "prevalidation failed for profile operation.");
  206                 goto out;
  207             }
  208             break;
  209         case GD_OP_REBALANCE:
  210         case GD_OP_DEFRAG_BRICK_VOLUME:
  211             ret = glusterd_mgmt_v3_op_stage_rebalance(dict, op_errstr);
  212             if (ret) {
  213                 gf_log(this->name, GF_LOG_WARNING,
  214                        "Rebalance Prevalidate Failed");
  215                 goto out;
  216             }
  217             break;
  218 
  219         case GD_OP_MAX_OPVERSION:
  220             ret = 0;
  221             break;
  222 
  223         default:
  224             break;
  225     }
  226 
  227     ret = 0;
  228 out:
  229     gf_msg_debug(this->name, 0, "OP = %d. Returning %d", op, ret);
  230     return ret;
  231 }
  232 
  233 int32_t
  234 gd_mgmt_v3_brick_op_fn(glusterd_op_t op, dict_t *dict, char **op_errstr,
  235                        dict_t *rsp_dict)
  236 {
  237     int32_t ret = -1;
  238     xlator_t *this = NULL;
  239 
  240     this = THIS;
  241     GF_ASSERT(this);
  242     GF_ASSERT(dict);
  243     GF_ASSERT(op_errstr);
  244     GF_ASSERT(rsp_dict);
  245 
  246     switch (op) {
  247         case GD_OP_SNAP: {
  248             ret = glusterd_snapshot_brickop(dict, op_errstr, rsp_dict);
  249             if (ret) {
  250                 gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_BRICK_OP_FAIL,
  251                        "snapshot brickop failed");
  252                 goto out;
  253             }
  254             break;
  255         }
  256         case GD_OP_PROFILE_VOLUME:
  257         case GD_OP_REBALANCE:
  258         case GD_OP_DEFRAG_BRICK_VOLUME: {
  259             ret = gd_brick_op_phase(op, rsp_dict, dict, op_errstr);
  260             if (ret) {
  261                 gf_log(this->name, GF_LOG_WARNING,
  262                        "%s brickop "
  263                        "failed",
  264                        gd_op_list[op]);
  265                 goto out;
  266             }
  267             break;
  268         }
  269         default:
  270             break;
  271     }
  272 
  273     ret = 0;
  274 out:
  275     gf_msg_trace(this->name, 0, "OP = %d. Returning %d", op, ret);
  276     return ret;
  277 }
  278 
/* Dispatch the commit phase of a mgmt_v3 transaction to the op-specific
 * commit handler.  The pre-commit hook is run unconditionally before
 * dispatch, for every op.
 *
 * @param op        operation being transacted (GD_OP_*)
 * @param dict      request dictionary carrying the operation's arguments
 * @param op_errstr out: allocated error string on failure (caller frees)
 * @param op_errno  out: operation-specific errno on failure
 * @param rsp_dict  response dictionary populated by the commit handler
 *
 * @return 0 on success (including ops with no commit step), -1 on failure.
 */
int32_t
gd_mgmt_v3_commit_fn(glusterd_op_t op, dict_t *dict, char **op_errstr,
                     uint32_t *op_errno, dict_t *rsp_dict)
{
    int32_t ret = -1;
    xlator_t *this = NULL;

    this = THIS;
    GF_ASSERT(this);
    GF_ASSERT(dict);
    GF_ASSERT(op_errstr);
    GF_VALIDATE_OR_GOTO(this->name, op_errno, out);
    GF_ASSERT(rsp_dict);

    /* Run the pre-commit hook scripts before the actual commit work. */
    glusterd_op_commit_hook(op, dict, GD_COMMIT_HOOK_PRE);
    switch (op) {
        case GD_OP_SNAP: {
            ret = glusterd_snapshot(dict, op_errstr, op_errno, rsp_dict);
            if (ret) {
                gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_COMMIT_OP_FAIL,
                       "Snapshot Commit Failed");
                goto out;
            }
            break;
        }
        case GD_OP_REPLACE_BRICK: {
            ret = glusterd_op_replace_brick(dict, rsp_dict);
            if (ret) {
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
                       "Replace-brick commit failed.");
                goto out;
            }
            break;
        }
        case GD_OP_ADD_BRICK: {
            ret = glusterd_op_add_brick(dict, op_errstr);
            if (ret) {
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
                       "Add-brick commit failed.");
                goto out;
            }
            break;
        }
        case GD_OP_START_VOLUME: {
            ret = glusterd_op_start_volume(dict, op_errstr);
            if (ret) {
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
                       "Volume start commit failed.");
                goto out;
            }
            break;
        }
        case GD_OP_STOP_VOLUME: {
            ret = glusterd_op_stop_volume(dict);
            if (ret) {
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
                       "Volume stop commit failed.");
                goto out;
            }
            break;
        }
        case GD_OP_RESET_BRICK: {
            ret = glusterd_op_reset_brick(dict, rsp_dict);
            if (ret) {
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
                       "Reset-brick commit failed.");
                goto out;
            }
            break;
        }
        case GD_OP_MAX_OPVERSION: {
            ret = glusterd_op_get_max_opversion(op_errstr, rsp_dict);
            if (ret) {
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
                       "Commit failed.");
                goto out;
            }
            break;
        }
        case GD_OP_PROFILE_VOLUME: {
            ret = glusterd_op_stats_volume(dict, op_errstr, rsp_dict);
            if (ret) {
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
                       "commit failed for volume profile operation.");
                goto out;
            }
            break;
        }
        case GD_OP_REBALANCE:
        case GD_OP_DEFRAG_BRICK_VOLUME: {
            ret = glusterd_mgmt_v3_op_rebalance(dict, op_errstr, rsp_dict);
            if (ret) {
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
                       "Rebalance Commit Failed");
                goto out;
            }
            break;
        }

        default:
            /* No commit step for this operation. */
            break;
    }

    ret = 0;
out:
    gf_msg_debug(this->name, 0, "OP = %d. Returning %d", op, ret);
    return ret;
}
  387 
  388 int32_t
  389 gd_mgmt_v3_post_validate_fn(glusterd_op_t op, int32_t op_ret, dict_t *dict,
  390                             char **op_errstr, dict_t *rsp_dict)
  391 {
  392     int32_t ret = -1;
  393     xlator_t *this = NULL;
  394     char *volname = NULL;
  395     glusterd_volinfo_t *volinfo = NULL;
  396 
  397     this = THIS;
  398     GF_ASSERT(this);
  399     GF_ASSERT(dict);
  400     GF_ASSERT(op_errstr);
  401     GF_ASSERT(rsp_dict);
  402 
  403     if (op_ret == 0)
  404         glusterd_op_commit_hook(op, dict, GD_COMMIT_HOOK_POST);
  405 
  406     switch (op) {
  407         case GD_OP_SNAP: {
  408             ret = glusterd_snapshot_postvalidate(dict, op_ret, op_errstr,
  409                                                  rsp_dict);
  410             if (ret) {
  411                 gf_msg(this->name, GF_LOG_WARNING, 0,
  412                        GD_MSG_POST_VALIDATION_FAIL,
  413                        "postvalidate operation failed");
  414                 goto out;
  415             }
  416             break;
  417         }
  418         case GD_OP_ADD_BRICK: {
  419             ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
  420             if (ret) {
  421                 gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
  422                        "Unable to get"
  423                        " volume name");
  424                 goto out;
  425             }
  426 
  427             ret = glusterd_volinfo_find(volname, &volinfo);
  428             if (ret) {
  429                 gf_msg("glusterd", GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND,
  430                        "Unable to "
  431                        "allocate memory");
  432                 goto out;
  433             }
  434             ret = glusterd_create_volfiles_and_notify_services(volinfo);
  435             if (ret)
  436                 goto out;
  437             ret = glusterd_store_volinfo(volinfo,
  438                                          GLUSTERD_VOLINFO_VER_AC_INCREMENT);
  439             if (ret)
  440                 goto out;
  441             break;
  442         }
  443         case GD_OP_START_VOLUME: {
  444             ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
  445             if (ret) {
  446                 gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
  447                        "Unable to get"
  448                        " volume name");
  449                 goto out;
  450             }
  451 
  452             ret = glusterd_volinfo_find(volname, &volinfo);
  453             if (ret) {
  454                 gf_msg("glusterd", GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND,
  455                        "Unable to "
  456                        "allocate memory");
  457                 goto out;
  458             }
  459 
  460             break;
  461         }
  462         case GD_OP_STOP_VOLUME: {
  463             ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
  464             if (ret) {
  465                 gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
  466                        "Unable to get"
  467                        " volume name");
  468                 goto out;
  469             }
  470 
  471             ret = glusterd_volinfo_find(volname, &volinfo);
  472             if (ret) {
  473                 gf_msg("glusterd", GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND,
  474                        "Unable to "
  475                        "allocate memory");
  476                 goto out;
  477             }
  478             break;
  479         }
  480 
  481         default:
  482             break;
  483     }
  484 
  485     ret = 0;
  486 
  487 out:
  488     gf_msg_trace(this->name, 0, "OP = %d. Returning %d", op, ret);
  489     return ret;
  490 }
  491 
/* RPC reply handler for GLUSTERD_MGMT_V3_LOCK sent to one peer.
 *
 * Decodes the lock response, folds the peer's status into the shared
 * syncargs via gd_mgmt_v3_collate_errors(), and wakes the barrier the
 * initiator is waiting on.  Always returns 0 (RPC callback convention).
 */
int32_t
gd_mgmt_v3_lock_cbk_fn(struct rpc_req *req, struct iovec *iov, int count,
                       void *myframe)
{
    int32_t ret = -1;
    struct syncargs *args = NULL;
    /* Zero-initialized so rsp.uuid / rsp.dict are safe to read in the
     * 'out' path even when decoding never happened. */
    gd1_mgmt_v3_lock_rsp rsp = {
        {0},
    };
    call_frame_t *frame = NULL;
    int32_t op_ret = -1;
    int32_t op_errno = -1;
    xlator_t *this = NULL;
    uuid_t *peerid = NULL;

    this = THIS;
    GF_ASSERT(this);
    GF_ASSERT(req);
    GF_ASSERT(myframe);

    /* Even though the lock command has failed, while collating the errors
       (gd_mgmt_v3_collate_errors), args->op_ret and args->op_errno will be
       used. @args is obtained from frame->local. So before checking the
       status of the request and going out if its a failure, args should be
       set to frame->local. Otherwise, while collating args will be NULL.
       This applies to other phases such as prevalidate, brickop, commit and
       postvalidate also.
    */
    frame = myframe;
    args = frame->local;
    peerid = frame->cookie;
    frame->local = NULL;
    frame->cookie = NULL;

    if (-1 == req->rpc_status) {
        op_errno = ENOTCONN;
        goto out;
    }

    GF_VALIDATE_OR_GOTO_WITH_ERROR(this->name, iov, out, op_errno, EINVAL);

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
    if (ret < 0)
        goto out;

    gf_uuid_copy(args->uuid, rsp.uuid);

    op_ret = rsp.op_ret;
    op_errno = rsp.op_errno;

out:
    gd_mgmt_v3_collate_errors(args, op_ret, op_errno, NULL,
                              GLUSTERD_MGMT_V3_LOCK, *peerid, rsp.uuid);
    /* peerid was heap-allocated by the request submitter and handed over
     * via the frame cookie; this callback owns and frees it. */
    GF_FREE(peerid);

    /* dict_val was allocated by the XDR decoder (plain malloc), hence
     * free() rather than GF_FREE(). */
    if (rsp.dict.dict_val)
        free(rsp.dict.dict_val);
    /* req->rpc_status set to -1 means, STACK_DESTROY will be called from
     * the caller function.
     */
    if (req->rpc_status != -1)
        STACK_DESTROY(frame->root);
    synctask_barrier_wake(args);
    return 0;
}
  557 
/* Public RPC callback for GLUSTERD_MGMT_V3_LOCK: runs the real handler
 * (gd_mgmt_v3_lock_cbk_fn) under glusterd's big lock. */
int32_t
gd_mgmt_v3_lock_cbk(struct rpc_req *req, struct iovec *iov, int count,
                    void *myframe)
{
    return glusterd_big_locked_cbk(req, iov, count, myframe,
                                   gd_mgmt_v3_lock_cbk_fn);
}
  565 
  566 int
  567 gd_mgmt_v3_lock(glusterd_op_t op, dict_t *op_ctx, glusterd_peerinfo_t *peerinfo,
  568                 struct syncargs *args, uuid_t my_uuid, uuid_t recv_uuid)
  569 {
  570     gd1_mgmt_v3_lock_req req = {
  571         {0},
  572     };
  573     int32_t ret = -1;
  574     xlator_t *this = NULL;
  575     uuid_t *peerid = NULL;
  576 
  577     this = THIS;
  578     GF_ASSERT(this);
  579     GF_ASSERT(op_ctx);
  580     GF_ASSERT(peerinfo);
  581     GF_ASSERT(args);
  582 
  583     ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val,
  584                                       &req.dict.dict_len);
  585     if (ret)
  586         goto out;
  587 
  588     gf_uuid_copy(req.uuid, my_uuid);
  589     req.op = op;
  590 
  591     GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret);
  592     if (ret)
  593         goto out;
  594 
  595     ret = gd_syncop_submit_request(peerinfo->rpc, &req, args, peerid,
  596                                    &gd_mgmt_v3_prog, GLUSTERD_MGMT_V3_LOCK,
  597                                    gd_mgmt_v3_lock_cbk,
  598                                    (xdrproc_t)xdr_gd1_mgmt_v3_lock_req);
  599 out:
  600     GF_FREE(req.dict.dict_val);
  601     gf_msg_trace(this->name, 0, "Returning %d", ret);
  602     return ret;
  603 }
  604 
  605 int
  606 glusterd_mgmt_v3_initiate_lockdown(glusterd_op_t op, dict_t *dict,
  607                                    char **op_errstr, uint32_t *op_errno,
  608                                    gf_boolean_t *is_acquired,
  609                                    uint32_t txn_generation)
  610 {
  611     glusterd_peerinfo_t *peerinfo = NULL;
  612     int32_t ret = -1;
  613     int32_t peer_cnt = 0;
  614     struct syncargs args = {0};
  615     uuid_t peer_uuid = {0};
  616     xlator_t *this = NULL;
  617     glusterd_conf_t *conf = NULL;
  618     uint32_t timeout = 0;
  619 
  620     this = THIS;
  621     GF_ASSERT(this);
  622     conf = this->private;
  623     GF_ASSERT(conf);
  624 
  625     GF_ASSERT(dict);
  626     GF_ASSERT(op_errstr);
  627     GF_ASSERT(is_acquired);
  628 
  629     /* Cli will add timeout key to dict if the default timeout is
  630      * other than 2 minutes. Here we use this value to check whether
  631      * mgmt_v3_lock_timeout should be set to default value or we
  632      * need to change the value according to timeout value
  633      * i.e, timeout + 120 seconds. */
  634     ret = dict_get_uint32(dict, "timeout", &timeout);
  635     if (!ret)
  636         conf->mgmt_v3_lock_timeout = timeout + 120;
  637 
  638     /* Trying to acquire multiple mgmt_v3 locks on local node */
  639     ret = glusterd_multiple_mgmt_v3_lock(dict, MY_UUID, op_errno);
  640     if (ret) {
  641         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCK_GET_FAIL,
  642                "Failed to acquire mgmt_v3 locks on localhost");
  643         goto out;
  644     }
  645 
  646     *is_acquired = _gf_true;
  647 
  648     /* Sending mgmt_v3 lock req to other nodes in the cluster */
  649     gd_syncargs_init(&args, NULL);
  650     ret = synctask_barrier_init((&args));
  651     if (ret)
  652         goto out;
  653 
  654     peer_cnt = 0;
  655 
  656     RCU_READ_LOCK;
  657     cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
  658     {
  659         /* Only send requests to peers who were available before the
  660          * transaction started
  661          */
  662         if (peerinfo->generation > txn_generation)
  663             continue;
  664 
  665         if (!peerinfo->connected)
  666             continue;
  667         if (op != GD_OP_SYNC_VOLUME &&
  668             peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
  669             continue;
  670 
  671         gd_mgmt_v3_lock(op, dict, peerinfo, &args, MY_UUID, peer_uuid);
  672         peer_cnt++;
  673     }
  674     RCU_READ_UNLOCK;
  675 
  676     if (0 == peer_cnt) {
  677         ret = 0;
  678         goto out;
  679     }
  680 
  681     gd_synctask_barrier_wait((&args), peer_cnt);
  682 
  683     if (args.errstr)
  684         *op_errstr = gf_strdup(args.errstr);
  685 
  686     ret = args.op_ret;
  687     *op_errno = args.op_errno;
  688 
  689     gf_msg_debug(this->name, 0,
  690                  "Sent lock op req for %s "
  691                  "to %d peers. Returning %d",
  692                  gd_op_list[op], peer_cnt, ret);
  693 out:
  694     if (ret) {
  695         if (*op_errstr)
  696             gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCK_GET_FAIL,
  697                    "%s", *op_errstr);
  698 
  699         ret = gf_asprintf(op_errstr,
  700                           "Another transaction is in progress. "
  701                           "Please try again after some time.");
  702 
  703         if (ret == -1)
  704             *op_errstr = NULL;
  705 
  706         ret = -1;
  707     }
  708 
  709     return ret;
  710 }
  711 
  712 int
  713 glusterd_pre_validate_aggr_rsp_dict(glusterd_op_t op, dict_t *aggr, dict_t *rsp)
  714 {
  715     int32_t ret = 0;
  716     xlator_t *this = NULL;
  717 
  718     this = THIS;
  719     GF_ASSERT(this);
  720     GF_ASSERT(aggr);
  721     GF_ASSERT(rsp);
  722 
  723     switch (op) {
  724         case GD_OP_SNAP:
  725             ret = glusterd_snap_pre_validate_use_rsp_dict(aggr, rsp);
  726             if (ret) {
  727                 gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PRE_VALIDATION_FAIL,
  728                        "Failed to aggregate prevalidate "
  729                        "response dictionaries.");
  730                 goto out;
  731             }
  732             break;
  733         case GD_OP_REPLACE_BRICK:
  734             ret = glusterd_rb_use_rsp_dict(aggr, rsp);
  735             if (ret) {
  736                 gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PRE_VALIDATION_FAIL,
  737                        "Failed to aggregate prevalidate "
  738                        "response dictionaries.");
  739                 goto out;
  740             }
  741             break;
  742         case GD_OP_START_VOLUME:
  743         case GD_OP_ADD_BRICK:
  744             ret = glusterd_aggr_brick_mount_dirs(aggr, rsp);
  745             if (ret) {
  746                 gf_msg(this->name, GF_LOG_ERROR, 0,
  747                        GD_MSG_BRICK_MOUNDIRS_AGGR_FAIL,
  748                        "Failed to "
  749                        "aggregate brick mount dirs");
  750                 goto out;
  751             }
  752             break;
  753         case GD_OP_RESET_BRICK:
  754             ret = glusterd_rb_use_rsp_dict(aggr, rsp);
  755             if (ret) {
  756                 gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PRE_VALIDATION_FAIL,
  757                        "Failed to aggregate prevalidate "
  758                        "response dictionaries.");
  759                 goto out;
  760             }
  761         case GD_OP_STOP_VOLUME:
  762         case GD_OP_PROFILE_VOLUME:
  763         case GD_OP_DEFRAG_BRICK_VOLUME:
  764         case GD_OP_REBALANCE:
  765             break;
  766         case GD_OP_MAX_OPVERSION:
  767             break;
  768         default:
  769             ret = -1;
  770             gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
  771                    "Invalid op (%s)", gd_op_list[op]);
  772 
  773             break;
  774     }
  775 out:
  776     return ret;
  777 }
  778 
/* RPC reply handler for GLUSTERD_MGMT_V3_PRE_VALIDATE sent to one peer.
 *
 * Decodes the response, unserializes the peer's response dictionary,
 * merges it into the shared args->dict under args->lock_dict, records the
 * peer's status via gd_mgmt_v3_collate_errors(), and wakes the barrier the
 * initiator is waiting on.  Always returns 0 (RPC callback convention).
 */
int32_t
gd_mgmt_v3_pre_validate_cbk_fn(struct rpc_req *req, struct iovec *iov,
                               int count, void *myframe)
{
    int32_t ret = -1;
    struct syncargs *args = NULL;
    /* Zero-initialized so rsp fields are safe to read in the 'out' path
     * even when decoding never happened. */
    gd1_mgmt_v3_pre_val_rsp rsp = {
        {0},
    };
    call_frame_t *frame = NULL;
    int32_t op_ret = -1;
    int32_t op_errno = -1;
    dict_t *rsp_dict = NULL;
    xlator_t *this = NULL;
    uuid_t *peerid = NULL;

    this = THIS;
    GF_ASSERT(this);
    GF_ASSERT(req);
    GF_ASSERT(myframe);

    /* args must be captured from frame->local before any early exit so
     * the 'out' path can collate errors (see gd_mgmt_v3_lock_cbk_fn). */
    frame = myframe;
    args = frame->local;
    peerid = frame->cookie;
    frame->local = NULL;
    frame->cookie = NULL;

    if (-1 == req->rpc_status) {
        op_errno = ENOTCONN;
        goto out;
    }

    GF_VALIDATE_OR_GOTO_WITH_ERROR(this->name, iov, out, op_errno, EINVAL);

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_v3_pre_val_rsp);
    if (ret < 0)
        goto out;

    if (rsp.dict.dict_len) {
        /* Unserialize the dictionary */
        rsp_dict = dict_new();

        ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &rsp_dict);
        if (ret < 0) {
            free(rsp.dict.dict_val);
            goto out;
        } else {
            /* Let the dict free the XDR-allocated buffer on unref. */
            rsp_dict->extra_stdfree = rsp.dict.dict_val;
        }
    }

    gf_uuid_copy(args->uuid, rsp.uuid);
    /* Multiple peer callbacks may aggregate concurrently; serialize the
     * merge into the shared args->dict. */
    pthread_mutex_lock(&args->lock_dict);
    {
        ret = glusterd_pre_validate_aggr_rsp_dict(rsp.op, args->dict, rsp_dict);
    }
    pthread_mutex_unlock(&args->lock_dict);

    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RESP_AGGR_FAIL, "%s",
               "Failed to aggregate response from "
               " node/brick");
        /* Prefer the peer's own failure status over a local aggregation
         * failure when both occurred. */
        if (!rsp.op_ret)
            op_ret = ret;
        else {
            op_ret = rsp.op_ret;
            op_errno = rsp.op_errno;
        }
    } else {
        op_ret = rsp.op_ret;
        op_errno = rsp.op_errno;
    }

out:
    if (rsp_dict)
        dict_unref(rsp_dict);

    gd_mgmt_v3_collate_errors(args, op_ret, op_errno, rsp.op_errstr,
                              GLUSTERD_MGMT_V3_PRE_VALIDATE, *peerid, rsp.uuid);

    /* op_errstr was allocated by the XDR decoder (plain malloc). */
    if (rsp.op_errstr)
        free(rsp.op_errstr);
    /* peerid was heap-allocated by the request submitter; this callback
     * owns and frees it. */
    GF_FREE(peerid);
    /* req->rpc_status set to -1 means, STACK_DESTROY will be called from
     * the caller function.
     */
    if (req->rpc_status != -1)
        STACK_DESTROY(frame->root);
    synctask_barrier_wake(args);
    return 0;
}
  870 
/* Public RPC callback for GLUSTERD_MGMT_V3_PRE_VALIDATE: runs the real
 * handler (gd_mgmt_v3_pre_validate_cbk_fn) under glusterd's big lock. */
int32_t
gd_mgmt_v3_pre_validate_cbk(struct rpc_req *req, struct iovec *iov, int count,
                            void *myframe)
{
    return glusterd_big_locked_cbk(req, iov, count, myframe,
                                   gd_mgmt_v3_pre_validate_cbk_fn);
}
  878 
  879 int
  880 gd_mgmt_v3_pre_validate_req(glusterd_op_t op, dict_t *op_ctx,
  881                             glusterd_peerinfo_t *peerinfo,
  882                             struct syncargs *args, uuid_t my_uuid,
  883                             uuid_t recv_uuid)
  884 {
  885     int32_t ret = -1;
  886     gd1_mgmt_v3_pre_val_req req = {
  887         {0},
  888     };
  889     xlator_t *this = NULL;
  890     uuid_t *peerid = NULL;
  891 
  892     this = THIS;
  893     GF_ASSERT(this);
  894     GF_ASSERT(op_ctx);
  895     GF_ASSERT(peerinfo);
  896     GF_ASSERT(args);
  897 
  898     ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val,
  899                                       &req.dict.dict_len);
  900     if (ret)
  901         goto out;
  902 
  903     gf_uuid_copy(req.uuid, my_uuid);
  904     req.op = op;
  905 
  906     GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret);
  907     if (ret)
  908         goto out;
  909 
  910     ret = gd_syncop_submit_request(
  911         peerinfo->rpc, &req, args, peerid, &gd_mgmt_v3_prog,
  912         GLUSTERD_MGMT_V3_PRE_VALIDATE, gd_mgmt_v3_pre_validate_cbk,
  913         (xdrproc_t)xdr_gd1_mgmt_v3_pre_val_req);
  914 out:
  915     GF_FREE(req.dict.dict_val);
  916     gf_msg_trace(this->name, 0, "Returning %d", ret);
  917     return ret;
  918 }
  919 
  920 int
  921 glusterd_mgmt_v3_pre_validate(glusterd_op_t op, dict_t *req_dict,
  922                               char **op_errstr, uint32_t *op_errno,
  923                               uint32_t txn_generation)
  924 {
  925     int32_t ret = -1;
  926     int32_t peer_cnt = 0;
  927     dict_t *rsp_dict = NULL;
  928     glusterd_peerinfo_t *peerinfo = NULL;
  929     struct syncargs args = {0};
  930     uuid_t peer_uuid = {0};
  931     xlator_t *this = NULL;
  932     glusterd_conf_t *conf = NULL;
  933 
  934     this = THIS;
  935     GF_ASSERT(this);
  936     conf = this->private;
  937     GF_ASSERT(conf);
  938 
  939     GF_ASSERT(req_dict);
  940     GF_ASSERT(op_errstr);
  941     GF_VALIDATE_OR_GOTO(this->name, op_errno, out);
  942 
  943     rsp_dict = dict_new();
  944     if (!rsp_dict) {
  945         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
  946                "Failed to create response dictionary");
  947         goto out;
  948     }
  949 
  950     if (op == GD_OP_PROFILE_VOLUME || op == GD_OP_STOP_VOLUME ||
  951         op == GD_OP_REBALANCE) {
  952         ret = glusterd_validate_quorum(this, op, req_dict, op_errstr);
  953         if (ret) {
  954             gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SERVER_QUORUM_NOT_MET,
  955                    "Server quorum not met. Rejecting operation.");
  956             goto out;
  957         }
  958     }
  959 
  960     /* Pre Validation on local node */
  961     ret = gd_mgmt_v3_pre_validate_fn(op, req_dict, op_errstr, rsp_dict,
  962                                      op_errno);
  963 
  964     if (ret) {
  965         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PRE_VALIDATION_FAIL,
  966                "Pre Validation failed for "
  967                "operation %s on local node",
  968                gd_op_list[op]);
  969 
  970         if (*op_errstr == NULL) {
  971             ret = gf_asprintf(op_errstr,
  972                               "Pre-validation failed "
  973                               "on localhost. Please "
  974                               "check log file for details");
  975             if (ret == -1)
  976                 *op_errstr = NULL;
  977 
  978             ret = -1;
  979         }
  980         goto out;
  981     }
  982 
  983     if (op != GD_OP_MAX_OPVERSION) {
  984         ret = glusterd_pre_validate_aggr_rsp_dict(op, req_dict, rsp_dict);
  985         if (ret) {
  986             gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PRE_VALIDATION_FAIL,
  987                    "%s",
  988                    "Failed to aggregate response from "
  989                    " node/brick");
  990             goto out;
  991         }
  992 
  993         dict_unref(rsp_dict);
  994         rsp_dict = NULL;
  995     }
  996 
  997     /* Sending Pre Validation req to other nodes in the cluster */
  998     gd_syncargs_init(&args, req_dict);
  999     ret = synctask_barrier_init((&args));
 1000     if (ret)
 1001         goto out;
 1002 
 1003     peer_cnt = 0;
 1004 
 1005     RCU_READ_LOCK;
 1006     cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
 1007     {
 1008         /* Only send requests to peers who were available before the
 1009          * transaction started
 1010          */
 1011         if (peerinfo->generation > txn_generation)
 1012             continue;
 1013 
 1014         if (!peerinfo->connected)
 1015             continue;
 1016         if (op != GD_OP_SYNC_VOLUME &&
 1017             peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
 1018             continue;
 1019 
 1020         gd_mgmt_v3_pre_validate_req(op, req_dict, peerinfo, &args, MY_UUID,
 1021                                     peer_uuid);
 1022         peer_cnt++;
 1023     }
 1024     RCU_READ_UNLOCK;
 1025 
 1026     if (0 == peer_cnt) {
 1027         ret = 0;
 1028         goto out;
 1029     }
 1030 
 1031     gd_synctask_barrier_wait((&args), peer_cnt);
 1032 
 1033     if (args.op_ret) {
 1034         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PRE_VALIDATION_FAIL,
 1035                "Pre Validation failed on peers");
 1036 
 1037         if (args.errstr)
 1038             *op_errstr = gf_strdup(args.errstr);
 1039     }
 1040 
 1041     ret = args.op_ret;
 1042     *op_errno = args.op_errno;
 1043 
 1044     gf_msg_debug(this->name, 0,
 1045                  "Sent pre valaidation req for %s "
 1046                  "to %d peers. Returning %d",
 1047                  gd_op_list[op], peer_cnt, ret);
 1048 out:
 1049     return ret;
 1050 }
 1051 
 1052 int
 1053 glusterd_mgmt_v3_build_payload(dict_t **req, char **op_errstr, dict_t *dict,
 1054                                glusterd_op_t op)
 1055 {
 1056     int32_t ret = -1;
 1057     dict_t *req_dict = NULL;
 1058     xlator_t *this = NULL;
 1059     char *volname = NULL;
 1060 
 1061     this = THIS;
 1062     GF_ASSERT(this);
 1063     GF_ASSERT(req);
 1064     GF_ASSERT(op_errstr);
 1065     GF_ASSERT(dict);
 1066 
 1067     req_dict = dict_new();
 1068     if (!req_dict)
 1069         goto out;
 1070 
 1071     switch (op) {
 1072         case GD_OP_MAX_OPVERSION:
 1073         case GD_OP_SNAP:
 1074             dict_copy(dict, req_dict);
 1075             break;
 1076         case GD_OP_START_VOLUME:
 1077         case GD_OP_STOP_VOLUME:
 1078         case GD_OP_ADD_BRICK:
 1079         case GD_OP_DEFRAG_BRICK_VOLUME:
 1080         case GD_OP_REPLACE_BRICK:
 1081         case GD_OP_RESET_BRICK:
 1082         case GD_OP_PROFILE_VOLUME: {
 1083             ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
 1084             if (ret) {
 1085                 gf_msg(this->name, GF_LOG_CRITICAL, errno,
 1086                        GD_MSG_DICT_GET_FAILED,
 1087                        "volname is not present in "
 1088                        "operation ctx");
 1089                 goto out;
 1090             }
 1091 
 1092             if (strcasecmp(volname, "all")) {
 1093                 ret = glusterd_dict_set_volid(dict, volname, op_errstr);
 1094                 if (ret)
 1095                     goto out;
 1096             }
 1097             dict_copy(dict, req_dict);
 1098         } break;
 1099 
 1100         case GD_OP_REBALANCE: {
 1101             if (gd_set_commit_hash(dict) != 0) {
 1102                 ret = -1;
 1103                 goto out;
 1104             }
 1105             ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
 1106             if (ret) {
 1107                 gf_msg(this->name, GF_LOG_CRITICAL, errno,
 1108                        GD_MSG_DICT_GET_FAILED,
 1109                        "volname is not present in "
 1110                        "operation ctx");
 1111                 goto out;
 1112             }
 1113 
 1114             if (strcasecmp(volname, "all")) {
 1115                 ret = glusterd_dict_set_volid(dict, volname, op_errstr);
 1116                 if (ret)
 1117                     goto out;
 1118             }
 1119             dict_copy(dict, req_dict);
 1120         } break;
 1121 
 1122         default:
 1123             break;
 1124     }
 1125 
 1126     *req = req_dict;
 1127     ret = 0;
 1128 out:
 1129     return ret;
 1130 }
 1131 
 1132 int32_t
 1133 gd_mgmt_v3_brick_op_cbk_fn(struct rpc_req *req, struct iovec *iov, int count,
 1134                            void *myframe)
 1135 {
 1136     int32_t ret = -1;
 1137     struct syncargs *args = NULL;
 1138     gd1_mgmt_v3_brick_op_rsp rsp = {
 1139         {0},
 1140     };
 1141     call_frame_t *frame = NULL;
 1142     int32_t op_ret = -1;
 1143     int32_t op_errno = -1;
 1144     dict_t *rsp_dict = NULL;
 1145     xlator_t *this = NULL;
 1146     uuid_t *peerid = NULL;
 1147 
 1148     this = THIS;
 1149     GF_ASSERT(this);
 1150     GF_ASSERT(req);
 1151     GF_ASSERT(myframe);
 1152 
 1153     frame = myframe;
 1154     args = frame->local;
 1155     peerid = frame->cookie;
 1156     frame->local = NULL;
 1157     frame->cookie = NULL;
 1158 
 1159     /* If the operation failed, then iov can be NULL. So better check the
 1160        status of the operation and then worry about iov (if the status of
 1161        the command is success)
 1162     */
 1163     if (-1 == req->rpc_status) {
 1164         op_errno = ENOTCONN;
 1165         goto out;
 1166     }
 1167 
 1168     GF_VALIDATE_OR_GOTO_WITH_ERROR(this->name, iov, out, op_errno, EINVAL);
 1169 
 1170     ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_v3_brick_op_rsp);
 1171     if (ret < 0)
 1172         goto out;
 1173 
 1174     if (rsp.dict.dict_len) {
 1175         /* Unserialize the dictionary */
 1176         rsp_dict = dict_new();
 1177 
 1178         ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &rsp_dict);
 1179         if (ret < 0) {
 1180             goto out;
 1181         } else {
 1182             rsp_dict->extra_stdfree = rsp.dict.dict_val;
 1183         }
 1184     }
 1185 
 1186     gf_uuid_copy(args->uuid, rsp.uuid);
 1187     pthread_mutex_lock(&args->lock_dict);
 1188     {
 1189         if (rsp.op == GD_OP_DEFRAG_BRICK_VOLUME ||
 1190             rsp.op == GD_OP_PROFILE_VOLUME)
 1191             ret = glusterd_syncop_aggr_rsp_dict(rsp.op, args->dict, rsp_dict);
 1192     }
 1193     pthread_mutex_unlock(&args->lock_dict);
 1194 
 1195     if (ret) {
 1196         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RESP_AGGR_FAIL, "%s",
 1197                "Failed to aggregate response from "
 1198                " node/brick");
 1199         if (!rsp.op_ret)
 1200             op_ret = ret;
 1201         else {
 1202             op_ret = rsp.op_ret;
 1203             op_errno = rsp.op_errno;
 1204         }
 1205     } else {
 1206         op_ret = rsp.op_ret;
 1207         op_errno = rsp.op_errno;
 1208     }
 1209 
 1210 out:
 1211 
 1212     gd_mgmt_v3_collate_errors(args, op_ret, op_errno, rsp.op_errstr,
 1213                               GLUSTERD_MGMT_V3_BRICK_OP, *peerid, rsp.uuid);
 1214 
 1215     if (rsp.op_errstr)
 1216         free(rsp.op_errstr);
 1217 
 1218     if (rsp_dict)
 1219         dict_unref(rsp_dict);
 1220 
 1221     GF_FREE(peerid);
 1222     /* req->rpc_status set to -1 means, STACK_DESTROY will be called from
 1223      * the caller function.
 1224      */
 1225     if (req->rpc_status != -1)
 1226         STACK_DESTROY(frame->root);
 1227     synctask_barrier_wake(args);
 1228     return 0;
 1229 }
 1230 
 1231 int32_t
 1232 gd_mgmt_v3_brick_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
 1233                         void *myframe)
 1234 {
 1235     return glusterd_big_locked_cbk(req, iov, count, myframe,
 1236                                    gd_mgmt_v3_brick_op_cbk_fn);
 1237 }
 1238 
 1239 int
 1240 gd_mgmt_v3_brick_op_req(glusterd_op_t op, dict_t *op_ctx,
 1241                         glusterd_peerinfo_t *peerinfo, struct syncargs *args,
 1242                         uuid_t my_uuid, uuid_t recv_uuid)
 1243 {
 1244     int32_t ret = -1;
 1245     gd1_mgmt_v3_brick_op_req req = {
 1246         {0},
 1247     };
 1248     xlator_t *this = NULL;
 1249     uuid_t *peerid = {
 1250         0,
 1251     };
 1252 
 1253     this = THIS;
 1254     GF_ASSERT(this);
 1255     GF_ASSERT(op_ctx);
 1256     GF_ASSERT(peerinfo);
 1257     GF_ASSERT(args);
 1258 
 1259     ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val,
 1260                                       &req.dict.dict_len);
 1261     if (ret)
 1262         goto out;
 1263 
 1264     gf_uuid_copy(req.uuid, my_uuid);
 1265     req.op = op;
 1266 
 1267     GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret);
 1268     if (ret)
 1269         goto out;
 1270 
 1271     ret = gd_syncop_submit_request(peerinfo->rpc, &req, args, peerid,
 1272                                    &gd_mgmt_v3_prog, GLUSTERD_MGMT_V3_BRICK_OP,
 1273                                    gd_mgmt_v3_brick_op_cbk,
 1274                                    (xdrproc_t)xdr_gd1_mgmt_v3_brick_op_req);
 1275 out:
 1276     GF_FREE(req.dict.dict_val);
 1277     gf_msg_trace(this->name, 0, "Returning %d", ret);
 1278     return ret;
 1279 }
 1280 
 1281 int
 1282 glusterd_mgmt_v3_brick_op(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
 1283                           char **op_errstr, uint32_t txn_generation)
 1284 {
 1285     int32_t ret = -1;
 1286     int32_t peer_cnt = 0;
 1287     dict_t *rsp_dict = NULL;
 1288     glusterd_peerinfo_t *peerinfo = NULL;
 1289     struct syncargs args = {0};
 1290     uuid_t peer_uuid = {0};
 1291     xlator_t *this = NULL;
 1292     glusterd_conf_t *conf = NULL;
 1293 
 1294     this = THIS;
 1295     GF_ASSERT(this);
 1296     conf = this->private;
 1297     GF_ASSERT(conf);
 1298 
 1299     GF_ASSERT(req_dict);
 1300     GF_ASSERT(op_errstr);
 1301 
 1302     rsp_dict = dict_new();
 1303     if (!rsp_dict) {
 1304         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
 1305                "Failed to create response dictionary");
 1306         goto out;
 1307     }
 1308 
 1309     /* Perform brick op on local node */
 1310     ret = gd_mgmt_v3_brick_op_fn(op, req_dict, op_errstr, rsp_dict);
 1311 
 1312     if (ret) {
 1313         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_OP_FAIL,
 1314                "Brick ops failed for "
 1315                "operation %s on local node",
 1316                gd_op_list[op]);
 1317 
 1318         if (*op_errstr == NULL) {
 1319             ret = gf_asprintf(op_errstr,
 1320                               "Brick ops failed "
 1321                               "on localhost. Please "
 1322                               "check log file for details");
 1323             if (ret == -1)
 1324                 *op_errstr = NULL;
 1325 
 1326             ret = -1;
 1327         }
 1328         goto out;
 1329     }
 1330     if (op == GD_OP_DEFRAG_BRICK_VOLUME || op == GD_OP_PROFILE_VOLUME) {
 1331         ret = glusterd_syncop_aggr_rsp_dict(op, op_ctx, rsp_dict);
 1332         if (ret) {
 1333             gf_log(this->name, GF_LOG_ERROR, "%s",
 1334                    "Failed to aggregate response from "
 1335                    " node/brick");
 1336             goto out;
 1337         }
 1338     }
 1339 
 1340     dict_unref(rsp_dict);
 1341     rsp_dict = NULL;
 1342 
 1343     /* Sending brick op req to other nodes in the cluster */
 1344     gd_syncargs_init(&args, op_ctx);
 1345     ret = synctask_barrier_init((&args));
 1346     if (ret)
 1347         goto out;
 1348 
 1349     peer_cnt = 0;
 1350 
 1351     RCU_READ_LOCK;
 1352     cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
 1353     {
 1354         /* Only send requests to peers who were available before the
 1355          * transaction started
 1356          */
 1357         if (peerinfo->generation > txn_generation)
 1358             continue;
 1359 
 1360         if (!peerinfo->connected)
 1361             continue;
 1362         if (op != GD_OP_SYNC_VOLUME &&
 1363             peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
 1364             continue;
 1365 
 1366         gd_mgmt_v3_brick_op_req(op, req_dict, peerinfo, &args, MY_UUID,
 1367                                 peer_uuid);
 1368         peer_cnt++;
 1369     }
 1370     RCU_READ_UNLOCK;
 1371 
 1372     if (0 == peer_cnt) {
 1373         ret = 0;
 1374         goto out;
 1375     }
 1376 
 1377     gd_synctask_barrier_wait((&args), peer_cnt);
 1378 
 1379     if (args.op_ret) {
 1380         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_OP_FAIL,
 1381                "Brick ops failed on peers");
 1382 
 1383         if (args.errstr)
 1384             *op_errstr = gf_strdup(args.errstr);
 1385     }
 1386 
 1387     ret = args.op_ret;
 1388 
 1389     gf_msg_debug(this->name, 0,
 1390                  "Sent brick op req for %s "
 1391                  "to %d peers. Returning %d",
 1392                  gd_op_list[op], peer_cnt, ret);
 1393 out:
 1394     return ret;
 1395 }
 1396 
/* Callback for GLUSTERD_MGMT_V3_COMMIT responses from a peer.
 *
 * Decodes the response, merges the peer's response dict into the shared
 * transaction dict under args->lock_dict, collates the peer's result into
 * args, and wakes the barrier the initiator is waiting on.
 * Always returns 0; success/failure is conveyed through args.
 */
int32_t
gd_mgmt_v3_commit_cbk_fn(struct rpc_req *req, struct iovec *iov, int count,
                         void *myframe)
{
    int32_t ret = -1;
    struct syncargs *args = NULL;
    gd1_mgmt_v3_commit_rsp rsp = {
        {0},
    };
    call_frame_t *frame = NULL;
    int32_t op_ret = -1;
    int32_t op_errno = -1;
    dict_t *rsp_dict = NULL;
    xlator_t *this = NULL;
    uuid_t *peerid = NULL;

    this = THIS;
    GF_ASSERT(this);
    GF_ASSERT(req);
    GF_ASSERT(myframe);

    /* Take ownership of the frame's local/cookie before doing anything
     * that could fail, so cleanup at 'out' is unconditional. */
    frame = myframe;
    args = frame->local;
    peerid = frame->cookie;
    frame->local = NULL;
    frame->cookie = NULL;

    /* On RPC failure iov may be NULL; bail out with ENOTCONN. */
    if (-1 == req->rpc_status) {
        op_errno = ENOTCONN;
        goto out;
    }

    GF_VALIDATE_OR_GOTO_WITH_ERROR(this->name, iov, out, op_errno, EINVAL);

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_v3_commit_rsp);
    if (ret < 0)
        goto out;

    if (rsp.dict.dict_len) {
        /* Unserialize the dictionary */
        rsp_dict = dict_new();

        ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &rsp_dict);
        if (ret < 0) {
            /* Buffer was not handed over to rsp_dict; free it here. */
            free(rsp.dict.dict_val);
            goto out;
        } else {
            /* rsp_dict now owns the XDR buffer; dict_unref frees it. */
            rsp_dict->extra_stdfree = rsp.dict.dict_val;
        }
    }

    gf_uuid_copy(args->uuid, rsp.uuid);
    /* args->dict is shared across all peer callbacks; serialize updates. */
    pthread_mutex_lock(&args->lock_dict);
    {
        ret = glusterd_syncop_aggr_rsp_dict(rsp.op, args->dict, rsp_dict);
    }
    pthread_mutex_unlock(&args->lock_dict);

    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RESP_AGGR_FAIL, "%s",
               "Failed to aggregate response from "
               " node/brick");
        /* Prefer the peer's own failure code over the aggregation error. */
        if (!rsp.op_ret)
            op_ret = ret;
        else {
            op_ret = rsp.op_ret;
            op_errno = rsp.op_errno;
        }
    } else {
        op_ret = rsp.op_ret;
        op_errno = rsp.op_errno;
    }

out:
    if (rsp_dict)
        dict_unref(rsp_dict);

    gd_mgmt_v3_collate_errors(args, op_ret, op_errno, rsp.op_errstr,
                              GLUSTERD_MGMT_V3_COMMIT, *peerid, rsp.uuid);
    GF_FREE(peerid);

    if (rsp.op_errstr)
        free(rsp.op_errstr);

    /* req->rpc_status set to -1 means, STACK_DESTROY will be called from
     * the caller function.
     */
    if (req->rpc_status != -1)
        STACK_DESTROY(frame->root);
    synctask_barrier_wake(args);
    return 0;
}
 1489 
 1490 int32_t
 1491 gd_mgmt_v3_commit_cbk(struct rpc_req *req, struct iovec *iov, int count,
 1492                       void *myframe)
 1493 {
 1494     return glusterd_big_locked_cbk(req, iov, count, myframe,
 1495                                    gd_mgmt_v3_commit_cbk_fn);
 1496 }
 1497 
 1498 int
 1499 gd_mgmt_v3_commit_req(glusterd_op_t op, dict_t *op_ctx,
 1500                       glusterd_peerinfo_t *peerinfo, struct syncargs *args,
 1501                       uuid_t my_uuid, uuid_t recv_uuid)
 1502 {
 1503     int32_t ret = -1;
 1504     gd1_mgmt_v3_commit_req req = {
 1505         {0},
 1506     };
 1507     xlator_t *this = NULL;
 1508     uuid_t *peerid = NULL;
 1509 
 1510     this = THIS;
 1511     GF_ASSERT(this);
 1512     GF_ASSERT(op_ctx);
 1513     GF_ASSERT(peerinfo);
 1514     GF_ASSERT(args);
 1515 
 1516     ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val,
 1517                                       &req.dict.dict_len);
 1518     if (ret)
 1519         goto out;
 1520 
 1521     gf_uuid_copy(req.uuid, my_uuid);
 1522     req.op = op;
 1523 
 1524     GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret);
 1525     if (ret)
 1526         goto out;
 1527 
 1528     ret = gd_syncop_submit_request(peerinfo->rpc, &req, args, peerid,
 1529                                    &gd_mgmt_v3_prog, GLUSTERD_MGMT_V3_COMMIT,
 1530                                    gd_mgmt_v3_commit_cbk,
 1531                                    (xdrproc_t)xdr_gd1_mgmt_v3_commit_req);
 1532 out:
 1533     GF_FREE(req.dict.dict_val);
 1534     gf_msg_trace(this->name, 0, "Returning %d", ret);
 1535     return ret;
 1536 }
 1537 
 1538 int
 1539 glusterd_mgmt_v3_commit(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
 1540                         char **op_errstr, uint32_t *op_errno,
 1541                         uint32_t txn_generation)
 1542 {
 1543     int32_t ret = -1;
 1544     int32_t peer_cnt = 0;
 1545     dict_t *rsp_dict = NULL;
 1546     glusterd_peerinfo_t *peerinfo = NULL;
 1547     struct syncargs args = {0};
 1548     uuid_t peer_uuid = {0};
 1549     xlator_t *this = NULL;
 1550     glusterd_conf_t *conf = NULL;
 1551 
 1552     this = THIS;
 1553     GF_ASSERT(this);
 1554     conf = this->private;
 1555     GF_ASSERT(conf);
 1556 
 1557     GF_ASSERT(op_ctx);
 1558     GF_ASSERT(req_dict);
 1559     GF_ASSERT(op_errstr);
 1560     GF_VALIDATE_OR_GOTO(this->name, op_errno, out);
 1561 
 1562     if (op == GD_OP_REBALANCE || op == GD_OP_DEFRAG_BRICK_VOLUME) {
 1563         ret = glusterd_set_rebalance_id_in_rsp_dict(req_dict, op_ctx);
 1564         if (ret) {
 1565             gf_log(this->name, GF_LOG_WARNING,
 1566                    "Failed to set rebalance id in dict.");
 1567         }
 1568     }
 1569     rsp_dict = dict_new();
 1570     if (!rsp_dict) {
 1571         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
 1572                "Failed to create response dictionary");
 1573         goto out;
 1574     }
 1575 
 1576     /* Commit on local node */
 1577     ret = gd_mgmt_v3_commit_fn(op, req_dict, op_errstr, op_errno, rsp_dict);
 1578 
 1579     if (ret) {
 1580         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
 1581                "Commit failed for "
 1582                "operation %s on local node",
 1583                gd_op_list[op]);
 1584 
 1585         if (*op_errstr == NULL) {
 1586             ret = gf_asprintf(op_errstr,
 1587                               "Commit failed "
 1588                               "on localhost. Please "
 1589                               "check log file for details.");
 1590             if (ret == -1)
 1591                 *op_errstr = NULL;
 1592 
 1593             ret = -1;
 1594         }
 1595         goto out;
 1596     }
 1597 
 1598     ret = glusterd_syncop_aggr_rsp_dict(op, op_ctx, rsp_dict);
 1599     if (ret) {
 1600         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RESP_AGGR_FAIL, "%s",
 1601                "Failed to aggregate response from "
 1602                " node/brick");
 1603         goto out;
 1604     }
 1605 
 1606     dict_unref(rsp_dict);
 1607     rsp_dict = NULL;
 1608 
 1609     /* Sending commit req to other nodes in the cluster */
 1610     gd_syncargs_init(&args, op_ctx);
 1611     ret = synctask_barrier_init((&args));
 1612     if (ret)
 1613         goto out;
 1614     peer_cnt = 0;
 1615 
 1616     RCU_READ_LOCK;
 1617     cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
 1618     {
 1619         /* Only send requests to peers who were available before the
 1620          * transaction started
 1621          */
 1622         if (peerinfo->generation > txn_generation)
 1623             continue;
 1624         if (!peerinfo->connected)
 1625             continue;
 1626 
 1627         if (op != GD_OP_SYNC_VOLUME &&
 1628             peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
 1629             continue;
 1630 
 1631         gd_mgmt_v3_commit_req(op, req_dict, peerinfo, &args, MY_UUID,
 1632                               peer_uuid);
 1633         peer_cnt++;
 1634     }
 1635     RCU_READ_UNLOCK;
 1636 
 1637     if (0 == peer_cnt) {
 1638         ret = 0;
 1639         goto out;
 1640     }
 1641 
 1642     gd_synctask_barrier_wait((&args), peer_cnt);
 1643 
 1644     if (args.op_ret) {
 1645         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
 1646                "Commit failed on peers");
 1647 
 1648         if (args.errstr)
 1649             *op_errstr = gf_strdup(args.errstr);
 1650     }
 1651 
 1652     ret = args.op_ret;
 1653     *op_errno = args.op_errno;
 1654 
 1655     gf_msg_debug(this->name, 0,
 1656                  "Sent commit req for %s to %d "
 1657                  "peers. Returning %d",
 1658                  gd_op_list[op], peer_cnt, ret);
 1659 out:
 1660     glusterd_op_modify_op_ctx(op, op_ctx);
 1661     return ret;
 1662 }
 1663 
/* Callback for GLUSTERD_MGMT_V3_POST_VALIDATE responses from a peer.
 *
 * Decodes the response, records the peer's result into args via
 * gd_mgmt_v3_collate_errors, and wakes the barrier the initiator is
 * waiting on. Unlike the other phase callbacks, the response dict is not
 * unserialized here; its raw buffer is simply freed.
 * Always returns 0; success/failure is conveyed through args.
 */
int32_t
gd_mgmt_v3_post_validate_cbk_fn(struct rpc_req *req, struct iovec *iov,
                                int count, void *myframe)
{
    int32_t ret = -1;
    struct syncargs *args = NULL;
    gd1_mgmt_v3_post_val_rsp rsp = {
        {0},
    };
    call_frame_t *frame = NULL;
    int32_t op_ret = -1;
    int32_t op_errno = -1;
    xlator_t *this = NULL;
    uuid_t *peerid = NULL;

    this = THIS;
    GF_ASSERT(this);
    GF_ASSERT(req);
    GF_ASSERT(myframe);

    /* Take ownership of the frame's local/cookie before doing anything
     * that could fail, so cleanup at 'out' is unconditional. */
    frame = myframe;
    args = frame->local;
    peerid = frame->cookie;
    frame->local = NULL;
    frame->cookie = NULL;

    /* On RPC failure iov may be NULL; bail out with ENOTCONN. */
    if (-1 == req->rpc_status) {
        op_errno = ENOTCONN;
        goto out;
    }

    GF_VALIDATE_OR_GOTO_WITH_ERROR(this->name, iov, out, op_errno, EINVAL);

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_v3_post_val_rsp);
    if (ret < 0)
        goto out;

    gf_uuid_copy(args->uuid, rsp.uuid);

    op_ret = rsp.op_ret;
    op_errno = rsp.op_errno;

out:
    gd_mgmt_v3_collate_errors(args, op_ret, op_errno, rsp.op_errstr,
                              GLUSTERD_MGMT_V3_POST_VALIDATE, *peerid,
                              rsp.uuid);
    if (rsp.op_errstr)
        free(rsp.op_errstr);

    /* The raw XDR-decoded dict buffer is unused in this phase; free it. */
    if (rsp.dict.dict_val)
        free(rsp.dict.dict_val);
    GF_FREE(peerid);
    /* req->rpc_status set to -1 means, STACK_DESTROY will be called from
     * the caller function.
     */
    if (req->rpc_status != -1)
        STACK_DESTROY(frame->root);
    synctask_barrier_wake(args);
    return 0;
}
 1724 
 1725 int32_t
 1726 gd_mgmt_v3_post_validate_cbk(struct rpc_req *req, struct iovec *iov, int count,
 1727                              void *myframe)
 1728 {
 1729     return glusterd_big_locked_cbk(req, iov, count, myframe,
 1730                                    gd_mgmt_v3_post_validate_cbk_fn);
 1731 }
 1732 
 1733 int
 1734 gd_mgmt_v3_post_validate_req(glusterd_op_t op, int32_t op_ret, dict_t *op_ctx,
 1735                              glusterd_peerinfo_t *peerinfo,
 1736                              struct syncargs *args, uuid_t my_uuid,
 1737                              uuid_t recv_uuid)
 1738 {
 1739     int32_t ret = -1;
 1740     gd1_mgmt_v3_post_val_req req = {
 1741         {0},
 1742     };
 1743     xlator_t *this = NULL;
 1744     uuid_t *peerid = NULL;
 1745 
 1746     this = THIS;
 1747     GF_ASSERT(this);
 1748     GF_ASSERT(op_ctx);
 1749     GF_ASSERT(peerinfo);
 1750     GF_ASSERT(args);
 1751 
 1752     ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val,
 1753                                       &req.dict.dict_len);
 1754     if (ret)
 1755         goto out;
 1756 
 1757     gf_uuid_copy(req.uuid, my_uuid);
 1758     req.op = op;
 1759     req.op_ret = op_ret;
 1760 
 1761     GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret);
 1762     if (ret)
 1763         goto out;
 1764 
 1765     ret = gd_syncop_submit_request(
 1766         peerinfo->rpc, &req, args, peerid, &gd_mgmt_v3_prog,
 1767         GLUSTERD_MGMT_V3_POST_VALIDATE, gd_mgmt_v3_post_validate_cbk,
 1768         (xdrproc_t)xdr_gd1_mgmt_v3_post_val_req);
 1769 out:
 1770     GF_FREE(req.dict.dict_val);
 1771     gf_msg_trace(this->name, 0, "Returning %d", ret);
 1772     return ret;
 1773 }
 1774 
 1775 int
 1776 glusterd_mgmt_v3_post_validate(glusterd_op_t op, int32_t op_ret, dict_t *dict,
 1777                                dict_t *req_dict, char **op_errstr,
 1778                                uint32_t txn_generation)
 1779 {
 1780     int32_t ret = -1;
 1781     int32_t peer_cnt = 0;
 1782     dict_t *rsp_dict = NULL;
 1783     glusterd_peerinfo_t *peerinfo = NULL;
 1784     struct syncargs args = {0};
 1785     uuid_t peer_uuid = {0};
 1786     xlator_t *this = NULL;
 1787     glusterd_conf_t *conf = NULL;
 1788 
 1789     this = THIS;
 1790     GF_ASSERT(this);
 1791     conf = this->private;
 1792     GF_ASSERT(conf);
 1793 
 1794     GF_ASSERT(dict);
 1795     GF_VALIDATE_OR_GOTO(this->name, req_dict, out);
 1796     GF_ASSERT(op_errstr);
 1797 
 1798     rsp_dict = dict_new();
 1799     if (!rsp_dict) {
 1800         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
 1801                "Failed to create response dictionary");
 1802         goto out;
 1803     }
 1804 
 1805     /* Post Validation on local node */
 1806     ret = gd_mgmt_v3_post_validate_fn(op, op_ret, req_dict, op_errstr,
 1807                                       rsp_dict);
 1808 
 1809     if (ret) {
 1810         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_VALIDATION_FAIL,
 1811                "Post Validation failed for "
 1812                "operation %s on local node",
 1813                gd_op_list[op]);
 1814 
 1815         if (*op_errstr == NULL) {
 1816             ret = gf_asprintf(op_errstr,
 1817                               "Post-validation failed "
 1818                               "on localhost. Please check "
 1819                               "log file for details");
 1820             if (ret == -1)
 1821                 *op_errstr = NULL;
 1822 
 1823             ret = -1;
 1824         }
 1825         goto out;
 1826     }
 1827 
 1828     dict_unref(rsp_dict);
 1829     rsp_dict = NULL;
 1830 
 1831     /* Sending Post Validation req to other nodes in the cluster */
 1832     gd_syncargs_init(&args, req_dict);
 1833     ret = synctask_barrier_init((&args));
 1834     if (ret)
 1835         goto out;
 1836 
 1837     peer_cnt = 0;
 1838 
 1839     RCU_READ_LOCK;
 1840     cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
 1841     {
 1842         /* Only send requests to peers who were available before the
 1843          * transaction started
 1844          */
 1845         if (peerinfo->generation > txn_generation)
 1846             continue;
 1847 
 1848         if (!peerinfo->connected)
 1849             continue;
 1850         if (op != GD_OP_SYNC_VOLUME &&
 1851             peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
 1852             continue;
 1853 
 1854         gd_mgmt_v3_post_validate_req(op, op_ret, req_dict, peerinfo, &args,
 1855                                      MY_UUID, peer_uuid);
 1856         peer_cnt++;
 1857     }
 1858     RCU_READ_UNLOCK;
 1859 
 1860     if (0 == peer_cnt) {
 1861         ret = 0;
 1862         goto out;
 1863     }
 1864 
 1865     gd_synctask_barrier_wait((&args), peer_cnt);
 1866 
 1867     if (args.op_ret) {
 1868         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_VALIDATION_FAIL,
 1869                "Post Validation failed on peers");
 1870 
 1871         if (args.errstr)
 1872             *op_errstr = gf_strdup(args.errstr);
 1873     }
 1874 
 1875     ret = args.op_ret;
 1876 
 1877     gf_msg_debug(this->name, 0,
 1878                  "Sent post valaidation req for %s "
 1879                  "to %d peers. Returning %d",
 1880                  gd_op_list[op], peer_cnt, ret);
 1881 out:
 1882     return ret;
 1883 }
 1884 
/* RPC callback for a GLUSTERD_MGMT_V3_UNLOCK request sent to a peer.
 * Decodes the peer's unlock response, folds the result into the shared
 * syncargs accumulator and wakes the synctask barrier so the sender can
 * make progress. Always returns 0 (RPC callback convention). */
int32_t
gd_mgmt_v3_unlock_cbk_fn(struct rpc_req *req, struct iovec *iov, int count,
                         void *myframe)
{
    int32_t ret = -1;
    struct syncargs *args = NULL;
    gd1_mgmt_v3_unlock_rsp rsp = {
        {0},
    };
    call_frame_t *frame = NULL;
    int32_t op_ret = -1;
    int32_t op_errno = -1;
    xlator_t *this = NULL;
    uuid_t *peerid = NULL;

    this = THIS;
    GF_ASSERT(this);
    GF_ASSERT(req);
    GF_ASSERT(myframe);

    /* Take ownership of the context stashed on the frame by the sender:
     * args is the shared collation struct, peerid identifies the peer. */
    frame = myframe;
    args = frame->local;
    peerid = frame->cookie;
    frame->local = NULL;
    frame->cookie = NULL;

    /* Transport-level failure: no response payload to decode. */
    if (-1 == req->rpc_status) {
        op_errno = ENOTCONN;
        goto out;
    }

    GF_VALIDATE_OR_GOTO_WITH_ERROR(this->name, iov, out, op_errno, EINVAL);

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
    if (ret < 0)
        goto out;

    gf_uuid_copy(args->uuid, rsp.uuid);

    op_ret = rsp.op_ret;
    op_errno = rsp.op_errno;

out:
    /* Record this peer's outcome (rsp is zero-initialized, so rsp.uuid
     * is all-zero when decoding never happened). */
    gd_mgmt_v3_collate_errors(args, op_ret, op_errno, NULL,
                              GLUSTERD_MGMT_V3_UNLOCK, *peerid, rsp.uuid);
    if (rsp.dict.dict_val)
        free(rsp.dict.dict_val);
    GF_FREE(peerid);
    /* req->rpc_status set to -1 means, STACK_DESTROY will be called from
     * the caller function.
     */
    if (req->rpc_status != -1)
        STACK_DESTROY(frame->root);
    synctask_barrier_wake(args);
    return 0;
}
 1941 
 1942 int32_t
 1943 gd_mgmt_v3_unlock_cbk(struct rpc_req *req, struct iovec *iov, int count,
 1944                       void *myframe)
 1945 {
 1946     return glusterd_big_locked_cbk(req, iov, count, myframe,
 1947                                    gd_mgmt_v3_unlock_cbk_fn);
 1948 }
 1949 
 1950 int
 1951 gd_mgmt_v3_unlock(glusterd_op_t op, dict_t *op_ctx,
 1952                   glusterd_peerinfo_t *peerinfo, struct syncargs *args,
 1953                   uuid_t my_uuid, uuid_t recv_uuid)
 1954 {
 1955     int32_t ret = -1;
 1956     gd1_mgmt_v3_unlock_req req = {
 1957         {0},
 1958     };
 1959     xlator_t *this = NULL;
 1960     uuid_t *peerid = NULL;
 1961 
 1962     this = THIS;
 1963     GF_ASSERT(this);
 1964     GF_ASSERT(op_ctx);
 1965     GF_ASSERT(peerinfo);
 1966     GF_ASSERT(args);
 1967 
 1968     ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val,
 1969                                       &req.dict.dict_len);
 1970     if (ret)
 1971         goto out;
 1972 
 1973     gf_uuid_copy(req.uuid, my_uuid);
 1974     req.op = op;
 1975 
 1976     GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret);
 1977     if (ret)
 1978         goto out;
 1979 
 1980     ret = gd_syncop_submit_request(peerinfo->rpc, &req, args, peerid,
 1981                                    &gd_mgmt_v3_prog, GLUSTERD_MGMT_V3_UNLOCK,
 1982                                    gd_mgmt_v3_unlock_cbk,
 1983                                    (xdrproc_t)xdr_gd1_mgmt_v3_unlock_req);
 1984 out:
 1985     GF_FREE(req.dict.dict_val);
 1986     gf_msg_trace(this->name, 0, "Returning %d", ret);
 1987     return ret;
 1988 }
 1989 
/* Release mgmt_v3 locks held on the other cluster nodes for this
 * transaction. No-op when the lockdown phase never acquired the locks
 * (is_acquired == false). Peers that joined after txn_generation, are
 * disconnected, or are not befriended are skipped, mirroring the filter
 * used when the locks were taken. On peer failure, *op_errstr is filled
 * only if the overall operation had not already failed (op_ret == 0),
 * so the original error is not overwritten. */
int
glusterd_mgmt_v3_release_peer_locks(glusterd_op_t op, dict_t *dict,
                                    int32_t op_ret, char **op_errstr,
                                    gf_boolean_t is_acquired,
                                    uint32_t txn_generation)
{
    int32_t ret = -1;
    int32_t peer_cnt = 0;
    uuid_t peer_uuid = {0};
    xlator_t *this = NULL;
    glusterd_peerinfo_t *peerinfo = NULL;
    struct syncargs args = {0};
    glusterd_conf_t *conf = NULL;

    this = THIS;
    GF_ASSERT(this);
    conf = this->private;
    GF_ASSERT(conf);

    GF_ASSERT(dict);
    GF_ASSERT(op_errstr);

    /* If the lock has not been held during this
     * transaction, do not send unlock requests */
    if (!is_acquired)
        goto out;

    /* Sending mgmt_v3 unlock req to other nodes in the cluster */
    gd_syncargs_init(&args, NULL);
    ret = synctask_barrier_init((&args));
    if (ret)
        goto out;
    peer_cnt = 0;
    RCU_READ_LOCK;
    cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
    {
        /* Only send requests to peers who were available before the
         * transaction started
         */
        if (peerinfo->generation > txn_generation)
            continue;

        if (!peerinfo->connected)
            continue;
        if (op != GD_OP_SYNC_VOLUME &&
            peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
            continue;

        gd_mgmt_v3_unlock(op, dict, peerinfo, &args, MY_UUID, peer_uuid);
        peer_cnt++;
    }
    RCU_READ_UNLOCK;

    /* Nothing dispatched: nothing to wait for. */
    if (0 == peer_cnt) {
        ret = 0;
        goto out;
    }

    /* Block until every dispatched unlock callback has fired. */
    gd_synctask_barrier_wait((&args), peer_cnt);

    if (args.op_ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_UNLOCK_FAIL,
               "Unlock failed on peers");

        /* Preserve any earlier failure's errstr; only report unlock
         * errors when the transaction itself had succeeded. */
        if (!op_ret && args.errstr)
            *op_errstr = gf_strdup(args.errstr);
    }

    ret = args.op_ret;

    gf_msg_debug(this->name, 0,
                 "Sent unlock op req for %s "
                 "to %d peers. Returning %d",
                 gd_op_list[op], peer_cnt, ret);

out:
    return ret;
}
 2068 
 2069 int32_t
 2070 glusterd_mgmt_v3_initiate_all_phases_with_brickop_phase(rpcsvc_request_t *req,
 2071                                                         glusterd_op_t op,
 2072                                                         dict_t *dict)
 2073 {
 2074     int32_t ret = -1;
 2075     int32_t op_ret = -1;
 2076     dict_t *req_dict = NULL;
 2077     dict_t *tmp_dict = NULL;
 2078     glusterd_conf_t *conf = NULL;
 2079     char *op_errstr = NULL;
 2080     xlator_t *this = NULL;
 2081     gf_boolean_t is_acquired = _gf_false;
 2082     uuid_t *originator_uuid = NULL;
 2083     uint32_t txn_generation = 0;
 2084     uint32_t op_errno = 0;
 2085 
 2086     this = THIS;
 2087     GF_ASSERT(this);
 2088     GF_ASSERT(req);
 2089     GF_ASSERT(dict);
 2090     conf = this->private;
 2091     GF_ASSERT(conf);
 2092 
 2093     /* Save the peer list generation */
 2094     txn_generation = conf->generation;
 2095     cmm_smp_rmb();
 2096     /* This read memory barrier makes sure that this assignment happens here
 2097      * only and is not reordered and optimized by either the compiler or the
 2098      * processor.
 2099      */
 2100 
 2101     /* Save the MY_UUID as the originator_uuid. This originator_uuid
 2102      * will be used by is_origin_glusterd() to determine if a node
 2103      * is the originator node for a command. */
 2104     originator_uuid = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
 2105     if (!originator_uuid) {
 2106         ret = -1;
 2107         goto out;
 2108     }
 2109 
 2110     gf_uuid_copy(*originator_uuid, MY_UUID);
 2111     ret = dict_set_bin(dict, "originator_uuid", originator_uuid,
 2112                        sizeof(uuid_t));
 2113     if (ret) {
 2114         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
 2115                "Failed to set originator_uuid.");
 2116         GF_FREE(originator_uuid);
 2117         goto out;
 2118     }
 2119 
 2120     /* Marking the operation as complete synctasked */
 2121     ret = dict_set_int32(dict, "is_synctasked", _gf_true);
 2122     if (ret) {
 2123         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
 2124                "Failed to set synctasked flag.");
 2125         goto out;
 2126     }
 2127 
 2128     /* Use a copy at local unlock as cli response will be sent before
 2129      * the unlock and the volname in the dict might be removed */
 2130     tmp_dict = dict_new();
 2131     if (!tmp_dict) {
 2132         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
 2133                "Unable to create dict");
 2134         goto out;
 2135     }
 2136     dict_copy(dict, tmp_dict);
 2137 
 2138     /* LOCKDOWN PHASE - Acquire mgmt_v3 locks */
 2139     ret = glusterd_mgmt_v3_initiate_lockdown(op, dict, &op_errstr, &op_errno,
 2140                                              &is_acquired, txn_generation);
 2141     if (ret) {
 2142         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCKDOWN_FAIL,
 2143                "mgmt_v3 lockdown failed.");
 2144         goto out;
 2145     }
 2146 
 2147     /* BUILD PAYLOAD */
 2148     ret = glusterd_mgmt_v3_build_payload(&req_dict, &op_errstr, dict, op);
 2149     if (ret) {
 2150         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_PAYLOAD_BUILD_FAIL,
 2151                LOGSTR_BUILD_PAYLOAD, gd_op_list[op]);
 2152         if (op_errstr == NULL)
 2153             gf_asprintf(&op_errstr, OPERRSTR_BUILD_PAYLOAD);
 2154         goto out;
 2155     }
 2156 
 2157     /* PRE-COMMIT VALIDATE PHASE */
 2158     ret = glusterd_mgmt_v3_pre_validate(op, req_dict, &op_errstr, &op_errno,
 2159                                         txn_generation);
 2160     if (ret) {
 2161         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PRE_VALIDATION_FAIL,
 2162                "Pre Validation Failed");
 2163         goto out;
 2164     }
 2165 
 2166     /* BRICK-OPS */
 2167     ret = glusterd_mgmt_v3_brick_op(op, dict, req_dict, &op_errstr,
 2168                                     txn_generation);
 2169     if (ret) {
 2170         gf_log(this->name, GF_LOG_ERROR, "Brick Op Failed");
 2171         goto out;
 2172     }
 2173 
 2174     /* COMMIT OP PHASE */
 2175     ret = glusterd_mgmt_v3_commit(op, dict, req_dict, &op_errstr, &op_errno,
 2176                                   txn_generation);
 2177     if (ret) {
 2178         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
 2179                "Commit Op Failed");
 2180         goto out;
 2181     }
 2182 
 2183     /* POST-COMMIT VALIDATE PHASE */
 2184     /* As of now, post_validate is not trying to cleanup any failed
 2185        commands. So as of now, I am sending 0 (op_ret as 0).
 2186     */
 2187     ret = glusterd_mgmt_v3_post_validate(op, 0, dict, req_dict, &op_errstr,
 2188                                          txn_generation);
 2189     if (ret) {
 2190         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_VALIDATION_FAIL,
 2191                "Post Validation Failed");
 2192         goto out;
 2193     }
 2194 
 2195     ret = 0;
 2196 out:
 2197     op_ret = ret;
 2198     /* UNLOCK PHASE FOR PEERS*/
 2199     (void)glusterd_mgmt_v3_release_peer_locks(op, dict, op_ret, &op_errstr,
 2200                                               is_acquired, txn_generation);
 2201 
 2202     /* LOCAL VOLUME(S) UNLOCK */
 2203     if (is_acquired) {
 2204         /* Trying to release multiple mgmt_v3 locks */
 2205         ret = glusterd_multiple_mgmt_v3_unlock(tmp_dict, MY_UUID);
 2206         if (ret) {
 2207             gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_UNLOCK_FAIL,
 2208                    "Failed to release mgmt_v3 locks on localhost");
 2209             op_ret = ret;
 2210         }
 2211     }
 2212 
 2213     if (op_ret && (op_errno == 0))
 2214         op_errno = EG_INTRNL;
 2215 
 2216     if (op != GD_OP_MAX_OPVERSION) {
 2217         /* SEND CLI RESPONSE */
 2218         glusterd_op_send_cli_response(op, op_ret, op_errno, req, dict,
 2219                                       op_errstr);
 2220     }
 2221 
 2222     if (req_dict)
 2223         dict_unref(req_dict);
 2224 
 2225     if (tmp_dict)
 2226         dict_unref(tmp_dict);
 2227 
 2228     if (op_errstr) {
 2229         GF_FREE(op_errstr);
 2230         op_errstr = NULL;
 2231     }
 2232 
 2233     return 0;
 2234 }
 2235 
/* Drive a standard mgmt_v3 transaction (no brick-op phase):
 * lockdown -> build payload -> pre-validate -> commit -> post-validate,
 * then release peer locks, release local locks, and answer the CLI.
 * Always returns 0; the real outcome travels to the CLI through
 * glusterd_op_send_cli_response(op_ret, op_errno, op_errstr). */
int32_t
glusterd_mgmt_v3_initiate_all_phases(rpcsvc_request_t *req, glusterd_op_t op,
                                     dict_t *dict)
{
    int32_t ret = -1;
    int32_t op_ret = -1;
    dict_t *req_dict = NULL;
    dict_t *tmp_dict = NULL;
    glusterd_conf_t *conf = NULL;
    char *op_errstr = NULL;
    xlator_t *this = NULL;
    gf_boolean_t is_acquired = _gf_false;
    uuid_t *originator_uuid = NULL;
    uint32_t txn_generation = 0;
    uint32_t op_errno = 0;

    this = THIS;
    GF_ASSERT(this);
    GF_ASSERT(req);
    GF_ASSERT(dict);
    conf = this->private;
    GF_ASSERT(conf);

    /* Save the peer list generation */
    txn_generation = conf->generation;
    cmm_smp_rmb();
    /* This read memory barrier makes sure that this assignment happens here
     * only and is not reordered and optimized by either the compiler or the
     * processor.
     */

    /* Save the MY_UUID as the originator_uuid. This originator_uuid
     * will be used by is_origin_glusterd() to determine if a node
     * is the originator node for a command. */
    originator_uuid = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
    if (!originator_uuid) {
        ret = -1;
        goto out;
    }

    gf_uuid_copy(*originator_uuid, MY_UUID);
    ret = dict_set_bin(dict, "originator_uuid", originator_uuid,
                       sizeof(uuid_t));
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
               "Failed to set originator_uuid.");
        /* dict did not take ownership on failure; free here. */
        GF_FREE(originator_uuid);
        goto out;
    }

    /* Marking the operation as complete synctasked */
    ret = dict_set_int32(dict, "is_synctasked", _gf_true);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
               "Failed to set synctasked flag.");
        goto out;
    }

    /* Use a copy at local unlock as cli response will be sent before
     * the unlock and the volname in the dict might be removed */
    tmp_dict = dict_new();
    if (!tmp_dict) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
               "Unable to create dict");
        goto out;
    }
    dict_copy(dict, tmp_dict);

    /* LOCKDOWN PHASE - Acquire mgmt_v3 locks */
    ret = glusterd_mgmt_v3_initiate_lockdown(op, dict, &op_errstr, &op_errno,
                                             &is_acquired, txn_generation);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCKDOWN_FAIL,
               "mgmt_v3 lockdown failed.");
        goto out;
    }

    /* BUILD PAYLOAD */
    ret = glusterd_mgmt_v3_build_payload(&req_dict, &op_errstr, dict, op);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_PAYLOAD_BUILD_FAIL,
               LOGSTR_BUILD_PAYLOAD, gd_op_list[op]);
        if (op_errstr == NULL)
            gf_asprintf(&op_errstr, OPERRSTR_BUILD_PAYLOAD);
        goto out;
    }

    /* PRE-COMMIT VALIDATE PHASE */
    ret = glusterd_mgmt_v3_pre_validate(op, req_dict, &op_errstr, &op_errno,
                                        txn_generation);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PRE_VALIDATION_FAIL,
               "Pre Validation Failed");
        goto out;
    }

    /* COMMIT OP PHASE */
    ret = glusterd_mgmt_v3_commit(op, dict, req_dict, &op_errstr, &op_errno,
                                  txn_generation);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
               "Commit Op Failed");
        goto out;
    }

    /* POST-COMMIT VALIDATE PHASE */
    /* As of now, post_validate is not trying to cleanup any failed
       commands. So as of now, I am sending 0 (op_ret as 0).
    */
    ret = glusterd_mgmt_v3_post_validate(op, 0, dict, req_dict, &op_errstr,
                                         txn_generation);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_VALIDATION_FAIL,
               "Post Validation Failed");
        goto out;
    }

    ret = 0;
out:
    op_ret = ret;
    /* UNLOCK PHASE FOR PEERS*/
    (void)glusterd_mgmt_v3_release_peer_locks(op, dict, op_ret, &op_errstr,
                                              is_acquired, txn_generation);

    /* LOCAL VOLUME(S) UNLOCK */
    if (is_acquired) {
        /* Trying to release multiple mgmt_v3 locks */
        ret = glusterd_multiple_mgmt_v3_unlock(tmp_dict, MY_UUID);
        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_UNLOCK_FAIL,
                   "Failed to release mgmt_v3 locks on localhost");
            op_ret = ret;
        }
    }

    if (op_ret && (op_errno == 0))
        op_errno = EG_INTRNL;

    if (op != GD_OP_MAX_OPVERSION) {
        /* SEND CLI RESPONSE */
        glusterd_op_send_cli_response(op, op_ret, op_errno, req, dict,
                                      op_errstr);
    }

    if (req_dict)
        dict_unref(req_dict);

    if (tmp_dict)
        dict_unref(tmp_dict);

    if (op_errstr) {
        GF_FREE(op_errstr);
        op_errstr = NULL;
    }

    return 0;
}
 2393 
 2394 int32_t
 2395 glusterd_set_barrier_value(dict_t *dict, char *option)
 2396 {
 2397     int32_t ret = -1;
 2398     xlator_t *this = NULL;
 2399     glusterd_volinfo_t *vol = NULL;
 2400     char *volname = NULL;
 2401 
 2402     this = THIS;
 2403     GF_ASSERT(this);
 2404 
 2405     GF_ASSERT(dict);
 2406     GF_ASSERT(option);
 2407 
 2408     /* TODO : Change this when we support multiple volume.
 2409      * As of now only snapshot of single volume is supported,
 2410      * Hence volname1 is directly fetched
 2411      */
 2412     ret = dict_get_strn(dict, "volname1", SLEN("volname1"), &volname);
 2413     if (ret) {
 2414         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
 2415                "Volname not present in "
 2416                "dict");
 2417         goto out;
 2418     }
 2419 
 2420     ret = glusterd_volinfo_find(volname, &vol);
 2421     if (ret) {
 2422         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
 2423                "Volume %s not found ", volname);
 2424         goto out;
 2425     }
 2426 
 2427     ret = dict_set_dynstr_with_alloc(dict, "barrier", option);
 2428     if (ret) {
 2429         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
 2430                "Failed to set barrier op "
 2431                "in request dictionary");
 2432         goto out;
 2433     }
 2434 
 2435     ret = dict_set_dynstr_with_alloc(vol->dict, "features.barrier", option);
 2436     if (ret) {
 2437         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
 2438                "Failed to set barrier op "
 2439                "in volume option dict");
 2440         goto out;
 2441     }
 2442 
 2443     gd_update_volume_op_versions(vol);
 2444 
 2445     ret = glusterd_create_volfiles(vol);
 2446     if (ret) {
 2447         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
 2448                "Failed to create volfiles");
 2449         goto out;
 2450     }
 2451 
 2452     ret = glusterd_store_volinfo(vol, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
 2453 
 2454 out:
 2455     gf_msg_debug(this->name, 0, "Returning %d", ret);
 2456     return ret;
 2457 }
 2458 
/* Drive the snapshot-specific mgmt_v3 transaction. Differs from the
 * generic flow by wrapping the commit in barrier brick-ops:
 * lockdown -> payload -> pre-validate -> volume quorum check ->
 * brick op (operation-type=pre, barrier on) -> commit ->
 * brick op (operation-type=post, unbarrier) -> snap-volume quorum
 * check -> post-validate -> unlock -> CLI response.
 * Always returns 0; the outcome reaches the CLI through
 * glusterd_op_send_cli_response(). */
int32_t
glusterd_mgmt_v3_initiate_snap_phases(rpcsvc_request_t *req, glusterd_op_t op,
                                      dict_t *dict)
{
    int32_t ret = -1;
    int32_t op_ret = -1;
    dict_t *req_dict = NULL;
    dict_t *tmp_dict = NULL;
    glusterd_conf_t *conf = NULL;
    char *op_errstr = NULL;
    xlator_t *this = NULL;
    gf_boolean_t is_acquired = _gf_false;
    uuid_t *originator_uuid = NULL;
    /* success tracks whether the commit phase completed; cli_errstr
     * preserves the commit error across the unbarrier/unlock steps. */
    gf_boolean_t success = _gf_false;
    char *cli_errstr = NULL;
    uint32_t txn_generation = 0;
    uint32_t op_errno = 0;

    this = THIS;
    GF_ASSERT(this);
    GF_ASSERT(req);
    GF_ASSERT(dict);
    conf = this->private;
    GF_ASSERT(conf);

    /* Save the peer list generation */
    txn_generation = conf->generation;
    cmm_smp_rmb();
    /* This read memory barrier makes sure that this assignment happens here
     * only and is not reordered and optimized by either the compiler or the
     * processor.
     */

    /* Save the MY_UUID as the originator_uuid. This originator_uuid
     * will be used by is_origin_glusterd() to determine if a node
     * is the originator node for a command. */
    originator_uuid = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
    if (!originator_uuid) {
        ret = -1;
        goto out;
    }

    gf_uuid_copy(*originator_uuid, MY_UUID);
    ret = dict_set_bin(dict, "originator_uuid", originator_uuid,
                       sizeof(uuid_t));
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
               "Failed to set originator_uuid.");
        /* dict did not take ownership on failure; free here. */
        GF_FREE(originator_uuid);
        goto out;
    }

    /* Marking the operation as complete synctasked */
    ret = dict_set_int32n(dict, "is_synctasked", SLEN("is_synctasked"),
                          _gf_true);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
               "Failed to set synctasked flag.");
        goto out;
    }

    /* Use a copy at local unlock as cli response will be sent before
     * the unlock and the volname in the dict might be removed */
    tmp_dict = dict_new();
    if (!tmp_dict) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
               "Unable to create dict");
        goto out;
    }
    dict_copy(dict, tmp_dict);

    /* LOCKDOWN PHASE - Acquire mgmt_v3 locks */
    ret = glusterd_mgmt_v3_initiate_lockdown(op, dict, &op_errstr, &op_errno,
                                             &is_acquired, txn_generation);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCKDOWN_FAIL,
               "mgmt_v3 lockdown failed.");
        goto out;
    }

    /* BUILD PAYLOAD */
    ret = glusterd_mgmt_v3_build_payload(&req_dict, &op_errstr, dict, op);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_PAYLOAD_BUILD_FAIL,
               LOGSTR_BUILD_PAYLOAD, gd_op_list[op]);
        if (op_errstr == NULL)
            gf_asprintf(&op_errstr, OPERRSTR_BUILD_PAYLOAD);
        goto out;
    }

    /* PRE-COMMIT VALIDATE PHASE */
    ret = glusterd_mgmt_v3_pre_validate(op, req_dict, &op_errstr, &op_errno,
                                        txn_generation);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PRE_VALIDATION_FAIL,
               "Pre Validation Failed");
        goto out;
    }

    /* quorum check of the volume is done here */
    ret = glusterd_snap_quorum_check(req_dict, _gf_false, &op_errstr,
                                     &op_errno);
    if (ret) {
        gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_QUORUM_CHECK_FAIL,
               "Volume quorum check failed");
        goto out;
    }

    /* Set the operation type as pre, so that differentiation can be
     * made whether the brickop is sent during pre-commit or post-commit
     */
    ret = dict_set_dynstr_with_alloc(req_dict, "operation-type", "pre");
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
               "Failed to set "
               "operation-type in dictionary");
        goto out;
    }

    /* Pre-commit brick op: barrier the bricks before taking the snap.
     * On failure, fall through to 'unbarrier' so bricks are released. */
    ret = glusterd_mgmt_v3_brick_op(op, dict, req_dict, &op_errstr,
                                    txn_generation);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_OP_FAIL,
               "Brick Ops Failed");
        goto unbarrier;
    }

    /* COMMIT OP PHASE */
    /* TODO: As of now, the plan is to do quorum check before sending the
       commit fop and if the quorum succeeds, then commit is sent to all
       the other glusterds.
       snap create functionality now creates the in memory and on disk
       objects for the snapshot (marking them as incomplete), takes the lvm
       snapshot and then updates the status of the in memory and on disk
       snap objects as complete. Suppose one of the glusterds goes down
       after taking the lvm snapshot, but before updating the snap object,
       then treat it as a snapshot create failure and trigger cleanup.
       i.e the number of commit responses received by the originator
       glusterd shold be the same as the number of peers it has sent the
       request to (i.e npeers variable). If not, then originator glusterd
       will initiate cleanup in post-validate fop.
       Question: What if one of the other glusterds goes down as explained
       above and along with it the originator glusterd also goes down?
       Who will initiate the cleanup?
    */
    ret = dict_set_int32n(req_dict, "cleanup", SLEN("cleanup"), 1);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
               "failed to set dict");
        goto unbarrier;
    }

    ret = glusterd_mgmt_v3_commit(op, dict, req_dict, &op_errstr, &op_errno,
                                  txn_generation);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
               "Commit Op Failed");
        /* If the main op fails, we should save the error string.
           Because, op_errstr will be used for unbarrier and
           unlock ops also. We might lose the actual error that
           caused the failure.
        */
        cli_errstr = op_errstr;
        op_errstr = NULL;
        goto unbarrier;
    }

    success = _gf_true;
unbarrier:
    /* Set the operation type as post, so that differentiation can be
     * made whether the brickop is sent during pre-commit or post-commit
     */
    ret = dict_set_dynstr_with_alloc(req_dict, "operation-type", "post");
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
               "Failed to set "
               "operation-type in dictionary");
        goto out;
    }

    /* Post-commit brick op: release the barrier regardless of whether
     * the commit itself succeeded. */
    ret = glusterd_mgmt_v3_brick_op(op, dict, req_dict, &op_errstr,
                                    txn_generation);

    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_OP_FAIL,
               "Brick Ops Failed");
        goto out;
    }

    /*Do a quorum check if the commit phase is successful*/
    if (success) {
        // quorum check of the snapshot volume
        ret = glusterd_snap_quorum_check(dict, _gf_true, &op_errstr, &op_errno);
        if (ret) {
            gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_QUORUM_CHECK_FAIL,
                   "Snapshot Volume quorum check failed");
            goto out;
        }
    }

    ret = 0;

out:
    op_ret = ret;

    /* Even if unbarrier succeeded, a failed commit means the whole
     * operation failed. */
    if (success == _gf_false)
        op_ret = -1;

    /* POST-COMMIT VALIDATE PHASE */
    ret = glusterd_mgmt_v3_post_validate(op, op_ret, dict, req_dict, &op_errstr,
                                         txn_generation);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_VALIDATION_FAIL,
               "Post Validation Failed");
        op_ret = -1;
    }

    /* UNLOCK PHASE FOR PEERS*/
    (void)glusterd_mgmt_v3_release_peer_locks(op, dict, op_ret, &op_errstr,
                                              is_acquired, txn_generation);

    /* If the commit op (snapshot taking) failed, then the error is stored
       in cli_errstr and unbarrier is called. Suppose, if unbarrier also
       fails, then the error happened in unbarrier is logged and freed.
       The error happened in commit op, which is stored in cli_errstr
       is sent to cli.
    */
    if (cli_errstr) {
        GF_FREE(op_errstr);
        op_errstr = NULL;
        op_errstr = cli_errstr;
    }

    /* LOCAL VOLUME(S) UNLOCK */
    if (is_acquired) {
        /* Trying to release multiple mgmt_v3 locks */
        ret = glusterd_multiple_mgmt_v3_unlock(tmp_dict, MY_UUID);
        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_UNLOCK_FAIL,
                   "Failed to release mgmt_v3 locks on localhost");
            op_ret = ret;
        }
    }

    if (op_ret && (op_errno == 0))
        op_errno = EG_INTRNL;

    /* SEND CLI RESPONSE */
    glusterd_op_send_cli_response(op, op_ret, op_errno, req, dict, op_errstr);

    if (req_dict)
        dict_unref(req_dict);

    if (tmp_dict)
        dict_unref(tmp_dict);

    if (op_errstr) {
        GF_FREE(op_errstr);
        op_errstr = NULL;
    }

    return 0;
}