"Fossies" - the Fresh Open Source Software Archive

Member "glusterfs-8.2/xlators/mgmt/glusterd/src/glusterd-brick-ops.c" (16 Sep 2020, 85467 Bytes) of package /linux/misc/glusterfs-8.2.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and a code folding option. Alternatively, you can view or download the uninterpreted source code file here. For more information about "glusterd-brick-ops.c" see the Fossies "Dox" file reference documentation.

    1 /*
    2    Copyright (c) 2011-2012 Red Hat, Inc. <http://www.redhat.com>
    3    This file is part of GlusterFS.
    4 
    5    This file is licensed to you under your choice of the GNU Lesser
    6    General Public License, version 3 or any later version (LGPLv3 or
    7    later), or the GNU General Public License, version 2 (GPLv2), in all
    8    cases as published by the Free Software Foundation.
    9 */
   10 #include <glusterfs/common-utils.h>
   11 #include "cli1-xdr.h"
   12 #include "xdr-generic.h"
   13 #include "glusterd.h"
   14 #include "glusterd-op-sm.h"
   15 #include "glusterd-geo-rep.h"
   16 #include "glusterd-store.h"
   17 #include "glusterd-mgmt.h"
   18 #include "glusterd-utils.h"
   19 #include "glusterd-volgen.h"
   20 #include "glusterd-svc-helper.h"
   21 #include "glusterd-messages.h"
   22 #include "glusterd-server-quorum.h"
   23 #include <glusterfs/run.h>
   24 #include "glusterd-volgen.h"
   25 #include <glusterfs/syscall.h>
   26 #include <sys/signal.h>
   27 
   28 /* misc */
   29 
   30 /* In this function, we decide, based on the 'count' of the brick,
   31    where to add it in the current volume. 'count' tells us already
   32    how many of the given bricks are added. other argument are self-
   33    descriptive. */
   34 int
   35 add_brick_at_right_order(glusterd_brickinfo_t *brickinfo,
   36                          glusterd_volinfo_t *volinfo, int count,
   37                          int32_t stripe_cnt, int32_t replica_cnt)
   38 {
   39     int idx = 0;
   40     int i = 0;
   41     int sub_cnt = 0;
   42     glusterd_brickinfo_t *brick = NULL;
   43 
   44     /* The complexity of the function is in deciding at which index
   45        to add new brick. Even though it can be defined with a complex
   46        single formula for all volume, it is separated out to make it
   47        more readable */
   48     if (stripe_cnt) {
   49         /* common formula when 'stripe_count' is set */
   50         /* idx = ((count / ((stripe_cnt * volinfo->replica_count) -
   51            volinfo->dist_leaf_count)) * volinfo->dist_leaf_count) +
   52            (count + volinfo->dist_leaf_count);
   53         */
   54 
   55         sub_cnt = volinfo->dist_leaf_count;
   56 
   57         idx = ((count / ((stripe_cnt * volinfo->replica_count) - sub_cnt)) *
   58                sub_cnt) +
   59               (count + sub_cnt);
   60 
   61         goto insert_brick;
   62     }
   63 
   64     /* replica count is set */
   65     /* common formula when 'replica_count' is set */
   66     /* idx = ((count / (replica_cnt - existing_replica_count)) *
   67        existing_replica_count) +
   68        (count + existing_replica_count);
   69     */
   70 
   71     sub_cnt = volinfo->replica_count;
   72     idx = (count / (replica_cnt - sub_cnt) * sub_cnt) + (count + sub_cnt);
   73 
   74 insert_brick:
   75     i = 0;
   76     cds_list_for_each_entry(brick, &volinfo->bricks, brick_list)
   77     {
   78         i++;
   79         if (i < idx)
   80             continue;
   81         gf_msg_debug(THIS->name, 0, "brick:%s index=%d, count=%d", brick->path,
   82                      idx, count);
   83 
   84         cds_list_add(&brickinfo->brick_list, &brick->brick_list);
   85         break;
   86     }
   87 
   88     return 0;
   89 }
   90 
   91 static int
   92 gd_addbr_validate_replica_count(glusterd_volinfo_t *volinfo, int replica_count,
   93                                 int arbiter_count, int total_bricks, int *type,
   94                                 char *err_str, int err_len)
   95 {
   96     int ret = -1;
   97 
   98     /* replica count is set */
   99     switch (volinfo->type) {
  100         case GF_CLUSTER_TYPE_NONE:
  101             if ((volinfo->brick_count * replica_count) == total_bricks) {
  102                 /* Change the volume type */
  103                 *type = GF_CLUSTER_TYPE_REPLICATE;
  104                 gf_msg(THIS->name, GF_LOG_INFO, 0,
  105                        GD_MSG_VOL_TYPE_CHANGING_INFO,
  106                        "Changing the type of volume %s from "
  107                        "'distribute' to 'replica'",
  108                        volinfo->volname);
  109                 ret = 0;
  110                 goto out;
  111 
  112             } else {
  113                 snprintf(err_str, err_len,
  114                          "Incorrect number of "
  115                          "bricks (%d) supplied for replica count (%d).",
  116                          (total_bricks - volinfo->brick_count), replica_count);
  117                 gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
  118                        "%s", err_str);
  119                 goto out;
  120             }
  121             break;
  122         case GF_CLUSTER_TYPE_REPLICATE:
  123             if (replica_count < volinfo->replica_count) {
  124                 snprintf(err_str, err_len,
  125                          "Incorrect replica count (%d) supplied. "
  126                          "Volume already has (%d)",
  127                          replica_count, volinfo->replica_count);
  128                 gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
  129                        "%s", err_str);
  130                 goto out;
  131             }
  132             if (replica_count == volinfo->replica_count) {
  133                 if (arbiter_count && !volinfo->arbiter_count) {
  134                     snprintf(err_str, err_len,
  135                              "Cannot convert replica 3 volume "
  136                              "to arbiter volume.");
  137                     gf_msg(THIS->name, GF_LOG_ERROR, EINVAL,
  138                            GD_MSG_INVALID_ENTRY, "%s", err_str);
  139                     goto out;
  140                 }
  141                 if (!(total_bricks % volinfo->dist_leaf_count)) {
  142                     ret = 1;
  143                     goto out;
  144                 }
  145             }
  146             if (replica_count > volinfo->replica_count) {
  147                 /* We have to make sure before and after 'add-brick',
  148                    the number or subvolumes for distribute will remain
  149                    same, when replica count is given */
  150                 if ((total_bricks * volinfo->dist_leaf_count) ==
  151                     (volinfo->brick_count *
  152                      (replica_count * volinfo->stripe_count))) {
  153                     /* Change the dist_leaf_count */
  154                     gf_msg(THIS->name, GF_LOG_INFO, 0,
  155                            GD_MSG_REPLICA_COUNT_CHANGE_INFO,
  156                            "Changing the replica count of "
  157                            "volume %s from %d to %d",
  158                            volinfo->volname, volinfo->replica_count,
  159                            replica_count);
  160                     ret = 0;
  161                     goto out;
  162                 }
  163             }
  164             break;
  165         case GF_CLUSTER_TYPE_DISPERSE:
  166             snprintf(err_str, err_len,
  167                      "Volume %s cannot be converted "
  168                      "from dispersed to replicated-"
  169                      "dispersed",
  170                      volinfo->volname);
  171             gf_msg(THIS->name, GF_LOG_ERROR, EPERM, GD_MSG_OP_NOT_PERMITTED,
  172                    "%s", err_str);
  173             goto out;
  174     }
  175 out:
  176     return ret;
  177 }
  178 
  179 static int
  180 gd_rmbr_validate_replica_count(glusterd_volinfo_t *volinfo,
  181                                int32_t replica_count, int32_t brick_count,
  182                                char *err_str, size_t err_len)
  183 {
  184     int ret = -1;
  185     int replica_nodes = 0;
  186 
  187     switch (volinfo->type) {
  188         case GF_CLUSTER_TYPE_NONE:
  189         case GF_CLUSTER_TYPE_DISPERSE:
  190             snprintf(err_str, err_len,
  191                      "replica count (%d) option given for non replicate "
  192                      "volume %s",
  193                      replica_count, volinfo->volname);
  194             gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_VOL_NOT_REPLICA, "%s",
  195                    err_str);
  196             goto out;
  197 
  198         case GF_CLUSTER_TYPE_REPLICATE:
  199             /* in remove brick, you can only reduce the replica count */
  200             if (replica_count > volinfo->replica_count) {
  201                 snprintf(err_str, err_len,
  202                          "given replica count (%d) option is more "
  203                          "than volume %s's replica count (%d)",
  204                          replica_count, volinfo->volname,
  205                          volinfo->replica_count);
  206                 gf_msg(THIS->name, GF_LOG_WARNING, EINVAL, GD_MSG_INVALID_ENTRY,
  207                        "%s", err_str);
  208                 goto out;
  209             }
  210             if (replica_count == volinfo->replica_count) {
  211                 /* This means the 'replica N' option on CLI was
  212                    redundant. Check if the total number of bricks given
  213                    for removal is same as 'dist_leaf_count' */
  214                 if (brick_count % volinfo->dist_leaf_count) {
  215                     snprintf(err_str, err_len,
  216                              "number of bricks provided (%d) is "
  217                              "not valid. need at least %d "
  218                              "(or %dxN)",
  219                              brick_count, volinfo->dist_leaf_count,
  220                              volinfo->dist_leaf_count);
  221                     gf_msg(THIS->name, GF_LOG_WARNING, EINVAL,
  222                            GD_MSG_INVALID_ENTRY, "%s", err_str);
  223                     goto out;
  224                 }
  225                 ret = 1;
  226                 goto out;
  227             }
  228 
  229             replica_nodes = ((volinfo->brick_count / volinfo->replica_count) *
  230                              (volinfo->replica_count - replica_count));
  231 
  232             if (brick_count % replica_nodes) {
  233                 snprintf(err_str, err_len,
  234                          "need %d(xN) bricks for reducing replica "
  235                          "count of the volume from %d to %d",
  236                          replica_nodes, volinfo->replica_count, replica_count);
  237                 goto out;
  238             }
  239             break;
  240     }
  241 
  242     ret = 0;
  243 out:
  244     return ret;
  245 }
  246 
  247 /* Handler functions */
  248 int
  249 __glusterd_handle_add_brick(rpcsvc_request_t *req)
  250 {
  251     int32_t ret = -1;
  252     gf_cli_req cli_req = {{
  253         0,
  254     }};
  255     dict_t *dict = NULL;
  256     char *bricks = NULL;
  257     char *volname = NULL;
  258     int brick_count = 0;
  259     void *cli_rsp = NULL;
  260     char err_str[2048] = "";
  261     gf_cli_rsp rsp = {
  262         0,
  263     };
  264     glusterd_volinfo_t *volinfo = NULL;
  265     xlator_t *this = NULL;
  266     int total_bricks = 0;
  267     int32_t replica_count = 0;
  268     int32_t arbiter_count = 0;
  269     int32_t stripe_count = 0;
  270     int type = 0;
  271     glusterd_conf_t *conf = NULL;
  272 
  273     this = THIS;
  274     GF_ASSERT(this);
  275 
  276     GF_ASSERT(req);
  277 
  278     conf = this->private;
  279     GF_ASSERT(conf);
  280 
  281     ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
  282     if (ret < 0) {
  283         // failed to decode msg;
  284         req->rpc_err = GARBAGE_ARGS;
  285         snprintf(err_str, sizeof(err_str), "Garbage args received");
  286         goto out;
  287     }
  288 
  289     gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_ADD_BRICK_REQ_RECVD,
  290            "Received add brick req");
  291 
  292     if (cli_req.dict.dict_len) {
  293         /* Unserialize the dictionary */
  294         dict = dict_new();
  295 
  296         ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
  297                                &dict);
  298         if (ret < 0) {
  299             gf_msg(this->name, GF_LOG_ERROR, errno,
  300                    GD_MSG_DICT_UNSERIALIZE_FAIL,
  301                    "failed to "
  302                    "unserialize req-buffer to dictionary");
  303             snprintf(err_str, sizeof(err_str),
  304                      "Unable to decode "
  305                      "the command");
  306             goto out;
  307         }
  308     }
  309 
  310     ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
  311 
  312     if (ret) {
  313         snprintf(err_str, sizeof(err_str),
  314                  "Unable to get volume "
  315                  "name");
  316         gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s",
  317                err_str);
  318         goto out;
  319     }
  320 
  321     ret = glusterd_volinfo_find(volname, &volinfo);
  322     if (ret) {
  323         snprintf(err_str, sizeof(err_str),
  324                  "Unable to get volinfo "
  325                  "for volume name %s",
  326                  volname);
  327         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL, "%s",
  328                err_str);
  329         goto out;
  330     }
  331 
  332     ret = dict_get_int32n(dict, "count", SLEN("count"), &brick_count);
  333     if (ret) {
  334         snprintf(err_str, sizeof(err_str),
  335                  "Unable to get volume "
  336                  "brick count");
  337         gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s",
  338                err_str);
  339         goto out;
  340     }
  341 
  342     ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"),
  343                           &replica_count);
  344     if (!ret) {
  345         gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_SUCCESS,
  346                "replica-count is %d", replica_count);
  347     }
  348 
  349     ret = dict_get_int32n(dict, "arbiter-count", SLEN("arbiter-count"),
  350                           &arbiter_count);
  351     if (!ret) {
  352         gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_SUCCESS,
  353                "arbiter-count is %d", arbiter_count);
  354     }
  355 
  356     ret = dict_get_int32n(dict, "stripe-count", SLEN("stripe-count"),
  357                           &stripe_count);
  358     if (!ret) {
  359         gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_SUCCESS,
  360                "stripe-count is %d", stripe_count);
  361     }
  362 
  363     if (!dict_getn(dict, "force", SLEN("force"))) {
  364         gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
  365                "Failed to get flag");
  366         goto out;
  367     }
  368 
  369     total_bricks = volinfo->brick_count + brick_count;
  370 
  371     if (!stripe_count && !replica_count) {
  372         if (volinfo->type == GF_CLUSTER_TYPE_NONE)
  373             goto brick_val;
  374 
  375         if ((volinfo->brick_count < volinfo->dist_leaf_count) &&
  376             (total_bricks <= volinfo->dist_leaf_count))
  377             goto brick_val;
  378 
  379         if ((brick_count % volinfo->dist_leaf_count) != 0) {
  380             snprintf(err_str, sizeof(err_str),
  381                      "Incorrect number "
  382                      "of bricks supplied %d with count %d",
  383                      brick_count, volinfo->dist_leaf_count);
  384             gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_REPLICA,
  385                    "%s", err_str);
  386             ret = -1;
  387             goto out;
  388         }
  389         goto brick_val;
  390         /* done with validation.. below section is if stripe|replica
  391            count is given */
  392     }
  393 
  394     ret = gd_addbr_validate_replica_count(volinfo, replica_count, arbiter_count,
  395                                           total_bricks, &type, err_str,
  396                                           sizeof(err_str));
  397     if (ret == -1) {
  398         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COUNT_VALIDATE_FAILED, "%s",
  399                err_str);
  400         goto out;
  401     }
  402 
  403     /* if replica count is same as earlier, set it back to 0 */
  404     if (ret == 1)
  405         replica_count = 0;
  406 
  407     ret = dict_set_int32n(dict, "replica-count", SLEN("replica-count"),
  408                           replica_count);
  409     if (ret) {
  410         gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
  411                "failed to set the replica-count in dict");
  412         goto out;
  413     }
  414 
  415 brick_val:
  416     ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &bricks);
  417     if (ret) {
  418         snprintf(err_str, sizeof(err_str),
  419                  "Unable to get volume "
  420                  "bricks");
  421         gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s",
  422                err_str);
  423         goto out;
  424     }
  425 
  426     if (type != volinfo->type) {
  427         ret = dict_set_int32n(dict, "type", SLEN("type"), type);
  428         if (ret) {
  429             gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
  430                    "failed to set the new type in dict");
  431             goto out;
  432         }
  433     }
  434 
  435     if (conf->op_version <= GD_OP_VERSION_3_7_5) {
  436         gf_msg_debug(this->name, 0,
  437                      "The cluster is operating at "
  438                      "version less than or equal to %d. Falling back "
  439                      "to syncop framework.",
  440                      GD_OP_VERSION_3_7_5);
  441         ret = glusterd_op_begin_synctask(req, GD_OP_ADD_BRICK, dict);
  442     } else {
  443         ret = glusterd_mgmt_v3_initiate_all_phases(req, GD_OP_ADD_BRICK, dict);
  444     }
  445 
  446 out:
  447     if (ret) {
  448         rsp.op_ret = -1;
  449         rsp.op_errno = 0;
  450         if (err_str[0] == '\0')
  451             snprintf(err_str, sizeof(err_str), "Operation failed");
  452         rsp.op_errstr = err_str;
  453         cli_rsp = &rsp;
  454         glusterd_to_cli(req, cli_rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp,
  455                         dict);
  456         ret = 0;  // sent error to cli, prevent second reply
  457     }
  458 
  459     free(cli_req.dict.dict_val);  // its malloced by xdr
  460 
  461     return ret;
  462 }
  463 
  464 int
  465 glusterd_handle_add_brick(rpcsvc_request_t *req)
  466 {
  467     return glusterd_big_locked_handler(req, __glusterd_handle_add_brick);
  468 }
  469 
  470 static int
  471 subvol_matcher_init(int **subvols, int count)
  472 {
  473     int ret = -1;
  474 
  475     *subvols = GF_CALLOC(count, sizeof(int), gf_gld_mt_int);
  476     if (*subvols)
  477         ret = 0;
  478 
  479     return ret;
  480 }
  481 
  482 static void
  483 subvol_matcher_update(int *subvols, glusterd_volinfo_t *volinfo,
  484                       glusterd_brickinfo_t *brickinfo)
  485 {
  486     glusterd_brickinfo_t *tmp = NULL;
  487     int32_t sub_volume = 0;
  488     int pos = 0;
  489     if (subvols) {
  490         cds_list_for_each_entry(tmp, &volinfo->bricks, brick_list)
  491         {
  492             if (strcmp(tmp->hostname, brickinfo->hostname) ||
  493                 strcmp(tmp->path, brickinfo->path)) {
  494                 pos++;
  495                 continue;
  496             }
  497             gf_msg_debug(THIS->name, 0, LOGSTR_FOUND_BRICK, brickinfo->hostname,
  498                          brickinfo->path, volinfo->volname);
  499             sub_volume = (pos / volinfo->dist_leaf_count);
  500             subvols[sub_volume]++;
  501             break;
  502         }
  503     }
  504 }
  505 
  506 static int
  507 subvol_matcher_verify(int *subvols, glusterd_volinfo_t *volinfo, char *err_str,
  508                       size_t err_len, char *vol_type, int replica_count)
  509 {
  510     int i = 0;
  511     int ret = 0;
  512     int count = volinfo->replica_count - replica_count;
  513 
  514     if (replica_count && subvols) {
  515         for (i = 0; i < volinfo->subvol_count; i++) {
  516             if (subvols[i] != count) {
  517                 ret = -1;
  518                 snprintf(err_str, err_len,
  519                          "Remove exactly %d"
  520                          " brick(s) from each subvolume.",
  521                          count);
  522                 break;
  523             }
  524         }
  525         return ret;
  526     }
  527 
  528     do {
  529         if (subvols && (subvols[i] % volinfo->dist_leaf_count == 0)) {
  530             continue;
  531         } else {
  532             ret = -1;
  533             snprintf(err_str, err_len, "Bricks not from same subvol for %s",
  534                      vol_type);
  535             break;
  536         }
  537     } while (++i < volinfo->subvol_count);
  538 
  539     return ret;
  540 }
  541 
/* Release the per-subvolume counter array allocated by
 * subvol_matcher_init() (presumably NULL-safe, like free() — confirm
 * against the GF_FREE definition). */
static void
subvol_matcher_destroy(int *subvols)
{
    GF_FREE(subvols);
}
  547 
  548 static int
  549 glusterd_remove_brick_validate_arbiters(glusterd_volinfo_t *volinfo,
  550                                         int32_t count, int32_t replica_count,
  551                                         glusterd_brickinfo_t **brickinfo_list,
  552                                         char *err_str, size_t err_len)
  553 {
  554     int i = 0;
  555     int ret = 0;
  556     glusterd_brickinfo_t *brickinfo = NULL;
  557     glusterd_brickinfo_t *last = NULL;
  558     char *arbiter_array = NULL;
  559 
  560     if (volinfo->type != GF_CLUSTER_TYPE_REPLICATE)
  561         goto out;
  562 
  563     if (!replica_count || !volinfo->arbiter_count)
  564         goto out;
  565 
  566     if (replica_count == 2) {
  567         /* If it is an arbiter to replica 2 conversion, only permit
  568          *  removal of the arbiter brick.*/
  569         for (i = 0; i < count; i++) {
  570             brickinfo = brickinfo_list[i];
  571             last = get_last_brick_of_brick_group(volinfo, brickinfo);
  572             if (last != brickinfo) {
  573                 snprintf(err_str, err_len,
  574                          "Remove arbiter "
  575                          "brick(s) only when converting from "
  576                          "arbiter to replica 2 subvolume.");
  577                 ret = -1;
  578                 goto out;
  579             }
  580         }
  581     } else if (replica_count == 1) {
  582         /* If it is an arbiter to plain distribute conversion, in every
  583          * replica subvol, the arbiter has to be one of the bricks that
  584          * are removed. */
  585         arbiter_array = GF_CALLOC(volinfo->subvol_count, sizeof(*arbiter_array),
  586                                   gf_common_mt_char);
  587         if (!arbiter_array)
  588             return -1;
  589         for (i = 0; i < count; i++) {
  590             brickinfo = brickinfo_list[i];
  591             last = get_last_brick_of_brick_group(volinfo, brickinfo);
  592             if (last == brickinfo)
  593                 arbiter_array[brickinfo->group] = 1;
  594         }
  595         for (i = 0; i < volinfo->subvol_count; i++)
  596             if (!arbiter_array[i]) {
  597                 snprintf(err_str, err_len,
  598                          "Removed bricks "
  599                          "must contain arbiter when converting"
  600                          " to plain distribute.");
  601                 ret = -1;
  602                 break;
  603             }
  604         GF_FREE(arbiter_array);
  605     }
  606 
  607 out:
  608     return ret;
  609 }
  610 
  611 int
  612 __glusterd_handle_remove_brick(rpcsvc_request_t *req)
  613 {
  614     int32_t ret = -1;
  615     gf_cli_req cli_req = {{
  616         0,
  617     }};
  618     dict_t *dict = NULL;
  619     int32_t count = 0;
  620     char *brick = NULL;
  621     char key[64] = "";
  622     int keylen;
  623     int i = 1;
  624     glusterd_volinfo_t *volinfo = NULL;
  625     glusterd_brickinfo_t *brickinfo = NULL;
  626     glusterd_brickinfo_t **brickinfo_list = NULL;
  627     int *subvols = NULL;
  628     char err_str[2048] = "";
  629     gf_cli_rsp rsp = {
  630         0,
  631     };
  632     void *cli_rsp = NULL;
  633     char vol_type[256] = "";
  634     int32_t replica_count = 0;
  635     char *volname = 0;
  636     xlator_t *this = NULL;
  637     int cmd = -1;
  638 
  639     GF_ASSERT(req);
  640     this = THIS;
  641     GF_ASSERT(this);
  642 
  643     ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
  644     if (ret < 0) {
  645         // failed to decode msg;
  646         req->rpc_err = GARBAGE_ARGS;
  647         snprintf(err_str, sizeof(err_str), "Received garbage args");
  648         goto out;
  649     }
  650 
  651     gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_REM_BRICK_REQ_RECVD,
  652            "Received rem brick req");
  653 
  654     if (cli_req.dict.dict_len) {
  655         /* Unserialize the dictionary */
  656         dict = dict_new();
  657 
  658         ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
  659                                &dict);
  660         if (ret < 0) {
  661             gf_msg(this->name, GF_LOG_ERROR, errno,
  662                    GD_MSG_DICT_UNSERIALIZE_FAIL,
  663                    "failed to "
  664                    "unserialize req-buffer to dictionary");
  665             snprintf(err_str, sizeof(err_str),
  666                      "Unable to decode "
  667                      "the command");
  668             goto out;
  669         }
  670     }
  671 
  672     ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
  673     if (ret) {
  674         snprintf(err_str, sizeof(err_str),
  675                  "Unable to get volume "
  676                  "name");
  677         gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s",
  678                err_str);
  679         goto out;
  680     }
  681 
  682     ret = dict_get_int32n(dict, "count", SLEN("count"), &count);
  683     if (ret) {
  684         snprintf(err_str, sizeof(err_str),
  685                  "Unable to get brick "
  686                  "count");
  687         gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s",
  688                err_str);
  689         goto out;
  690     }
  691 
  692     ret = glusterd_volinfo_find(volname, &volinfo);
  693     if (ret) {
  694         snprintf(err_str, sizeof(err_str), "Volume %s does not exist", volname);
  695         gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND, "%s",
  696                err_str);
  697         goto out;
  698     }
  699 
  700     ret = dict_get_int32n(dict, "command", SLEN("command"), &cmd);
  701     if (ret) {
  702         snprintf(err_str, sizeof(err_str),
  703                  "Unable to get cmd "
  704                  "ccommand");
  705         gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s",
  706                err_str);
  707         goto out;
  708     }
  709 
  710     ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"),
  711                           &replica_count);
  712     if (!ret) {
  713         gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_FAILED,
  714                "request to change replica-count to %d", replica_count);
  715         ret = gd_rmbr_validate_replica_count(volinfo, replica_count, count,
  716                                              err_str, sizeof(err_str));
  717         if (ret < 0) {
  718             /* logging and error msg are done in above function
  719                itself */
  720             goto out;
  721         }
  722         dict_deln(dict, "replica-count", SLEN("replica-count"));
  723         if (ret) {
  724             replica_count = 0;
  725         } else {
  726             ret = dict_set_int32n(dict, "replica-count", SLEN("replica-count"),
  727                                   replica_count);
  728             if (ret) {
  729                 gf_msg(this->name, GF_LOG_WARNING, errno,
  730                        GD_MSG_DICT_SET_FAILED,
  731                        "failed to set the replica_count "
  732                        "in dict");
  733                 goto out;
  734             }
  735         }
  736     }
  737 
  738     /* 'vol_type' is used for giving the meaning full error msg for user */
  739     if (volinfo->type == GF_CLUSTER_TYPE_REPLICATE) {
  740         strcpy(vol_type, "replica");
  741     } else if (volinfo->type == GF_CLUSTER_TYPE_DISPERSE) {
  742         strcpy(vol_type, "disperse");
  743     } else {
  744         strcpy(vol_type, "distribute");
  745     }
  746 
  747     if (!replica_count && (volinfo->type == GF_CLUSTER_TYPE_REPLICATE) &&
  748         (volinfo->brick_count == volinfo->dist_leaf_count)) {
  749         snprintf(err_str, sizeof(err_str),
  750                  "Removing bricks from replicate configuration "
  751                  "is not allowed without reducing replica count "
  752                  "explicitly.");
  753         gf_msg(this->name, GF_LOG_ERROR, EPERM, GD_MSG_OP_NOT_PERMITTED_AC_REQD,
  754                "%s", err_str);
  755         ret = -1;
  756         goto out;
  757     }
  758 
  759     /* Do not allow remove-brick if the bricks given is less than
  760        the replica count or stripe count */
  761     if (!replica_count && (volinfo->type != GF_CLUSTER_TYPE_NONE)) {
  762         if (volinfo->dist_leaf_count && (count % volinfo->dist_leaf_count)) {
  763             snprintf(err_str, sizeof(err_str),
  764                      "Remove brick "
  765                      "incorrect brick count of %d for %s %d",
  766                      count, vol_type, volinfo->dist_leaf_count);
  767             gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY, "%s",
  768                    err_str);
  769             ret = -1;
  770             goto out;
  771         }
  772     }
  773 
  774     if ((volinfo->type != GF_CLUSTER_TYPE_NONE) &&
  775         (volinfo->subvol_count > 1)) {
  776         ret = subvol_matcher_init(&subvols, volinfo->subvol_count);
  777         if (ret)
  778             goto out;
  779     }
  780 
  781     brickinfo_list = GF_CALLOC(count, sizeof(*brickinfo_list),
  782                                gf_common_mt_pointer);
  783     if (!brickinfo_list) {
  784         ret = -1;
  785         goto out;
  786     }
  787 
  788     while (i <= count) {
  789         keylen = snprintf(key, sizeof(key), "brick%d", i);
  790         ret = dict_get_strn(dict, key, keylen, &brick);
  791         if (ret) {
  792             snprintf(err_str, sizeof(err_str), "Unable to get %s", key);
  793             gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
  794                    "%s", err_str);
  795             goto out;
  796         }
  797         gf_msg_debug(this->name, 0,
  798                      "Remove brick count %d brick:"
  799                      " %s",
  800                      i, brick);
  801 
  802         ret = glusterd_volume_brickinfo_get_by_brick(brick, volinfo, &brickinfo,
  803                                                      _gf_false);
  804 
  805         if (ret) {
  806             snprintf(err_str, sizeof(err_str),
  807                      "Incorrect brick "
  808                      "%s for volume %s",
  809                      brick, volname);
  810             gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_BRICK_NOT_FOUND,
  811                    "%s", err_str);
  812             goto out;
  813         }
  814         brickinfo_list[i - 1] = brickinfo;
  815 
  816         i++;
  817         if ((volinfo->type == GF_CLUSTER_TYPE_NONE) ||
  818             (volinfo->brick_count <= volinfo->dist_leaf_count))
  819             continue;
  820 
  821         subvol_matcher_update(subvols, volinfo, brickinfo);
  822     }
  823 
  824     if ((volinfo->type != GF_CLUSTER_TYPE_NONE) &&
  825         (volinfo->subvol_count > 1)) {
  826         ret = subvol_matcher_verify(subvols, volinfo, err_str, sizeof(err_str),
  827                                     vol_type, replica_count);
  828         if (ret)
  829             goto out;
  830     }
  831 
  832     ret = glusterd_remove_brick_validate_arbiters(volinfo, count, replica_count,
  833                                                   brickinfo_list, err_str,
  834                                                   sizeof(err_str));
  835     if (ret)
  836         goto out;
  837 
  838     ret = glusterd_op_begin_synctask(req, GD_OP_REMOVE_BRICK, dict);
  839 
  840 out:
  841     if (ret) {
  842         rsp.op_ret = -1;
  843         rsp.op_errno = 0;
  844         if (err_str[0] == '\0')
  845             snprintf(err_str, sizeof(err_str), "Operation failed");
  846         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GLUSTERD_OP_FAILED, "%s",
  847                err_str);
  848         rsp.op_errstr = err_str;
  849         cli_rsp = &rsp;
  850         glusterd_to_cli(req, cli_rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp,
  851                         dict);
  852 
  853         ret = 0;  // sent error to cli, prevent second reply
  854     }
  855 
  856     if (brickinfo_list)
  857         GF_FREE(brickinfo_list);
  858     subvol_matcher_destroy(subvols);
  859     free(cli_req.dict.dict_val);  // its malloced by xdr
  860 
  861     return ret;
  862 }
  863 
  864 int
  865 glusterd_handle_remove_brick(rpcsvc_request_t *req)
  866 {
  867     return glusterd_big_locked_handler(req, __glusterd_handle_remove_brick);
  868 }
  869 
  870 static int
  871 _glusterd_restart_gsync_session(dict_t *this, char *key, data_t *value,
  872                                 void *data)
  873 {
  874     char *slave = NULL;
  875     char *slave_buf = NULL;
  876     char *path_list = NULL;
  877     char *slave_vol = NULL;
  878     char *slave_host = NULL;
  879     char *slave_url = NULL;
  880     char *conf_path = NULL;
  881     char **errmsg = NULL;
  882     int ret = -1;
  883     glusterd_gsync_status_temp_t *param = NULL;
  884     gf_boolean_t is_running = _gf_false;
  885 
  886     param = (glusterd_gsync_status_temp_t *)data;
  887 
  888     GF_ASSERT(param);
  889     GF_ASSERT(param->volinfo);
  890 
  891     slave = strchr(value->data, ':');
  892     if (slave) {
  893         slave++;
  894         slave_buf = gf_strdup(slave);
  895         if (!slave_buf) {
  896             gf_msg("glusterd", GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
  897                    "Failed to gf_strdup");
  898             ret = -1;
  899             goto out;
  900         }
  901     } else
  902         return 0;
  903 
  904     ret = dict_set_dynstrn(param->rsp_dict, "slave", SLEN("slave"), slave_buf);
  905     if (ret) {
  906         gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
  907                "Unable to store slave");
  908         if (slave_buf)
  909             GF_FREE(slave_buf);
  910         goto out;
  911     }
  912 
  913     ret = glusterd_get_slave_details_confpath(param->volinfo, param->rsp_dict,
  914                                               &slave_url, &slave_host,
  915                                               &slave_vol, &conf_path, errmsg);
  916     if (ret) {
  917         if (errmsg && *errmsg)
  918             gf_msg("glusterd", GF_LOG_ERROR, 0,
  919                    GD_MSG_SLAVE_CONFPATH_DETAILS_FETCH_FAIL, "%s", *errmsg);
  920         else
  921             gf_msg("glusterd", GF_LOG_ERROR, 0,
  922                    GD_MSG_SLAVE_CONFPATH_DETAILS_FETCH_FAIL,
  923                    "Unable to fetch slave or confpath details.");
  924         goto out;
  925     }
  926 
  927     /* In cases that gsyncd is not running, we will not invoke it
  928      * because of add-brick. */
  929     ret = glusterd_check_gsync_running_local(param->volinfo->volname, slave,
  930                                              conf_path, &is_running);
  931     if (ret) {
  932         gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_GSYNC_VALIDATION_FAIL,
  933                "gsync running validation failed.");
  934         goto out;
  935     }
  936     if (_gf_false == is_running) {
  937         gf_msg_debug("glusterd", 0,
  938                      "gsync session for %s and %s is"
  939                      " not running on this node. Hence not restarting.",
  940                      param->volinfo->volname, slave);
  941         ret = 0;
  942         goto out;
  943     }
  944 
  945     ret = glusterd_get_local_brickpaths(param->volinfo, &path_list);
  946     if (!path_list) {
  947         gf_msg_debug("glusterd", 0,
  948                      "This node not being part of"
  949                      " volume should not be running gsyncd. Hence"
  950                      " no gsyncd process to restart.");
  951         ret = 0;
  952         goto out;
  953     }
  954 
  955     ret = glusterd_check_restart_gsync_session(
  956         param->volinfo, slave, param->rsp_dict, path_list, conf_path, 0);
  957     if (ret)
  958         gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_GSYNC_RESTART_FAIL,
  959                "Unable to restart gsync session.");
  960 
  961 out:
  962     gf_msg_debug("glusterd", 0, "Returning %d.", ret);
  963     return ret;
  964 }
  965 
  966 /* op-sm */
  967 
/* Commit-phase worker for add-brick.
 *
 * Parses the space/newline-separated 'bricks' string, creates a brickinfo
 * for each of the 'count' new bricks, links them into 'volinfo' (at the
 * right position when stripe/replica counts change), recomputes the
 * volume's leaf/subvol counts and — if the volume is started — generates
 * volfiles, starts the new brick processes and restarts any local gsyncd
 * sessions so geo-replication picks up the new bricks.
 *
 * @volinfo  volume being extended
 * @count    number of bricks encoded in 'bricks'
 * @bricks   brick list string; assumed to begin with a separator character
 *           (the parse starts at brick_list + 1) — built that way by callers
 * @dict     op dict possibly carrying stripe/replica/arbiter counts, type
 *           and per-brick mount_dir entries
 *
 * Returns 0 on success, non-zero on failure. */
int
glusterd_op_perform_add_bricks(glusterd_volinfo_t *volinfo, int32_t count,
                               char *bricks, dict_t *dict)
{
    char *brick = NULL;
    int32_t i = 1;
    char *brick_list = NULL;
    char *free_ptr1 = NULL; /* first strdup of bricks, freed at out */
    char *free_ptr2 = NULL; /* second strdup for the start-brick pass */
    char *saveptr = NULL;
    int32_t ret = -1;
    int32_t stripe_count = 0;
    int32_t replica_count = 0;
    int32_t arbiter_count = 0;
    int32_t type = 0;
    glusterd_brickinfo_t *brickinfo = NULL;
    glusterd_gsync_status_temp_t param = {
        0,
    };
    gf_boolean_t restart_needed = 0;
    int brickid = 0;
    char key[64] = "";
    char *brick_mount_dir = NULL;
    xlator_t *this = NULL;
    glusterd_conf_t *conf = NULL;
    gf_boolean_t is_valid_add_brick = _gf_false;
    struct statvfs brickstat = {
        0,
    };

    this = THIS;
    GF_ASSERT(this);
    GF_ASSERT(volinfo);

    conf = this->private;
    GF_ASSERT(conf);

    if (bricks) {
        brick_list = gf_strdup(bricks);
        free_ptr1 = brick_list;
    }

    /* Skip the leading separator before tokenizing the brick list. */
    if (count)
        brick = strtok_r(brick_list + 1, " \n", &saveptr);

    /* Pull optional reconfiguration values from the op dict; a failed
     * lookup simply means that value is not being changed. */
    if (dict) {
        ret = dict_get_int32n(dict, "stripe-count", SLEN("stripe-count"),
                              &stripe_count);
        if (!ret)
            gf_msg(THIS->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_SUCCESS,
                   "stripe-count is set %d", stripe_count);

        ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"),
                              &replica_count);
        if (!ret)
            gf_msg(THIS->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_SUCCESS,
                   "replica-count is set %d", replica_count);
        ret = dict_get_int32n(dict, "arbiter-count", SLEN("arbiter-count"),
                              &arbiter_count);
        if (!ret)
            gf_msg(THIS->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_SUCCESS,
                   "arbiter-count is set %d", arbiter_count);
        ret = dict_get_int32n(dict, "type", SLEN("type"), &type);
        if (!ret)
            gf_msg(THIS->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_SUCCESS,
                   "type is set %d, need to change it", type);
    }

    brickid = glusterd_get_next_available_brickid(volinfo);
    if (brickid < 0)
        goto out;
    /* First pass: build a brickinfo per brick and link it into volinfo. */
    while (i <= count) {
        ret = glusterd_brickinfo_new_from_brick(brick, &brickinfo, _gf_true,
                                                NULL);
        if (ret)
            goto out;

        GLUSTERD_ASSIGN_BRICKID_TO_BRICKINFO(brickinfo, volinfo, brickid++);

        /* A bricks mount dir is required only by snapshots which were
         * introduced in gluster-3.6.0
         */
        if (conf->op_version >= GD_OP_VERSION_3_6_0) {
            brick_mount_dir = NULL;

            snprintf(key, sizeof(key), "brick%d.mount_dir", i);
            ret = dict_get_str(dict, key, &brick_mount_dir);
            if (ret) {
                gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
                       "%s not present", key);
                goto out;
            }
            /* NOTE(review): strncpy with SLEN() does not NUL-terminate if
             * brick_mount_dir fills the buffer — presumably mount_dir's
             * last byte is already zero from allocation; verify. */
            strncpy(brickinfo->mount_dir, brick_mount_dir,
                    SLEN(brickinfo->mount_dir));
        }

        ret = glusterd_resolve_brick(brickinfo);
        if (ret)
            goto out;

        /* For locally-hosted bricks, record the filesystem id so disk
         * utilisation can be attributed to the right filesystem. */
        if (!gf_uuid_compare(brickinfo->uuid, MY_UUID)) {
            ret = sys_statvfs(brickinfo->path, &brickstat);
            if (ret) {
                gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_STATVFS_FAILED,
                       "Failed to fetch disk utilization "
                       "from the brick (%s:%s). Please check the health of "
                       "the brick. Error code was %s",
                       brickinfo->hostname, brickinfo->path, strerror(errno));

                goto out;
            }
            brickinfo->statfs_fsid = brickstat.f_fsid;
        }
        /* When stripe/replica counts change, placement matters; otherwise
         * the new brick simply goes at the end of the list. */
        if (stripe_count || replica_count) {
            add_brick_at_right_order(brickinfo, volinfo, (i - 1), stripe_count,
                                     replica_count);
        } else {
            cds_list_add_tail(&brickinfo->brick_list, &volinfo->bricks);
        }
        brick = strtok_r(NULL, " \n", &saveptr);
        i++;
        volinfo->brick_count++;
    }

    /* Gets changed only if the options are given in add-brick cli */
    if (type)
        volinfo->type = type;
    /* performance.client-io-threads is turned on by default,
     * however this has adverse effects on replicate volumes due to
     * replication design issues, till that get addressed
     * performance.client-io-threads option is turned off for all
     * replicate volumes if not already explicitly enabled.
     */
    if (type && glusterd_is_volume_replicate(volinfo) &&
        conf->op_version >= GD_OP_VERSION_3_12_2) {
        ret = dict_set_nstrn(volinfo->dict, "performance.client-io-threads",
                             SLEN("performance.client-io-threads"), "off",
                             SLEN("off"));
        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                   "Failed to set "
                   "performance.client-io-threads to off");
            goto out;
        }
    }

    if (replica_count) {
        volinfo->replica_count = replica_count;
    }
    if (arbiter_count) {
        volinfo->arbiter_count = arbiter_count;
    }
    if (stripe_count) {
        volinfo->stripe_count = stripe_count;
    }
    volinfo->dist_leaf_count = glusterd_get_dist_leaf_count(volinfo);

    /* backward compatibility */
    volinfo->sub_count = ((volinfo->dist_leaf_count == 1)
                              ? 0
                              : volinfo->dist_leaf_count);

    volinfo->subvol_count = (volinfo->brick_count / volinfo->dist_leaf_count);

    ret = 0;
    /* Stopped volumes only need their volfiles regenerated; no brick
     * processes to start. */
    if (GLUSTERD_STATUS_STARTED != volinfo->status)
        goto generate_volfiles;

    ret = generate_brick_volfiles(volinfo);
    if (ret)
        goto out;

    /* Second pass over a fresh copy of the brick list: start each new
     * brick process (the first copy was consumed by strtok_r). */
    brick_list = gf_strdup(bricks);
    free_ptr2 = brick_list;
    i = 1;

    if (count)
        brick = strtok_r(brick_list + 1, " \n", &saveptr);

    if (glusterd_is_volume_replicate(volinfo)) {
        if (replica_count && conf->op_version >= GD_OP_VERSION_3_7_10) {
            is_valid_add_brick = _gf_true;
            ret = generate_dummy_client_volfiles(volinfo);
            if (ret) {
                gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
                       "Failed to create volfile.");
                goto out;
            }
        }
    }

    while (i <= count) {
        ret = glusterd_volume_brickinfo_get_by_brick(brick, volinfo, &brickinfo,
                                                     _gf_true);
        if (ret)
            goto out;

        if (gf_uuid_is_null(brickinfo->uuid)) {
            ret = glusterd_resolve_brick(brickinfo);
            if (ret) {
                gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_RESOLVE_BRICK_FAIL,
                       FMTSTR_RESOLVE_BRICK, brickinfo->hostname,
                       brickinfo->path);
                goto out;
            }
        }

        /* if the volume is a replicate volume, do: */
        if (is_valid_add_brick) {
            if (!gf_uuid_compare(brickinfo->uuid, MY_UUID)) {
                ret = glusterd_handle_replicate_brick_ops(volinfo, brickinfo,
                                                          GD_OP_ADD_BRICK);
                if (ret < 0)
                    goto out;
            }
        }
        ret = glusterd_brick_start(volinfo, brickinfo, _gf_true, _gf_false);
        if (ret)
            goto out;
        i++;
        brick = strtok_r(NULL, " \n", &saveptr);

        /* Check if the brick is added in this node, and set
         * the restart_needed flag. */
        if ((!gf_uuid_compare(brickinfo->uuid, MY_UUID)) && !restart_needed) {
            restart_needed = 1;
            gf_msg_debug("glusterd", 0,
                         "Restart gsyncd session, if it's already "
                         "running.");
        }
    }

    /* If the restart_needed flag is set, restart gsyncd sessions for that
     * particular master with all the slaves. */
    if (restart_needed) {
        param.rsp_dict = dict;
        param.volinfo = volinfo;
        dict_foreach(volinfo->gsync_slaves, _glusterd_restart_gsync_session,
                     &param);
    }

generate_volfiles:
    if (conf->op_version <= GD_OP_VERSION_3_7_5) {
        ret = glusterd_create_volfiles_and_notify_services(volinfo);
    } else {
        /*
         * The cluster is operating at version greater than
         * gluster-3.7.5. So no need to sent volfile fetch
         * request in commit phase, the same will be done
         * in post validate phase with v3 framework.
         */
    }

out:
    GF_FREE(free_ptr1);
    GF_FREE(free_ptr2);

    gf_msg_debug("glusterd", 0, "Returning %d", ret);
    return ret;
}
 1228 
 1229 int
 1230 glusterd_op_perform_remove_brick(glusterd_volinfo_t *volinfo, char *brick,
 1231                                  int force, int *need_migrate)
 1232 {
 1233     glusterd_brickinfo_t *brickinfo = NULL;
 1234     int32_t ret = -1;
 1235     glusterd_conf_t *priv = NULL;
 1236 
 1237     GF_ASSERT(volinfo);
 1238     GF_ASSERT(brick);
 1239 
 1240     priv = THIS->private;
 1241     GF_ASSERT(priv);
 1242 
 1243     ret = glusterd_volume_brickinfo_get_by_brick(brick, volinfo, &brickinfo,
 1244                                                  _gf_false);
 1245     if (ret)
 1246         goto out;
 1247 
 1248     ret = glusterd_resolve_brick(brickinfo);
 1249     if (ret)
 1250         goto out;
 1251 
 1252     glusterd_volinfo_reset_defrag_stats(volinfo);
 1253 
 1254     if (!gf_uuid_compare(brickinfo->uuid, MY_UUID)) {
 1255         /* Only if the brick is in this glusterd, do the rebalance */
 1256         if (need_migrate)
 1257             *need_migrate = 1;
 1258     }
 1259 
 1260     if (force) {
 1261         ret = glusterd_brick_stop(volinfo, brickinfo, _gf_true);
 1262         if (ret) {
 1263             gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_STOP_FAIL,
 1264                    "Unable to stop "
 1265                    "glusterfs, ret: %d",
 1266                    ret);
 1267         }
 1268         goto out;
 1269     }
 1270 
 1271     brickinfo->decommissioned = 1;
 1272     ret = 0;
 1273 out:
 1274     gf_msg_debug("glusterd", 0, "Returning %d", ret);
 1275     return ret;
 1276 }
 1277 
/* Staging (validation) phase for add-brick.
 *
 * Validates the requested add-brick operation against the current state of
 * the volume before anything is committed: volume existence and id,
 * op-version gates for replica/arbiter changes, volume status, server
 * quorum, no rebalance in progress, and per-brick path validity.  For
 * bricks hosted on this node it also creates/validates the brick path and
 * publishes the brick mount_dir into rsp_dict.
 *
 * @dict       op dict from the originator (volname, count, bricks, ...)
 * @op_errstr  out: allocated human-readable error on failure
 * @rsp_dict   out: per-node staging results (mount dirs, brick_count)
 *
 * Returns 0 on success, non-zero on validation failure. */
int
glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
{
    int ret = 0;
    char *volname = NULL;
    int count = 0;
    int replica_count = 0;
    int arbiter_count = 0;
    int i = 0;
    int32_t local_brick_count = 0; /* bricks of this request hosted here */
    char *bricks = NULL;
    char *brick_list = NULL;
    char *saveptr = NULL;
    char *free_ptr = NULL;
    char *brick = NULL;
    glusterd_brickinfo_t *brickinfo = NULL;
    glusterd_volinfo_t *volinfo = NULL;
    xlator_t *this = NULL;
    char msg[4096] = "";
    char key[64] = "";
    gf_boolean_t brick_alloc = _gf_false;
    char *all_bricks = NULL;
    char *str_ret = NULL;
    gf_boolean_t is_force = _gf_false;
    glusterd_conf_t *conf = NULL;
    int32_t len = 0;

    this = THIS;
    GF_ASSERT(this);
    conf = this->private;
    GF_ASSERT(conf);

    ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
    if (ret) {
        gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
               "Unable to get volume name");
        goto out;
    }

    ret = glusterd_volinfo_find(volname, &volinfo);
    if (ret) {
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
               "Unable to find volume: %s", volname);
        goto out;
    }

    ret = glusterd_validate_volume_id(dict, volinfo);
    if (ret)
        goto out;

    /* replica/arbiter counts are optional; absence means "unchanged". */
    ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"),
                          &replica_count);
    if (ret) {
        gf_msg_debug(THIS->name, 0, "Unable to get replica count");
    }

    ret = dict_get_int32n(dict, "arbiter-count", SLEN("arbiter-count"),
                          &arbiter_count);
    if (ret) {
        gf_msg_debug(THIS->name, 0, "No arbiter count present in the dict");
    }

    /* Changing replica count requires the cluster to support persistent
     * AFR changelog xattrs. */
    if (replica_count > 0) {
        ret = op_version_check(this, GD_OP_VER_PERSISTENT_AFR_XATTRS, msg,
                               sizeof(msg));
        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_VERSION_MISMATCH,
                   "%s", msg);
            *op_errstr = gf_strdup(msg);
            goto out;
        }
    }

    if (glusterd_is_volume_replicate(volinfo)) {
        /* Do not allow add-brick for stopped volumes when replica-count
         * is being increased.
         */
        if (conf->op_version >= GD_OP_VERSION_3_7_10 && replica_count &&
            GLUSTERD_STATUS_STOPPED == volinfo->status) {
            ret = -1;
            snprintf(msg, sizeof(msg),
                     " Volume must not be in"
                     " stopped state when replica-count needs to "
                     " be increased.");
            gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
                   msg);
            *op_errstr = gf_strdup(msg);
            goto out;
        }
        /* op-version check for replica 2 to arbiter conversion. If we
         * don't have this check, an older peer added as arbiter brick
         * will not have the  arbiter xlator in its volfile. */
        if ((conf->op_version < GD_OP_VERSION_3_8_0) && (arbiter_count == 1) &&
            (replica_count == 3)) {
            ret = -1;
            snprintf(msg, sizeof(msg),
                     "Cluster op-version must "
                     "be >= 30800 to add arbiter brick to a "
                     "replica 2 volume.");
            gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
                   msg);
            *op_errstr = gf_strdup(msg);
            goto out;
        }
        /* Do not allow increasing replica count for arbiter volumes. */
        if (replica_count && volinfo->arbiter_count) {
            ret = -1;
            snprintf(msg, sizeof(msg),
                     "Increasing replica count "
                     "for arbiter volumes is not supported.");
            gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
                   msg);
            *op_errstr = gf_strdup(msg);
            goto out;
        }
    }

    is_force = dict_get_str_boolean(dict, "force", _gf_false);

    /* When increasing replica count (and not forcing), every local brick
     * must be up — a down brick would miss the new replication topology
     * and risk data loss. */
    if (volinfo->replica_count < replica_count && !is_force) {
        cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
        {
            if (gf_uuid_compare(brickinfo->uuid, MY_UUID))
                continue;
            if (brickinfo->status == GF_BRICK_STOPPED) {
                ret = -1;
                len = snprintf(msg, sizeof(msg),
                               "Brick %s "
                               "is down, changing replica "
                               "count needs all the bricks "
                               "to be up to avoid data loss",
                               brickinfo->path);
                if (len < 0) {
                    strcpy(msg, "<error>");
                }
                gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
                       msg);
                *op_errstr = gf_strdup(msg);
                goto out;
            }
        }
    }

    /* Server-quorum check is only done on the originator glusterd for
     * op-versions newer than 3.7.5 (older versions check via syncop). */
    if (conf->op_version > GD_OP_VERSION_3_7_5 && is_origin_glusterd(dict)) {
        ret = glusterd_validate_quorum(this, GD_OP_ADD_BRICK, dict, op_errstr);
        if (ret) {
            gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_SERVER_QUORUM_NOT_MET,
                   "Server quorum not met. Rejecting operation.");
            goto out;
        }
    } else {
        /* Case 1: conf->op_version <= GD_OP_VERSION_3_7_5
         *         in this case the add-brick is running
         *         syncop framework that will do a quorum
         *         check by default
         * Case 2: We don't need to do quorum check on every
         *         node, only originator glusterd need to
         *         check for quorum
         * So nothing need to be done in else
         */
    }

    /* Reject while a rebalance is running on the volume. */
    if (glusterd_is_defrag_on(volinfo)) {
        snprintf(msg, sizeof(msg),
                 "Volume name %s rebalance is in "
                 "progress. Please retry after completion",
                 volname);
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_OIP_RETRY_LATER, "%s", msg);
        *op_errstr = gf_strdup(msg);
        ret = -1;
        goto out;
    }

    /* Snapshots are not blocked, only warned about. */
    if (volinfo->snap_count > 0 || !cds_list_empty(&volinfo->snap_volumes)) {
        snprintf(msg, sizeof(msg),
                 "Volume %s  has %" PRIu64
                 " snapshots. "
                 "Changing the volume configuration will not effect snapshots."
                 "But the snapshot brick mount should be intact to "
                 "make them function.",
                 volname, volinfo->snap_count);
        gf_msg("glusterd", GF_LOG_WARNING, 0, GD_MSG_SNAP_WARN, "%s", msg);
        msg[0] = '\0';
    }

    ret = dict_get_int32n(dict, "count", SLEN("count"), &count);
    if (ret) {
        gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
               "Unable to get count");
        goto out;
    }

    ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &bricks);
    if (ret) {
        gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
               "Unable to get bricks");
        goto out;
    }

    if (bricks) {
        brick_list = gf_strdup(bricks);
        all_bricks = gf_strdup(bricks);
        free_ptr = brick_list;
    }

    /* The brick list begins with a separator character; skip it before
     * tokenizing. */
    if (count)
        brick = strtok_r(brick_list + 1, " \n", &saveptr);

    /* Per-brick validation: path length, brickinfo construction, and —
     * for bricks hosted locally — path creation and mount_dir export. */
    while (i < count) {
        if (!glusterd_store_is_valid_brickpath(volname, brick) ||
            !glusterd_is_valid_volfpath(volname, brick)) {
            snprintf(msg, sizeof(msg),
                     "brick path %s is "
                     "too long",
                     brick);
            gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRKPATH_TOO_LONG, "%s",
                   msg);
            *op_errstr = gf_strdup(msg);

            ret = -1;
            goto out;
        }

        ret = glusterd_brickinfo_new_from_brick(brick, &brickinfo, _gf_true,
                                                NULL);
        if (ret) {
            gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_NOT_FOUND,
                   "Add-brick: Unable"
                   " to get brickinfo");
            goto out;
        }
        brick_alloc = _gf_true;

        ret = glusterd_new_brick_validate(brick, brickinfo, msg, sizeof(msg),
                                          NULL);
        if (ret) {
            *op_errstr = gf_strdup(msg);
            ret = -1;
            goto out;
        }

        if (!gf_uuid_compare(brickinfo->uuid, MY_UUID)) {
            ret = glusterd_validate_and_create_brickpath(
                brickinfo, volinfo->volume_id, volinfo->volname, op_errstr,
                is_force, _gf_false);
            if (ret)
                goto out;

            /* A bricks mount dir is required only by snapshots which were
             * introduced in gluster-3.6.0
             */
            if (conf->op_version >= GD_OP_VERSION_3_6_0) {
                ret = glusterd_get_brick_mount_dir(
                    brickinfo->path, brickinfo->hostname, brickinfo->mount_dir);
                if (ret) {
                    gf_msg(this->name, GF_LOG_ERROR, 0,
                           GD_MSG_BRICK_MOUNTDIR_GET_FAIL,
                           "Failed to get brick mount_dir");
                    goto out;
                }

                snprintf(key, sizeof(key), "brick%d.mount_dir", i + 1);
                ret = dict_set_dynstr_with_alloc(rsp_dict, key,
                                                 brickinfo->mount_dir);
                if (ret) {
                    gf_msg(this->name, GF_LOG_ERROR, errno,
                           GD_MSG_DICT_SET_FAILED, "Failed to set %s", key);
                    goto out;
                }
            }

            local_brick_count = i + 1;
        }

        /* brickinfo was only needed for validation; discard it. */
        glusterd_brickinfo_delete(brickinfo);
        brick_alloc = _gf_false;
        brickinfo = NULL;
        brick = strtok_r(NULL, " \n", &saveptr);
        i++;
    }

    ret = dict_set_int32n(rsp_dict, "brick_count", SLEN("brick_count"),
                          local_brick_count);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
               "Failed to set local_brick_count");
        goto out;
    }

out:
    GF_FREE(free_ptr);
    if (brick_alloc && brickinfo)
        glusterd_brickinfo_delete(brickinfo);
    GF_FREE(str_ret);
    GF_FREE(all_bricks);

    gf_msg_debug(THIS->name, 0, "Returning %d", ret);

    return ret;
}
 1578 
/* Validate every brick named "brick1".."brick<brick_count>" in @dict against
 * @volinfo before a remove-brick (or tier-detach) operation is allowed.
 *
 * Checks performed per brick:
 *   - the brick actually belongs to the volume;
 *   - for GF_OP_CMD_COMMIT, the brick has already been decommissioned;
 *   - for a local brick being started for removal, the brick process is
 *     both in STARTED state and actually running (live pidfile);
 *   - for a remote brick, the owning peer is known and connected.
 *
 * On failure, returns -1 and hands an explanatory string back through
 * @errstr (heap-allocated via gf_strdup; caller owns it). Returns 0 when
 * all bricks pass validation.
 */
int
glusterd_remove_brick_validate_bricks(gf1_op_commands cmd, int32_t brick_count,
                                      dict_t *dict, glusterd_volinfo_t *volinfo,
                                      char **errstr,
                                      gf_cli_defrag_type cmd_defrag)
{
    char *brick = NULL;
    char msg[2048] = "";
    char key[64] = "";
    int keylen;
    glusterd_brickinfo_t *brickinfo = NULL;
    glusterd_peerinfo_t *peerinfo = NULL;
    int i = 0;
    int ret = -1;
    char pidfile[PATH_MAX + 1] = {
        0,
    };
    glusterd_conf_t *priv = THIS->private;
    int pid = -1;

    /* Check whether all the nodes of the bricks to be removed are
     * up, if not fail the operation */
    for (i = 1; i <= brick_count; i++) {
        keylen = snprintf(key, sizeof(key), "brick%d", i);
        ret = dict_get_strn(dict, key, keylen, &brick);
        if (ret) {
            snprintf(msg, sizeof(msg), "Unable to get %s", key);
            *errstr = gf_strdup(msg);
            goto out;
        }

        ret = glusterd_volume_brickinfo_get_by_brick(brick, volinfo, &brickinfo,
                                                     _gf_false);
        if (ret) {
            snprintf(msg, sizeof(msg),
                     "Incorrect brick "
                     "%s for volume %s",
                     brick, volinfo->volname);
            *errstr = gf_strdup(msg);
            goto out;
        }
        /* Do not allow commit if the bricks are not decommissioned
         * if its a remove brick commit
         */
        if (!brickinfo->decommissioned && cmd == GF_OP_CMD_COMMIT) {
            snprintf(msg, sizeof(msg),
                     "Brick %s "
                     "is not decommissioned. "
                     "Use start or force option",
                     brick);
            *errstr = gf_strdup(msg);
            ret = -1;
            goto out;
        }

        if (glusterd_is_local_brick(THIS, volinfo, brickinfo)) {
            /* Only GF_OP_CMD_START needs the liveness check below
             * (label "check"); every other cmd/cmd_defrag combination
             * skips this brick via "continue". */
            switch (cmd) {
                case GF_OP_CMD_START:
                    goto check;
                case GF_OP_CMD_NONE:
                default:
                    break;
            }

            switch (cmd_defrag) {
                case GF_DEFRAG_CMD_NONE:
                default:
                    continue;
            }
        check:
            /* A brick being started for removal must be online so data
             * can be migrated off it; 'force' is the documented escape
             * hatch for offline bricks. */
            if (brickinfo->status != GF_BRICK_STARTED) {
                snprintf(msg, sizeof(msg),
                         "Found stopped "
                         "brick %s. Use force option to "
                         "remove the offline brick",
                         brick);
                *errstr = gf_strdup(msg);
                ret = -1;
                goto out;
            }
            /* STARTED state alone is not enough: verify the brick
             * process is actually alive via its pidfile. */
            GLUSTERD_GET_BRICK_PIDFILE(pidfile, volinfo, brickinfo, priv);
            if (!gf_is_service_running(pidfile, &pid)) {
                snprintf(msg, sizeof(msg),
                         "Found dead "
                         "brick %s",
                         brick);
                *errstr = gf_strdup(msg);
                ret = -1;
                goto out;
            } else {
                ret = 0;
            }
            continue;
        }

        /* Remote brick: its host peer must be part of the cluster and
         * currently connected. Peer lookup happens under the RCU read
         * lock; drop the lock before any early exit. */
        RCU_READ_LOCK;
        peerinfo = glusterd_peerinfo_find_by_uuid(brickinfo->uuid);
        if (!peerinfo) {
            RCU_READ_UNLOCK;
            snprintf(msg, sizeof(msg),
                     "Host node of the "
                     "brick %s is not in cluster",
                     brick);
            *errstr = gf_strdup(msg);
            ret = -1;
            goto out;
        }
        if (!peerinfo->connected) {
            RCU_READ_UNLOCK;
            snprintf(msg, sizeof(msg),
                     "Host node of the "
                     "brick %s is down",
                     brick);
            *errstr = gf_strdup(msg);
            ret = -1;
            goto out;
        }
        RCU_READ_UNLOCK;
    }

out:
    return ret;
}
 1702 
 1703 int
 1704 glusterd_op_stage_remove_brick(dict_t *dict, char **op_errstr)
 1705 {
 1706     int ret = -1;
 1707     char *volname = NULL;
 1708     glusterd_volinfo_t *volinfo = NULL;
 1709     char *errstr = NULL;
 1710     int32_t brick_count = 0;
 1711     char msg[2048] = "";
 1712     int32_t flag = 0;
 1713     gf1_op_commands cmd = GF_OP_CMD_NONE;
 1714     char *task_id_str = NULL;
 1715     xlator_t *this = NULL;
 1716     gsync_status_param_t param = {
 1717         0,
 1718     };
 1719 
 1720     this = THIS;
 1721     GF_ASSERT(this);
 1722 
 1723     ret = op_version_check(this, GD_OP_VER_PERSISTENT_AFR_XATTRS, msg,
 1724                            sizeof(msg));
 1725     if (ret) {
 1726         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_VERSION_MISMATCH, "%s",
 1727                msg);
 1728         *op_errstr = gf_strdup(msg);
 1729         goto out;
 1730     }
 1731 
 1732     ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
 1733     if (ret) {
 1734         gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
 1735                "Unable to get volume name");
 1736         goto out;
 1737     }
 1738 
 1739     ret = glusterd_volinfo_find(volname, &volinfo);
 1740 
 1741     if (ret) {
 1742         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
 1743                "Volume %s does not exist", volname);
 1744         goto out;
 1745     }
 1746 
 1747     ret = glusterd_validate_volume_id(dict, volinfo);
 1748     if (ret)
 1749         goto out;
 1750 
 1751     ret = dict_get_int32n(dict, "command", SLEN("command"), &flag);
 1752     if (ret) {
 1753         gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
 1754                "Unable to get brick command");
 1755         goto out;
 1756     }
 1757     cmd = flag;
 1758 
 1759     ret = dict_get_int32n(dict, "count", SLEN("count"), &brick_count);
 1760     if (ret) {
 1761         gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
 1762                "Unable to get brick count");
 1763         goto out;
 1764     }
 1765 
 1766     ret = 0;
 1767     if (volinfo->brick_count == brick_count) {
 1768         errstr = gf_strdup(
 1769             "Deleting all the bricks of the "
 1770             "volume is not allowed");
 1771         ret = -1;
 1772         goto out;
 1773     }
 1774 
 1775     ret = -1;
 1776     switch (cmd) {
 1777         case GF_OP_CMD_NONE:
 1778             errstr = gf_strdup("no remove-brick command issued");
 1779             goto out;
 1780 
 1781         case GF_OP_CMD_STATUS:
 1782             ret = 0;
 1783             goto out;
 1784         case GF_OP_CMD_START: {
 1785             if ((volinfo->type == GF_CLUSTER_TYPE_REPLICATE) &&
 1786                 dict_getn(dict, "replica-count", SLEN("replica-count"))) {
 1787                 snprintf(msg, sizeof(msg),
 1788                          "Migration of data is not "
 1789                          "needed when reducing replica count. Use the"
 1790                          " 'force' option");
 1791                 errstr = gf_strdup(msg);
 1792                 gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_USE_THE_FORCE, "%s",
 1793                        errstr);
 1794                 goto out;
 1795             }
 1796 
 1797             if (GLUSTERD_STATUS_STARTED != volinfo->status) {
 1798                 snprintf(msg, sizeof(msg),
 1799                          "Volume %s needs "
 1800                          "to be started before remove-brick "
 1801                          "(you can use 'force' or 'commit' "
 1802                          "to override this behavior)",
 1803                          volinfo->volname);
 1804                 errstr = gf_strdup(msg);
 1805                 gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_STARTED,
 1806                        "%s", errstr);
 1807                 goto out;
 1808             }
 1809             if (!gd_is_remove_brick_committed(volinfo)) {
 1810                 snprintf(msg, sizeof(msg),
 1811                          "An earlier remove-brick "
 1812                          "task exists for volume %s. Either commit it"
 1813                          " or stop it before starting a new task.",
 1814                          volinfo->volname);
 1815                 errstr = gf_strdup(msg);
 1816                 gf_msg(this->name, GF_LOG_ERROR, 0,
 1817                        GD_MSG_OLD_REMOVE_BRICK_EXISTS,
 1818                        "Earlier remove-brick"
 1819                        " task exists for volume %s.",
 1820                        volinfo->volname);
 1821                 goto out;
 1822             }
 1823             if (glusterd_is_defrag_on(volinfo)) {
 1824                 errstr = gf_strdup(
 1825                     "Rebalance is in progress. Please "
 1826                     "retry after completion");
 1827                 gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OIP_RETRY_LATER,
 1828                        "%s", errstr);
 1829                 goto out;
 1830             }
 1831 
 1832             /* Check if the connected clients are all of version
 1833              * glusterfs-3.6 and higher. This is needed to prevent some data
 1834              * loss issues that could occur when older clients are connected
 1835              * when rebalance is run.
 1836              */
 1837             ret = glusterd_check_client_op_version_support(
 1838                 volname, GD_OP_VERSION_3_6_0, NULL);
 1839             if (ret) {
 1840                 ret = gf_asprintf(op_errstr,
 1841                                   "Volume %s has one or "
 1842                                   "more connected clients of a version"
 1843                                   " lower than GlusterFS-v3.6.0. "
 1844                                   "Starting remove-brick in this state "
 1845                                   "could lead to data loss.\nPlease "
 1846                                   "disconnect those clients before "
 1847                                   "attempting this command again.",
 1848                                   volname);
 1849                 goto out;
 1850             }
 1851 
 1852             if (volinfo->snap_count > 0 ||
 1853                 !cds_list_empty(&volinfo->snap_volumes)) {
 1854                 snprintf(msg, sizeof(msg),
 1855                          "Volume %s  has %" PRIu64
 1856                          " snapshots. "
 1857                          "Changing the volume configuration will not effect "
 1858                          "snapshots."
 1859                          "But the snapshot brick mount should be intact to "
 1860                          "make them function.",
 1861                          volname, volinfo->snap_count);
 1862                 gf_msg("glusterd", GF_LOG_WARNING, 0, GD_MSG_SNAP_WARN, "%s",
 1863                        msg);
 1864                 msg[0] = '\0';
 1865             }
 1866 
 1867             ret = glusterd_remove_brick_validate_bricks(
 1868                 cmd, brick_count, dict, volinfo, &errstr, GF_DEFRAG_CMD_NONE);
 1869             if (ret)
 1870                 goto out;
 1871 
 1872             if (is_origin_glusterd(dict)) {
 1873                 ret = glusterd_generate_and_set_task_id(
 1874                     dict, GF_REMOVE_BRICK_TID_KEY,
 1875                     SLEN(GF_REMOVE_BRICK_TID_KEY));
 1876                 if (ret) {
 1877                     gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TASKID_GEN_FAIL,
 1878                            "Failed to generate task-id");
 1879                     goto out;
 1880                 }
 1881             } else {
 1882                 ret = dict_get_strn(dict, GF_REMOVE_BRICK_TID_KEY,
 1883                                     SLEN(GF_REMOVE_BRICK_TID_KEY),
 1884                                     &task_id_str);
 1885                 if (ret) {
 1886                     gf_msg(this->name, GF_LOG_WARNING, errno,
 1887                            GD_MSG_DICT_GET_FAILED, "Missing remove-brick-id");
 1888                     ret = 0;
 1889                 }
 1890             }
 1891             break;
 1892         }
 1893 
 1894         case GF_OP_CMD_STOP:
 1895             ret = 0;
 1896             break;
 1897 
 1898         case GF_OP_CMD_COMMIT:
 1899             if (volinfo->decommission_in_progress) {
 1900                 errstr = gf_strdup(
 1901                     "use 'force' option as migration "
 1902                     "is in progress");
 1903                 goto out;
 1904             }
 1905 
 1906             if (volinfo->rebal.defrag_status == GF_DEFRAG_STATUS_FAILED) {
 1907                 errstr = gf_strdup(
 1908                     "use 'force' option as migration "
 1909                     "has failed");
 1910                 goto out;
 1911             }
 1912 
 1913             if (volinfo->rebal.defrag_status == GF_DEFRAG_STATUS_COMPLETE) {
 1914                 if (volinfo->rebal.rebalance_failures > 0 ||
 1915                     volinfo->rebal.skipped_files > 0) {
 1916                     errstr = gf_strdup(
 1917                         "use 'force' option as migration "
 1918                         "of some files might have been skipped or "
 1919                         "has failed");
 1920                     goto out;
 1921                 }
 1922             }
 1923 
 1924             ret = glusterd_remove_brick_validate_bricks(
 1925                 cmd, brick_count, dict, volinfo, &errstr, GF_DEFRAG_CMD_NONE);
 1926             if (ret)
 1927                 goto out;
 1928 
 1929             /* If geo-rep is configured, for this volume, it should be
 1930              * stopped.
 1931              */
 1932             param.volinfo = volinfo;
 1933             ret = glusterd_check_geo_rep_running(&param, op_errstr);
 1934             if (ret || param.is_active) {
 1935                 ret = -1;
 1936                 goto out;
 1937             }
 1938 
 1939             break;
 1940 
 1941         case GF_OP_CMD_COMMIT_FORCE:
 1942         case GF_OP_CMD_DETACH_START:
 1943         case GF_OP_CMD_DETACH_COMMIT:
 1944         case GF_OP_CMD_DETACH_COMMIT_FORCE:
 1945         case GF_OP_CMD_STOP_DETACH_TIER:
 1946             break;
 1947     }
 1948     ret = 0;
 1949 
 1950 out:
 1951     gf_msg_debug(this->name, 0, "Returning %d", ret);
 1952     if (ret && errstr) {
 1953         if (op_errstr)
 1954             *op_errstr = errstr;
 1955     }
 1956     if (!op_errstr && errstr)
 1957         GF_FREE(errstr);
 1958     return ret;
 1959 }
 1960 
/* Callback invoked when the data migration (rebalance) started by a
 * remove-brick 'start' finishes with the given @status.
 *
 * The full per-status handling (reverting or finalizing the volume file,
 * stopping and deleting decommissioned bricks, restarting services) is
 * disabled below pending cluster-wide awareness in the defrag callback;
 * currently the only live effect is clearing decommission_in_progress
 * on the volume. Always returns 0.
 */
int
glusterd_remove_brick_migrate_cbk(glusterd_volinfo_t *volinfo,
                                  gf_defrag_status_t status)
{
    int ret = 0;

#if 0 /* TODO: enable this behavior once cluster-wide awareness comes for      \
         defrag cbk function */
        glusterd_brickinfo_t *brickinfo = NULL;
        glusterd_brickinfo_t *tmp = NULL;

        switch (status) {
        case GF_DEFRAG_STATUS_PAUSED:
        case GF_DEFRAG_STATUS_FAILED:
                /* No changes required in the volume file.
                   everything should remain as is */
                break;
        case GF_DEFRAG_STATUS_STOPPED:
                /* Fall back to the old volume file */
                cds_list_for_each_entry_safe (brickinfo, tmp, &volinfo->bricks,
                                              brick_list) {
                        if (!brickinfo->decommissioned)
                                continue;
                        brickinfo->decommissioned = 0;
                }
                break;

        case GF_DEFRAG_STATUS_COMPLETE:
                /* Done with the task, you can remove the brick from the
                   volume file */
                cds_list_for_each_entry_safe (brickinfo, tmp, &volinfo->bricks,
                                              brick_list) {
                        if (!brickinfo->decommissioned)
                                continue;
                        gf_log (THIS->name, GF_LOG_INFO, "removing the brick %s",
                                brickinfo->path);
                        brickinfo->decommissioned = 0;
                        if (GLUSTERD_STATUS_STARTED == volinfo->status) {
                            /*TODO: use the 'atomic' flavour of brick_stop*/
                                ret = glusterd_brick_stop (volinfo, brickinfo);
                                if (ret) {
                                        gf_log (THIS->name, GF_LOG_ERROR,
                                                "Unable to stop glusterfs (%d)", ret);
                                }
                        }
                        glusterd_delete_brick (volinfo, brickinfo);
                }
                break;

        default:
                GF_ASSERT (!"cbk function called with wrong status");
                break;
        }

        ret = glusterd_create_volfiles_and_notify_services (volinfo);
        if (ret)
                gf_log (THIS->name, GF_LOG_ERROR,
                        "Unable to write volume files (%d)", ret);

        ret = glusterd_store_volinfo (volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
        if (ret)
                gf_log (THIS->name, GF_LOG_ERROR,
                        "Unable to store volume info (%d)", ret);


        if (GLUSTERD_STATUS_STARTED == volinfo->status) {
                ret = glusterd_check_generate_start_nfs ();
                if (ret)
                        gf_log (THIS->name, GF_LOG_ERROR,
                                "Unable to start nfs process (%d)", ret);
        }

#endif

    /* Migration is over (whatever its outcome); allow new
     * remove-brick/commit operations on this volume. */
    volinfo->decommission_in_progress = 0;
    return ret;
}
 2038 
 2039 int
 2040 glusterd_op_add_brick(dict_t *dict, char **op_errstr)
 2041 {
 2042     int ret = 0;
 2043     char *volname = NULL;
 2044     glusterd_conf_t *priv = NULL;
 2045     glusterd_volinfo_t *volinfo = NULL;
 2046     xlator_t *this = NULL;
 2047     char *bricks = NULL;
 2048     int32_t count = 0;
 2049 
 2050     this = THIS;
 2051     GF_ASSERT(this);
 2052 
 2053     priv = this->private;
 2054     GF_ASSERT(priv);
 2055 
 2056     ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
 2057 
 2058     if (ret) {
 2059         gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
 2060                "Unable to get volume name");
 2061         goto out;
 2062     }
 2063 
 2064     ret = glusterd_volinfo_find(volname, &volinfo);
 2065 
 2066     if (ret) {
 2067         gf_msg("glusterd", GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND,
 2068                "Unable to allocate memory");
 2069         goto out;
 2070     }
 2071 
 2072     ret = dict_get_int32n(dict, "count", SLEN("count"), &count);
 2073     if (ret) {
 2074         gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
 2075                "Unable to get count");
 2076         goto out;
 2077     }
 2078 
 2079     ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &bricks);
 2080     if (ret) {
 2081         gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
 2082                "Unable to get bricks");
 2083         goto out;
 2084     }
 2085 
 2086     ret = glusterd_op_perform_add_bricks(volinfo, count, bricks, dict);
 2087     if (ret) {
 2088         gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL,
 2089                "Unable to add bricks");
 2090         goto out;
 2091     }
 2092     if (priv->op_version <= GD_OP_VERSION_3_7_5) {
 2093         ret = glusterd_store_volinfo(volinfo,
 2094                                      GLUSTERD_VOLINFO_VER_AC_INCREMENT);
 2095         if (ret)
 2096             goto out;
 2097     } else {
 2098         /*
 2099          * The cluster is operating at version greater than
 2100          * gluster-3.7.5. So no need to store volfiles
 2101          * in commit phase, the same will be done
 2102          * in post validate phase with v3 framework.
 2103          */
 2104     }
 2105 
 2106     if (GLUSTERD_STATUS_STARTED == volinfo->status)
 2107         ret = glusterd_svcs_manager(volinfo);
 2108 
 2109 out:
 2110     return ret;
 2111 }
 2112 
 2113 int
 2114 glusterd_op_remove_brick(dict_t *dict, char **op_errstr)
 2115 {
 2116     int ret = -1;
 2117     char *volname = NULL;
 2118     glusterd_volinfo_t *volinfo = NULL;
 2119     char *brick = NULL;
 2120     int32_t count = 0;
 2121     int32_t i = 1;
 2122     char key[64] = "";
 2123     int keylen;
 2124     int32_t flag = 0;
 2125     int need_rebalance = 0;
 2126     int force = 0;
 2127     gf1_op_commands cmd = 0;
 2128     int32_t replica_count = 0;
 2129     char *task_id_str = NULL;
 2130     xlator_t *this = NULL;
 2131     dict_t *bricks_dict = NULL;
 2132     char *brick_tmpstr = NULL;
 2133     int start_remove = 0;
 2134     uint32_t commit_hash = 0;
 2135     int defrag_cmd = 0;
 2136     glusterd_conf_t *conf = NULL;
 2137 
 2138     this = THIS;
 2139     GF_ASSERT(this);
 2140     conf = this->private;
 2141     GF_VALIDATE_OR_GOTO(this->name, conf, out);
 2142 
 2143     ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
 2144 
 2145     if (ret) {
 2146         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL,
 2147                "Unable to get volume name");
 2148         goto out;
 2149     }
 2150 
 2151     ret = glusterd_volinfo_find(volname, &volinfo);
 2152     if (ret) {
 2153         gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND,
 2154                "Unable to allocate memory");
 2155         goto out;
 2156     }
 2157 
 2158     ret = dict_get_int32n(dict, "command", SLEN("command"), &flag);
 2159     if (ret) {
 2160         gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
 2161                "Unable to get command");
 2162         goto out;
 2163     }
 2164     cmd = flag;
 2165 
 2166     if (GF_OP_CMD_START == cmd)
 2167         start_remove = 1;
 2168 
 2169     /* Set task-id, if available, in ctx dict for operations other than
 2170      * start
 2171      */
 2172 
 2173     if (is_origin_glusterd(dict) && (!start_remove)) {
 2174         if (!gf_uuid_is_null(volinfo->rebal.rebalance_id)) {
 2175             ret = glusterd_copy_uuid_to_dict(volinfo->rebal.rebalance_id, dict,
 2176                                              GF_REMOVE_BRICK_TID_KEY,
 2177                                              SLEN(GF_REMOVE_BRICK_TID_KEY));
 2178             if (ret) {
 2179                 gf_msg(this->name, GF_LOG_ERROR, 0,
 2180                        GD_MSG_REMOVE_BRICK_ID_SET_FAIL,
 2181                        "Failed to set remove-brick-id");
 2182                 goto out;
 2183             }
 2184         }
 2185     }
 2186 
 2187     /* Clear task-id, rebal.op and stored bricks on commmitting/stopping
 2188      * remove-brick */
 2189     if ((!start_remove) && (cmd != GF_OP_CMD_STATUS)) {
 2190         gf_uuid_clear(volinfo->rebal.rebalance_id);
 2191         volinfo->rebal.op = GD_OP_NONE;
 2192         dict_unref(volinfo->rebal.dict);
 2193         volinfo->rebal.dict = NULL;
 2194     }
 2195 
 2196     ret = -1;
 2197     switch (cmd) {
 2198         case GF_OP_CMD_NONE:
 2199             goto out;
 2200 
 2201         case GF_OP_CMD_STATUS:
 2202             ret = 0;
 2203             goto out;
 2204 
 2205         case GF_OP_CMD_STOP:
 2206         case GF_OP_CMD_START:
 2207             /* Reset defrag status to 'NOT STARTED' whenever a
 2208              * remove-brick/rebalance command is issued to remove
 2209              * stale information from previous run.
 2210              * Update defrag_cmd as well or it will only be done
 2211              * for nodes on which the brick to be removed exists.
 2212              */
 2213             /* coverity[MIXED_ENUMS] */
 2214             volinfo->rebal.defrag_cmd = cmd;
 2215             volinfo->rebal.defrag_status = GF_DEFRAG_STATUS_NOT_STARTED;
 2216             ret = dict_get_strn(dict, GF_REMOVE_BRICK_TID_KEY,
 2217                                 SLEN(GF_REMOVE_BRICK_TID_KEY), &task_id_str);
 2218             if (ret) {
 2219                 gf_msg_debug(this->name, errno, "Missing remove-brick-id");
 2220                 ret = 0;
 2221             } else {
 2222                 gf_uuid_parse(task_id_str, volinfo->rebal.rebalance_id);
 2223                 volinfo->rebal.op = GD_OP_REMOVE_BRICK;
 2224             }
 2225             force = 0;
 2226             break;
 2227 
 2228         case GF_OP_CMD_COMMIT:
 2229             force = 1;
 2230             break;
 2231 
 2232         case GF_OP_CMD_COMMIT_FORCE:
 2233 
 2234             if (volinfo->decommission_in_progress) {
 2235                 if (volinfo->rebal.defrag) {
 2236                     LOCK(&volinfo->rebal.defrag->lock);
 2237                     /* Fake 'rebalance-complete' so the graph change
 2238                        happens right away */
 2239                     volinfo->rebal.defrag_status = GF_DEFRAG_STATUS_COMPLETE;
 2240 
 2241                     UNLOCK(&volinfo->rebal.defrag->lock);
 2242                 }
 2243                 /* Graph change happens in rebalance _cbk function,
 2244                    no need to do anything here */
 2245                 /* TODO: '_cbk' function is not doing anything for now */
 2246             }
 2247 
 2248             ret = 0;
 2249             force = 1;
 2250             break;
 2251         case GF_OP_CMD_DETACH_START:
 2252         case GF_OP_CMD_DETACH_COMMIT_FORCE:
 2253         case GF_OP_CMD_DETACH_COMMIT:
 2254         case GF_OP_CMD_STOP_DETACH_TIER:
 2255             break;
 2256     }
 2257 
 2258     ret = dict_get_int32n(dict, "count", SLEN("count"), &count);
 2259     if (ret) {
 2260         gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
 2261                "Unable to get count");
 2262         goto out;
 2263     }
 2264     /* Save the list of bricks for later usage only on starting a
 2265      * remove-brick. Right now this is required for displaying the task
 2266      * parameters with task status in volume status.
 2267      */
 2268 
 2269     if (start_remove) {
 2270         bricks_dict = dict_new();
 2271         if (!bricks_dict) {
 2272             ret = -1;
 2273             goto out;
 2274         }
 2275         ret = dict_set_int32n(bricks_dict, "count", SLEN("count"), count);
 2276         if (ret) {
 2277             gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
 2278                    "Failed to save remove-brick count");
 2279             goto out;
 2280         }
 2281     }
 2282 
 2283     while (i <= count) {
 2284         keylen = snprintf(key, sizeof(key), "brick%d", i);
 2285         ret = dict_get_strn(dict, key, keylen, &brick);
 2286         if (ret) {
 2287             gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
 2288                    "Unable to get %s", key);
 2289             goto out;
 2290         }
 2291 
 2292         if (start_remove) {
 2293             brick_tmpstr = gf_strdup(brick);
 2294             if (!brick_tmpstr) {
 2295                 ret = -1;
 2296                 gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
 2297                        "Failed to duplicate brick name");
 2298                 goto out;
 2299             }
 2300             ret = dict_set_dynstrn(bricks_dict, key, keylen, brick_tmpstr);
 2301             if (ret) {
 2302                 gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
 2303                        "Failed to add brick to dict");
 2304                 goto out;
 2305             }
 2306             brick_tmpstr = NULL;
 2307         }
 2308 
 2309         ret = glusterd_op_perform_remove_brick(volinfo, brick, force,
 2310                                                &need_rebalance);
 2311         if (ret)
 2312             goto out;
 2313         i++;
 2314     }
 2315 
 2316     if (start_remove)
 2317         volinfo->rebal.dict = dict_ref(bricks_dict);
 2318 
 2319     ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"),
 2320                           &replica_count);
 2321     if (!ret) {
 2322         gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_FAILED,
 2323                "changing replica count %d to %d on volume %s",
 2324                volinfo->replica_count, replica_count, volinfo->volname);
 2325         volinfo->replica_count = replica_count;
 2326         /* A reduction in replica count implies an arbiter volume
 2327          * earlier is now no longer one. */
 2328         if (volinfo->arbiter_count)
 2329             volinfo->arbiter_count = 0;
 2330         volinfo->sub_count = replica_count;
 2331         volinfo->dist_leaf_count = glusterd_get_dist_leaf_count(volinfo);
 2332 
 2333         /*
 2334          * volinfo->type and sub_count have already been set for
 2335          * volumes undergoing a detach operation, they should not
 2336          * be modified here.
 2337          */
 2338         if (replica_count == 1) {
 2339             if (volinfo->type == GF_CLUSTER_TYPE_REPLICATE) {
 2340                 volinfo->type = GF_CLUSTER_TYPE_NONE;
 2341                 /* backward compatibility */
 2342                 volinfo->sub_count = 0;
 2343             }
 2344         }
 2345     }
 2346     volinfo->subvol_count = (volinfo->brick_count / volinfo->dist_leaf_count);
 2347 
 2348     if (!glusterd_is_volume_replicate(volinfo) &&
 2349         conf->op_version >= GD_OP_VERSION_3_12_2) {
 2350         ret = dict_set_nstrn(volinfo->dict, "performance.client-io-threads",
 2351                              SLEN("performance.client-io-threads"), "on",
 2352                              SLEN("on"));
 2353         if (ret) {
 2354             gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
 2355                    "Failed to set "
 2356                    "performance.client-io-threads to on");
 2357             goto out;
 2358         }
 2359     }
 2360 
 2361     ret = glusterd_create_volfiles_and_notify_services(volinfo);
 2362     if (ret) {
 2363         gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOLFILE_CREATE_FAIL,
 2364                "failed to create volfiles");
 2365         goto out;
 2366     }
 2367 
 2368     ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
 2369     if (ret) {
 2370         gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOLINFO_STORE_FAIL,
 2371                "failed to store volinfo");
 2372         goto out;
 2373     }
 2374 
 2375     if (start_remove && volinfo->status == GLUSTERD_STATUS_STARTED) {
 2376         ret = glusterd_svcs_reconfigure(volinfo);
 2377         if (ret) {
 2378             gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_NFS_RECONF_FAIL,
 2379                    "Unable to reconfigure NFS-Server");
 2380             goto out;
 2381         }
 2382     }
 2383 
 2384     /* Need to reset the defrag/rebalance status accordingly */
 2385     switch (volinfo->rebal.defrag_status) {
 2386         case GF_DEFRAG_STATUS_FAILED:
 2387         case GF_DEFRAG_STATUS_COMPLETE:
 2388             volinfo->rebal.defrag_status = 0;
 2389         /* FALLTHROUGH */
 2390         default:
 2391             break;
 2392     }
 2393     if (!force && need_rebalance) {
 2394         if (dict_get_uint32(dict, "commit-hash", &commit_hash) == 0) {
 2395             volinfo->rebal.commit_hash = commit_hash;
 2396         }
 2397         /* perform the rebalance operations */
 2398         defrag_cmd = GF_DEFRAG_CMD_START_FORCE;
 2399         /*
 2400          * We need to set this *before* we issue commands to the
 2401          * bricks, or else we might end up setting it after the bricks
 2402          * have responded.  If we fail to send the request(s) we'll
 2403          * clear it ourselves because nobody else will.
 2404          */
 2405         volinfo->decommission_in_progress = 1;
 2406         char err_str[4096] = "";
 2407         ret = glusterd_handle_defrag_start(
 2408             volinfo, err_str, sizeof(err_str), defrag_cmd,
 2409             glusterd_remove_brick_migrate_cbk, GD_OP_REMOVE_BRICK);
 2410 
 2411         if (ret) {
 2412             gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REBALANCE_START_FAIL,
 2413                    "failed to start the rebalance");
 2414             /* TBD: shouldn't we do more than print a message? */
 2415             volinfo->decommission_in_progress = 0;
 2416             if (op_errstr)
 2417                 *op_errstr = gf_strdup(err_str);
 2418         }
 2419     } else {
 2420         if (GLUSTERD_STATUS_STARTED == volinfo->status)
 2421             ret = glusterd_svcs_manager(volinfo);
 2422     }
 2423 out:
 2424     GF_FREE(brick_tmpstr);
 2425     if (bricks_dict)
 2426         dict_unref(bricks_dict);
 2427 
 2428     return ret;
 2429 }
 2430 
 2431 int
 2432 glusterd_op_stage_barrier(dict_t *dict, char **op_errstr)
 2433 {
 2434     int ret = -1;
 2435     xlator_t *this = NULL;
 2436     char *volname = NULL;
 2437     glusterd_volinfo_t *vol = NULL;
 2438     char *barrier_op = NULL;
 2439 
 2440     GF_ASSERT(dict);
 2441     this = THIS;
 2442     GF_ASSERT(this);
 2443 
 2444     ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
 2445     if (ret) {
 2446         gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
 2447                "Volname not present in "
 2448                "dict");
 2449         goto out;
 2450     }
 2451 
 2452     ret = glusterd_volinfo_find(volname, &vol);
 2453     if (ret) {
 2454         gf_asprintf(op_errstr, "Volume %s does not exist", volname);
 2455         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s",
 2456                *op_errstr);
 2457         goto out;
 2458     }
 2459 
 2460     if (!glusterd_is_volume_started(vol)) {
 2461         gf_asprintf(op_errstr, "Volume %s is not started", volname);
 2462         ret = -1;
 2463         goto out;
 2464     }
 2465 
 2466     ret = dict_get_strn(dict, "barrier", SLEN("barrier"), &barrier_op);
 2467     if (ret == -1) {
 2468         gf_asprintf(op_errstr,
 2469                     "Barrier op for volume %s not present "
 2470                     "in dict",
 2471                     volname);
 2472         gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s",
 2473                *op_errstr);
 2474         goto out;
 2475     }
 2476     ret = 0;
 2477 out:
 2478     gf_msg_debug(this->name, 0, "Returning %d", ret);
 2479     return ret;
 2480 }
 2481 
 2482 int
 2483 glusterd_op_barrier(dict_t *dict, char **op_errstr)
 2484 {
 2485     int ret = -1;
 2486     xlator_t *this = NULL;
 2487     char *volname = NULL;
 2488     glusterd_volinfo_t *vol = NULL;
 2489     char *barrier_op = NULL;
 2490 
 2491     GF_ASSERT(dict);
 2492     this = THIS;
 2493     GF_ASSERT(this);
 2494 
 2495     ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
 2496     if (ret) {
 2497         gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
 2498                "Volname not present in "
 2499                "dict");
 2500         goto out;
 2501     }
 2502 
 2503     ret = glusterd_volinfo_find(volname, &vol);
 2504     if (ret) {
 2505         gf_asprintf(op_errstr, "Volume %s does not exist", volname);
 2506         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s",
 2507                *op_errstr);
 2508         goto out;
 2509     }
 2510 
 2511     ret = dict_get_strn(dict, "barrier", SLEN("barrier"), &barrier_op);
 2512     if (ret) {
 2513         gf_asprintf(op_errstr,
 2514                     "Barrier op for volume %s not present "
 2515                     "in dict",
 2516                     volname);
 2517         gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s",
 2518                *op_errstr);
 2519         goto out;
 2520     }
 2521 
 2522     ret = dict_set_dynstr_with_alloc(vol->dict, "features.barrier", barrier_op);
 2523     if (ret) {
 2524         gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
 2525                "Failed to set barrier op in"
 2526                " volume option dict");
 2527         goto out;
 2528     }
 2529 
 2530     gd_update_volume_op_versions(vol);
 2531     ret = glusterd_create_volfiles(vol);
 2532     if (ret) {
 2533         gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
 2534                "Failed to create volfiles");
 2535         goto out;
 2536     }
 2537     ret = glusterd_store_volinfo(vol, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
 2538 
 2539 out:
 2540     gf_msg_debug(this->name, 0, "Returning %d", ret);
 2541     return ret;
 2542 }
 2543 
/* No-op handler for the tier "add brick" RPC: ignores the request and
 * unconditionally reports success.
 * NOTE(review): tier support appears to have been removed; presumably the
 * stub remains so the RPC handler table stays populated — confirm. */
int
glusterd_handle_add_tier_brick(rpcsvc_request_t *req)
{
    return 0;
}
 2549 
/* No-op handler for the tier "attach" RPC: ignores the request and
 * unconditionally reports success (tier support removed — see the other
 * tier stubs in this file). */
int
glusterd_handle_attach_tier(rpcsvc_request_t *req)
{
    return 0;
}
 2555 
/* No-op handler for the tier "detach" RPC: ignores the request and
 * unconditionally reports success (tier support removed — see the other
 * tier stubs in this file). */
int
glusterd_handle_detach_tier(rpcsvc_request_t *req)
{
    return 0;
}