"Fossies" - the Fresh Open Source Software Archive

Member "glusterfs-8.2/xlators/protocol/server/src/server-handshake.c" (16 Sep 2020, 25033 Bytes) of package /linux/misc/glusterfs-8.2.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and a code folding option. Alternatively, you can view or download the uninterpreted source code file here. For more information about "server-handshake.c" see the Fossies "Dox" file reference documentation.

    1 /*
    2   Copyright (c) 2010-2013 Red Hat, Inc. <http://www.redhat.com>
    3   This file is part of GlusterFS.
    4 
    5   This file is licensed to you under your choice of the GNU Lesser
    6   General Public License, version 3 or any later version (LGPLv3 or
    7   later), or the GNU General Public License, version 2 (GPLv2), in all
    8   cases as published by the Free Software Foundation.
    9 */
   10 
   11 #include "server.h"
   12 #include "server-helpers.h"
   13 #include "rpc-common-xdr.h"
   14 #include "glusterfs3-xdr.h"
   15 #include <glusterfs/compat-errno.h>
   16 #include "glusterfs3.h"
   17 #include "authenticate.h"
   18 #include "server-messages.h"
   19 #include <glusterfs/syscall.h>
   20 #include <glusterfs/events.h>
   21 #include <glusterfs/syncop.h>
   22 
/* Name -> xlator lookup pair: 'name' is the search key and 'reply'
   receives the matching xlator.
   NOTE(review): not referenced anywhere in this file — presumably used
   by a graph-walk callback elsewhere; confirm before removing. */
struct __get_xl_struct {
    const char *name;
    xlator_t *reply;
};
   27 int
   28 gf_compare_client_version(rpcsvc_request_t *req, int fop_prognum,
   29                           int mgmt_prognum)
   30 {
   31     int ret = -1;
   32     /* TODO: think.. */
   33     if (glusterfs3_3_fop_prog.prognum == fop_prognum)
   34         ret = 0;
   35 
   36     return ret;
   37 }
   38 
   39 int
   40 server_getspec(rpcsvc_request_t *req)
   41 {
   42     int32_t ret = 0;
   43     int32_t op_errno = ENOENT;
   44     gf_getspec_req args = {
   45         0,
   46     };
   47     gf_getspec_rsp rsp = {
   48         0,
   49     };
   50 
   51     ret = xdr_to_generic(req->msg[0], &args, (xdrproc_t)xdr_gf_getspec_req);
   52     if (ret < 0) {
   53         // failed to decode msg;
   54         req->rpc_err = GARBAGE_ARGS;
   55         op_errno = EINVAL;
   56         goto fail;
   57     }
   58 
   59     op_errno = ENOSYS;
   60 fail:
   61     rsp.spec = "<this method is not in use, use glusterd for getspec>";
   62     rsp.op_errno = gf_errno_to_error(op_errno);
   63     rsp.op_ret = -1;
   64 
   65     server_submit_reply(NULL, req, &rsp, NULL, 0, NULL,
   66                         (xdrproc_t)xdr_gf_getspec_rsp);
   67 
   68     return 0;
   69 }
   70 
/* Ship the serialized SETVOLUME response to the client and release the
   response object.  Ownership of @rsp and of rsp->dict.dict_val (both
   heap-allocated by the caller) transfers to this function. */
static void
server_first_lookup_done(rpcsvc_request_t *req, gf_setvolume_rsp *rsp)
{
    server_submit_reply(NULL, req, rsp, NULL, 0, NULL,
                        (xdrproc_t)xdr_gf_setvolume_rsp);

    GF_FREE(rsp->dict.dict_val);
    GF_FREE(rsp);
}
   80 
/* Resolve one component (@basename) of a subdir-mount path under
   @parinode: issue a lookup carrying a freshly generated "gfid-req"
   hint in @dict, link the resulting inode into the table, and take an
   extra ref so the pointer stays valid for the lifetime of the client.

   Returns the linked inode, or NULL when the gfid hint could not be
   stored in @dict.
   NOTE(review): a failed syncop_lookup() is only logged here —
   inode_link() is still attempted with the zero-filled iatt, so a
   non-NULL inode can be returned even on lookup failure; confirm
   callers rely on the link step to surface the error instead. */
static inode_t *
do_path_lookup(xlator_t *xl, dict_t *dict, inode_t *parinode, char *basename)
{
    int ret = 0;
    loc_t loc = {
        0,
    };
    uuid_t gfid = {
        0,
    };
    struct iatt iatt = {
        0,
    };
    inode_t *inode = NULL;

    /* Build the loc for <parinode>/<basename> with a brand-new inode. */
    loc.parent = parinode;
    loc_touchup(&loc, basename);
    loc.inode = inode_new(xl->itable);

    gf_uuid_generate(gfid);
    ret = dict_set_gfuuid(dict, "gfid-req", gfid, true);
    if (ret) {
        /* NOTE(review): loc.inode from inode_new() above is not
           released on this error path — looks like a ref leak;
           confirm against inode_new() ownership semantics. */
        gf_log(xl->name, GF_LOG_ERROR, "failed to set 'gfid-req' for subdir");
        goto out;
    }

    ret = syncop_lookup(xl, &loc, &iatt, NULL, dict, NULL);
    if (ret < 0) {
        gf_log(xl->name, GF_LOG_ERROR, "first lookup on subdir (%s) failed: %s",
               basename, strerror(errno));
    }

    /* Inode linking is required so that the
       resolution happens all fine for future fops */
    inode = inode_link(loc.inode, loc.parent, loc.name, &iatt);

    /* Extra ref so the pointer is valid till client is valid */
    /* FIXME: not a priority, but this can lead to some inode
       leaks if subdir is more than 1 level depth. Leak is only
       per subdir entry, and not dependent on number of
       connections, so it should be fine for now */
    inode_ref(inode);

out:
    return inode;
}
  127 
/* Validate a freshly accepted connection by doing the very first
   lookup on '/' of the client's bound xlator, and — for subdir
   mounts — resolving every component of client->subdir_mount, caching
   the final inode and gfid in the client for later use by
   server_resolve().

   @this    server xlator (used for logging and the reply dict)
   @client  the newly bound client; subdir_gfid/subdir_inode are
            populated here on success
   @reply   handshake reply dict; "ERROR" is set on failure

   Returns 0 on success; -1 when the subdir could not be resolved.  A
   failed lookup on '/' itself is deliberately ignored (see comment
   below). */
int
server_first_lookup(xlator_t *this, client_t *client, dict_t *reply)
{
    loc_t loc = {
        0,
    };
    struct iatt iatt = {
        0,
    };
    dict_t *dict = NULL;
    int ret = 0;
    xlator_t *xl = client->bound_xl;
    char *msg = NULL;
    inode_t *inode = NULL;
    char *bname = NULL;
    char *str = NULL;
    char *tmp = NULL;
    char *saveptr = NULL;

    /* Build a loc describing the root of the bound xlator's itable. */
    loc.path = "/";
    loc.name = "";
    loc.inode = xl->itable->root;
    loc.parent = NULL;
    gf_uuid_copy(loc.gfid, loc.inode->gfid);

    ret = syncop_lookup(xl, &loc, &iatt, NULL, NULL, NULL);
    if (ret < 0)
        gf_log(xl->name, GF_LOG_ERROR, "lookup on root failed: %s",
               strerror(errno));
    /* Ignore error from lookup, don't set
     * failure in rsp->op_ret. lookup on a snapview-server
     * can fail with ESTALE
     */
    /* TODO-SUBDIR-MOUNT: validate above comment with respect to subdir lookup
     */

    if (client->subdir_mount) {
        /* Walk the subdir path one '/'-separated component at a time,
           looking up and linking each directory inode in turn. */
        str = tmp = gf_strdup(client->subdir_mount);
        dict = dict_new();
        inode = xl->itable->root;
        bname = strtok_r(str, "/", &saveptr);
        while (bname != NULL) {
            inode = do_path_lookup(xl, dict, inode, bname);
            if (inode == NULL) {
                gf_log(this->name, GF_LOG_ERROR,
                       "first lookup on subdir (%s) failed: %s",
                       client->subdir_mount, strerror(errno));
                ret = -1;
                goto fail;
            }
            bname = strtok_r(NULL, "/", &saveptr);
        }

        /* Can be used in server_resolve() */
        gf_uuid_copy(client->subdir_gfid, inode->gfid);
        client->subdir_inode = inode;
    }

    ret = 0;
    goto out;

fail:
    /* we should say to client, it is not possible
       to connect */
    ret = gf_asprintf(&msg, "subdirectory for mount \"%s\" is not found",
                      client->subdir_mount);
    if (-1 == ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, PS_MSG_ASPRINTF_FAILED,
               "asprintf failed while setting error msg");
    }
    ret = dict_set_dynstr(reply, "ERROR", msg);
    if (ret < 0)
        gf_msg_debug(this->name, 0,
                     "failed to set error "
                     "msg");

    ret = -1;
out:
    if (dict)
        dict_unref(dict);

    /* NOTE(review): loc.inode is the itable root here; presumably
       inode_unref() treats the root specially (no-op) — confirm
       against the inode table implementation. */
    inode_unref(loc.inode);

    if (tmp)
        GF_FREE(tmp);

    return ret;
}
  216 
/* SETVOLUME handshake handler: binds a connecting client to the brick
 * (remote-subvolume) it asked for.
 *
 * Flow, as implemented below:
 *   1. decode the request and unserialize the client's params dict;
 *   2. resolve "remote-subvolume" to an xlator in this graph;
 *   3. verify graph readiness, child status, volume-id and RPC
 *      program versions;
 *   4. authenticate the client via gf_authenticate();
 *   5. on success, bind the client to the xlator, create its inode
 *      table if missing, and run the first lookup on '/' (and on the
 *      subdir mount, if any);
 *   6. serialize the 'reply' dict into the response and send it.
 *
 * Always returns 0; success/failure is reported to the client through
 * rsp->op_ret / rsp->op_errno and the "ERROR" key of the reply dict.
 */
int
server_setvolume(rpcsvc_request_t *req)
{
    gf_setvolume_req args = {
        {
            0,
        },
    };
    gf_setvolume_rsp *rsp = NULL;
    client_t *client = NULL;
    server_ctx_t *serv_ctx = NULL;
    server_conf_t *conf = NULL;
    peer_info_t *peerinfo = NULL;
    dict_t *reply = NULL;
    dict_t *config_params = NULL;
    dict_t *params = NULL;
    char *name = NULL;
    char *volume_id = NULL;
    char *client_uid = NULL;
    char *clnt_version = NULL;
    xlator_t *xl = NULL;
    char *msg = NULL;
    xlator_t *this = NULL;
    int32_t ret = -1;
    int32_t op_ret = -1;
    int32_t op_errno = EINVAL;
    uint32_t opversion = 0;
    rpc_transport_t *xprt = NULL;
    int32_t fop_version = 0;
    int32_t mgmt_version = 0;
    glusterfs_ctx_t *ctx = NULL;
    struct _child_status *tmp = NULL;
    char *subdir_mount = NULL;
    char *client_name = NULL;
    gf_boolean_t cleanup_starting = _gf_false;
    gf_boolean_t xlator_in_graph = _gf_true;

    params = dict_new();
    reply = dict_new();
    ret = xdr_to_generic(req->msg[0], &args, (xdrproc_t)xdr_gf_setvolume_req);
    if (ret < 0) {
        /* failed to decode msg */
        req->rpc_err = GARBAGE_ARGS;
        goto fail;
    }
    ctx = THIS->ctx;

    this = req->svc->xl;
    /* this is to ensure config_params is populated with the first brick
     * details at first place if brick multiplexing is enabled
     */
    config_params = dict_copy_with_ref(this->options, NULL);

    ret = dict_unserialize(args.dict.dict_val, args.dict.dict_len, &params);
    if (ret < 0) {
        ret = dict_set_sizen_str_sizen(reply, "ERROR",
                                       "Internal error: failed to unserialize "
                                       "request dictionary");
        if (ret < 0)
            gf_msg_debug(this->name, 0,
                         "failed to set error "
                         "msg \"%s\"",
                         "Internal error: failed "
                         "to unserialize request dictionary");

        op_ret = -1;
        op_errno = EINVAL;
        goto fail;
    }

    /* "remote-subvolume" names the brick xlator the client wants. */
    ret = dict_get_str(params, "remote-subvolume", &name);
    if (ret < 0) {
        ret = dict_set_str(reply, "ERROR",
                           "No remote-subvolume option specified");
        if (ret < 0)
            gf_msg_debug(this->name, 0,
                         "failed to set error "
                         "msg");

        op_ret = -1;
        op_errno = EINVAL;
        goto fail;
    }

    /* Resolve the name under the volfile lock; fall back to the server
       xlator itself when the brick is not (yet) attached in the graph,
       remembering that fact for the auth-failure message below. */
    LOCK(&ctx->volfile_lock);
    {
        xl = get_xlator_by_name(this, name);
        if (!xl) {
            xlator_in_graph = _gf_false;
            xl = this;
        }
    }
    UNLOCK(&ctx->volfile_lock);
    /* NOTE(review): this branch looks unreachable — the locked section
       above always leaves xl non-NULL (falls back to 'this'). */
    if (xl == NULL) {
        ret = gf_asprintf(&msg, "remote-subvolume \"%s\" is not found", name);
        if (-1 == ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0, PS_MSG_ASPRINTF_FAILED,
                   "asprintf failed while setting error msg");
            goto fail;
        }
        ret = dict_set_dynstr(reply, "ERROR", msg);
        if (ret < 0)
            gf_msg_debug(this->name, 0,
                         "failed to set error "
                         "msg");

        op_ret = -1;
        op_errno = ENOENT;
        goto fail;
    }

    /* Layer the brick's own options on top of the server defaults. */
    config_params = dict_copy_with_ref(xl->options, config_params);
    conf = this->private;

    if (conf->parent_up == _gf_false) {
        /* PARENT_UP indicates that all xlators in graph are inited
         * successfully
         */
        op_ret = -1;
        op_errno = EAGAIN;

        ret = dict_set_str(reply, "ERROR",
                           "xlator graph in server is not initialised "
                           "yet. Try again later");
        if (ret < 0)
            gf_msg_debug(this->name, 0,
                         "failed to set error: "
                         "xlator graph in server is not "
                         "initialised yet. Try again later");
        goto fail;
    }

    /* Find the child-status entry for this brick and refuse the mount
       until CHILD_UP has been seen. */
    pthread_mutex_lock(&conf->mutex);
    list_for_each_entry(tmp, &conf->child_status->status_list, status_list)
    {
        if (strcmp(tmp->name, name) == 0)
            break;
    }

    /* NOTE(review): if the loop completed without a match, 'tmp' is the
       container of the list head itself, not a real entry; the
       !tmp->name test presumably relies on that field being NULL in the
       head container — confirm. */
    if (!tmp->name) {
        gf_msg(this->name, GF_LOG_INFO, 0, PS_MSG_CHILD_STATUS_FAILED,
               "No xlator %s is found in child status list", name);
    } else {
        ret = dict_set_int32(reply, "child_up", tmp->child_up);
        if (ret < 0)
            gf_msg(this->name, GF_LOG_ERROR, 0, PS_MSG_DICT_GET_FAILED,
                   "Failed to set 'child_up' for xlator %s "
                   "in the reply dict",
                   tmp->name);
        if (!tmp->child_up) {
            ret = dict_set_str(reply, "ERROR",
                               "Not received child_up for this xlator");
            if (ret < 0)
                gf_msg_debug(this->name, 0, "failed to set error msg");

            gf_msg(this->name, GF_LOG_ERROR, 0, PS_MSG_CHILD_STATUS_FAILED,
                   "Not received child_up for this xlator %s", name);
            op_ret = -1;
            op_errno = EAGAIN;
            pthread_mutex_unlock(&conf->mutex);
            goto fail;
        }
    }
    pthread_mutex_unlock(&conf->mutex);

    /* The client's process-uuid identifies the connection owner. */
    ret = dict_get_str(params, "process-uuid", &client_uid);
    if (ret < 0) {
        ret = dict_set_str(reply, "ERROR", "UUID not specified");
        if (ret < 0)
            gf_msg_debug(this->name, 0,
                         "failed to set error "
                         "msg");

        op_ret = -1;
        op_errno = EINVAL;
        goto fail;
    }

    ret = dict_get_str(params, "subdir-mount", &subdir_mount);
    if (ret < 0) {
        /* Not a problem at all as the key is optional */
    }
    ret = dict_get_str(params, "process-name", &client_name);
    if (ret < 0) {
        client_name = "unknown";
    }

    /* If any value is set, the first element will be non-0.
       It would be '0', but not '\0' :-) */
    if (xl->graph->volume_id[0]) {
        ret = dict_get_str_sizen(params, "volume-id", &volume_id);
        if (!ret && strcmp(xl->graph->volume_id, volume_id)) {
            /* Guard against a brick directory being re-used by a
               different volume. */
            ret = dict_set_str(reply, "ERROR",
                               "Volume-ID different, possible case "
                               "of same brick re-used in another volume");
            if (ret < 0)
                gf_msg_debug(this->name, 0, "failed to set error msg");

            op_ret = -1;
            op_errno = EINVAL;
            goto fail;
        }
        /* NOTE(review): 'tmp' comes from the child-status scan above
           and is read here outside conf->mutex; confirm it cannot be
           the head container (no match) at this point. */
        ret = dict_set_str(reply, "volume-id", tmp->volume_id);
        if (ret)
            gf_msg_debug(this->name, 0, "failed to set 'volume-id'");
    }
    client = gf_client_get(this, &req->cred, client_uid, subdir_mount);
    if (client == NULL) {
        op_ret = -1;
        op_errno = ENOMEM;
        goto fail;
    }

    client->client_name = gf_strdup(client_name);

    gf_msg_debug(this->name, 0, "Connected to %s", client->client_uid);

    serv_ctx = server_ctx_get(client, client->this);
    if (serv_ctx == NULL) {
        gf_msg(this->name, GF_LOG_INFO, 0, PS_MSG_SERVER_CTX_GET_FAILED,
               "server_ctx_get() "
               "failed");
        goto fail;
    }

    /* Attach the client to the transport unless the brick is already
       being torn down. */
    pthread_mutex_lock(&conf->mutex);
    if (xl->cleanup_starting) {
        cleanup_starting = _gf_true;
    } else if (req->trans->xl_private != client) {
        req->trans->xl_private = client;
    }
    pthread_mutex_unlock(&conf->mutex);

    if (cleanup_starting) {
        op_ret = -1;
        op_errno = EAGAIN;

        ret = dict_set_str(reply, "ERROR",
                           "cleanup flag is set for xlator. "
                           " Try again later");
        if (ret < 0)
            gf_msg_debug(this->name, 0,
                         "failed to set error: "
                         "cleanup flag is set for xlator. "
                         "Try again later");
        goto fail;
    }

    auth_set_username_passwd(params, config_params, client);
    if (req->trans->ssl_name) {
        if (dict_set_str(params, "ssl-name", req->trans->ssl_name) != 0) {
            gf_msg(this->name, GF_LOG_WARNING, 0, PS_MSG_SSL_NAME_SET_FAILED,
                   "failed to set "
                   "ssl_name %s",
                   req->trans->ssl_name);
            /* Not fatal, auth will just fail. */
        }
    }

    /* Missing version keys are only reported into the reply here; the
       actual compatibility decision happens below. */
    ret = dict_get_int32(params, "fops-version", &fop_version);
    if (ret < 0) {
        ret = dict_set_str(reply, "ERROR", "No FOP version number specified");
        if (ret < 0)
            gf_msg_debug(this->name, 0,
                         "failed to set error "
                         "msg");
    }

    ret = dict_get_int32(params, "mgmt-version", &mgmt_version);
    if (ret < 0) {
        ret = dict_set_str(reply, "ERROR", "No MGMT version number specified");
        if (ret < 0)
            gf_msg_debug(this->name, 0,
                         "failed to set error "
                         "msg");
    }

    ret = gf_compare_client_version(req, fop_version, mgmt_version);
    if (ret != 0) {
        ret = gf_asprintf(&msg,
                          "version mismatch: client(%d)"
                          " - client-mgmt(%d)",
                          fop_version, mgmt_version);
        /* get_supported_version (req)); */
        if (-1 == ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0, PS_MSG_ASPRINTF_FAILED,
                   "asprintf failed while"
                   "setting up error msg");
            goto fail;
        }
        ret = dict_set_dynstr(reply, "ERROR", msg);
        if (ret < 0)
            gf_msg_debug(this->name, 0,
                         "failed to set error "
                         "msg");

        op_ret = -1;
        op_errno = EINVAL;
        goto fail;
    }

    /* NOTE(review): peerinfo is the address of a struct embedded in
       req->trans, so this NULL check is always true. */
    peerinfo = &req->trans->peerinfo;
    if (peerinfo) {
        ret = dict_set_static_ptr(params, "peer-info", peerinfo);
        if (ret < 0)
            gf_msg_debug(this->name, 0,
                         "failed to set "
                         "peer-info");
    }

    ret = dict_get_uint32(params, "opversion", &opversion);
    if (ret) {
        gf_msg(this->name, GF_LOG_INFO, 0, PS_MSG_CLIENT_OPVERSION_GET_FAILED,
               "Failed to get client opversion");
    }
    client->opversion = opversion;
    /* Assign op-version value to the client */
    /* Propagate the client's op-version to every transport from the
       same peer. */
    pthread_mutex_lock(&conf->mutex);
    list_for_each_entry(xprt, &conf->xprt_list, list)
    {
        if (strcmp(peerinfo->identifier, xprt->peerinfo.identifier))
            continue;
        xprt->peerinfo.max_op_version = opversion;
    }
    pthread_mutex_unlock(&conf->mutex);

    if (conf->auth_modules == NULL) {
        gf_msg(this->name, GF_LOG_ERROR, 0, PS_MSG_AUTH_INIT_FAILED,
               "Authentication module not initialized");
    }

    ret = dict_get_str(params, "client-version", &clnt_version);
    if (ret)
        gf_msg(this->name, GF_LOG_INFO, 0, PS_MSG_CLIENT_VERSION_NOT_SET,
               "client-version not set, may be of older version");

    ret = gf_authenticate(params, config_params, conf->auth_modules);

    if (ret == AUTH_ACCEPT) {
        /* Store options received from client side */
        req->trans->clnt_options = dict_ref(params);

        gf_msg(this->name, GF_LOG_INFO, 0, PS_MSG_CLIENT_ACCEPTED,
               "accepted client from %s (version: %s) with subvol %s",
               client->client_uid, (clnt_version) ? clnt_version : "old", name);

        gf_event(EVENT_CLIENT_CONNECT,
                 "client_uid=%s;"
                 "client_identifier=%s;server_identifier=%s;"
                 "brick_path=%s;subdir_mount=%s",
                 client->client_uid, req->trans->peerinfo.identifier,
                 req->trans->myinfo.identifier, name, subdir_mount);

        op_ret = 0;
        client->bound_xl = xl;

        /* Don't be confused by the below line (like how ERROR can
           be Success), key checked on client is 'ERROR' and hence
           we send 'Success' in this key */
        ret = dict_set_str(reply, "ERROR", "Success");
        if (ret < 0)
            gf_msg_debug(this->name, 0,
                         "failed to set error "
                         "msg");
    } else {
        op_ret = -1;
        if (!xlator_in_graph) {
            /* Auth was run against the server xlator's options because
               the named brick is not attached; report ENOENT rather
               than an auth failure. */
            gf_msg(this->name, GF_LOG_ERROR, ENOENT, PS_MSG_AUTHENTICATE_ERROR,
                   "Cannot authenticate client"
                   " from %s %s because brick is not attached in graph",
                   client->client_uid, (clnt_version) ? clnt_version : "old");

            op_errno = ENOENT;
            ret = dict_set_str(reply, "ERROR", "Brick not found");
        } else {
            gf_event(EVENT_CLIENT_AUTH_REJECT,
                     "client_uid=%s;"
                     "client_identifier=%s;server_identifier=%s;"
                     "brick_path=%s",
                     client->client_uid, req->trans->peerinfo.identifier,
                     req->trans->myinfo.identifier, name);
            gf_msg(this->name, GF_LOG_ERROR, EACCES, PS_MSG_AUTHENTICATE_ERROR,
                   "Cannot authenticate client"
                   " from %s %s",
                   client->client_uid, (clnt_version) ? clnt_version : "old");

            op_errno = EACCES;
            ret = dict_set_str(reply, "ERROR", "Authentication failed");
        }
        if (ret < 0)
            gf_msg_debug(this->name, 0,
                         "failed to set error "
                         "msg");
        goto fail;
    }

    if (client->bound_xl == NULL) {
        ret = dict_set_str(reply, "ERROR",
                           "Check volfile and handshake "
                           "options in protocol/client");
        if (ret < 0)
            gf_msg_debug(this->name, 0,
                         "failed to set error "
                         "msg");

        op_ret = -1;
        op_errno = EACCES;
        goto fail;
    }

    LOCK(&conf->itable_lock);
    {
        if (client->bound_xl->itable == NULL) {
            /* create inode table for this bound_xl, if one doesn't
               already exist */

            gf_msg_trace(this->name, 0,
                         "creating inode table with"
                         " lru_limit=%" PRId32 ", xlator=%s",
                         conf->inode_lru_limit, client->bound_xl->name);

            /* TODO: what is this ? */
            client->bound_xl->itable = inode_table_new(conf->inode_lru_limit,
                                                       client->bound_xl);
        }
    }
    UNLOCK(&conf->itable_lock);

    ret = dict_set_str(reply, "process-uuid", this->ctx->process_uuid);
    if (ret)
        gf_msg_debug(this->name, 0, "failed to set 'process-uuid'");

    /* Insert a dummy key value pair to avoid failure at client side for
     * clnt-lk-version with older clients.
     */
    ret = dict_set_uint32(reply, "clnt-lk-version", 0);
    if (ret) {
        gf_msg(this->name, GF_LOG_WARNING, 0, PS_MSG_CLIENT_LK_VERSION_ERROR,
               "failed to set "
               "'clnt-lk-version'");
    }

    ret = dict_set_uint64(reply, "transport-ptr", ((uint64_t)(long)req->trans));
    if (ret)
        gf_msg_debug(this->name, 0, "failed to set 'transport-ptr'");

fail:
    /* It is important to validate the lookup on '/' as part of handshake,
       because if lookup itself can't succeed, we should communicate this
       to client. Very important in case of subdirectory mounts, where if
       client is trying to mount a non-existing directory */
    /* (client is non-NULL whenever op_ret >= 0: op_ret only becomes 0
       after gf_client_get() succeeded above.) */
    if (op_ret >= 0 && client->bound_xl->itable) {
        if (client->bound_xl->cleanup_starting) {
            op_ret = -1;
            op_errno = EAGAIN;
            ret = dict_set_str(reply, "ERROR",
                               "cleanup flag is set for xlator "
                               "before call first_lookup Try again later");
            /* quiesce coverity about UNUSED_VALUE ret */
            (void)(ret);
        } else {
            op_ret = server_first_lookup(this, client, reply);
            if (op_ret == -1)
                op_errno = ENOENT;
        }
    }

    rsp = GF_CALLOC(1, sizeof(gf_setvolume_rsp), gf_server_mt_setvolume_rsp_t);
    GF_ASSERT(rsp);

    rsp->op_ret = 0;

    ret = dict_allocate_and_serialize(reply, (char **)&rsp->dict.dict_val,
                                      &rsp->dict.dict_len);
    if (ret != 0) {
        ret = -1;
        gf_msg_debug("server-handshake", 0, "failed to serialize reply dict");
        op_ret = -1;
        /* NOTE(review): ret was just forced to -1, so this sets
           op_errno = 1 (EPERM) regardless of the serialize error —
           confirm whether a specific errno was intended. */
        op_errno = -ret;
    }

    rsp->op_ret = op_ret;
    rsp->op_errno = gf_errno_to_error(op_errno);

    /* if bound_xl is NULL or something fails, then put the connection
     * back. Otherwise the connection would have been added to the
     * list of connections the server is maintaining and might segfault
     * during statedump when bound_xl of the connection is accessed.
     */
    if (op_ret && !xl && (client != NULL)) {
        /* We would have set the xl_private of the transport to the
         * @conn. But if we have put the connection i.e shutting down
         * the connection, then we should set xl_private to NULL as it
         * would be pointing to a freed memory and would segfault when
         * accessed upon getting DISCONNECT.
         */
        gf_client_put(client, NULL);
        req->trans->xl_private = NULL;
    }

    /* Send the response properly */
    server_first_lookup_done(req, rsp);

    free(args.dict.dict_val);

    dict_unref(params);
    dict_unref(reply);
    if (config_params) {
        /*
         * This might be null if we couldn't even find the translator
         * (brick) to copy it from.
         */
        dict_unref(config_params);
    }

    return 0;
}
  734 
  735 int
  736 server_ping(rpcsvc_request_t *req)
  737 {
  738     gf_common_rsp rsp = {
  739         0,
  740     };
  741 
  742     /* Accepted */
  743     rsp.op_ret = 0;
  744 
  745     server_submit_reply(NULL, req, &rsp, NULL, 0, NULL,
  746                         (xdrproc_t)xdr_gf_common_rsp);
  747 
  748     return 0;
  749 }
  750 
  751 int
  752 server_set_lk_version(rpcsvc_request_t *req)
  753 {
  754     int ret = -1;
  755     gf_set_lk_ver_req args = {
  756         0,
  757     };
  758     gf_set_lk_ver_rsp rsp = {
  759         0,
  760     };
  761 
  762     ret = xdr_to_generic(req->msg[0], &args, (xdrproc_t)xdr_gf_set_lk_ver_req);
  763     if (ret < 0) {
  764         /* failed to decode msg */
  765         req->rpc_err = GARBAGE_ARGS;
  766         goto fail;
  767     }
  768 
  769     rsp.lk_ver = args.lk_ver;
  770 fail:
  771     server_submit_reply(NULL, req, &rsp, NULL, 0, NULL,
  772                         (xdrproc_t)xdr_gf_set_lk_ver_rsp);
  773 
  774     free(args.uid);
  775 
  776     return 0;
  777 }
  778 
/* Dispatch table for the GlusterFS handshake program, indexed by
   procedure number (GF_HNDSK_*).  Each entry: name, handler, callback,
   procnum, DRC type, flags. */
static rpcsvc_actor_t gluster_handshake_actors[GF_HNDSK_MAXVALUE] = {
    [GF_HNDSK_NULL] = {"NULL", server_null, NULL, GF_HNDSK_NULL, DRC_NA, 0},
    [GF_HNDSK_SETVOLUME] = {"SETVOLUME", server_setvolume, NULL,
                            GF_HNDSK_SETVOLUME, DRC_NA, 0},
    [GF_HNDSK_GETSPEC] = {"GETSPEC", server_getspec, NULL, GF_HNDSK_GETSPEC,
                          DRC_NA, 0},
    [GF_HNDSK_PING] = {"PING", server_ping, NULL, GF_HNDSK_PING, DRC_NA, 0},
    [GF_HNDSK_SET_LK_VER] = {"SET_LK_VER", server_set_lk_version, NULL,
                             GF_HNDSK_SET_LK_VER, DRC_NA, 0},
};
  789 
/* Handshake RPC program descriptor registered with rpcsvc; binds the
   actor table above to the GLUSTER_HNDSK program/version numbers. */
struct rpcsvc_program gluster_handshake_prog = {
    .progname = "GlusterFS Handshake",
    .prognum = GLUSTER_HNDSK_PROGRAM,
    .progver = GLUSTER_HNDSK_VERSION,
    .actors = gluster_handshake_actors,
    .numactors = GF_HNDSK_MAXVALUE,
};