"Fossies" - the Fresh Open Source Software Archive

Member "glusterfs-8.2/xlators/features/snapview-server/src/snapview-server.c" (16 Sep 2020, 86006 Bytes) of package /linux/misc/glusterfs-8.2.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and code folding option. Alternatively you can here view or download the uninterpreted source code file. For more information about "snapview-server.c" see the Fossies "Dox" file reference documentation.

    1 /*
    2    Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
    3    This file is part of GlusterFS.
    4 
    5    This file is licensed to you under your choice of the GNU Lesser
    6    General Public License, version 3 or any later version (LGPLv3 or
    7    later), or the GNU General Public License, version 2 (GPLv2), in all
    8    cases as published by the Free Software Foundation.
    9 */
   10 #include "snapview-server.h"
   11 #include "snapview-server-mem-types.h"
   12 #include <glusterfs/compat-errno.h>
   13 
   14 #include <glusterfs/xlator.h>
   15 #include "rpc-clnt.h"
   16 #include "xdr-generic.h"
   17 #include "protocol-common.h"
   18 #include <glusterfs/syscall.h>
   19 #include <pthread.h>
   20 
   21 #include "glfs-internal.h"
   22 
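      /* Set the filesystem uid/gid and the auxiliary groups of the calling
       * client in the current thread's context, so that the gfapi calls made
       * while serving this fop run with the caller's credentials
       * (glfs_setfsuid/glfs_setfsgid/glfs_setfsgroups apply to the calling
       * thread).
       */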
   23 int
   24 gf_setcredentials(uid_t *uid, gid_t *gid, uint16_t ngrps, uint32_t *groups)
   25 {
   26     int ret = 0;
   27 
   28     if (uid) {
   29         ret = glfs_setfsuid(*uid);
   30         if (ret != 0) {
   31             gf_msg("snapview-server", GF_LOG_ERROR, 0, SVS_MSG_SETFSUID_FAIL,
   32                    "failed to set uid "
   33                    "%u in thread context",
   34                    *uid);
   35             return ret;
   36         }
   37     }
   38     if (gid) {
   39         ret = glfs_setfsgid(*gid);
   40         if (ret != 0) {
   41             gf_msg("snapview-server", GF_LOG_ERROR, 0, SVS_MSG_SETFSGID_FAIL,
   42                    "failed to set gid "
   43                    "%u in thread context",
   44                    *gid);
   45             return ret;
   46         }
   47     }
   48 
   49     if (ngrps != 0 && groups) {
   50         ret = glfs_setfsgroups(ngrps, groups);
   51         if (ret != 0) {
   52             gf_msg("snapview-server", GF_LOG_ERROR, 0, SVS_MSG_SETFSGRPS_FAIL,
   53                    "failed to set "
   54                    "groups in thread context");
   55             return ret;
   56         }
   57     }
   58     return 0;
   59 }
   60 
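      /* Lookup on the virtual entry-point directory (e.g. ".snaps"). On a
       * fresh lookup a random gfid is generated for it, and only the inode
       * type, the parent directory's gfid and the fabricated iatt are stored
       * in the inode context; the glfs instance and the glfs object stay
       * NULL, since the entry point does not exist in any snapshot volume.
       */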
   61 int32_t
   62 svs_lookup_entry_point(xlator_t *this, loc_t *loc, inode_t *parent,
   63                        struct iatt *buf, struct iatt *postparent,
   64                        int32_t *op_errno)
   65 {
   66     uuid_t gfid;
   67     svs_inode_t *inode_ctx = NULL;
   68     int op_ret = -1;
   69 
   70     GF_VALIDATE_OR_GOTO("snapview-server", this, out);
   71     GF_VALIDATE_OR_GOTO(this->name, loc, out);
   72     GF_VALIDATE_OR_GOTO(this->name, loc->inode, out);
   73     GF_VALIDATE_OR_GOTO(this->name, buf, out);
   74     GF_VALIDATE_OR_GOTO(this->name, postparent, out);
   75 
   76     if (gf_uuid_is_null(loc->inode->gfid)) {
   77         gf_uuid_generate(gfid);
   78         svs_iatt_fill(gfid, buf);
   79 
   80         /* Here the inode context of the entry point directory
   81            is filled with just the type of the inode and the gfid
   82            of the parent from where the entry point was entered.
   83            The glfs object and the fs instance will be NULL.
   84         */
   85         if (parent)
   86             svs_iatt_fill(parent->gfid, postparent);
   87         else {
   88             svs_iatt_fill(buf->ia_gfid, postparent);
   89         }
   90 
   91         inode_ctx = svs_inode_ctx_get_or_new(this, loc->inode);
   92         if (!inode_ctx) {
   93             op_ret = -1;
   94             *op_errno = ENOMEM;
   95             gf_msg(this->name, GF_LOG_ERROR, *op_errno,
   96                    SVS_MSG_NEW_INODE_CTX_FAILED,
   97                    "failed to "
   98                    "allocate inode context for entry point "
   99                    "directory");
  100             goto out;
  101         }
  102 
  103         gf_uuid_copy(inode_ctx->pargfid, loc->pargfid);
  104         memcpy(&inode_ctx->buf, buf, sizeof(*buf));
  105         inode_ctx->type = SNAP_VIEW_ENTRY_POINT_INODE;
  106     } else {
  107         inode_ctx = svs_inode_ctx_get(this, loc->inode);
  108         if (inode_ctx) {
  109             memcpy(buf, &inode_ctx->buf, sizeof(*buf));
  110             svs_iatt_fill(inode_ctx->pargfid, postparent);
  111         } else {
  112             svs_iatt_fill(loc->inode->gfid, buf);
  113             if (parent)
  114                 svs_iatt_fill(parent->gfid, postparent);
  115             else {
  116                 svs_iatt_fill(loc->inode->gfid, postparent);
  117             }
  118         }
  119     }
  120 
  121     op_ret = 0;
  122 
  123 out:
  124     return op_ret;
  125 }
  126 
  127 /* When a lookup comes from the client and protocol/server tries to resolve
  128    the pargfid by sending just the gfid as part of the lookup, the inode
  129    for the parent gfid may not be found. Since that gfid has not yet been
  130    looked up, the inode will not have an inode context, and the parent is not
  131    there (as it is the parent of the entry that is being resolved). So
  132    without parent and inode context, svs cannot know which snapshot
  133    to look into. In such cases, the ambiguity is handled by looking
  134    into the latest snapshot. If the directory is there in the latest
  135    snapshot, lookup is successful, otherwise it is a failure. So for
  136    any directory created after taking the latest snapshot, entry into
  137    snapshot world is denied, i.e. you have to be part of the snapshot world
  138    to enter it. If the gfid is not found there, then unwind with
  139    ESTALE.
  140    This gets executed mainly in the situation where the snapshot entry
  141    point is entered from a non-root directory and that non-root directory's
  142    inode (or gfid) is not yet looked up. And in each case when a gfid has to
  143    be looked up (without any inode context and parent context present), the
  144    latest snapshot is consulted rather than generating a random gfid.
  145 */
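      /* (Illustrative scenario, assuming a mount point /mnt: a client resolves
       * the gfid of /mnt/dir purely by gfid, before /mnt/dir has ever been
       * looked up through this xlator. With no inode context and no parent to
       * consult, the gfid is looked up in the latest snapshot; if it exists
       * there the lookup succeeds, otherwise ESTALE is unwound.)
       */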
  146 int32_t
  147 svs_lookup_gfid(xlator_t *this, loc_t *loc, struct iatt *buf,
  148                 struct iatt *postparent, int32_t *op_errno)
  149 {
  150     int32_t op_ret = -1;
  151     unsigned char handle_obj[GFAPI_HANDLE_LENGTH] = {
  152         0,
  153     };
  154     glfs_t *fs = NULL;
  155     glfs_object_t *object = NULL;
  156     struct stat statbuf = {
  157         0,
  158     };
  159     svs_inode_t *inode_ctx = NULL;
  160 
  161     GF_VALIDATE_OR_GOTO("snapview-server", this, out);
  162     GF_VALIDATE_OR_GOTO(this->name, loc, out);
  163     GF_VALIDATE_OR_GOTO(this->name, loc->inode, out);
  164     GF_VALIDATE_OR_GOTO(this->name, buf, out);
  165     GF_VALIDATE_OR_GOTO(this->name, postparent, out);
  166 
  167     if (gf_uuid_is_null(loc->gfid) && gf_uuid_is_null(loc->inode->gfid)) {
  168         gf_msg(this->name, GF_LOG_ERROR, 0, SVS_MSG_NULL_GFID, "gfid is NULL");
  169         goto out;
  170     }
  171 
  172     if (!gf_uuid_is_null(loc->inode->gfid))
  173         memcpy(handle_obj, loc->inode->gfid, GFAPI_HANDLE_LENGTH);
  174     else
  175         memcpy(handle_obj, loc->gfid, GFAPI_HANDLE_LENGTH);
  176 
  177     fs = svs_get_latest_snapshot(this);
  178     if (!fs) {
  179         op_ret = -1;
  180         *op_errno = EINVAL;
  181         gf_msg(this->name, GF_LOG_ERROR, *op_errno,
  182                SVS_MSG_GET_LATEST_SNAP_FAILED,
  183                "failed to get the latest "
  184                "snapshot");
  185         goto out;
  186     }
  187 
  188     object = glfs_h_create_from_handle(fs, handle_obj, GFAPI_HANDLE_LENGTH,
  189                                        &statbuf);
  190     if (!object) {
  191         op_ret = -1;
  192         *op_errno = ESTALE;
  193         gf_msg(this->name, GF_LOG_ERROR, *op_errno,
  194                SVS_MSG_GET_GLFS_H_OBJECT_FAILED,
  195                "failed to do lookup and get "
  196                "the handle on the snapshot %s (path: %s, gfid: %s)",
  197                loc->name, loc->path, uuid_utoa(loc->gfid));
  198         goto out;
  199     }
  200 
  201     inode_ctx = svs_inode_ctx_get_or_new(this, loc->inode);
  202     if (!inode_ctx) {
  203         op_ret = -1;
  204         *op_errno = ENOMEM;
  205         gf_msg(this->name, GF_LOG_ERROR, *op_errno,
  206                SVS_MSG_NEW_INODE_CTX_FAILED,
  207                "failed to allocate inode "
  208                "context");
  209         goto out;
  210     }
  211 
  212     iatt_from_stat(buf, &statbuf);
  213     if (!gf_uuid_is_null(loc->gfid))
  214         gf_uuid_copy(buf->ia_gfid, loc->gfid);
  215     else
  216         gf_uuid_copy(buf->ia_gfid, loc->inode->gfid);
  217 
  218     inode_ctx->type = SNAP_VIEW_VIRTUAL_INODE;
  219     inode_ctx->fs = fs;
  220     inode_ctx->object = object;
  221     memcpy(&inode_ctx->buf, buf, sizeof(*buf));
  222     svs_iatt_fill(buf->ia_gfid, postparent);
  223 
  224     op_ret = 0;
  225 
  226 out:
  227     return op_ret;
  228 }
  229 
  230 /* If the parent is an entry point inode, then create the handle for the
  231    snapshot on which the lookup came, i.e. in reality the lookup came on
  232    the directory from which the entry point directory was entered, but
  233    lookup is into the past. So create the handle for it by doing
  234    the name-less lookup on the gfid (which can be obtained from
  235    parent's context).
  236 */
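      /* (For example, assuming /dir/.snaps/snap1 is being accessed: the lookup
       * of "snap1" arrives with the entry-point directory as its parent, so
       * the glfs instance for snapshot "snap1" is initialised and a handle is
       * created for /dir's real gfid, which was recorded in the entry-point
       * inode's context (pargfid) when ".snaps" was first looked up.)
       */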
  237 int32_t
  238 svs_lookup_snapshot(xlator_t *this, loc_t *loc, struct iatt *buf,
  239                     struct iatt *postparent, inode_t *parent,
  240                     svs_inode_t *parent_ctx, int32_t *op_errno)
  241 {
  242     int32_t op_ret = -1;
  243     unsigned char handle_obj[GFAPI_HANDLE_LENGTH] = {
  244         0,
  245     };
  246     glfs_t *fs = NULL;
  247     glfs_object_t *object = NULL;
  248     struct stat statbuf = {
  249         0,
  250     };
  251     svs_inode_t *inode_ctx = NULL;
  252     uuid_t gfid;
  253 
  254     GF_VALIDATE_OR_GOTO("snapview-server", this, out);
  255     GF_VALIDATE_OR_GOTO(this->name, loc, out);
  256     GF_VALIDATE_OR_GOTO(this->name, loc->inode, out);
  257     GF_VALIDATE_OR_GOTO(this->name, buf, out);
  258     GF_VALIDATE_OR_GOTO(this->name, postparent, out);
  259     GF_VALIDATE_OR_GOTO(this->name, parent_ctx, out);
  260     GF_VALIDATE_OR_GOTO(this->name, parent, out);
  261 
  262     fs = svs_initialise_snapshot_volume(this, loc->name, op_errno);
  263     if (!fs) {
  264         gf_msg_debug(this->name, 0,
  265                      "failed to create "
  266                      "the fs instance for snap %s",
  267                      loc->name);
  268         *op_errno = ENOENT;
  269         op_ret = -1;
  270         goto out;
  271     }
  272 
  273     memcpy(handle_obj, parent_ctx->pargfid, GFAPI_HANDLE_LENGTH);
  274     object = glfs_h_create_from_handle(fs, handle_obj, GFAPI_HANDLE_LENGTH,
  275                                        &statbuf);
  276     if (!object) {
  277         op_ret = -1;
  278         *op_errno = errno;
  279         /* Should this be in warning or error mode? */
  280         gf_msg_debug(this->name, 0,
  281                      "failed to do lookup and "
  282                      "get the handle on the snapshot %s",
  283                      loc->name);
  284         goto out;
  285     }
  286 
  287     inode_ctx = svs_inode_ctx_get_or_new(this, loc->inode);
  288     if (!inode_ctx) {
  289         op_ret = -1;
  290         *op_errno = ENOMEM;
  291         gf_msg(this->name, GF_LOG_ERROR, *op_errno,
  292                SVS_MSG_NEW_INODE_CTX_FAILED,
  293                "failed to allocate "
  294                "inode context");
  295         goto out;
  296     }
  297 
  298     if (gf_uuid_is_null(loc->gfid) && gf_uuid_is_null(loc->inode->gfid))
  299         gf_uuid_generate(gfid);
  300     else {
  301         if (!gf_uuid_is_null(loc->inode->gfid))
  302             gf_uuid_copy(gfid, loc->inode->gfid);
  303         else
  304             gf_uuid_copy(gfid, loc->gfid);
  305     }
  306     iatt_from_stat(buf, &statbuf);
  307     gf_uuid_copy(buf->ia_gfid, gfid);
  308     svs_fill_ino_from_gfid(buf);
  309     inode_ctx->type = SNAP_VIEW_SNAPSHOT_INODE;
  310     inode_ctx->fs = fs;
  311     inode_ctx->object = object;
  312     memcpy(&inode_ctx->buf, buf, sizeof(*buf));
  313     svs_iatt_fill(parent->gfid, postparent);
  314 
  315     SVS_STRDUP(inode_ctx->snapname, loc->name);
  316     if (!inode_ctx->snapname) {
  317         op_ret = -1;
  318         *op_errno = ENOMEM;
  319         goto out;
  320     }
  321     op_ret = 0;
  322 
  323 out:
  324     if (op_ret) {
  325         if (object)
  326             glfs_h_close(object);
  327 
  328         if (inode_ctx)
  329             inode_ctx->object = NULL;
  330     }
  331 
  332     return op_ret;
  333 }
  334 
  335 /* Both parent and entry are from snapshot world */
  336 int32_t
  337 svs_lookup_entry(xlator_t *this, loc_t *loc, struct iatt *buf,
  338                  struct iatt *postparent, inode_t *parent,
  339                  svs_inode_t *parent_ctx, int32_t *op_errno)
  340 {
  341     int32_t op_ret = -1;
  342     glfs_t *fs = NULL;
  343     glfs_object_t *object = NULL;
  344     struct stat statbuf = {
  345         0,
  346     };
  347     svs_inode_t *inode_ctx = NULL;
  348     glfs_object_t *parent_object = NULL;
  349     uuid_t gfid = {
  350         0,
  351     };
  352 
  353     GF_VALIDATE_OR_GOTO("snapview-server", this, out);
  354     GF_VALIDATE_OR_GOTO(this->name, loc, out);
  355     GF_VALIDATE_OR_GOTO(this->name, loc->inode, out);
  356     GF_VALIDATE_OR_GOTO(this->name, buf, out);
  357     GF_VALIDATE_OR_GOTO(this->name, postparent, out);
  358     GF_VALIDATE_OR_GOTO(this->name, parent_ctx, out);
  359     GF_VALIDATE_OR_GOTO(this->name, parent, out);
  360 
  361     parent_object = parent_ctx->object;
  362     fs = parent_ctx->fs;
  363 
  364     object = glfs_h_lookupat(fs, parent_object, loc->name, &statbuf, 0);
  365     if (!object) {
  366         /* should this be in WARNING or ERROR mode? */
  367         gf_msg_debug(this->name, 0,
  368                      "failed to do lookup and "
  369                      "get the handle for entry %s (path: %s)",
  370                      loc->name, loc->path);
  371         op_ret = -1;
  372         *op_errno = errno;
  373         goto out;
  374     }
  375 
  376     if (gf_uuid_is_null(object->gfid)) {
  377         /* should this be in WARNING or ERROR mode? */
  378         gf_msg_debug(this->name, 0,
  379                      "gfid from glfs handle is "
  380                      "NULL for entry %s (path: %s)",
  381                      loc->name, loc->path);
  382         op_ret = -1;
  383         *op_errno = errno;
  384         goto out;
  385     }
  386 
  387     inode_ctx = svs_inode_ctx_get_or_new(this, loc->inode);
  388     if (!inode_ctx) {
  389         op_ret = -1;
  390         *op_errno = ENOMEM;
  391         gf_msg(this->name, GF_LOG_ERROR, *op_errno,
  392                SVS_MSG_NEW_INODE_CTX_FAILED,
  393                "failed to allocate "
  394                "inode context");
  395         goto out;
  396     }
  397 
  398     if (gf_uuid_is_null(loc->gfid) && gf_uuid_is_null(loc->inode->gfid)) {
  399         if (svs_uuid_generate(this, gfid, parent_ctx->snapname, object->gfid)) {
  400             /*
  401              * should op_errno be something else such as
  402              * EINVAL or ESTALE?
  403              */
  404             op_ret = -1;
  405             *op_errno = EIO;
  406             goto out;
  407         }
  408     } else {
  409         if (!gf_uuid_is_null(loc->inode->gfid))
  410             gf_uuid_copy(gfid, loc->inode->gfid);
  411         else
  412             gf_uuid_copy(gfid, loc->gfid);
  413     }
  414 
  415     iatt_from_stat(buf, &statbuf);
  416     gf_uuid_copy(buf->ia_gfid, gfid);
  417     svs_fill_ino_from_gfid(buf);
  418     inode_ctx->type = SNAP_VIEW_VIRTUAL_INODE;
  419     inode_ctx->fs = fs;
  420     inode_ctx->object = object;
  421     memcpy(&inode_ctx->buf, buf, sizeof(*buf));
  422     svs_iatt_fill(parent->gfid, postparent);
  423 
  424     if (IA_ISDIR(buf->ia_type)) {
  425         SVS_STRDUP(inode_ctx->snapname, parent_ctx->snapname);
  426         if (!inode_ctx->snapname) {
  427             op_ret = -1;
  428             *op_errno = ENOMEM;
  429             goto out;
  430         }
  431     }
  432 
  433     op_ret = 0;
  434 
  435 out:
  436     if (op_ret) {
  437         if (object)
  438             glfs_h_close(object);
  439 
  440         if (inode_ctx)
  441             inode_ctx->object = NULL;
  442     }
  443 
  444     return op_ret;
  445 }
  446 
  447 /* If the inode context is present, it means the lookup has come on an object
  448    that was built either as part of lookup or as part of readdirp. But in
  449    readdirp we would not have obtained the handle to access the object in the
  450    gfapi world.
  451    So if inode context contains glfs_t instance for the right
  452    gfapi world and glfs_object_t handle for accessing it in the gfapi
  453    world, then unwind with success as the snapshots as of now are
  454    read-only.
  455    If the above condition is not met, then send lookup call again to
  456    the gfapi world. It can happen only if both parent context and
  457    the name of the entry are present.
  458 
  459    If parent is an entry point to snapshot world:
  460    * parent is needed for getting the gfid on which lookup has to be done
  461      (the gfid present in the inode is a virtual gfid) in the snapshot
  462      world.
  463    * name is required to get the right glfs_t instance on which lookup
  464      has to be done
  465 
  466    If parent is a directory from snapshot world:
  467    * parent context is needed to get the glfs_t instance and to get the
  468      handle to parent directory in the snapshot world.
  469    * name is needed to do the lookup on the right entry in the snapshot
  470      world
  471 */
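      /* (Roughly: a still-valid fs + object pair in the inode context means the
       * cached iatt can be returned as-is; a stale handle (snapshot deleted and
       * recreated) is dropped and re-acquired, after which the entry is looked
       * up afresh through svs_lookup_snapshot()/svs_lookup_entry(), both of
       * which need loc->name and the parent context.)
       */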
  472 int32_t
  473 svs_revalidate(xlator_t *this, loc_t *loc, inode_t *parent,
  474                svs_inode_t *inode_ctx, svs_inode_t *parent_ctx,
  475                struct iatt *buf, struct iatt *postparent, int32_t *op_errno)
  476 {
  477     int32_t op_ret = -1;
  478     int ret = -1;
  479     char tmp_uuid[64] = {
  480         0,
  481     };
  482     glfs_t *fs = NULL;
  483 
  484     GF_VALIDATE_OR_GOTO("snapview-server", this, out);
  485     GF_VALIDATE_OR_GOTO(this->name, buf, out);
  486     GF_VALIDATE_OR_GOTO(this->name, postparent, out);
  487     GF_VALIDATE_OR_GOTO(this->name, inode_ctx, out);
  488 
  489     if (inode_ctx->type == SNAP_VIEW_ENTRY_POINT_INODE) {
  490         svs_iatt_fill(loc->inode->gfid, buf);
  491         if (parent)
  492             svs_iatt_fill(parent->gfid, postparent);
  493         else
  494             svs_iatt_fill(loc->inode->gfid, postparent);
  495         op_ret = 0;
  496         goto out;
  497     } else {
  498         /* Though fs and object are present in the inode context, it is
  499          * better to check whether fs is valid before doing anything.
  500          * This protects against the following sequence of operations:
  501          * 1) Create a file on the glusterfs mount point
  502          * 2) Create a snapshot (say "snap1")
  503          * 3) Access the contents of the snapshot
  504          * 4) Delete the file from the mount point
  505          * 5) Delete the snapshot "snap1"
  506          * 6) Create a new snapshot "snap1"
  507          *
  508          * Now accessing the new snapshot "snap1" causes problems,
  509          * because the inode and dentry created for snap1 would not be
  510          * deleted upon the deletion of the snapshot (deletion of a
  511          * snapshot is a gluster cli operation, not a fop). So the next
  512          * time a new snap is created with the same name, the previous
  513          * inode and dentry themselves will be reused. But the inode context
  514          * contains old information about the glfs_t instance and the
  515          * handle in the gfapi world. Thus the glfs_t instance should
  516          * be checked before accessing it. If it is wrong, the right
  517          * instance should be obtained by doing the lookup.
  518          */
  519         if (inode_ctx->fs && inode_ctx->object) {
  520             fs = inode_ctx->fs;
  521             SVS_CHECK_VALID_SNAPSHOT_HANDLE(fs, this);
  522             if (fs) {
  523                 memcpy(buf, &inode_ctx->buf, sizeof(*buf));
  524                 if (parent)
  525                     svs_iatt_fill(parent->gfid, postparent);
  526                 else
  527                     svs_iatt_fill(buf->ia_gfid, postparent);
  528                 op_ret = 0;
  529                 goto out;
  530             } else {
  531                 inode_ctx->fs = NULL;
  532                 inode_ctx->object = NULL;
  533                 ret = svs_get_handle(this, loc, inode_ctx, op_errno);
  534                 if (ret) {
  535                     gf_msg(this->name, GF_LOG_ERROR, *op_errno,
  536                            SVS_MSG_GET_GLFS_H_OBJECT_FAILED,
  537                            "failed to get the handle for "
  538                            "%s (gfid %s)",
  539                            loc->path, uuid_utoa_r(loc->inode->gfid, tmp_uuid));
  540                     op_ret = -1;
  541                     goto out;
  542                 }
  543             }
  544         }
  545 
  546         /* To send the lookup to gfapi world, both the name of the
  547            entry as well as the parent context is needed.
  548         */
  549         if (!loc->name || !parent_ctx) {
  550             *op_errno = ESTALE;
  551             gf_msg(this->name, GF_LOG_ERROR, *op_errno,
  552                    SVS_MSG_PARENT_CTX_OR_NAME_NULL, "%s is NULL",
  553                    loc->name ? "parent context" : "loc->name");
  554             goto out;
  555         }
  556 
  557         if (parent_ctx->type == SNAP_VIEW_ENTRY_POINT_INODE)
  558             op_ret = svs_lookup_snapshot(this, loc, buf, postparent, parent,
  559                                          parent_ctx, op_errno);
  560         else
  561             op_ret = svs_lookup_entry(this, loc, buf, postparent, parent,
  562                                       parent_ctx, op_errno);
  563 
  564         goto out;
  565     }
  566 
  567 out:
  568     return op_ret;
  569 }
  570 
  571 int32_t
  572 svs_lookup(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
  573 {
  574     struct iatt buf = {
  575         0,
  576     };
  577     int32_t op_ret = -1;
  578     int32_t op_errno = EINVAL;
  579     struct iatt postparent = {
  580         0,
  581     };
  582     svs_inode_t *inode_ctx = NULL;
  583     svs_inode_t *parent_ctx = NULL;
  584     int32_t ret = -1;
  585     inode_t *parent = NULL;
  586     gf_boolean_t entry_point_key = _gf_false;
  587     gf_boolean_t entry_point = _gf_false;
  588     call_stack_t *root = NULL;
  589 
  590     GF_VALIDATE_OR_GOTO("svs", this, out);
  591     GF_VALIDATE_OR_GOTO(this->name, this->private, out);
  592     GF_VALIDATE_OR_GOTO(this->name, frame, out);
  593     GF_VALIDATE_OR_GOTO(this->name, loc, out);
  594     GF_VALIDATE_OR_GOTO(this->name, loc->inode, out);
  595 
  596     root = frame->root;
  597     op_ret = gf_setcredentials(&root->uid, &root->gid, root->ngrps,
  598                                root->groups);
  599     if (op_ret != 0) {
  600         goto out;
  601     }
  602 
  603     /* For lookups sent on inodes (i.e. not parent inode + basename, but the
  604        inode itself, which usually is a nameless lookup or a revalidate
  605        on the inode), loc->name will not be there. Get it from the path if
  606        it is there.
  607        This is the difference between nameless lookup and revalidate lookup
  608        on an inode:
  609        nameless lookup: loc->path contains gfid and strrchr on it fails
  610        revalidate lookup: loc->path contains the entry name of the inode
  611                           and strrchr gives the name of the entry from path
  612     */
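          /* (For example: a nameless lookup typically carries a path that is
           * just the gfid string, with no '/', so strrchr() returns NULL and
           * loc->name stays unset, while a revalidate on "/dir/file" yields
           * loc->name = "file".)
           */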
  613     if (loc->path) {
  614         if (!loc->name || (loc->name && !strcmp(loc->name, ""))) {
  615             loc->name = strrchr(loc->path, '/');
  616             if (loc->name)
  617                 loc->name++;
  618         }
  619     }
  620 
  621     if (loc->parent)
  622         parent = inode_ref(loc->parent);
  623     else {
  624         parent = inode_find(loc->inode->table, loc->pargfid);
  625         if (!parent)
  626             parent = inode_parent(loc->inode, NULL, NULL);
  627     }
  628     if (parent)
  629         parent_ctx = svs_inode_ctx_get(this, parent);
  630 
  631     inode_ctx = svs_inode_ctx_get(this, loc->inode);
  632 
  633     if (xdata && !inode_ctx) {
  634         ret = dict_get_str_boolean(xdata, "entry-point", _gf_false);
  635         if (ret == -1) {
  636             gf_msg_debug(this->name, 0,
  637                          "failed to get the "
  638                          "entry point info");
  639             entry_point_key = _gf_false;
  640         } else {
  641             entry_point_key = ret;
  642         }
  643 
  644         if (loc->name && strlen(loc->name)) {
  645             /* lookup can come with the entry-point set in the dict
  646              * for the parent directory of the entry-point as well.
  647              * So consider entry_point only for named lookup
  648              */
  649             entry_point = entry_point_key;
  650         }
  651     }
  652 
  653     if (inode_ctx && inode_ctx->type == SNAP_VIEW_ENTRY_POINT_INODE) {
  654         /* entry-point may not be set in the dictionary.
  655          * This can happen if the snap-view client is restarted, in which
  656          * case the inode-ctx is not available and a nameless lookup comes in.
  657          */
  658         entry_point = _gf_true;
  659     }
  660 
  661     /* lookup is on the entry point to the snapshot world */
  662     if (entry_point) {
  663         op_ret = svs_lookup_entry_point(this, loc, parent, &buf, &postparent,
  664                                         &op_errno);
  665         goto out;
  666     }
  667 
  668     /* revalidate */
  669     if (inode_ctx) {
  670         op_ret = svs_revalidate(this, loc, parent, inode_ctx, parent_ctx, &buf,
  671                                 &postparent, &op_errno);
  672         goto out;
  673     }
  674 
  675     /* This can happen when the entry point directory is entered from a
  676        non-root directory (ex: if /mnt/glusterfs is the mount point, then the
  677        entry point (say .snaps) is entered from /mnt/glusterfs/dir/.snaps). Also
  678        it can happen when client sends a nameless lookup on just a gfid and
  679        the server does not have the inode in the inode table.
  680     */
  681     if (!inode_ctx && !parent_ctx) {
  682         if (gf_uuid_is_null(loc->gfid) && gf_uuid_is_null(loc->inode->gfid)) {
  683             op_ret = -1;
  684             op_errno = ESTALE;
  685             gf_msg_debug(this->name, 0,
  686                          "gfid is NULL. Either the lookup "
  687                          "came on missing entry or the "
  688                          "entry is stale");
  689             goto out;
  690         }
  691 
  692         if (!entry_point_key) {
  693             /* This can happen when there is no inode_ctx available.
  694              * snapview-server might have restarted or
  695              * graph change might have happened
  696              */
  697             op_ret = -1;
  698             op_errno = ESTALE;
  699             goto out;
  700         }
  701 
  702         /* lookup is on the parent directory of entry-point.
  703          * this would have already looked up by snap-view client
  704          * so return success
  705          */
  706         if (!gf_uuid_is_null(loc->gfid))
  707             gf_uuid_copy(buf.ia_gfid, loc->gfid);
  708         else
  709             gf_uuid_copy(buf.ia_gfid, loc->inode->gfid);
  710 
  711         svs_iatt_fill(buf.ia_gfid, &buf);
  712         svs_iatt_fill(buf.ia_gfid, &postparent);
  713 
  714         op_ret = 0;
  715         goto out;
  716     }
  717 
  718     if (parent_ctx) {
  719         if (parent_ctx->type == SNAP_VIEW_ENTRY_POINT_INODE)
  720             op_ret = svs_lookup_snapshot(this, loc, &buf, &postparent, parent,
  721                                          parent_ctx, &op_errno);
  722         else
  723             op_ret = svs_lookup_entry(this, loc, &buf, &postparent, parent,
  724                                       parent_ctx, &op_errno);
  725         goto out;
  726     }
  727 
  728 out:
  729     STACK_UNWIND_STRICT(lookup, frame, op_ret, op_errno,
  730                         loc ? loc->inode : NULL, &buf, xdata, &postparent);
  731 
  732     if (parent)
  733         inode_unref(parent);
  734 
  735     return 0;
  736 }
  737 
  738 int32_t
  739 svs_opendir(call_frame_t *frame, xlator_t *this, loc_t *loc, fd_t *fd,
  740             dict_t *xdata)
  741 {
  742     svs_inode_t *inode_ctx = NULL;
  743     int32_t op_ret = -1;
  744     int32_t op_errno = EINVAL;
  745     svs_fd_t *svs_fd = NULL;
  746     glfs_fd_t *glfd = NULL;
  747     glfs_t *fs = NULL;
  748     glfs_object_t *object = NULL;
  749     call_stack_t *root = NULL;
  750 
  751     GF_VALIDATE_OR_GOTO("snap-view-daemon", this, out);
  752     GF_VALIDATE_OR_GOTO(this->name, frame, out);
  753     GF_VALIDATE_OR_GOTO(this->name, fd, out);
  754     GF_VALIDATE_OR_GOTO(this->name, loc, out);
  755     GF_VALIDATE_OR_GOTO(this->name, loc->inode, out);
  756 
  757     root = frame->root;
  758     op_ret = gf_setcredentials(&root->uid, &root->gid, root->ngrps,
  759                                root->groups);
  760     if (op_ret != 0) {
  761         goto out;
  762     }
  763 
  764     inode_ctx = svs_inode_ctx_get(this, loc->inode);
  765     if (!inode_ctx) {
  766         op_ret = -1;
  767         op_errno = ESTALE;
  768         gf_msg(this->name, GF_LOG_ERROR, op_errno,
  769                SVS_MSG_GET_INODE_CONTEXT_FAILED,
  770                "inode context not found "
  771                "for the inode %s",
  772                uuid_utoa(loc->inode->gfid));
  773         goto out;
  774     }
  775 
  776     /* Fake success is sent if the opendir is on the entry point directory
  777        or the inode is SNAP_VIEW_ENTRY_POINT_INODE
  778     */
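          /* (The listing of the entry point itself is later generated from the
           * in-memory snapshot list (see svs_fill_readdir below), so there is
           * no underlying directory that needs to be opened here.)
           */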
  779     if (inode_ctx->type == SNAP_VIEW_ENTRY_POINT_INODE) {
  780         op_ret = 0;
  781         op_errno = 0;
  782         goto out;
  783     } else {
  784         SVS_GET_INODE_CTX_INFO(inode_ctx, fs, object, this, loc, op_ret,
  785                                op_errno, out);
  786 
  787         glfd = glfs_h_opendir(fs, object);
  788         if (!glfd) {
  789             op_ret = -1;
  790             op_errno = errno;
  791             gf_msg(this->name, GF_LOG_ERROR, op_errno, SVS_MSG_OPENDIR_FAILED,
  792                    "opendir on %s failed "
  793                    "(gfid: %s)",
  794                    loc->name, uuid_utoa(loc->inode->gfid));
  795             goto out;
  796         }
  797         svs_fd = svs_fd_ctx_get_or_new(this, fd);
  798         if (!svs_fd) {
  799             op_ret = -1;
  800             op_errno = ENOMEM;
  801             gf_msg(this->name, GF_LOG_ERROR, op_errno,
  802                    SVS_MSG_NEW_FD_CTX_FAILED,
  803                    "failed to allocate fd context "
  804                    "for %s (gfid: %s)",
  805                    loc->name, uuid_utoa(fd->inode->gfid));
  806             glfs_closedir(glfd);
  807             goto out;
  808         }
  809         svs_fd->fd = glfd;
  810 
  811         op_ret = 0;
  812         op_errno = 0;
  813     }
  814 
  815 out:
  816     STACK_UNWIND_STRICT(opendir, frame, op_ret, op_errno, fd, NULL);
  817 
  818     return 0;
  819 }
  820 
  821 /*
  822  * This function adds the xattr keys present in the list (@list) to the dict.
  823  * The list contains only the names of the xattrs (and no values), because
  824  * the gfapi listxattr operations return only the names of the xattrs in the
  825  * buffer provided by the caller (even though they obtained the values of
  826  * those xattrs from posix), as described in the listxattr man page.
  827  * Before unwinding, snapview-server has to put those names back into the
  828  * dict, but getting the values would require a getxattr operation on each
  829  * xattr, which might turn out to be costly. So for each of the xattrs
  830  * present in the list, a zero-byte value ("") is set into the dict before
  831  * unwinding. Since "" is also a valid xattr value (in a file system), we use
  832  * an extra key in the same dictionary as an indicator to other xlators which
  833  * want to cache the xattrs (as of now, md-cache, which caches acl and
  834  * selinux related xattrs) to not cache the values of the xattrs present in
  835  * the dict.
  836  */
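      /* (Illustrative: if listxattr returned the buffer
       * "security.selinux\0user.foo\0", the dict sent back would contain
       * { "security.selinux" : "", "user.foo" : "", "glusterfs.skip-cache" : "" }.)
       */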
  837 int32_t
  838 svs_add_xattrs_to_dict(xlator_t *this, dict_t *dict, char *list, ssize_t size)
  839 {
  840     char keybuffer[4096] = {
  841         0,
  842     };
  843     size_t remaining_size = 0;
  844     int32_t list_offset = 0;
  845     int32_t ret = -1;
  846 
  847     GF_VALIDATE_OR_GOTO("snapview-daemon", this, out);
  848     GF_VALIDATE_OR_GOTO(this->name, dict, out);
  849     GF_VALIDATE_OR_GOTO(this->name, list, out);
  850 
  851     remaining_size = size;
  852     list_offset = 0;
  853     while (remaining_size > 0) {
  854         strncpy(keybuffer, list + list_offset, sizeof(keybuffer) - 1);
  855 #ifdef GF_DARWIN_HOST_OS
  856         /* The protocol expects a namespace prefix for now */
  857         char *newkey = NULL;
  858         gf_add_prefix(XATTR_USER_PREFIX, keybuffer, &newkey);
  859         strcpy(keybuffer, newkey);
  860         GF_FREE(newkey);
  861 #endif
  862         ret = dict_set_str(dict, keybuffer, "");
  863         if (ret < 0) {
  864             gf_msg(this->name, GF_LOG_ERROR, 0, SVS_MSG_DICT_SET_FAILED,
  865                    "dict set operation "
  866                    "for the key %s failed.",
  867                    keybuffer);
  868             goto out;
  869         }
  870 
  871         remaining_size -= strlen(keybuffer) + 1;
  872         list_offset += strlen(keybuffer) + 1;
  873     } /* while (remaining_size > 0) */
  874 
  875     /* Add an additional key to indicate that we don't need to cache these
  876      * xattrs(with value "") */
  877     ret = dict_set_str(dict, "glusterfs.skip-cache", "");
  878     if (ret < 0) {
  879         gf_msg(this->name, GF_LOG_ERROR, 0, SVS_MSG_DICT_SET_FAILED,
  880                "dict set operation for the key glusterfs.skip-cache failed.");
  881         goto out;
  882     }
  883 
  884     ret = 0;
  885 
  886 out:
  887     return ret;
  888 }
  889 
  890 int32_t
  891 svs_getxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, const char *name,
  892              dict_t *xdata)
  893 {
  894     svs_inode_t *inode_ctx = NULL;
  895     int32_t op_ret = -1;
  896     int32_t op_errno = EINVAL;
  897     glfs_t *fs = NULL;
  898     glfs_object_t *object = NULL;
  899     char *value = 0;
  900     ssize_t size = 0;
  901     dict_t *dict = NULL;
  902     call_stack_t *root = NULL;
  903 
  904     GF_VALIDATE_OR_GOTO("snap-view-daemon", this, out);
  905     GF_VALIDATE_OR_GOTO("snap-view-daemon", frame, out);
  906     GF_VALIDATE_OR_GOTO("snap-view-daemon", loc, out);
  907     GF_VALIDATE_OR_GOTO("snap-view-daemon", loc->inode, out);
  908 
  909     root = frame->root;
  910     op_ret = gf_setcredentials(&root->uid, &root->gid, root->ngrps,
  911                                root->groups);
  912     if (op_ret != 0) {
  913         goto out;
  914     }
  915 
  916     inode_ctx = svs_inode_ctx_get(this, loc->inode);
  917     if (!inode_ctx) {
  918         op_ret = -1;
  919         op_errno = ESTALE;
  920         gf_msg(this->name, GF_LOG_ERROR, op_errno,
  921                SVS_MSG_GET_INODE_CONTEXT_FAILED,
  922                "inode context not found "
  923                "for the inode %s",
  924                uuid_utoa(loc->inode->gfid));
  925         goto out;
  926     }
  927 
  928     /* ENODATA is sent if the getxattr is on entry point directory
  929        or the inode is SNAP_VIEW_ENTRY_POINT_INODE. Entry point is
  930        a virtual directory on which setxattr operations are not
  931        allowed. If getxattr has to be faked as success, then a value
  932        for the name of the xattr has to be sent which we don't have.
  933     */
  934     if (inode_ctx->type == SNAP_VIEW_ENTRY_POINT_INODE) {
  935         op_ret = -1;
  936         op_errno = ENODATA;
  937         goto out;
  938     } else {
  939         SVS_GET_INODE_CTX_INFO(inode_ctx, fs, object, this, loc, op_ret,
  940                                op_errno, out);
  941 
  942         dict = dict_new();
  943         if (!dict) {
  944             op_ret = -1;
  945             op_errno = ENOMEM;
  946             gf_msg(this->name, GF_LOG_ERROR, op_errno, SVS_MSG_NO_MEMORY,
  947                    "failed to allocate dict");
  948             goto out;
  949         }
  950 
  951         size = glfs_h_getxattrs(fs, object, name, NULL, 0);
  952         if (size == -1) {
  953             op_ret = -1;
  954             op_errno = errno;
  955             if (errno == ENODATA) {
  956                 gf_msg_debug(this->name, 0,
  957                              "getxattr on "
  958                              "%s failed (key: %s) with %s",
  959                              loc->path, name, strerror(errno));
  960             } else {
  961                 gf_msg(this->name, GF_LOG_ERROR, op_errno,
  962                        SVS_MSG_GETXATTR_FAILED,
  963                        "getxattr on %s failed (key: %s) with %s", loc->path,
  964                        name, strerror(errno));
  965             }
  966             goto out;
  967         }
  968         value = GF_CALLOC(size + 1, sizeof(char), gf_common_mt_char);
  969         if (!value) {
  970             op_ret = -1;
  971             op_errno = ENOMEM;
  972             gf_msg(this->name, GF_LOG_ERROR, op_errno, SVS_MSG_NO_MEMORY,
  973                    "failed to allocate memory for getxattr "
  974                    "on %s (key: %s)",
  975                    loc->name, name);
  976             goto out;
  977         }
  978 
  979         size = glfs_h_getxattrs(fs, object, name, value, size);
  980         if (size == -1) {
  981             op_ret = -1;
  982             op_errno = errno;
  983             gf_msg(this->name, GF_LOG_ERROR, op_errno, SVS_MSG_GETXATTR_FAILED,
  984                    "failed to get the xattr %s for "
  985                    "entry %s",
  986                    name, loc->name);
  987             goto out;
  988         }
  989         value[size] = '\0';
  990 
  991         if (name) {
  992             op_ret = dict_set_dynptr(dict, (char *)name, value, size);
  993             if (op_ret < 0) {
  994                 op_errno = -op_ret;
  995                 gf_msg(this->name, GF_LOG_ERROR, op_errno,
  996                        SVS_MSG_DICT_SET_FAILED,
  997                        "dict set operation for %s for "
  998                        "the key %s failed.",
  999                        loc->path, name);
 1000                 GF_FREE(value);
 1001                 value = NULL;
 1002                 goto out;
 1003             }
 1004         } else {
 1005             op_ret = svs_add_xattrs_to_dict(this, dict, value, size);
 1006             if (op_ret == -1) {
 1007                 op_errno = ENOMEM;
 1008                 gf_msg(this->name, GF_LOG_ERROR, op_errno, SVS_MSG_NO_MEMORY,
 1009                        "failed to add xattrs from the list to "
 1010                        "dict for %s (gfid: %s)",
 1011                        loc->path, uuid_utoa(loc->inode->gfid));
 1012                 goto out;
 1013             }
 1014             GF_FREE(value);
 1015             value = NULL;
 1016         }
 1017     }
 1018 
 1019 out:
 1020     if (op_ret && value)
 1021         GF_FREE(value);
 1022 
 1023     STACK_UNWIND_STRICT(getxattr, frame, op_ret, op_errno, dict, NULL);
 1024 
 1025     if (dict)
 1026         dict_unref(dict);
 1027 
 1028     return 0;
 1029 }
 1030 
 1031 int32_t
 1032 svs_fgetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd, const char *name,
 1033               dict_t *xdata)
 1034 {
 1035     svs_inode_t *inode_ctx = NULL;
 1036     int32_t op_ret = -1;
 1037     int32_t op_errno = EINVAL;
 1038     char *value = 0;
 1039     ssize_t size = 0;
 1040     dict_t *dict = NULL;
 1041     svs_fd_t *sfd = NULL;
 1042     glfs_fd_t *glfd = NULL;
 1043 
 1044     GF_VALIDATE_OR_GOTO("snap-view-daemon", this, out);
 1045     GF_VALIDATE_OR_GOTO("snap-view-daemon", frame, out);
 1046     GF_VALIDATE_OR_GOTO("snap-view-daemon", fd, out);
 1047     GF_VALIDATE_OR_GOTO("snap-view-daemon", fd->inode, out);
 1048 
 1049     inode_ctx = svs_inode_ctx_get(this, fd->inode);
 1050     if (!inode_ctx) {
 1051         op_ret = -1;
 1052         op_errno = ESTALE;
 1053         gf_msg(this->name, GF_LOG_ERROR, op_errno,
 1054                SVS_MSG_GET_INODE_CONTEXT_FAILED,
 1055                "inode context not found "
 1056                "for the inode %s",
 1057                uuid_utoa(fd->inode->gfid));
 1058         goto out;
 1059     }
 1060 
 1061     if (!(svs_inode_ctx_glfs_mapping(this, inode_ctx))) {
 1062         op_ret = -1;
 1063         op_errno = EBADF;
 1064         gf_msg(this->name, GF_LOG_ERROR, op_errno, SVS_MSG_FS_INSTANCE_INVALID,
 1065                "glfs instance %p to which the inode %s "
 1066                "belongs does not exist. The snapshot "
 1067                "corresponding to the instance might have "
 1068                "been deleted or deactivated",
 1069                inode_ctx->fs, uuid_utoa(fd->inode->gfid));
 1070         goto out;
 1071     }
 1072 
 1073     sfd = svs_fd_ctx_get_or_new(this, fd);
 1074     if (!sfd) {
 1075         op_ret = -1;
 1076         op_errno = EBADFD;
 1077         gf_msg(this->name, GF_LOG_ERROR, op_errno,
 1078                SVS_MSG_GET_FD_CONTEXT_FAILED,
 1079                "failed to get the fd "
 1080                "context for %s",
 1081                uuid_utoa(fd->inode->gfid));
 1082         goto out;
 1083     }
 1084 
 1085     glfd = sfd->fd;
 1086     /* EINVAL is sent if the getxattr is on entry point directory
 1087        or the inode is SNAP_VIEW_ENTRY_POINT_INODE. Entry point is
 1088        a virtual directory on which setxattr operations are not
 1089        allowed. If getxattr has to be faked as success, then a value
 1090        for the name of the xattr has to be sent which we don't have.
 1091     */
 1092     if (inode_ctx->type == SNAP_VIEW_ENTRY_POINT_INODE) {
 1093         op_ret = -1;
 1094         op_errno = EINVAL;
 1095         goto out;
 1096     } else {
 1097         dict = dict_new();
 1098         if (!dict) {
 1099             op_ret = -1;
 1100             op_errno = ENOMEM;
 1101             gf_msg(this->name, GF_LOG_ERROR, op_errno, SVS_MSG_NO_MEMORY,
 1102                    "failed to allocate dict "
 1103                    "(gfid: %s, key: %s)",
 1104                    uuid_utoa(fd->inode->gfid), name);
 1105             goto out;
 1106         }
 1107 
 1108         if (name) {
 1109             size = glfs_fgetxattr(glfd, name, NULL, 0);
 1110             if (size == -1) {
 1111                 op_ret = -1;
 1112                 op_errno = errno;
 1113                 gf_msg(this->name, GF_LOG_ERROR, op_errno,
 1114                        SVS_MSG_GETXATTR_FAILED,
 1115                        "getxattr on %s failed "
 1116                        "(key: %s)",
 1117                        uuid_utoa(fd->inode->gfid), name);
 1118                 goto out;
 1119             }
 1120             value = GF_CALLOC(size + 1, sizeof(char), gf_common_mt_char);
 1121             if (!value) {
 1122                 op_ret = -1;
 1123                 op_errno = ENOMEM;
 1124                 gf_msg(this->name, GF_LOG_ERROR, op_errno, SVS_MSG_NO_MEMORY,
 1125                        "failed to "
 1126                        "allocate memory for getxattr on %s "
 1127                        "(key: %s)",
 1128                        uuid_utoa(fd->inode->gfid), name);
 1129                 goto out;
 1130             }
 1131 
 1132             size = glfs_fgetxattr(glfd, name, value, size);
 1133             if (size == -1) {
 1134                 op_ret = -1;
 1135                 op_errno = errno;
 1136                 gf_msg(this->name, GF_LOG_ERROR, op_errno,
 1137                        SVS_MSG_GETXATTR_FAILED,
 1138                        "failed to get the xattr %s "
 1139                        "for inode %s",
 1140                        name, uuid_utoa(fd->inode->gfid));
 1141                 goto out;
 1142             }
 1143             value[size] = '\0';
 1144 
 1145             op_ret = dict_set_dynptr(dict, (char *)name, value, size);
 1146             if (op_ret < 0) {
 1147                 op_errno = -op_ret;
 1148                 gf_msg(this->name, GF_LOG_ERROR, op_errno,
 1149                        SVS_MSG_DICT_SET_FAILED,
 1150                        "dict set operation for gfid %s "
 1151                        "for the key %s failed.",
 1152                        uuid_utoa(fd->inode->gfid), name);
 1153                 goto out;
 1154             }
 1155         } else {
 1156             size = glfs_flistxattr(glfd, NULL, 0);
 1157             if (size == -1) {
 1158                 op_errno = errno;
 1159                 gf_msg(this->name, GF_LOG_ERROR, op_errno,
 1160                        SVS_MSG_LISTXATTR_FAILED, "listxattr on %s failed",
 1161                        uuid_utoa(fd->inode->gfid));
 1162                 goto out;
 1163             }
 1164 
 1165             value = GF_CALLOC(size + 1, sizeof(char), gf_common_mt_char);
 1166             if (!value) {
 1167                 op_ret = -1;
 1168                 op_errno = ENOMEM;
 1169                 gf_msg(this->name, GF_LOG_ERROR, op_errno, SVS_MSG_NO_MEMORY,
 1170                        "failed to "
 1171                        "allocate buffer for xattr "
 1172                        "list (%s)",
 1173                        uuid_utoa(fd->inode->gfid));
 1174                 goto out;
 1175             }
 1176 
 1177             size = glfs_flistxattr(glfd, value, size);
 1178             if (size == -1) {
 1179                 op_ret = -1;
 1180                 op_errno = errno;
 1181                 gf_msg(this->name, GF_LOG_ERROR, op_errno,
 1182                        SVS_MSG_LISTXATTR_FAILED, "listxattr on %s failed",
 1183                        uuid_utoa(fd->inode->gfid));
 1184                 goto out;
 1185             }
 1186 
 1187             op_ret = svs_add_xattrs_to_dict(this, dict, value, size);
 1188             if (op_ret == -1) {
 1189                 op_errno = ENOMEM;
 1190                 gf_msg(this->name, GF_LOG_ERROR, op_errno, SVS_MSG_NO_MEMORY,
 1191                        "failed to add xattrs from the list "
 1192                        "to dict (gfid: %s)",
 1193                        uuid_utoa(fd->inode->gfid));
 1194                 goto out;
 1195             }
 1196             GF_FREE(value);
 1197         }
 1198 
 1199         op_ret = 0;
 1200         op_errno = 0;
 1201     }
 1202 
 1203 out:
 1204     if (op_ret)
 1205         GF_FREE(value);
 1206 
 1207     STACK_UNWIND_STRICT(fgetxattr, frame, op_ret, op_errno, dict, NULL);
 1208 
 1209     if (dict)
 1210         dict_unref(dict);
 1211 
 1212     return 0;
 1213 }
 1214 
 1215 int32_t
 1216 svs_releasedir(xlator_t *this, fd_t *fd)
 1217 {
 1218     svs_fd_t *sfd = NULL;
 1219     uint64_t tmp_pfd = 0;
 1220     int ret = 0;
 1221     svs_inode_t *svs_inode = NULL;
 1222     glfs_t *fs = NULL;
 1223     inode_t *inode = NULL;
 1224 
 1225     GF_VALIDATE_OR_GOTO("snapview-server", this, out);
 1226     GF_VALIDATE_OR_GOTO(this->name, fd, out);
 1227 
 1228     ret = fd_ctx_del(fd, this, &tmp_pfd);
 1229     if (ret < 0) {
 1230         gf_msg_debug(this->name, 0, "pfd from fd=%p is NULL", fd);
 1231         goto out;
 1232     }
 1233 
 1234     inode = fd->inode;
 1235 
 1236     svs_inode = svs_inode_ctx_get(this, inode);
 1237     if (svs_inode) {
 1238         fs = svs_inode->fs; /* should inode->lock be held for this? */
 1239         SVS_CHECK_VALID_SNAPSHOT_HANDLE(fs, this);
 1240         if (fs) {
 1241             sfd = (svs_fd_t *)(long)tmp_pfd;
 1242             if (sfd->fd) {
 1243                 ret = glfs_closedir(sfd->fd);
 1244                 if (ret)
 1245                     gf_msg(this->name, GF_LOG_WARNING, errno,
 1246                            SVS_MSG_RELEASEDIR_FAILED,
 1247                            "failed to close the glfd for "
 1248                            "directory %s",
 1249                            uuid_utoa(fd->inode->gfid));
 1250             }
 1251         }
 1252     }
 1253 
 1254     GF_FREE(sfd);
 1255 
 1256 out:
 1257     return 0;
 1258 }
 1259 
 1260 int32_t
 1261 svs_flush(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xdata)
 1262 {
 1263     int32_t op_ret = -1;
 1264     int32_t op_errno = 0;
 1265     int ret = -1;
 1266     uint64_t value = 0;
 1267     svs_inode_t *inode_ctx = NULL;
 1268     call_stack_t *root = NULL;
 1269 
 1270     GF_VALIDATE_OR_GOTO("snapview-server", this, out);
 1271     GF_VALIDATE_OR_GOTO(this->name, frame, out);
 1272     GF_VALIDATE_OR_GOTO(this->name, fd, out);
 1273 
 1274     root = frame->root;
 1275     op_ret = gf_setcredentials(&root->uid, &root->gid, root->ngrps,
 1276                                root->groups);
 1277     if (op_ret != 0) {
 1278         goto out;
 1279     }
 1280 
 1281     inode_ctx = svs_inode_ctx_get(this, fd->inode);
 1282     if (!inode_ctx) {
 1283         op_ret = -1;
 1284         op_errno = EINVAL;
 1285         gf_msg(this->name, GF_LOG_ERROR, op_errno,
 1286                SVS_MSG_GET_INODE_CONTEXT_FAILED,
 1287                "inode context not found for"
 1288                " the inode %s",
 1289                uuid_utoa(fd->inode->gfid));
 1290         goto out;
 1291     }
 1292 
 1293     ret = fd_ctx_get(fd, this, &value);
 1294     if (ret < 0 && inode_ctx->type != SNAP_VIEW_ENTRY_POINT_INODE) {
 1295         op_errno = EINVAL;
 1296         gf_msg(this->name, GF_LOG_WARNING, op_errno,
 1297                SVS_MSG_GET_FD_CONTEXT_FAILED, "pfd is NULL on fd=%p", fd);
 1298         goto out;
 1299     }
 1300 
 1301     op_ret = 0;
 1302 
 1303 out:
 1304     STACK_UNWIND_STRICT(flush, frame, op_ret, op_errno, NULL);
 1305 
 1306     return 0;
 1307 }
 1308 
 1309 int32_t
 1310 svs_release(xlator_t *this, fd_t *fd)
 1311 {
 1312     svs_fd_t *sfd = NULL;
 1313     uint64_t tmp_pfd = 0;
 1314     int ret = 0;
 1315     inode_t *inode = NULL;
 1316     svs_inode_t *svs_inode = NULL;
 1317     glfs_t *fs = NULL;
 1318 
 1319     GF_VALIDATE_OR_GOTO("snapview-server", this, out);
 1320     GF_VALIDATE_OR_GOTO(this->name, fd, out);
 1321 
 1322     ret = fd_ctx_del(fd, this, &tmp_pfd);
 1323     if (ret < 0) {
 1324         gf_msg_debug(this->name, 0, "pfd from fd=%p is NULL", fd);
 1325         goto out;
 1326     }
 1327 
 1328     inode = fd->inode;
 1329 
 1330     svs_inode = svs_inode_ctx_get(this, inode);
 1331     if (svs_inode) {
 1332         fs = svs_inode->fs; /* should inode->lock be held for this? */
 1333         SVS_CHECK_VALID_SNAPSHOT_HANDLE(fs, this);
 1334         if (fs) {
 1335             sfd = (svs_fd_t *)(long)tmp_pfd;
 1336             if (sfd->fd) {
 1337                 ret = glfs_close(sfd->fd);
 1338                 if (ret)
 1339                     gf_msg(this->name, GF_LOG_ERROR, errno,
 1340                            SVS_MSG_RELEASE_FAILED,
 1341                            "failed to close "
 1342                            "the glfd for %s",
 1343                            uuid_utoa(fd->inode->gfid));
 1344             }
 1345         }
 1346     }
 1347 
 1348     GF_FREE(sfd);
 1349 out:
 1350     return 0;
 1351 }
 1352 
 1353 int32_t
 1354 svs_forget(xlator_t *this, inode_t *inode)
 1355 {
 1356     int ret = -1;
 1357     uint64_t value = 0;
 1358     svs_inode_t *inode_ctx = NULL;
 1359 
 1360     GF_VALIDATE_OR_GOTO("snapview-server", this, out);
 1361     GF_VALIDATE_OR_GOTO(this->name, inode, out);
 1362 
 1363     ret = inode_ctx_del(inode, this, &value);
 1364     if (ret) {
 1365         gf_msg(this->name, GF_LOG_ERROR, 0, SVS_MSG_DELETE_INODE_CONTEXT_FAILED,
 1366                "failed to delete the inode "
 1367                "context of %s",
 1368                uuid_utoa(inode->gfid));
 1369         goto out;
 1370     }
 1371 
 1372     inode_ctx = (svs_inode_t *)(uintptr_t)value;
 1373     if (!inode_ctx)
 1374         goto out;
 1375 
 1376     if (inode_ctx->snapname)
 1377         GF_FREE(inode_ctx->snapname);
 1378 
 1379     /*
 1380      * glfs_h_close leads to unref and forgetting of the
 1381      * underlying inode in the gfapi world. i.e. the inode
 1382      * which inode_ctx->object points to.
 1383      * As of now the only possibility is that this forget came as a
 1384      * result of snapdaemon's inode table reaching the lru
 1385      * limit and purging the extra inodes that exceeded that
 1386      * limit. But, care must
 1387      * be taken to ensure that the gfapi instance to which
 1388      * the glfs_h_object belongs is not deleted. Otherwise
 1389      * this might result in access of a freed pointer.
 1390      * This will still be helpful in reducing the memory
 1391      * footprint of snapdaemon when the fs instance itself is
 1392      * valid (i.e. present and not destroyed due to either snap
 1393      * deactivate or snap delete), but the lru limit is reached.
 1394      * The forget due to the lru limit will cause the underlying inode
 1395      * to be unrefed and forgotten.
 1396      */
 1397     if (svs_inode_ctx_glfs_mapping(this, inode_ctx)) {
 1398         glfs_h_close(inode_ctx->object);
 1399         inode_ctx->object = NULL;
 1400     }
 1401     GF_FREE(inode_ctx);
 1402 
 1403 out:
 1404     return 0;
 1405 }
 1406 
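      /* Fill directory entries for a readdir on the entry-point directory:
       * the names come from the cached snapshot list (priv->dirents), read
       * under snaplist_lock, and entries are added until the requested size
       * would be exceeded. The offset passed in is used directly as an index
       * into that list.
       */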
 1407 int
 1408 svs_fill_readdir(xlator_t *this, gf_dirent_t *entries, size_t size, off_t off)
 1409 {
 1410     gf_dirent_t *entry = NULL;
 1411     svs_private_t *priv = NULL;
 1412     int i = 0;
 1413     snap_dirent_t *dirents = NULL;
 1414     int this_size = 0;
 1415     int filled_size = 0;
 1416     int count = 0;
 1417 
 1418     GF_VALIDATE_OR_GOTO("snap-view-daemon", this, out);
 1419     GF_VALIDATE_OR_GOTO("snap-view-daemon", entries, out);
 1420 
 1421     priv = this->private;
 1422     GF_ASSERT(priv);
 1423 
 1424     /* create the dir entries */
 1425     LOCK(&priv->snaplist_lock);
 1426     {
 1427         dirents = priv->dirents;
 1428 
 1429         for (i = off; i < priv->num_snaps;) {
 1430             this_size = sizeof(gf_dirent_t) + strlen(dirents[i].name) + 1;
 1431             if (this_size + filled_size > size)
 1432                 goto unlock;
 1433 
 1434             entry = gf_dirent_for_name(dirents[i].name);
 1435             if (!entry) {
 1436                 gf_msg(this->name, GF_LOG_ERROR, ENOMEM, SVS_MSG_NO_MEMORY,
 1437                        "failed to allocate dentry for %s", dirents[i].name);
 1438                 goto unlock;
 1439             }
 1440 
 1441             entry->d_off = i + 1;
 1442             /*
 1443              * readdir on the entry-point directory to the snapshot
 1444              * world will return the elements of the snapshot list
 1445              * as the directory entries. Since the entries
 1446              * returned are virtual entries which do not exist
 1447              * physically on the disk, pseudo inode numbers are
 1448              * generated.
 1449              */
 1450             entry->d_ino = i + 2;
 1451             entry->d_type = DT_DIR;
 1452             list_add_tail(&entry->list, &entries->list);
 1453             ++i;
 1454             count++;
 1455             filled_size += this_size;
 1456         }
 1457     }
 1458 unlock:
 1459     UNLOCK(&priv->snaplist_lock);
 1460 
 1461 out:
 1462     return count;
 1463 }
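
      /*
       * A rough worked example of the size accounting above (numbers
       * illustrative): with a 4096-byte request, off == 0 and two snapshots
       * named "snap1" and "snap2", each entry costs
       * sizeof(gf_dirent_t) + strlen(name) + 1 bytes. Both fit, so two dirents
       * are appended, count == 2 is returned, and the last entry carries
       * d_off == 2. A later call with off == 2 resumes after the second
       * snapshot and, with no further snapshots, fills nothing.
       */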
 1464 
 1465 int32_t
 1466 svs_glfs_readdir(xlator_t *this, glfs_fd_t *glfd, gf_dirent_t *entries,
 1467                  int32_t *op_errno, struct iatt *buf, gf_boolean_t readdirplus,
 1468                  size_t size)
 1469 {
 1470     int filled_size = 0;
 1471     int this_size = 0;
 1472     int32_t ret = -1;
 1473     int32_t count = 0;
 1474     gf_dirent_t *entry = NULL;
 1475     struct dirent *dirents = NULL;
 1476     struct dirent de = {
 1477         0,
 1478     };
 1479     struct stat statbuf = {
 1480         0,
 1481     };
 1482     off_t in_case = -1;
 1483 
 1484     GF_VALIDATE_OR_GOTO("svs", this, out);
 1485     GF_VALIDATE_OR_GOTO(this->name, glfd, out);
 1486     GF_VALIDATE_OR_GOTO(this->name, entries, out);
 1487 
 1488     while (filled_size < size) {
 1489         in_case = glfs_telldir(glfd);
 1490         if (in_case == -1) {
 1491             gf_msg(this->name, GF_LOG_ERROR, errno, SVS_MSG_TELLDIR_FAILED,
 1492                    "telldir failed");
 1493             break;
 1494         }
 1495 
 1496         if (readdirplus)
 1497             ret = glfs_readdirplus_r(glfd, &statbuf, &de, &dirents);
 1498         else
 1499             ret = glfs_readdir_r(glfd, &de, &dirents);
 1500 
 1501         if (ret == 0 && dirents != NULL) {
 1502             if (readdirplus)
 1503                 this_size = max(sizeof(gf_dirent_t), sizeof(gfs3_dirplist)) +
 1504                             strlen(de.d_name) + 1;
 1505             else
 1506                 this_size = sizeof(gf_dirent_t) + strlen(de.d_name) + 1;
 1507 
 1508             if (this_size + filled_size > size) {
 1509                 glfs_seekdir(glfd, in_case);
 1510                 break;
 1511             }
 1512 
 1513             entry = gf_dirent_for_name(de.d_name);
 1514             if (!entry) {
 1515                 /*
 1516                  * Since gf_dirent_for_name can return
 1517                  * NULL only when it fails to allocate
 1518                  * memory for the directory entry,
 1519                  * SVS_MSG_NO_MEMORY is used as the
 1520                  * message-id.
 1521                  */
 1522                 gf_msg(this->name, GF_LOG_ERROR, errno, SVS_MSG_NO_MEMORY,
 1523                        "could not create gf_dirent "
 1524                        "for entry %s: (%s)",
 1525                        de.d_name, strerror(errno));
 1526                 break;
 1527             }
 1528             entry->d_off = glfs_telldir(glfd);
 1529             entry->d_ino = de.d_ino;
 1530             entry->d_type = de.d_type;
 1531             if (readdirplus) {
 1532                 iatt_from_stat(buf, &statbuf);
 1533                 entry->d_stat = *buf;
 1534             }
 1535             list_add_tail(&entry->list, &entries->list);
 1536 
 1537             filled_size += this_size;
 1538             count++;
 1539         } else if (ret == 0 && dirents == NULL) {
 1540             *op_errno = ENOENT;
 1541             break;
 1542         } else if (ret != 0) {
 1543             *op_errno = errno;
 1544             break;
 1545         }
 1546         dirents = NULL;
 1547     }
 1548 
 1549 out:
 1550     return count;
 1551 }
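
      /*
       * Return conventions of svs_glfs_readdir(), as relied upon by the callers
       * below: the number of entries appended is returned; end-of-directory
       * (glfs_readdir(plus)_r() succeeding with a NULL result) sets *op_errno to
       * ENOENT and stops the loop. For readdirplus the per-entry cost is taken
       * as the larger of sizeof(gf_dirent_t) and sizeof(gfs3_dirplist),
       * presumably so that the filled buffer also stays within "size" once the
       * reply is serialized by the protocol layer.
       */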
 1552 
 1553 /* readdirp can be of 2 types.
 1554    1) It can come on the entry point directory, where the list of snapshots
 1555       is sent as dirents. In this case, the iatt structure is filled
 1556       on the fly if the inode is not found for the entry or the inode
 1557       context is NULL. Otherwise, if the inode is found and the inode
 1558       context is there, the iatt structure saved in the context is used.
 1559    2) It can come on a directory in one of the snapshots. In this case,
 1560       the readdirp call would have sent us an iatt structure. So the same
 1561       structure is used, with the exception that the gfid and the inode
 1562       numbers will be newly generated and filled in.
 1563 */
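
      /* Put differently (a rough summary of svs_readdirp_fill() below):
       *   - entry point dir: each snapshot gets a fresh inode, a randomly
       *     generated virtual gfid and a full d_stat taken from (or saved into)
       *     the inode context;
       *   - dir inside a snapshot: for entries with no known inode, d_stat is
       *     mostly cleared and entry->inode is left unset, forcing the client
       *     to do an explicit lookup, during which the virtual gfid is
       *     generated; for known entries the iatt saved in the context is
       *     reused.
       */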
 1564 void
 1565 svs_readdirp_fill(xlator_t *this, inode_t *parent, svs_inode_t *parent_ctx,
 1566                   gf_dirent_t *entry)
 1567 {
 1568     inode_t *inode = NULL;
 1569     uuid_t random_gfid = {
 1570         0,
 1571     };
 1572     struct iatt buf = {
 1573         0,
 1574     };
 1575     svs_inode_t *inode_ctx = NULL;
 1576 
 1577     GF_VALIDATE_OR_GOTO("snapview-server", this, out);
 1578     GF_VALIDATE_OR_GOTO(this->name, parent, out);
 1579     GF_VALIDATE_OR_GOTO(this->name, parent_ctx, out);
 1580     GF_VALIDATE_OR_GOTO(this->name, entry, out);
 1581 
 1582     if (!strcmp(entry->d_name, ".") || !strcmp(entry->d_name, ".."))
 1583         goto out;
 1584 
 1585     inode = inode_grep(parent->table, parent, entry->d_name);
 1586     if (inode) {
 1587         entry->inode = inode;
 1588         inode_ctx = svs_inode_ctx_get(this, inode);
 1589         if (!inode_ctx) {
 1590             gf_uuid_copy(buf.ia_gfid, inode->gfid);
 1591             svs_iatt_fill(inode->gfid, &buf);
 1592             buf.ia_type = inode->ia_type;
 1593         } else {
 1594             buf = inode_ctx->buf;
 1595         }
 1596 
 1597         entry->d_ino = buf.ia_ino;
 1598 
 1599         if (parent_ctx->type == SNAP_VIEW_ENTRY_POINT_INODE)
 1600             entry->d_stat = buf;
 1601         else {
 1602             entry->d_stat.ia_ino = buf.ia_ino;
 1603             gf_uuid_copy(entry->d_stat.ia_gfid, buf.ia_gfid);
 1604         }
 1605     } else {
 1606         if (parent_ctx->type == SNAP_VIEW_ENTRY_POINT_INODE) {
 1607             inode = inode_new(parent->table);
 1608             entry->inode = inode;
 1609 
 1610             /* If inode context allocation fails, then do not send
 1611              * the inode for that particular entry as part of
 1612              * readdirp response. Fuse and protocol/server will link
 1613              * the inodes in readdirp only if the entry contains
 1614              * inode in it.
 1615              */
 1616             inode_ctx = svs_inode_ctx_get_or_new(this, inode);
 1617             if (!inode_ctx) {
 1618                 gf_msg(this->name, GF_LOG_ERROR, ENOMEM, SVS_MSG_NO_MEMORY,
 1619                        "failed to allocate inode "
 1620                        "context for %s",
 1621                        entry->d_name);
 1622                 inode_unref(entry->inode);
 1623                 entry->inode = NULL;
 1624                 goto out;
 1625             }
 1626 
 1627             /* Generate virtual gfid for SNAPSHOT dir and
 1628              * update the statbuf
 1629              */
 1630             gf_uuid_generate(random_gfid);
 1631             gf_uuid_copy(buf.ia_gfid, random_gfid);
 1632             svs_fill_ino_from_gfid(&buf);
 1633             buf.ia_type = IA_IFDIR;
 1634             entry->d_ino = buf.ia_ino;
 1635             entry->d_stat = buf;
 1636             inode_ctx->buf = buf;
 1637             inode_ctx->type = SNAP_VIEW_SNAPSHOT_INODE;
 1638         } else {
 1639             /* For files under snapshot world do not set
 1640              * entry->inode and reset statbuf (except ia_ino),
 1641              * so that FUSE/Kernel will send an explicit lookup.
 1642              * entry->d_stat contains the statbuf information
 1643              * of original file, so for NFS not to cache this
 1644              * information and to send explicit lookup, it is
 1645              * required to reset the statbuf.
 1646              * Virtual gfid for these files will be generated in the
 1647              * first lookup.
 1648              */
 1649             buf.ia_ino = entry->d_ino;
 1650             entry->d_stat = buf;
 1651         }
 1652     }
 1653 
 1654 out:
 1655     return;
 1656 }
 1657 
 1658 /* In readdirp, though a new inode is created along with the generation of
 1659    a new gfid, the inode context created will not contain the glfs_t instance
 1660    for the filesystem it belongs to, nor the handle for it in the gfapi
 1661    world (the handle is obtained only by doing a lookup call on the entry,
 1662    and doing a lookup on each entry received as part of the readdir call is
 1663    a costly operation). So the fs and handle are NULL in the inode context
 1664    and are filled in when a lookup comes on that object.
 1665 */
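
      /* A rough sketch of the lazy fill described above (flow, not literal code):
       *
       *     readdirp:  inode_ctx = svs_inode_ctx_get_or_new(this, inode);
       *                (inode_ctx->fs and inode_ctx->object stay NULL)
       *     later fop: if fs/object are still NULL, resolve them through the
       *                parent's context (see svs_get_handle() further below) and
       *                cache them in the inode context for subsequent fops.
       */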
 1666 int32_t
 1667 svs_readdirp(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
 1668              off_t off, dict_t *dict)
 1669 {
 1670     gf_dirent_t entries;
 1671     gf_dirent_t *entry = NULL;
 1672     struct iatt buf = {
 1673         0,
 1674     };
 1675     int count = 0;
 1676     int op_ret = -1;
 1677     int op_errno = EINVAL;
 1678     svs_inode_t *parent_ctx = NULL;
 1679     svs_fd_t *svs_fd = NULL;
 1680     call_stack_t *root = NULL;
 1681 
 1682     INIT_LIST_HEAD(&entries.list);
 1683 
 1684     GF_VALIDATE_OR_GOTO("snap-view-daemon", this, unwind);
 1685     GF_VALIDATE_OR_GOTO(this->name, frame, unwind);
 1686     GF_VALIDATE_OR_GOTO(this->name, fd, unwind);
 1687     GF_VALIDATE_OR_GOTO(this->name, fd->inode, unwind);
 1688 
 1689     root = frame->root;
 1690     op_ret = gf_setcredentials(&root->uid, &root->gid, root->ngrps,
 1691                                root->groups);
 1692     if (op_ret != 0) {
 1693         goto unwind;
 1694     }
 1695 
 1696     parent_ctx = svs_inode_ctx_get(this, fd->inode);
 1697     if (!parent_ctx) {
 1698         op_ret = -1;
 1699         op_errno = EINVAL;
 1700         gf_msg(this->name, GF_LOG_ERROR, op_errno,
 1701                SVS_MSG_GET_INODE_CONTEXT_FAILED,
 1702                "failed to get the inode "
 1703                "context for %s",
 1704                uuid_utoa(fd->inode->gfid));
 1705         goto unwind;
 1706     }
 1707 
 1708     if (parent_ctx->type == SNAP_VIEW_ENTRY_POINT_INODE) {
 1709         LOCK(&fd->lock);
 1710         {
 1711             count = svs_fill_readdir(this, &entries, size, off);
 1712         }
 1713         UNLOCK(&fd->lock);
 1714 
 1715         op_ret = count;
 1716 
 1717         list_for_each_entry(entry, &entries.list, list)
 1718         {
 1719             svs_readdirp_fill(this, fd->inode, parent_ctx, entry);
 1720         }
 1721 
 1722         goto unwind;
 1723     } else {
 1724         svs_fd = svs_fd_ctx_get_or_new(this, fd);
 1725         if (!svs_fd) {
 1726             op_ret = -1;
 1727             op_errno = EBADFD;
 1728             gf_msg(this->name, GF_LOG_ERROR, op_errno,
 1729                    SVS_MSG_GET_FD_CONTEXT_FAILED,
 1730                    "failed to get the fd context "
 1731                    "for the inode %s",
 1732                    uuid_utoa(fd->inode->gfid));
 1733             goto unwind;
 1734         }
 1735 
 1736         glfs_seekdir(svs_fd->fd, off);
 1737 
 1738         LOCK(&fd->lock);
 1739         {
 1740             count = svs_glfs_readdir(this, svs_fd->fd, &entries, &op_errno,
 1741                                      &buf, _gf_true, size);
 1742         }
 1743         UNLOCK(&fd->lock);
 1744 
 1745         op_ret = count;
 1746 
 1747         list_for_each_entry(entry, &entries.list, list)
 1748         {
 1749             svs_readdirp_fill(this, fd->inode, parent_ctx, entry);
 1750         }
 1751 
 1752         goto unwind;
 1753     }
 1754 
 1755 unwind:
 1756     STACK_UNWIND_STRICT(readdirp, frame, op_ret, op_errno, &entries, dict);
 1757 
 1758     gf_dirent_free(&entries);
 1759 
 1760     return 0;
 1761 }
 1762 
 1763 int32_t
 1764 svs_readdir(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
 1765             off_t off, dict_t *xdata)
 1766 {
 1767     gf_dirent_t entries = {
 1768         {
 1769             {
 1770                 0,
 1771             },
 1772         },
 1773     };
 1774     int count = 0;
 1775     svs_inode_t *inode_ctx = NULL;
 1776     int op_errno = EINVAL;
 1777     int op_ret = -1;
 1778     svs_fd_t *svs_fd = NULL;
 1779     glfs_fd_t *glfd = NULL;
 1780 
 1781     INIT_LIST_HEAD(&entries.list);
 1782 
 1783     GF_VALIDATE_OR_GOTO("snap-view-server", this, unwind);
 1784     GF_VALIDATE_OR_GOTO(this->name, frame, unwind);
 1785     GF_VALIDATE_OR_GOTO(this->name, fd, unwind);
 1786     GF_VALIDATE_OR_GOTO(this->name, fd->inode, unwind);
 1787 
 1788     inode_ctx = svs_inode_ctx_get(this, fd->inode);
 1789     if (!inode_ctx) {
 1790         op_ret = -1;
 1791         op_errno = EINVAL;
 1792         gf_msg(this->name, GF_LOG_ERROR, op_errno,
 1793                SVS_MSG_GET_INODE_CONTEXT_FAILED,
 1794                "inode context not found in "
 1795                "the inode %s",
 1796                uuid_utoa(fd->inode->gfid));
 1797         goto unwind;
 1798     }
 1799 
 1800     if (inode_ctx->type == SNAP_VIEW_ENTRY_POINT_INODE) {
 1801         LOCK(&fd->lock);
 1802         {
 1803             count = svs_fill_readdir(this, &entries, size, off);
 1804         }
 1805         UNLOCK(&fd->lock);
 1806     } else {
 1807         svs_fd = svs_fd_ctx_get_or_new(this, fd);
 1808         if (!svs_fd) {
 1809             op_ret = -1;
 1810             op_errno = EBADFD;
 1811             gf_msg(this->name, GF_LOG_ERROR, op_errno,
 1812                    SVS_MSG_GET_FD_CONTEXT_FAILED,
 1813                    "failed to get the fd "
 1814                    "context for %s",
 1815                    uuid_utoa(fd->inode->gfid));
 1816             goto unwind;
 1817         }
 1818 
 1819         glfd = svs_fd->fd;
 1820 
 1821         LOCK(&fd->lock);
 1822         {
 1823             count = svs_glfs_readdir(this, glfd, &entries, &op_errno, NULL,
 1824                                      _gf_false, size);
 1825         }
 1826         UNLOCK(&fd->lock);
 1827     }
 1828 
 1829     op_ret = count;
 1830 
 1831 unwind:
 1832     STACK_UNWIND_STRICT(readdir, frame, op_ret, op_errno, &entries, xdata);
 1833 
 1834     gf_dirent_free(&entries);
 1835 
 1836     return 0;
 1837 }
 1838 
 1839 /*
 1840  * This function is mainly helpful for NFS. Till now NFS server was not linking
 1841  * the inodes in readdirp, which caused problems when below operations were
 1842  * performed.
 1843  *
 1844  * 1) ls -l in one of the snapshots (snapview-server would generate gfids for
 1845  *    each entry on the fly and link the inodes associated with those entries)
 1846  * 2) NFS server upon getting readdirp reply would not link the inodes of the
 1847  *    entries. But it used to generate filehandles for each entry and associate
 1848  *    the gfid of that entry with the filehandle and send it as part of the
 1849  *    reply to nfs client.
 1850  * 3) NFS client would send the filehandle of one of those entries when some
 1851  *    activity is done on it.
 1852  * 4) NFS server would not be able to find the inode for the gfid present in the
 1853  *    filehandle (as the inode was not linked) and would go for hard resolution
 1854  *    by sending a lookup on the gfid by creating a new inode.
 1855  * 5) snapview-client will not be able to identify whether the inode is a real
 1856  *    inode existing in the main volume or a virtual inode existing in the
 1857  *    snapshots as there would not be any inode context.
 1858  * 6) Since the gfid upon which lookup is sent is a virtual gfid which is not
 1859  *    present in the disk, lookup would fail and the application would get an
 1860  *    error.
 1861  *
 1862  * The above problem is fixed by the below commit, which makes snapview-server
 1863  * more compatible with the NFS server (1dea949cb60c3814c9206df6ba8dddec8d471a94).
 1864  * But now, the inode linking that the NFS server does in readdirp has
 1865  * introduced the below issue.
 1866  * In readdirp, though snapview-server allocates inode contexts, it does not
 1867  * actually perform a lookup on each entry it obtained in readdirp (as doing
 1868  * a lookup via gfapi over the network for each entry would be costly).
 1869  *
 1870  * Till now it was not a problem with NFS server, as NFS was sending a lookup on
 1871  * the gfid it got from NFS client, for which it was not able to find the right
 1872  * inode. So snapview-server was able to get the fs instance (glfs_t) of the
 1873  * snapshot volume to which the entry belongs to, and the handle for the entry
 1874  * from the corresponding snapshot volume and fill those information in the
 1875  * inode context.
 1876  *
 1877  * But now, since NFS server is able to find the inode from the inode table for
 1878  * the gfid it got from the NFS client, it won't send lookup. Rather it directly
 1879  * sends the fop it received from the client. Now this causes problems for
 1880  * snapview-server. Because for each fop snapview-server assumes that lookup has
 1881  * been performed on that entry and the entry's inode context contains the
 1882  * pointers for the fs instance and the handle to the entry in that fs. When NFS
 1883  * server sends the fop and snapview-server finds that the fs instance and the
 1884  * handle within the inode context are NULL it unwinds with EINVAL.
 1885  *
 1886  * So to handle this, if the fs instance or the handle within the inode
 1887  * context is NULL, then do a lookup based on the parent inode context's fs
 1888  * instance, and unwind the results obtained as part of that lookup.
 1889  */
 1890 
 1891 int32_t
 1892 svs_get_handle(xlator_t *this, loc_t *loc, svs_inode_t *inode_ctx,
 1893                int32_t *op_errno)
 1894 {
 1895     svs_inode_t *parent_ctx = NULL;
 1896     int ret = -1;
 1897     inode_t *parent = NULL;
 1898     struct iatt postparent = {
 1899         0,
 1900     };
 1901     struct iatt buf = {
 1902         0,
 1903     };
 1904     char uuid1[64];
 1905 
 1906     GF_VALIDATE_OR_GOTO("snap-view-daemon", this, out);
 1907     GF_VALIDATE_OR_GOTO(this->name, loc, out);
 1908     GF_VALIDATE_OR_GOTO(this->name, loc->inode, out);
 1909 
 1910     if (loc->path) {
 1911         if (!loc->name || (loc->name && !strcmp(loc->name, ""))) {
 1912             loc->name = strrchr(loc->path, '/');
 1913             if (loc->name)
 1914                 loc->name++;
 1915         }
 1916     }
 1917 
 1918     if (loc->parent)
 1919         parent = inode_ref(loc->parent);
 1920     else {
 1921         parent = inode_find(loc->inode->table, loc->pargfid);
 1922         if (!parent)
 1923             parent = inode_parent(loc->inode, NULL, NULL);
 1924     }
 1925 
 1926     if (parent)
 1927         parent_ctx = svs_inode_ctx_get(this, parent);
 1928 
 1929     if (!parent_ctx) {
 1930         *op_errno = EINVAL;
 1931         gf_msg(this->name, GF_LOG_WARNING, *op_errno,
 1932                SVS_MSG_GET_INODE_CONTEXT_FAILED,
 1933                "failed to get the parent "
 1934                "context for %s (%s)",
 1935                loc->path, uuid_utoa_r(loc->inode->gfid, uuid1));
 1936         goto out;
 1937     }
 1938 
 1939     if (parent_ctx) {
 1940         if (parent_ctx->type == SNAP_VIEW_ENTRY_POINT_INODE)
 1941             ret = svs_lookup_snapshot(this, loc, &buf, &postparent, parent,
 1942                                       parent_ctx, op_errno);
 1943         else
 1944             ret = svs_lookup_entry(this, loc, &buf, &postparent, parent,
 1945                                    parent_ctx, op_errno);
 1946     }
 1947 
 1948 out:
 1949     if (parent)
 1950         inode_unref(parent);
 1951 
 1952     return ret;
 1953 }
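
      /*
       * Typical use, as in svs_stat(), svs_statfs() and svs_open() below: the
       * fop fetches the inode context and then relies on
       * SVS_GET_INODE_CTX_INFO() to hand back the (fs, object) pair; when those
       * are still NULL (the readdirp case described above), the macro is
       * expected to fall back to svs_get_handle(), so the fop can proceed even
       * though no explicit lookup has been wound to this translator for that
       * entry.
       */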
 1954 
 1955 int32_t
 1956 svs_stat(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
 1957 {
 1958     struct iatt buf = {
 1959         0,
 1960     };
 1961     int32_t op_errno = EINVAL;
 1962     int32_t op_ret = -1;
 1963     svs_inode_t *inode_ctx = NULL;
 1964     glfs_t *fs = NULL;
 1965     glfs_object_t *object = NULL;
 1966     struct stat stat = {
 1967         0,
 1968     };
 1969     int ret = -1;
 1970     call_stack_t *root = NULL;
 1971 
 1972     GF_VALIDATE_OR_GOTO("snap-view-daemon", this, out);
 1973     GF_VALIDATE_OR_GOTO(this->name, frame, out);
 1974     GF_VALIDATE_OR_GOTO(this->name, loc, out);
 1975     GF_VALIDATE_OR_GOTO(this->name, loc->inode, out);
 1976 
 1977     root = frame->root;
 1978     op_ret = gf_setcredentials(&root->uid, &root->gid, root->ngrps,
 1979                                root->groups);
 1980     if (op_ret != 0) {
 1981         goto out;
 1982     }
 1983 
 1984     /* Instead of checking whether this is an entry point directory by
 1985        looking at the name of the entry and then deciding what to do,
 1986        just check the inode context and decide what is to be done.
 1987     */
 1988 
 1989     inode_ctx = svs_inode_ctx_get(this, loc->inode);
 1990     if (!inode_ctx) {
 1991         op_ret = -1;
 1992         op_errno = EINVAL;
 1993         gf_msg(this->name, GF_LOG_ERROR, op_errno,
 1994                SVS_MSG_GET_INODE_CONTEXT_FAILED,
 1995                "inode context not found for %s", uuid_utoa(loc->inode->gfid));
 1996         goto out;
 1997     }
 1998 
 1999     if (inode_ctx->type == SNAP_VIEW_ENTRY_POINT_INODE) {
 2000         svs_iatt_fill(loc->inode->gfid, &buf);
 2001         op_ret = 0;
 2002     } else {
 2003         SVS_GET_INODE_CTX_INFO(inode_ctx, fs, object, this, loc, op_ret,
 2004                                op_errno, out);
 2005 
 2006         ret = glfs_h_stat(fs, object, &stat);
 2007         if (ret) {
 2008             op_ret = -1;
 2009             op_errno = errno;
 2010             gf_msg(this->name, GF_LOG_ERROR, op_errno, SVS_MSG_STAT_FAILED,
 2011                    "glfs_h_stat on %s (gfid: %s) "
 2012                    "failed",
 2013                    loc->name, uuid_utoa(loc->inode->gfid));
 2014             goto out;
 2015         } else
 2016             gf_msg_debug(this->name, 0, "stat on %s (%s) successful", loc->path,
 2017                          uuid_utoa(loc->inode->gfid));
 2018 
 2019         iatt_from_stat(&buf, &stat);
 2020         gf_uuid_copy(buf.ia_gfid, loc->inode->gfid);
 2021         svs_fill_ino_from_gfid(&buf);
 2022         op_ret = ret;
 2023     }
 2024 
 2025 out:
 2026     STACK_UNWIND_STRICT(stat, frame, op_ret, op_errno, &buf, xdata);
 2027     return 0;
 2028 }
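
      /*
       * Note on the pattern above (repeated in svs_fstat() and svs_readlink()):
       * the stat obtained through gfapi describes the file inside the snapshot
       * volume, so its gfid and inode number belong to that volume. The reply is
       * therefore rewritten with the virtual gfid known to this translator, and
       * the inode number is re-derived from that gfid via
       * svs_fill_ino_from_gfid(), so that clients keep seeing a consistent
       * virtual namespace.
       */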
 2029 
 2030 int32_t
 2031 svs_fstat(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xdata)
 2032 {
 2033     struct iatt buf = {
 2034         0,
 2035     };
 2036     int32_t op_errno = EINVAL;
 2037     int32_t op_ret = -1;
 2038     svs_inode_t *inode_ctx = NULL;
 2039     struct stat stat = {
 2040         0,
 2041     };
 2042     int ret = -1;
 2043     glfs_fd_t *glfd = NULL;
 2044     svs_fd_t *sfd = NULL;
 2045     call_stack_t *root = NULL;
 2046 
 2047     GF_VALIDATE_OR_GOTO("snap-view-daemon", this, out);
 2048     GF_VALIDATE_OR_GOTO(this->name, frame, out);
 2049     GF_VALIDATE_OR_GOTO(this->name, fd, out);
 2050     GF_VALIDATE_OR_GOTO(this->name, fd->inode, out);
 2051 
 2052     /* Instead of checking whether this is an entry point directory by
 2053        looking at the name of the entry and then deciding what to do,
 2054        just check the inode context and decide what is to be done.
 2055     */
 2056 
 2057     root = frame->root;
 2058     op_ret = gf_setcredentials(&root->uid, &root->gid, root->ngrps,
 2059                                root->groups);
 2060     if (op_ret != 0) {
 2061         goto out;
 2062     }
 2063 
 2064     inode_ctx = svs_inode_ctx_get(this, fd->inode);
 2065     if (!inode_ctx) {
 2066         op_ret = -1;
 2067         op_errno = EINVAL;
 2068         gf_msg(this->name, GF_LOG_ERROR, op_errno,
 2069                SVS_MSG_GET_INODE_CONTEXT_FAILED,
 2070                "inode context not found for"
 2071                " the inode %s",
 2072                uuid_utoa(fd->inode->gfid));
 2073         goto out;
 2074     }
 2075 
 2076     if (inode_ctx->type == SNAP_VIEW_ENTRY_POINT_INODE) {
 2077         svs_iatt_fill(fd->inode->gfid, &buf);
 2078         op_ret = 0;
 2079     } else {
 2080         if (!(svs_inode_ctx_glfs_mapping(this, inode_ctx))) {
 2081             op_ret = -1;
 2082             op_errno = EBADF;
 2083             gf_msg(this->name, GF_LOG_ERROR, op_errno,
 2084                    SVS_MSG_FS_INSTANCE_INVALID,
 2085                    "glfs instance %p to which the inode %s "
 2086                    "belongs does not exist. The snapshot "
 2087                    "corresponding to the fs instance "
 2088                    "might have been deleted or deactivated.",
 2089                    inode_ctx->fs, uuid_utoa(fd->inode->gfid));
 2090             goto out;
 2091         }
 2092 
 2093         sfd = svs_fd_ctx_get_or_new(this, fd);
 2094         if (!sfd) {
 2095             op_ret = -1;
 2096             op_errno = EBADFD;
 2097             gf_msg(this->name, GF_LOG_ERROR, op_errno,
 2098                    SVS_MSG_GET_FD_CONTEXT_FAILED,
 2099                    "failed to get the fd context "
 2100                    "for %s",
 2101                    uuid_utoa(fd->inode->gfid));
 2102             goto out;
 2103         }
 2104 
 2105         glfd = sfd->fd;
 2106         ret = glfs_fstat(glfd, &stat);
 2107         if (ret) {
 2108             op_ret = -1;
 2109             op_errno = errno;
 2110             gf_msg(this->name, GF_LOG_ERROR, op_errno, SVS_MSG_STAT_FAILED,
 2111                    "glfs_fstat on gfid: %s failed", uuid_utoa(fd->inode->gfid));
 2112             goto out;
 2113         }
 2114 
 2115         iatt_from_stat(&buf, &stat);
 2116         gf_uuid_copy(buf.ia_gfid, fd->inode->gfid);
 2117         svs_fill_ino_from_gfid(&buf);
 2118         op_ret = ret;
 2119     }
 2120 
 2121 out:
 2122     STACK_UNWIND_STRICT(fstat, frame, op_ret, op_errno, &buf, xdata);
 2123     return 0;
 2124 }
 2125 
 2126 int32_t
 2127 svs_statfs(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
 2128 {
 2129     struct statvfs buf = {
 2130         0,
 2131     };
 2132     int32_t op_errno = EINVAL;
 2133     int32_t op_ret = -1;
 2134     svs_inode_t *inode_ctx = NULL;
 2135     glfs_t *fs = NULL;
 2136     glfs_object_t *object = NULL;
 2137     int ret = -1;
 2138     call_stack_t *root = NULL;
 2139 
 2140     GF_VALIDATE_OR_GOTO("snap-view-daemon", this, out);
 2141     GF_VALIDATE_OR_GOTO(this->name, frame, out);
 2142     GF_VALIDATE_OR_GOTO(this->name, loc, out);
 2143     GF_VALIDATE_OR_GOTO(this->name, loc->inode, out);
 2144 
 2145     root = frame->root;
 2146     op_ret = gf_setcredentials(&root->uid, &root->gid, root->ngrps,
 2147                                root->groups);
 2148     if (op_ret != 0) {
 2149         goto out;
 2150     }
 2151 
 2152     /* Instead of checking whether this is an entry point directory by
 2153        looking at the name of the entry and then deciding what to do,
 2154        just check the inode context and decide what is to be done.
 2155     */
 2156     inode_ctx = svs_inode_ctx_get(this, loc->inode);
 2157     if (!inode_ctx) {
 2158         op_ret = -1;
 2159         op_errno = EINVAL;
 2160         gf_msg(this->name, GF_LOG_ERROR, op_errno,
 2161                SVS_MSG_GET_INODE_CONTEXT_FAILED,
 2162                "inode context not found for %s", uuid_utoa(loc->inode->gfid));
 2163         goto out;
 2164     }
 2165 
 2166     SVS_GET_INODE_CTX_INFO(inode_ctx, fs, object, this, loc, op_ret, op_errno,
 2167                            out);
 2168 
 2169     ret = glfs_h_statfs(fs, object, &buf);
 2170     if (ret) {
 2171         op_ret = -1;
 2172         op_errno = errno;
 2173         gf_msg(this->name, GF_LOG_ERROR, op_errno, SVS_MSG_STATFS_FAILED,
 2174                "glfs_h_statfs on %s (gfid: %s) "
 2175                "failed",
 2176                loc->name, uuid_utoa(loc->inode->gfid));
 2177         goto out;
 2178     }
 2179     op_ret = ret;
 2180 
 2181 out:
 2182     STACK_UNWIND_STRICT(statfs, frame, op_ret, op_errno, &buf, xdata);
 2183     return 0;
 2184 }
 2185 
 2186 int32_t
 2187 svs_open(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
 2188          fd_t *fd, dict_t *xdata)
 2189 {
 2190     svs_inode_t *inode_ctx = NULL;
 2191     svs_fd_t *sfd = NULL;
 2192     int32_t op_ret = -1;
 2193     int32_t op_errno = EINVAL;
 2194     glfs_fd_t *glfd = NULL;
 2195     glfs_t *fs = NULL;
 2196     glfs_object_t *object = NULL;
 2197     call_stack_t *root = NULL;
 2198 
 2199     GF_VALIDATE_OR_GOTO("snap-view-daemon", this, out);
 2200     GF_VALIDATE_OR_GOTO(this->name, frame, out);
 2201     GF_VALIDATE_OR_GOTO(this->name, fd, out);
 2202     GF_VALIDATE_OR_GOTO(this->name, loc, out);
 2203     GF_VALIDATE_OR_GOTO(this->name, loc->inode, out);
 2204 
 2205     root = frame->root;
 2206 
 2207     inode_ctx = svs_inode_ctx_get(this, loc->inode);
 2208     if (!inode_ctx) {
 2209         gf_msg(this->name, GF_LOG_ERROR, op_errno,
 2210                SVS_MSG_GET_INODE_CONTEXT_FAILED,
 2211                "inode context for %s (gfid: %s) "
 2212                "not found",
 2213                loc->name, uuid_utoa(loc->inode->gfid));
 2214         goto out;
 2215     }
 2216 
 2217     if (inode_ctx->type == SNAP_VIEW_ENTRY_POINT_INODE)
 2218         GF_ASSERT(0); /* on the entry point it should always be opendir */
 2219 
 2220     SVS_GET_INODE_CTX_INFO(inode_ctx, fs, object, this, loc, op_ret, op_errno,
 2221                            out);
 2222 
 2223     op_ret = gf_setcredentials(&root->uid, &root->gid, root->ngrps,
 2224                                root->groups);
 2225     if (op_ret != 0) {
 2226         goto out;
 2227     }
 2228 
 2229     glfd = glfs_h_open(fs, object, flags);
 2230     if (!glfd) {
 2231         op_ret = -1;
 2232         op_errno = errno;
 2233         gf_msg(this->name, GF_LOG_ERROR, op_errno, SVS_MSG_OPEN_FAILED,
 2234                "glfs_h_open on %s failed (gfid: %s)", loc->name,
 2235                uuid_utoa(loc->inode->gfid));
 2236         goto out;
 2237     }
 2238 
 2239     sfd = svs_fd_ctx_get_or_new(this, fd);
 2240     if (!sfd) {
 2241         op_ret = -1;
 2242         op_errno = ENOMEM;
 2243         gf_msg(this->name, GF_LOG_ERROR, op_errno, SVS_MSG_NO_MEMORY,
 2244                "failed to allocate fd context "
 2245                "for %s (gfid: %s)",
 2246                loc->name, uuid_utoa(loc->inode->gfid));
 2247         glfs_close(glfd);
 2248         goto out;
 2249     }
 2250     sfd->fd = glfd;
 2251 
 2252     op_ret = 0;
 2253 
 2254 out:
 2255     STACK_UNWIND_STRICT(open, frame, op_ret, op_errno, fd, NULL);
 2256     return 0;
 2257 }
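
      /*
       * The glfs_fd_t stashed in the fd context here is what the later fd-based
       * fops reuse: svs_readv() below and svs_fstat() above fetch it again
       * through svs_fd_ctx_get_or_new() instead of re-opening the file via
       * gfapi.
       */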
 2258 
 2259 int32_t
 2260 svs_readv(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
 2261           off_t offset, uint32_t flags, dict_t *xdata)
 2262 {
 2263     int32_t op_ret = -1;
 2264     int32_t op_errno = 0;
 2265     svs_private_t *priv = NULL;
 2266     struct iobuf *iobuf = NULL;
 2267     struct iobref *iobref = NULL;
 2268     struct iovec vec = {
 2269         0,
 2270     };
 2271     svs_fd_t *sfd = NULL;
 2272     int ret = -1;
 2273     struct glfs_stat fstatbuf = {
 2274         0,
 2275     };
 2276     glfs_fd_t *glfd = NULL;
 2277     struct iatt stbuf = {
 2278         0,
 2279     };
 2280     call_stack_t *root = NULL;
 2281 
 2282     GF_VALIDATE_OR_GOTO("snap-view-daemon", this, out);
 2283     GF_VALIDATE_OR_GOTO(this->name, frame, out);
 2284     GF_VALIDATE_OR_GOTO(this->name, fd, out);
 2285     GF_VALIDATE_OR_GOTO(this->name, fd->inode, out);
 2286 
 2287     priv = this->private;
 2288     VALIDATE_OR_GOTO(priv, out);
 2289 
 2290     root = frame->root;
 2291     op_ret = gf_setcredentials(&root->uid, &root->gid, root->ngrps,
 2292                                root->groups);
 2293     if (op_ret != 0) {
 2294         goto out;
 2295     }
 2296 
 2297     if (!svs_inode_glfs_mapping(this, fd->inode)) {
 2298         op_ret = -1;
 2299         op_errno = EBADF; /* should this be some other error? */
 2300         gf_msg(this->name, GF_LOG_ERROR, op_errno, SVS_MSG_FS_INSTANCE_INVALID,
 2301                "glfs instance to which the inode "
 2302                "%s receiving the read request belongs "
 2303                "does not exist anymore",
 2304                uuid_utoa(fd->inode->gfid));
 2305         goto out;
 2306     }
 2307 
 2308     sfd = svs_fd_ctx_get_or_new(this, fd);
 2309     if (!sfd) {
 2310         op_ret = -1;
 2311         op_errno = EBADFD;
 2312         gf_msg(this->name, GF_LOG_ERROR, op_errno,
 2313                SVS_MSG_GET_INODE_CONTEXT_FAILED,
 2314                "failed to get the fd "
 2315                "context for %s",
 2316                uuid_utoa(fd->inode->gfid));
 2317         goto out;
 2318     }
 2319 
 2320     glfd = sfd->fd;
 2321 
 2322     iobuf = iobuf_get2(this->ctx->iobuf_pool, size);
 2323     if (!iobuf) {
 2324         op_ret = -1;
 2325         op_errno = ENOMEM;
 2326         gf_msg(this->name, GF_LOG_ERROR, op_errno, SVS_MSG_NO_MEMORY,
 2327                "failed to "
 2328                "allocate iobuf while reading the "
 2329                "file with gfid %s",
 2330                uuid_utoa(fd->inode->gfid));
 2331         goto out;
 2332     }
 2333 
 2334     ret = glfs_pread(glfd, iobuf->ptr, size, offset, 0, &fstatbuf);
 2335     if (ret < 0) {
 2336         op_ret = -1;
 2337         op_errno = errno;
 2338         gf_msg(this->name, GF_LOG_ERROR, op_errno, SVS_MSG_READ_FAILED,
 2339                "glfs_pread failed on %s (%s)", uuid_utoa(fd->inode->gfid),
 2340                strerror(op_errno));
 2341         goto out;
 2342     }
 2343 
 2344     vec.iov_base = iobuf->ptr;
 2345     vec.iov_len = ret;
 2346 
 2347     iobref = iobref_new();
 2348 
 2349     iobref_add(iobref, iobuf);
 2350     glfs_iatt_from_statx(&stbuf, &fstatbuf);
 2351     gf_uuid_copy(stbuf.ia_gfid, fd->inode->gfid);
 2352     svs_fill_ino_from_gfid(&stbuf);
 2353 
 2354     /* Hack to notify higher layers of EOF. */
 2355     if (!stbuf.ia_size || (offset + vec.iov_len) >= stbuf.ia_size)
 2356         op_errno = ENOENT;
 2357 
 2358     op_ret = vec.iov_len;
 2359 
 2360 out:
 2361 
 2362     STACK_UNWIND_STRICT(readv, frame, op_ret, op_errno, &vec, 1, &stbuf, iobref,
 2363                         NULL);
 2364 
 2365     if (iobref)
 2366         iobref_unref(iobref);
 2367     if (iobuf)
 2368         iobuf_unref(iobuf);
 2369 
 2370     return 0;
 2371 }
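
      /*
       * A small worked example of the EOF hint above: for a 100-byte file, a
       * read of 4096 bytes at offset 0 returns op_ret == 100 with op_errno set
       * to ENOENT, because offset + vec.iov_len (100) >= ia_size (100). Higher
       * layers can treat such a reply as the last one instead of probing for a
       * zero-byte read.
       */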
 2372 
 2373 int32_t
 2374 svs_readlink(call_frame_t *frame, xlator_t *this, loc_t *loc, size_t size,
 2375              dict_t *xdata)
 2376 {
 2377     svs_inode_t *inode_ctx = NULL;
 2378     glfs_t *fs = NULL;
 2379     glfs_object_t *object = NULL;
 2380     int op_ret = -1;
 2381     int op_errno = EINVAL;
 2382     char *buf = NULL;
 2383     struct iatt stbuf = {
 2384         0,
 2385     };
 2386     int ret = -1;
 2387     struct stat stat = {
 2388         0,
 2389     };
 2390     call_stack_t *root = NULL;
 2391 
 2392     GF_VALIDATE_OR_GOTO("snap-view-daemon", this, out);
 2393     GF_VALIDATE_OR_GOTO(this->name, frame, out);
 2394     GF_VALIDATE_OR_GOTO(this->name, loc, out);
 2395     GF_VALIDATE_OR_GOTO(this->name, loc->inode, out);
 2396 
 2397     root = frame->root;
 2398     op_ret = gf_setcredentials(&root->uid, &root->gid, root->ngrps,
 2399                                root->groups);
 2400     if (op_ret != 0) {
 2401         goto out;
 2402     }
 2403 
 2404     inode_ctx = svs_inode_ctx_get(this, loc->inode);
 2405     if (!inode_ctx) {
 2406         op_ret = -1;
 2407         op_errno = EINVAL;
 2408         gf_msg(this->name, GF_LOG_ERROR, op_errno,
 2409                SVS_MSG_GET_INODE_CONTEXT_FAILED,
 2410                "failed to get inode context "
 2411                "for %s (gfid: %s)",
 2412                loc->name, uuid_utoa(loc->inode->gfid));
 2413         goto out;
 2414     }
 2415 
 2416     SVS_GET_INODE_CTX_INFO(inode_ctx, fs, object, this, loc, op_ret, op_errno,
 2417                            out);
 2418 
 2419     ret = glfs_h_stat(fs, object, &stat);
 2420     if (ret) {
 2421         op_ret = -1;
 2422         op_errno = errno;
 2423         gf_msg(this->name, GF_LOG_ERROR, op_errno, SVS_MSG_STAT_FAILED,
 2424                "glfs_h_stat on %s (gfid: %s) "
 2425                "failed",
 2426                loc->name, uuid_utoa(loc->inode->gfid));
 2427         goto out;
 2428     }
 2429 
 2430     iatt_from_stat(&stbuf, &stat);
 2431     gf_uuid_copy(stbuf.ia_gfid, loc->inode->gfid);
 2432     svs_fill_ino_from_gfid(&stbuf);
 2433 
 2434     buf = alloca(size + 1);
 2435     op_ret = glfs_h_readlink(fs, object, buf, size);
 2436     if (op_ret == -1) {
 2437         op_errno = errno;
 2438         gf_msg(this->name, GF_LOG_ERROR, op_errno, SVS_MSG_READLINK_FAILED,
 2439                "readlink on %s failed (gfid: %s)", loc->name,
 2440                uuid_utoa(loc->inode->gfid));
 2441         goto out;
 2442     }
 2443 
 2444     buf[op_ret] = 0;
 2445 
 2446 out:
 2447     STACK_UNWIND_STRICT(readlink, frame, op_ret, op_errno, buf, &stbuf, NULL);
 2448 
 2449     return 0;
 2450 }
 2451 
 2452 int32_t
 2453 svs_access(call_frame_t *frame, xlator_t *this, loc_t *loc, int mask,
 2454            dict_t *xdata)
 2455 {
 2456     int ret = -1;
 2457     int32_t op_ret = -1;
 2458     int32_t op_errno = EINVAL;
 2459     glfs_t *fs = NULL;
 2460     glfs_object_t *object = NULL;
 2461     svs_inode_t *inode_ctx = NULL;
 2462     gf_boolean_t is_fuse_call = 0;
 2463     int mode = 0;
 2464     call_stack_t *root = NULL;
 2465 
 2466     GF_VALIDATE_OR_GOTO("svs", this, out);
 2467     GF_VALIDATE_OR_GOTO(this->name, this->private, out);
 2468     GF_VALIDATE_OR_GOTO(this->name, frame, out);
 2469     GF_VALIDATE_OR_GOTO(this->name, loc, out);
 2470     GF_VALIDATE_OR_GOTO(this->name, loc->inode, out);
 2471 
 2472     root = frame->root;
 2473     op_ret = gf_setcredentials(&root->uid, &root->gid, root->ngrps,
 2474                                root->groups);
 2475     if (op_ret != 0) {
 2476         goto out;
 2477     }
 2478 
 2479     inode_ctx = svs_inode_ctx_get(this, loc->inode);
 2480     if (!inode_ctx) {
 2481         op_ret = -1;
 2482         op_errno = EINVAL;
 2483         gf_msg(this->name, GF_LOG_ERROR, op_errno,
 2484                SVS_MSG_GET_INODE_CONTEXT_FAILED,
 2485                "inode context not found for %s", uuid_utoa(loc->inode->gfid));
 2486         goto out;
 2487     }
 2488 
 2489     is_fuse_call = __is_fuse_call(frame);
 2490 
 2491     /*
 2492      * For entry-point directory, set read and execute bits. But not write
 2493      * permissions.
 2494      */
 2495     if (inode_ctx->type == SNAP_VIEW_ENTRY_POINT_INODE) {
 2496         if (is_fuse_call) {
 2497             op_ret = 0;
 2498             op_errno = 0;
 2499         } else {
 2500             op_ret = 0;
 2501             mode |= POSIX_ACL_READ;
 2502             mode |= POSIX_ACL_EXECUTE;
 2503             op_errno = mode;
 2504         }
 2505         goto out;
 2506     }
 2507 
 2508     SVS_GET_INODE_CTX_INFO(inode_ctx, fs, object, this, loc, op_ret, op_errno,
 2509                            out);
 2510 
 2511     /* The actual posix_acl xlator does acl checks differently for
 2512        fuse and nfs. So set frame->root->pid as fspid of the syncop
 2513        if the call came from nfs
 2514     */
 2515     if (!is_fuse_call) {
 2516         syncopctx_setfspid(&frame->root->pid);
 2517         syncopctx_setfsuid(&frame->root->uid);
 2518         syncopctx_setfsgid(&frame->root->gid);
 2519         syncopctx_setfsgroups(frame->root->ngrps, frame->root->groups);
 2520     }
 2521 
 2522     ret = glfs_h_access(fs, object, mask);
 2523     if (ret < 0) {
 2524         op_ret = -1;
 2525         op_errno = errno;
 2526         gf_msg(this->name, GF_LOG_ERROR, op_errno, SVS_MSG_ACCESS_FAILED,
 2527                "failed to access %s (gfid: %s)", loc->path,
 2528                uuid_utoa(loc->inode->gfid));
 2529         goto out;
 2530     }
 2531 
 2532     op_ret = 0;
 2533     op_errno = ret;
 2534 
 2535 out:
 2536 
 2537     STACK_UNWIND_STRICT(access, frame, op_ret, op_errno, NULL);
 2538     return 0;
 2539 }
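
      /*
       * For non-FUSE callers (e.g. the NFS/ACL path), the allowed permission
       * bits for the entry-point directory are conveyed through op_errno:
       * POSIX_ACL_READ and POSIX_ACL_EXECUTE are set and write is deliberately
       * left out, since the snapshot world is read-only. FUSE callers simply
       * get op_ret == 0 for the entry point.
       */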
 2540 
 2541 int32_t
 2542 notify(xlator_t *this, int32_t event, void *data, ...)
 2543 {
 2544     switch (event) {
 2545         case GF_EVENT_PARENT_UP: {
 2546             /* Tell the parent that snapview-server xlator is up */
 2547             default_notify(this, GF_EVENT_CHILD_UP, data);
 2548         } break;
 2549         default:
 2550             break;
 2551     }
 2552     return 0;
 2553 }
 2554 
 2555 int32_t
 2556 mem_acct_init(xlator_t *this)
 2557 {
 2558     int ret = -1;
 2559 
 2560     if (!this)
 2561         return ret;
 2562 
 2563     ret = xlator_mem_acct_init(this, gf_svs_mt_end + 1);
 2564 
 2565     if (ret != 0) {
 2566         gf_msg(this->name, GF_LOG_WARNING, 0, SVS_MSG_MEM_ACNT_FAILED,
 2567                "Memory accounting"
 2568                " init failed");
 2569         return ret;
 2570     }
 2571 
 2572     return ret;
 2573 }
 2574 
 2575 int32_t
 2576 init(xlator_t *this)
 2577 {
 2578     svs_private_t *priv = NULL;
 2579     int ret = -1;
 2580 
 2581     /* This can be the top of graph in certain cases */
 2582     if (!this->parents) {
 2583         gf_msg_debug(this->name, 0, "dangling volume. check volfile ");
 2584     }
 2585 
 2586     priv = GF_CALLOC(1, sizeof(*priv), gf_svs_mt_priv_t);
 2587     if (!priv) {
 2588         gf_msg(this->name, GF_LOG_ERROR, ENOMEM, SVS_MSG_NO_MEMORY,
 2589                "failed to "
 2590                "allocate memory for this->private ");
 2591         goto out;
 2592     }
 2593 
 2594     this->private = priv;
 2595 
 2596     GF_OPTION_INIT("volname", priv->volname, str, out);
 2597     LOCK_INIT(&priv->snaplist_lock);
 2598 
 2599     LOCK(&priv->snaplist_lock);
 2600     {
 2601         priv->num_snaps = 0;
 2602     }
 2603     UNLOCK(&priv->snaplist_lock);
 2604 
 2605     /* What to do here upon failure? Should init fail or succeed? */
 2606     /* If it succeeds, then dynamic management of snapshots will */
 2607     /* not happen. */
 2608     ret = svs_mgmt_init(this);
 2609     if (ret) {
 2610         gf_msg(this->name, GF_LOG_WARNING, EINVAL, SVS_MSG_MGMT_INIT_FAILED,
 2611                "failed to initiate the "
 2612                "mgmt rpc callback for svs. Dynamic management of the "
 2613                "snapshots will not happen");
 2614         goto out;
 2615     }
 2616 
 2617     /* get the list of snaps first to return to client xlator */
 2618     ret = svs_get_snapshot_list(this);
 2619     if (ret) {
 2620         gf_msg(this->name, GF_LOG_ERROR, EINVAL,
 2621                SVS_MSG_GET_SNAPSHOT_LIST_FAILED,
 2622                "Error initializing snaplist infrastructure");
 2623         ret = -1;
 2624         goto out;
 2625     }
 2626 
 2627     ret = 0;
 2628 
 2629 out:
 2630     if (ret && priv) {
 2631         LOCK_DESTROY(&priv->snaplist_lock);
 2632         GF_FREE(priv->dirents);
 2633         GF_FREE(priv);
 2634     }
 2635 
 2636     return ret;
 2637 }
 2638 
 2639 void
 2640 fini(xlator_t *this)
 2641 {
 2642     svs_private_t *priv = NULL;
 2643     glusterfs_ctx_t *ctx = NULL;
 2644     int ret = 0;
 2645 
 2646     GF_ASSERT(this);
 2647     priv = this->private;
 2648     this->private = NULL;
 2649     ctx = this->ctx;
 2650     if (!ctx)
 2651         gf_msg(this->name, GF_LOG_ERROR, 0, SVS_MSG_INVALID_GLFS_CTX,
 2652                "Invalid ctx found");
 2653 
 2654     if (priv) {
 2655         ret = LOCK_DESTROY(&priv->snaplist_lock);
 2656         if (ret != 0) {
 2657             gf_msg(this->name, GF_LOG_WARNING, errno,
 2658                    SVS_MSG_LOCK_DESTROY_FAILED,
 2659                    "Could not destroy mutex snaplist_lock");
 2660         }
 2661 
 2662         if (priv->dirents) {
 2663             GF_FREE(priv->dirents);
 2664         }
 2665 
 2666         if (priv->rpc) {
 2667             /* cleanup the saved-frames before last unref */
 2668             rpc_clnt_connection_cleanup(&priv->rpc->conn);
 2669             rpc_clnt_unref(priv->rpc);
 2670         }
 2671 
 2672         GF_FREE(priv);
 2673     }
 2674 
 2675     return;
 2676 }
 2677 
 2678 struct xlator_fops fops = {
 2679     .lookup = svs_lookup,
 2680     .stat = svs_stat,
 2681     .statfs = svs_statfs,
 2682     .opendir = svs_opendir,
 2683     .readdirp = svs_readdirp,
 2684     .readdir = svs_readdir,
 2685     .open = svs_open,
 2686     .readv = svs_readv,
 2687     .flush = svs_flush,
 2688     .fstat = svs_fstat,
 2689     .getxattr = svs_getxattr,
 2690     .access = svs_access,
 2691     .readlink = svs_readlink,
 2692     /* entry fops */
 2693 };
 2694 
 2695 struct xlator_cbks cbks = {
 2696     .release = svs_release,
 2697     .releasedir = svs_releasedir,
 2698     .forget = svs_forget,
 2699 };
 2700 
 2701 struct volume_options options[] = {
 2702     {
 2703         .key = {"volname"},
 2704         .type = GF_OPTION_TYPE_STR,
 2705     },
 2706     {.key = {NULL}},
 2707 };
 2708 
 2709 xlator_api_t xlator_api = {
 2710     .init = init,
 2711     .fini = fini,
 2712     .notify = notify,
 2713     .mem_acct_init = mem_acct_init,
 2714     .op_version = {1},
 2715     .fops = &fops,
 2716     .cbks = &cbks,
 2717     .options = options,
 2718     .identifier = "snapview-server",
 2719     .category = GF_MAINTAINED,
 2720 };