"Fossies" - the Fresh Open Source Software Archive

Member "fuse-3.10.4/doc/html/fuse-3_810_83_2lib_2fuse__lowlevel_8c_source.html" (9 Jun 2021, 538741 Bytes) of package /linux/misc/fuse-3.10.4.tar.xz:


Caution: In this restricted "Fossies" environment the current HTML page may not be correctly presentated and may have some non-functional links. You can here alternatively try to browse the pure source code or just view or download the uninterpreted raw source code. If the rendering is insufficient you may try to find and view the page on the project site itself.

libfuse
fuse_lowlevel.c
1 /*
2  FUSE: Filesystem in Userspace
3  Copyright (C) 2001-2007 Miklos Szeredi <miklos@szeredi.hu>
4 
5  Implementation of (most of) the low-level FUSE API. The session loop
6  functions are implemented in separate files.
7 
8  This program can be distributed under the terms of the GNU LGPLv2.
9  See the file COPYING.LIB
10 */
11 
12 #define _GNU_SOURCE
13 
14 #include "config.h"
15 #include "fuse_i.h"
16 #include "fuse_kernel.h"
17 #include "fuse_opt.h"
18 #include "fuse_misc.h"
19 #include "mount_util.h"
20 
21 #include <stdio.h>
22 #include <stdlib.h>
23 #include <stddef.h>
24 #include <string.h>
25 #include <unistd.h>
26 #include <limits.h>
27 #include <errno.h>
28 #include <assert.h>
29 #include <sys/file.h>
30 
31 #ifndef F_LINUX_SPECIFIC_BASE
32 #define F_LINUX_SPECIFIC_BASE 1024
33 #endif
34 #ifndef F_SETPIPE_SZ
35 #define F_SETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 7)
36 #endif
37 
38 
39 #define PARAM(inarg) (((char *)(inarg)) + sizeof(*(inarg)))
40 #define OFFSET_MAX 0x7fffffffffffffffLL
41 
42 #define container_of(ptr, type, member) ({ \
43  const typeof( ((type *)0)->member ) *__mptr = (ptr); \
44  (type *)( (char *)__mptr - offsetof(type,member) );})
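/*
 * A minimal usage sketch of container_of(): given a pointer to a member,
 * it recovers the enclosing structure (the variable names below are
 * illustrative only):
 *
 *   struct fuse_req *req = ...;
 *   pthread_mutex_t *lk = &req->lock;
 *   struct fuse_req *same = container_of(lk, struct fuse_req, lock);
 *   // same == req: lk minus offsetof(struct fuse_req, lock)
 */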
45 
46 struct fuse_pollhandle {
47  uint64_t kh;
48  struct fuse_session *se;
49 };
50 
51 static size_t pagesize;
52 
53 static __attribute__((constructor)) void fuse_ll_init_pagesize(void)
54 {
55  pagesize = getpagesize();
56 }
57 
58 static void convert_stat(const struct stat *stbuf, struct fuse_attr *attr)
59 {
60  attr->ino = stbuf->st_ino;
61  attr->mode = stbuf->st_mode;
62  attr->nlink = stbuf->st_nlink;
63  attr->uid = stbuf->st_uid;
64  attr->gid = stbuf->st_gid;
65  attr->rdev = stbuf->st_rdev;
66  attr->size = stbuf->st_size;
67  attr->blksize = stbuf->st_blksize;
68  attr->blocks = stbuf->st_blocks;
69  attr->atime = stbuf->st_atime;
70  attr->mtime = stbuf->st_mtime;
71  attr->ctime = stbuf->st_ctime;
72  attr->atimensec = ST_ATIM_NSEC(stbuf);
73  attr->mtimensec = ST_MTIM_NSEC(stbuf);
74  attr->ctimensec = ST_CTIM_NSEC(stbuf);
75 }
76 
77 static void convert_attr(const struct fuse_setattr_in *attr, struct stat *stbuf)
78 {
79  stbuf->st_mode = attr->mode;
80  stbuf->st_uid = attr->uid;
81  stbuf->st_gid = attr->gid;
82  stbuf->st_size = attr->size;
83  stbuf->st_atime = attr->atime;
84  stbuf->st_mtime = attr->mtime;
85  stbuf->st_ctime = attr->ctime;
86  ST_ATIM_NSEC_SET(stbuf, attr->atimensec);
87  ST_MTIM_NSEC_SET(stbuf, attr->mtimensec);
88  ST_CTIM_NSEC_SET(stbuf, attr->ctimensec);
89 }
90 
91 static size_t iov_length(const struct iovec *iov, size_t count)
92 {
93  size_t seg;
94  size_t ret = 0;
95 
96  for (seg = 0; seg < count; seg++)
97  ret += iov[seg].iov_len;
98  return ret;
99 }
100 
101 static void list_init_req(struct fuse_req *req)
102 {
103  req->next = req;
104  req->prev = req;
105 }
106 
107 static void list_del_req(struct fuse_req *req)
108 {
109  struct fuse_req *prev = req->prev;
110  struct fuse_req *next = req->next;
111  prev->next = next;
112  next->prev = prev;
113 }
114 
115 static void list_add_req(struct fuse_req *req, struct fuse_req *next)
116 {
117  struct fuse_req *prev = next->prev;
118  req->next = next;
119  req->prev = prev;
120  prev->next = req;
121  next->prev = req;
122 }
123 
124 static void destroy_req(fuse_req_t req)
125 {
126  pthread_mutex_destroy(&req->lock);
127  free(req);
128 }
129 
130 void fuse_free_req(fuse_req_t req)
131 {
132  int ctr;
133  struct fuse_session *se = req->se;
134 
135  pthread_mutex_lock(&se->lock);
136  req->u.ni.func = NULL;
137  req->u.ni.data = NULL;
138  list_del_req(req);
139  ctr = --req->ctr;
140  fuse_chan_put(req->ch);
141  req->ch = NULL;
142  pthread_mutex_unlock(&se->lock);
143  if (!ctr)
144  destroy_req(req);
145 }
146 
147 static struct fuse_req *fuse_ll_alloc_req(struct fuse_session *se)
148 {
149  struct fuse_req *req;
150 
151  req = (struct fuse_req *) calloc(1, sizeof(struct fuse_req));
152  if (req == NULL) {
153  fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate request\n");
154  } else {
155  req->se = se;
156  req->ctr = 1;
157  list_init_req(req);
158  pthread_mutex_init(&req->lock, NULL);
159  }
160 
161  return req;
162 }
163 
164 /* Send data. If *ch* is NULL, send via session master fd */
165 static int fuse_send_msg(struct fuse_session *se, struct fuse_chan *ch,
166  struct iovec *iov, int count)
167 {
168  struct fuse_out_header *out = iov[0].iov_base;
169 
170  assert(se != NULL);
171  out->len = iov_length(iov, count);
172  if (se->debug) {
173  if (out->unique == 0) {
174  fuse_log(FUSE_LOG_DEBUG, "NOTIFY: code=%d length=%u\n",
175  out->error, out->len);
176  } else if (out->error) {
177  fuse_log(FUSE_LOG_DEBUG,
178  " unique: %llu, error: %i (%s), outsize: %i\n",
179  (unsigned long long) out->unique, out->error,
180  strerror(-out->error), out->len);
181  } else {
182  fuse_log(FUSE_LOG_DEBUG,
183  " unique: %llu, success, outsize: %i\n",
184  (unsigned long long) out->unique, out->len);
185  }
186  }
187 
188  ssize_t res = writev(ch ? ch->fd : se->fd,
189  iov, count);
190  int err = errno;
191 
192  if (res == -1) {
193  /* ENOENT means the operation was interrupted */
194  if (!fuse_session_exited(se) && err != ENOENT)
195  perror("fuse: writing device");
196  return -err;
197  }
198 
199  return 0;
200 }
201 
202 
203 int fuse_send_reply_iov_nofree(fuse_req_t req, int error, struct iovec *iov,
204  int count)
205 {
206  struct fuse_out_header out;
207 
208  if (error <= -1000 || error > 0) {
209  fuse_log(FUSE_LOG_ERR, "fuse: bad error value: %i\n", error);
210  error = -ERANGE;
211  }
212 
213  out.unique = req->unique;
214  out.error = error;
215 
216  iov[0].iov_base = &out;
217  iov[0].iov_len = sizeof(struct fuse_out_header);
218 
219  return fuse_send_msg(req->se, req->ch, iov, count);
220 }
221 
222 static int send_reply_iov(fuse_req_t req, int error, struct iovec *iov,
223  int count)
224 {
225  int res;
226 
227  res = fuse_send_reply_iov_nofree(req, error, iov, count);
228  fuse_free_req(req);
229  return res;
230 }
231 
232 static int send_reply(fuse_req_t req, int error, const void *arg,
233  size_t argsize)
234 {
235  struct iovec iov[2];
236  int count = 1;
237  if (argsize) {
238  iov[1].iov_base = (void *) arg;
239  iov[1].iov_len = argsize;
240  count++;
241  }
242  return send_reply_iov(req, error, iov, count);
243 }
244 
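/*
 * Note: slot 0 of the padded vector below is left for
 * fuse_send_reply_iov_nofree(), which fills it with the fuse_out_header
 * before the message is written to the device.
 */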
245 int fuse_reply_iov(fuse_req_t req, const struct iovec *iov, int count)
246 {
247  int res;
248  struct iovec *padded_iov;
249 
250  padded_iov = malloc((count + 1) * sizeof(struct iovec));
251  if (padded_iov == NULL)
252  return fuse_reply_err(req, ENOMEM);
253 
254  memcpy(padded_iov + 1, iov, count * sizeof(struct iovec));
255  count++;
256 
257  res = send_reply_iov(req, 0, padded_iov, count);
258  free(padded_iov);
259 
260  return res;
261 }
262 
263 
264 /* `buf` is allowed to be empty so that the proper size may be
265  allocated by the caller */
266 size_t fuse_add_direntry(fuse_req_t req, char *buf, size_t bufsize,
267  const char *name, const struct stat *stbuf, off_t off)
268 {
269  (void)req;
270  size_t namelen;
271  size_t entlen;
272  size_t entlen_padded;
273  struct fuse_dirent *dirent;
274 
275  namelen = strlen(name);
276  entlen = FUSE_NAME_OFFSET + namelen;
277  entlen_padded = FUSE_DIRENT_ALIGN(entlen);
278 
279  if ((buf == NULL) || (entlen_padded > bufsize))
280  return entlen_padded;
281 
282  dirent = (struct fuse_dirent*) buf;
283  dirent->ino = stbuf->st_ino;
284  dirent->off = off;
285  dirent->namelen = namelen;
286  dirent->type = (stbuf->st_mode & S_IFMT) >> 12;
287  memcpy(dirent->name, name, namelen);
288  memset(dirent->name + namelen, 0, entlen_padded - entlen);
289 
290  return entlen_padded;
291 }
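/*
 * Typical two-pass use from a readdir handler (a sketch; p, remaining,
 * next_off and stbuf are caller-managed names used only for illustration):
 *
 *   size_t entsize = fuse_add_direntry(req, NULL, 0, name, &stbuf, next_off);
 *   if (entsize <= remaining) {
 *           fuse_add_direntry(req, p, remaining, name, &stbuf, next_off);
 *           p += entsize;
 *           remaining -= entsize;
 *   }  // otherwise the entry does not fit and filling stops here
 */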
292 
293 static void convert_statfs(const struct statvfs *stbuf,
294  struct fuse_kstatfs *kstatfs)
295 {
296  kstatfs->bsize = stbuf->f_bsize;
297  kstatfs->frsize = stbuf->f_frsize;
298  kstatfs->blocks = stbuf->f_blocks;
299  kstatfs->bfree = stbuf->f_bfree;
300  kstatfs->bavail = stbuf->f_bavail;
301  kstatfs->files = stbuf->f_files;
302  kstatfs->ffree = stbuf->f_ffree;
303  kstatfs->namelen = stbuf->f_namemax;
304 }
305 
306 static int send_reply_ok(fuse_req_t req, const void *arg, size_t argsize)
307 {
308  return send_reply(req, 0, arg, argsize);
309 }
310 
311 int fuse_reply_err(fuse_req_t req, int err)
312 {
313  return send_reply(req, -err, NULL, 0);
314 }
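/*
 * Callers pass a positive errno value; it is negated so the kernel sees
 * a negative code in fuse_out_header.error. For example,
 * fuse_reply_err(req, ENOENT) puts -ENOENT on the wire; values outside
 * the range (-1000, 0] are rejected by fuse_send_reply_iov_nofree().
 */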
315 
316 void fuse_reply_none(fuse_req_t req)
317 {
318  fuse_free_req(req);
319 }
320 
321 static unsigned long calc_timeout_sec(double t)
322 {
323  if (t > (double) ULONG_MAX)
324  return ULONG_MAX;
325  else if (t < 0.0)
326  return 0;
327  else
328  return (unsigned long) t;
329 }
330 
331 static unsigned int calc_timeout_nsec(double t)
332 {
333  double f = t - (double) calc_timeout_sec(t);
334  if (f < 0.0)
335  return 0;
336  else if (f >= 0.999999999)
337  return 999999999;
338  else
339  return (unsigned int) (f * 1.0e9);
340 }
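/*
 * Example: a timeout of 1.5 seconds is split by the two helpers above
 * into a whole-second part of 1 and a nanosecond part of 500000000.
 */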
341 
342 static void fill_entry(struct fuse_entry_out *arg,
343  const struct fuse_entry_param *e)
344 {
345  arg->nodeid = e->ino;
346  arg->generation = e->generation;
347  arg->entry_valid = calc_timeout_sec(e->entry_timeout);
348  arg->entry_valid_nsec = calc_timeout_nsec(e->entry_timeout);
349  arg->attr_valid = calc_timeout_sec(e->attr_timeout);
350  arg->attr_valid_nsec = calc_timeout_nsec(e->attr_timeout);
351  convert_stat(&e->attr, &arg->attr);
352 }
353 
354 /* `buf` is allowed to be empty so that the proper size may be
355  allocated by the caller */
356 size_t fuse_add_direntry_plus(fuse_req_t req, char *buf, size_t bufsize,
357  const char *name,
358  const struct fuse_entry_param *e, off_t off)
359 {
360  (void)req;
361  size_t namelen;
362  size_t entlen;
363  size_t entlen_padded;
364 
365  namelen = strlen(name);
366  entlen = FUSE_NAME_OFFSET_DIRENTPLUS + namelen;
367  entlen_padded = FUSE_DIRENT_ALIGN(entlen);
368  if ((buf == NULL) || (entlen_padded > bufsize))
369  return entlen_padded;
370 
371  struct fuse_direntplus *dp = (struct fuse_direntplus *) buf;
372  memset(&dp->entry_out, 0, sizeof(dp->entry_out));
373  fill_entry(&dp->entry_out, e);
374 
375  struct fuse_dirent *dirent = &dp->dirent;
376  dirent->ino = e->attr.st_ino;
377  dirent->off = off;
378  dirent->namelen = namelen;
379  dirent->type = (e->attr.st_mode & S_IFMT) >> 12;
380  memcpy(dirent->name, name, namelen);
381  memset(dirent->name + namelen, 0, entlen_padded - entlen);
382 
383  return entlen_padded;
384 }
385 
386 static void fill_open(struct fuse_open_out *arg,
387  const struct fuse_file_info *f)
388 {
389  arg->fh = f->fh;
390  if (f->direct_io)
391  arg->open_flags |= FOPEN_DIRECT_IO;
392  if (f->keep_cache)
393  arg->open_flags |= FOPEN_KEEP_CACHE;
394  if (f->cache_readdir)
395  arg->open_flags |= FOPEN_CACHE_DIR;
396  if (f->nonseekable)
397  arg->open_flags |= FOPEN_NONSEEKABLE;
398 }
399 
400 int fuse_reply_entry(fuse_req_t req, const struct fuse_entry_param *e)
401 {
402  struct fuse_entry_out arg;
403  size_t size = req->se->conn.proto_minor < 9 ?
404  FUSE_COMPAT_ENTRY_OUT_SIZE : sizeof(arg);
405 
406  /* before ABI 7.4 e->ino == 0 was invalid, only ENOENT meant
407  negative entry */
408  if (!e->ino && req->se->conn.proto_minor < 4)
409  return fuse_reply_err(req, ENOENT);
410 
411  memset(&arg, 0, sizeof(arg));
412  fill_entry(&arg, e);
413  return send_reply_ok(req, &arg, size);
414 }
415 
416 int fuse_reply_create(fuse_req_t req, const struct fuse_entry_param *e,
417  const struct fuse_file_info *f)
418 {
419  char buf[sizeof(struct fuse_entry_out) + sizeof(struct fuse_open_out)];
420  size_t entrysize = req->se->conn.proto_minor < 9 ?
421  FUSE_COMPAT_ENTRY_OUT_SIZE : sizeof(struct fuse_entry_out);
422  struct fuse_entry_out *earg = (struct fuse_entry_out *) buf;
423  struct fuse_open_out *oarg = (struct fuse_open_out *) (buf + entrysize);
424 
425  memset(buf, 0, sizeof(buf));
426  fill_entry(earg, e);
427  fill_open(oarg, f);
428  return send_reply_ok(req, buf,
429  entrysize + sizeof(struct fuse_open_out));
430 }
431 
432 int fuse_reply_attr(fuse_req_t req, const struct stat *attr,
433  double attr_timeout)
434 {
435  struct fuse_attr_out arg;
436  size_t size = req->se->conn.proto_minor < 9 ?
437  FUSE_COMPAT_ATTR_OUT_SIZE : sizeof(arg);
438 
439  memset(&arg, 0, sizeof(arg));
440  arg.attr_valid = calc_timeout_sec(attr_timeout);
441  arg.attr_valid_nsec = calc_timeout_nsec(attr_timeout);
442  convert_stat(attr, &arg.attr);
443 
444  return send_reply_ok(req, &arg, size);
445 }
446 
447 int fuse_reply_readlink(fuse_req_t req, const char *linkname)
448 {
449  return send_reply_ok(req, linkname, strlen(linkname));
450 }
451 
452 int fuse_reply_open(fuse_req_t req, const struct fuse_file_info *f)
453 {
454  struct fuse_open_out arg;
455 
456  memset(&arg, 0, sizeof(arg));
457  fill_open(&arg, f);
458  return send_reply_ok(req, &arg, sizeof(arg));
459 }
460 
461 int fuse_reply_write(fuse_req_t req, size_t count)
462 {
463  struct fuse_write_out arg;
464 
465  memset(&arg, 0, sizeof(arg));
466  arg.size = count;
467 
468  return send_reply_ok(req, &arg, sizeof(arg));
469 }
470 
471 int fuse_reply_buf(fuse_req_t req, const char *buf, size_t size)
472 {
473  return send_reply_ok(req, buf, size);
474 }
475 
476 static int fuse_send_data_iov_fallback(struct fuse_session *se,
477  struct fuse_chan *ch,
478  struct iovec *iov, int iov_count,
479  struct fuse_bufvec *buf,
480  size_t len)
481 {
482  struct fuse_bufvec mem_buf = FUSE_BUFVEC_INIT(len);
483  void *mbuf;
484  int res;
485 
486  /* Optimize common case */
487  if (buf->count == 1 && buf->idx == 0 && buf->off == 0 &&
488  !(buf->buf[0].flags & FUSE_BUF_IS_FD)) {
489  /* FIXME: also avoid memory copy if there are multiple buffers
490  but none of them contain an fd */
491 
492  iov[iov_count].iov_base = buf->buf[0].mem;
493  iov[iov_count].iov_len = len;
494  iov_count++;
495  return fuse_send_msg(se, ch, iov, iov_count);
496  }
497 
498  res = posix_memalign(&mbuf, pagesize, len);
499  if (res != 0)
500  return res;
501 
502  mem_buf.buf[0].mem = mbuf;
503  res = fuse_buf_copy(&mem_buf, buf, 0);
504  if (res < 0) {
505  free(mbuf);
506  return -res;
507  }
508  len = res;
509 
510  iov[iov_count].iov_base = mbuf;
511  iov[iov_count].iov_len = len;
512  iov_count++;
513  res = fuse_send_msg(se, ch, iov, iov_count);
514  free(mbuf);
515 
516  return res;
517 }
518 
519 struct fuse_ll_pipe {
520  size_t size;
521  int can_grow;
522  int pipe[2];
523 };
524 
525 static void fuse_ll_pipe_free(struct fuse_ll_pipe *llp)
526 {
527  close(llp->pipe[0]);
528  close(llp->pipe[1]);
529  free(llp);
530 }
531 
532 #ifdef HAVE_SPLICE
533 #if !defined(HAVE_PIPE2) || !defined(O_CLOEXEC)
534 static int fuse_pipe(int fds[2])
535 {
536  int rv = pipe(fds);
537 
538  if (rv == -1)
539  return rv;
540 
541  if (fcntl(fds[0], F_SETFL, O_NONBLOCK) == -1 ||
542  fcntl(fds[1], F_SETFL, O_NONBLOCK) == -1 ||
543  fcntl(fds[0], F_SETFD, FD_CLOEXEC) == -1 ||
544  fcntl(fds[1], F_SETFD, FD_CLOEXEC) == -1) {
545  close(fds[0]);
546  close(fds[1]);
547  rv = -1;
548  }
549  return rv;
550 }
551 #else
552 static int fuse_pipe(int fds[2])
553 {
554  return pipe2(fds, O_CLOEXEC | O_NONBLOCK);
555 }
556 #endif
557 
558 static struct fuse_ll_pipe *fuse_ll_get_pipe(struct fuse_session *se)
559 {
560  struct fuse_ll_pipe *llp = pthread_getspecific(se->pipe_key);
561  if (llp == NULL) {
562  int res;
563 
564  llp = malloc(sizeof(struct fuse_ll_pipe));
565  if (llp == NULL)
566  return NULL;
567 
568  res = fuse_pipe(llp->pipe);
569  if (res == -1) {
570  free(llp);
571  return NULL;
572  }
573 
574  /*
575  * the default size is 16 pages on Linux
576  */
577  llp->size = pagesize * 16;
578  llp->can_grow = 1;
579 
580  pthread_setspecific(se->pipe_key, llp);
581  }
582 
583  return llp;
584 }
585 #endif
586 
587 static void fuse_ll_clear_pipe(struct fuse_session *se)
588 {
589  struct fuse_ll_pipe *llp = pthread_getspecific(se->pipe_key);
590  if (llp) {
591  pthread_setspecific(se->pipe_key, NULL);
592  fuse_ll_pipe_free(llp);
593  }
594 }
595 
596 #if defined(HAVE_SPLICE) && defined(HAVE_VMSPLICE)
597 static int read_back(int fd, char *buf, size_t len)
598 {
599  int res;
600 
601  res = read(fd, buf, len);
602  if (res == -1) {
603  fuse_log(FUSE_LOG_ERR, "fuse: internal error: failed to read back from pipe: %s\n", strerror(errno));
604  return -EIO;
605  }
606  if (res != len) {
607  fuse_log(FUSE_LOG_ERR, "fuse: internal error: short read back from pipe: %i from %zi\n", res, len);
608  return -EIO;
609  }
610  return 0;
611 }
612 
613 static int grow_pipe_to_max(int pipefd)
614 {
615  int max;
616  int res;
617  int maxfd;
618  char buf[32];
619 
620  maxfd = open("/proc/sys/fs/pipe-max-size", O_RDONLY);
621  if (maxfd < 0)
622  return -errno;
623 
624  res = read(maxfd, buf, sizeof(buf) - 1);
625  if (res < 0) {
626  int saved_errno;
627 
628  saved_errno = errno;
629  close(maxfd);
630  return -saved_errno;
631  }
632  close(maxfd);
633  buf[res] = '\0';
634 
635  max = atoi(buf);
636  res = fcntl(pipefd, F_SETPIPE_SZ, max);
637  if (res < 0)
638  return -errno;
639  return max;
640 }
641 
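/*
 * Zero-copy reply path: when the payload is at least two pages, the
 * kernel speaks protocol >= 7.14 and FUSE_CAP_SPLICE_WRITE is wanted,
 * the header is vmsplice()d and the data splice()d through a per-thread
 * pipe instead of being copied into a temporary buffer. Any problem
 * (pipe too small, broken SPLICE_F_NONBLOCK, short transfers) drops
 * back to fuse_send_data_iov_fallback().
 */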
642 static int fuse_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
643  struct iovec *iov, int iov_count,
644  struct fuse_bufvec *buf, unsigned int flags)
645 {
646  int res;
647  size_t len = fuse_buf_size(buf);
648  struct fuse_out_header *out = iov[0].iov_base;
649  struct fuse_ll_pipe *llp;
650  int splice_flags;
651  size_t pipesize;
652  size_t total_buf_size;
653  size_t idx;
654  size_t headerlen;
655  struct fuse_bufvec pipe_buf = FUSE_BUFVEC_INIT(len);
656 
657  if (se->broken_splice_nonblock)
658  goto fallback;
659 
660  if (flags & FUSE_BUF_NO_SPLICE)
661  goto fallback;
662 
663  total_buf_size = 0;
664  for (idx = buf->idx; idx < buf->count; idx++) {
665  total_buf_size += buf->buf[idx].size;
666  if (idx == buf->idx)
667  total_buf_size -= buf->off;
668  }
669  if (total_buf_size < 2 * pagesize)
670  goto fallback;
671 
672  if (se->conn.proto_minor < 14 ||
673  !(se->conn.want & FUSE_CAP_SPLICE_WRITE))
674  goto fallback;
675 
676  llp = fuse_ll_get_pipe(se);
677  if (llp == NULL)
678  goto fallback;
679 
680 
681  headerlen = iov_length(iov, iov_count);
682 
683  out->len = headerlen + len;
684 
685  /*
686  * Heuristic for the required pipe size; it does not work if the
687  * source contains fragments smaller than the page size
688  */
689  pipesize = pagesize * (iov_count + buf->count + 1) + out->len;
690 
691  if (llp->size < pipesize) {
692  if (llp->can_grow) {
693  res = fcntl(llp->pipe[0], F_SETPIPE_SZ, pipesize);
694  if (res == -1) {
695  res = grow_pipe_to_max(llp->pipe[0]);
696  if (res > 0)
697  llp->size = res;
698  llp->can_grow = 0;
699  goto fallback;
700  }
701  llp->size = res;
702  }
703  if (llp->size < pipesize)
704  goto fallback;
705  }
706 
707 
708  res = vmsplice(llp->pipe[1], iov, iov_count, SPLICE_F_NONBLOCK);
709  if (res == -1)
710  goto fallback;
711 
712  if (res != headerlen) {
713  res = -EIO;
714  fuse_log(FUSE_LOG_ERR, "fuse: short vmsplice to pipe: %u/%zu\n", res,
715  headerlen);
716  goto clear_pipe;
717  }
718 
719  pipe_buf.buf[0].flags = FUSE_BUF_IS_FD;
720  pipe_buf.buf[0].fd = llp->pipe[1];
721 
722  res = fuse_buf_copy(&pipe_buf, buf,
723  FUSE_BUF_FORCE_SPLICE | FUSE_BUF_SPLICE_NONBLOCK);
724  if (res < 0) {
725  if (res == -EAGAIN || res == -EINVAL) {
726  /*
727  * Should only get EAGAIN on kernels with
728  * broken SPLICE_F_NONBLOCK support (<=
729  * 2.6.35) where this error or a short read is
730  * returned even if the pipe itself is not
731  * full
732  *
733  * EINVAL might mean that splice can't handle
734  * this combination of input and output.
735  */
736  if (res == -EAGAIN)
737  se->broken_splice_nonblock = 1;
738 
739  pthread_setspecific(se->pipe_key, NULL);
740  fuse_ll_pipe_free(llp);
741  goto fallback;
742  }
743  res = -res;
744  goto clear_pipe;
745  }
746 
747  if (res != 0 && res < len) {
748  struct fuse_bufvec mem_buf = FUSE_BUFVEC_INIT(len);
749  void *mbuf;
750  size_t now_len = res;
751  /*
752  * For regular files a short count is either
753  * 1) due to EOF, or
754  * 2) because of broken SPLICE_F_NONBLOCK (see above)
755  *
756  * For other inputs it's possible that we overflowed
757  * the pipe because of small buffer fragments.
758  */
759 
760  res = posix_memalign(&mbuf, pagesize, len);
761  if (res != 0)
762  goto clear_pipe;
763 
764  mem_buf.buf[0].mem = mbuf;
765  mem_buf.off = now_len;
766  res = fuse_buf_copy(&mem_buf, buf, 0);
767  if (res > 0) {
768  char *tmpbuf;
769  size_t extra_len = res;
770  /*
771  * Trickiest case: got more data. Need to get
772  * back the data from the pipe and then fall
773  * back to regular write.
774  */
775  tmpbuf = malloc(headerlen);
776  if (tmpbuf == NULL) {
777  free(mbuf);
778  res = ENOMEM;
779  goto clear_pipe;
780  }
781  res = read_back(llp->pipe[0], tmpbuf, headerlen);
782  free(tmpbuf);
783  if (res != 0) {
784  free(mbuf);
785  goto clear_pipe;
786  }
787  res = read_back(llp->pipe[0], mbuf, now_len);
788  if (res != 0) {
789  free(mbuf);
790  goto clear_pipe;
791  }
792  len = now_len + extra_len;
793  iov[iov_count].iov_base = mbuf;
794  iov[iov_count].iov_len = len;
795  iov_count++;
796  res = fuse_send_msg(se, ch, iov, iov_count);
797  free(mbuf);
798  return res;
799  }
800  free(mbuf);
801  res = now_len;
802  }
803  len = res;
804  out->len = headerlen + len;
805 
806  if (se->debug) {
807  fuse_log(FUSE_LOG_DEBUG,
808  " unique: %llu, success, outsize: %i (splice)\n",
809  (unsigned long long) out->unique, out->len);
810  }
811 
812  splice_flags = 0;
813  if ((flags & FUSE_BUF_SPLICE_MOVE) &&
814  (se->conn.want & FUSE_CAP_SPLICE_MOVE))
815  splice_flags |= SPLICE_F_MOVE;
816 
817  res = splice(llp->pipe[0], NULL, ch ? ch->fd : se->fd,
818  NULL, out->len, splice_flags);
819  if (res == -1) {
820  res = -errno;
821  perror("fuse: splice from pipe");
822  goto clear_pipe;
823  }
824  if (res != out->len) {
825  res = -EIO;
826  fuse_log(FUSE_LOG_ERR, "fuse: short splice from pipe: %u/%u\n",
827  res, out->len);
828  goto clear_pipe;
829  }
830  return 0;
831 
832 clear_pipe:
833  fuse_ll_clear_pipe(se);
834  return res;
835 
836 fallback:
837  return fuse_send_data_iov_fallback(se, ch, iov, iov_count, buf, len);
838 }
839 #else
840 static int fuse_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
841  struct iovec *iov, int iov_count,
842  struct fuse_bufvec *buf, unsigned int flags)
843 {
844  size_t len = fuse_buf_size(buf);
845  (void) flags;
846 
847  return fuse_send_data_iov_fallback(se, ch, iov, iov_count, buf, len);
848 }
849 #endif
850 
851 int fuse_reply_data(fuse_req_t req, struct fuse_bufvec *bufv,
852  enum fuse_buf_copy_flags flags)
853 {
854  struct iovec iov[2];
855  struct fuse_out_header out;
856  int res;
857 
858  iov[0].iov_base = &out;
859  iov[0].iov_len = sizeof(struct fuse_out_header);
860 
861  out.unique = req->unique;
862  out.error = 0;
863 
864  res = fuse_send_data_iov(req->se, req->ch, iov, 1, bufv, flags);
865  if (res <= 0) {
866  fuse_free_req(req);
867  return res;
868  } else {
869  return fuse_reply_err(req, res);
870  }
871 }
872 
873 int fuse_reply_statfs(fuse_req_t req, const struct statvfs *stbuf)
874 {
875  struct fuse_statfs_out arg;
876  size_t size = req->se->conn.proto_minor < 4 ?
877  FUSE_COMPAT_STATFS_SIZE : sizeof(arg);
878 
879  memset(&arg, 0, sizeof(arg));
880  convert_statfs(stbuf, &arg.st);
881 
882  return send_reply_ok(req, &arg, size);
883 }
884 
885 int fuse_reply_xattr(fuse_req_t req, size_t count)
886 {
887  struct fuse_getxattr_out arg;
888 
889  memset(&arg, 0, sizeof(arg));
890  arg.size = count;
891 
892  return send_reply_ok(req, &arg, sizeof(arg));
893 }
894 
895 int fuse_reply_lock(fuse_req_t req, const struct flock *lock)
896 {
897  struct fuse_lk_out arg;
898 
899  memset(&arg, 0, sizeof(arg));
900  arg.lk.type = lock->l_type;
901  if (lock->l_type != F_UNLCK) {
902  arg.lk.start = lock->l_start;
903  if (lock->l_len == 0)
904  arg.lk.end = OFFSET_MAX;
905  else
906  arg.lk.end = lock->l_start + lock->l_len - 1;
907  }
908  arg.lk.pid = lock->l_pid;
909  return send_reply_ok(req, &arg, sizeof(arg));
910 }
911 
912 int fuse_reply_bmap(fuse_req_t req, uint64_t idx)
913 {
914  struct fuse_bmap_out arg;
915 
916  memset(&arg, 0, sizeof(arg));
917  arg.block = idx;
918 
919  return send_reply_ok(req, &arg, sizeof(arg));
920 }
921 
922 static struct fuse_ioctl_iovec *fuse_ioctl_iovec_copy(const struct iovec *iov,
923  size_t count)
924 {
925  struct fuse_ioctl_iovec *fiov;
926  size_t i;
927 
928  fiov = malloc(sizeof(fiov[0]) * count);
929  if (!fiov)
930  return NULL;
931 
932  for (i = 0; i < count; i++) {
933  fiov[i].base = (uintptr_t) iov[i].iov_base;
934  fiov[i].len = iov[i].iov_len;
935  }
936 
937  return fiov;
938 }
939 
940 int fuse_reply_ioctl_retry(fuse_req_t req,
941  const struct iovec *in_iov, size_t in_count,
942  const struct iovec *out_iov, size_t out_count)
943 {
944  struct fuse_ioctl_out arg;
945  struct fuse_ioctl_iovec *in_fiov = NULL;
946  struct fuse_ioctl_iovec *out_fiov = NULL;
947  struct iovec iov[4];
948  size_t count = 1;
949  int res;
950 
951  memset(&arg, 0, sizeof(arg));
952  arg.flags |= FUSE_IOCTL_RETRY;
953  arg.in_iovs = in_count;
954  arg.out_iovs = out_count;
955  iov[count].iov_base = &arg;
956  iov[count].iov_len = sizeof(arg);
957  count++;
958 
959  if (req->se->conn.proto_minor < 16) {
960  if (in_count) {
961  iov[count].iov_base = (void *)in_iov;
962  iov[count].iov_len = sizeof(in_iov[0]) * in_count;
963  count++;
964  }
965 
966  if (out_count) {
967  iov[count].iov_base = (void *)out_iov;
968  iov[count].iov_len = sizeof(out_iov[0]) * out_count;
969  count++;
970  }
971  } else {
972  /* Can't handle non-compat 64bit ioctls on 32bit */
973  if (sizeof(void *) == 4 && req->ioctl_64bit) {
974  res = fuse_reply_err(req, EINVAL);
975  goto out;
976  }
977 
978  if (in_count) {
979  in_fiov = fuse_ioctl_iovec_copy(in_iov, in_count);
980  if (!in_fiov)
981  goto enomem;
982 
983  iov[count].iov_base = (void *)in_fiov;
984  iov[count].iov_len = sizeof(in_fiov[0]) * in_count;
985  count++;
986  }
987  if (out_count) {
988  out_fiov = fuse_ioctl_iovec_copy(out_iov, out_count);
989  if (!out_fiov)
990  goto enomem;
991 
992  iov[count].iov_base = (void *)out_fiov;
993  iov[count].iov_len = sizeof(out_fiov[0]) * out_count;
994  count++;
995  }
996  }
997 
998  res = send_reply_iov(req, 0, iov, count);
999 out:
1000  free(in_fiov);
1001  free(out_fiov);
1002 
1003  return res;
1004 
1005 enomem:
1006  res = fuse_reply_err(req, ENOMEM);
1007  goto out;
1008 }
1009 
1010 int fuse_reply_ioctl(fuse_req_t req, int result, const void *buf, size_t size)
1011 {
1012  struct fuse_ioctl_out arg;
1013  struct iovec iov[3];
1014  size_t count = 1;
1015 
1016  memset(&arg, 0, sizeof(arg));
1017  arg.result = result;
1018  iov[count].iov_base = &arg;
1019  iov[count].iov_len = sizeof(arg);
1020  count++;
1021 
1022  if (size) {
1023  iov[count].iov_base = (char *) buf;
1024  iov[count].iov_len = size;
1025  count++;
1026  }
1027 
1028  return send_reply_iov(req, 0, iov, count);
1029 }
1030 
1031 int fuse_reply_ioctl_iov(fuse_req_t req, int result, const struct iovec *iov,
1032  int count)
1033 {
1034  struct iovec *padded_iov;
1035  struct fuse_ioctl_out arg;
1036  int res;
1037 
1038  padded_iov = malloc((count + 2) * sizeof(struct iovec));
1039  if (padded_iov == NULL)
1040  return fuse_reply_err(req, ENOMEM);
1041 
1042  memset(&arg, 0, sizeof(arg));
1043  arg.result = result;
1044  padded_iov[1].iov_base = &arg;
1045  padded_iov[1].iov_len = sizeof(arg);
1046 
1047  memcpy(&padded_iov[2], iov, count * sizeof(struct iovec));
1048 
1049  res = send_reply_iov(req, 0, padded_iov, count + 2);
1050  free(padded_iov);
1051 
1052  return res;
1053 }
1054 
1055 int fuse_reply_poll(fuse_req_t req, unsigned revents)
1056 {
1057  struct fuse_poll_out arg;
1058 
1059  memset(&arg, 0, sizeof(arg));
1060  arg.revents = revents;
1061 
1062  return send_reply_ok(req, &arg, sizeof(arg));
1063 }
1064 
1065 int fuse_reply_lseek(fuse_req_t req, off_t off)
1066 {
1067  struct fuse_lseek_out arg;
1068 
1069  memset(&arg, 0, sizeof(arg));
1070  arg.offset = off;
1071 
1072  return send_reply_ok(req, &arg, sizeof(arg));
1073 }
1074 
1075 static void do_lookup(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1076 {
1077  char *name = (char *) inarg;
1078 
1079  if (req->se->op.lookup)
1080  req->se->op.lookup(req, nodeid, name);
1081  else
1082  fuse_reply_err(req, ENOSYS);
1083 }
1084 
1085 static void do_forget(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1086 {
1087  struct fuse_forget_in *arg = (struct fuse_forget_in *) inarg;
1088 
1089  if (req->se->op.forget)
1090  req->se->op.forget(req, nodeid, arg->nlookup);
1091  else
1092  fuse_reply_none(req);
1093 }
1094 
1095 static void do_batch_forget(fuse_req_t req, fuse_ino_t nodeid,
1096  const void *inarg)
1097 {
1098  struct fuse_batch_forget_in *arg = (void *) inarg;
1099  struct fuse_forget_one *param = (void *) PARAM(arg);
1100  unsigned int i;
1101 
1102  (void) nodeid;
1103 
1104  if (req->se->op.forget_multi) {
1105  req->se->op.forget_multi(req, arg->count,
1106  (struct fuse_forget_data *) param);
1107  } else if (req->se->op.forget) {
1108  for (i = 0; i < arg->count; i++) {
1109  struct fuse_forget_one *forget = &param[i];
1110  struct fuse_req *dummy_req;
1111 
1112  dummy_req = fuse_ll_alloc_req(req->se);
1113  if (dummy_req == NULL)
1114  break;
1115 
1116  dummy_req->unique = req->unique;
1117  dummy_req->ctx = req->ctx;
1118  dummy_req->ch = NULL;
1119 
1120  req->se->op.forget(dummy_req, forget->nodeid,
1121  forget->nlookup);
1122  }
1123  fuse_reply_none(req);
1124  } else {
1125  fuse_reply_none(req);
1126  }
1127 }
1128 
1129 static void do_getattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1130 {
1131  struct fuse_file_info *fip = NULL;
1132  struct fuse_file_info fi;
1133 
1134  if (req->se->conn.proto_minor >= 9) {
1135  struct fuse_getattr_in *arg = (struct fuse_getattr_in *) inarg;
1136 
1137  if (arg->getattr_flags & FUSE_GETATTR_FH) {
1138  memset(&fi, 0, sizeof(fi));
1139  fi.fh = arg->fh;
1140  fip = &fi;
1141  }
1142  }
1143 
1144  if (req->se->op.getattr)
1145  req->se->op.getattr(req, nodeid, fip);
1146  else
1147  fuse_reply_err(req, ENOSYS);
1148 }
1149 
1150 static void do_setattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1151 {
1152  struct fuse_setattr_in *arg = (struct fuse_setattr_in *) inarg;
1153 
1154  if (req->se->op.setattr) {
1155  struct fuse_file_info *fi = NULL;
1156  struct fuse_file_info fi_store;
1157  struct stat stbuf;
1158  memset(&stbuf, 0, sizeof(stbuf));
1159  convert_attr(arg, &stbuf);
1160  if (arg->valid & FATTR_FH) {
1161  arg->valid &= ~FATTR_FH;
1162  memset(&fi_store, 0, sizeof(fi_store));
1163  fi = &fi_store;
1164  fi->fh = arg->fh;
1165  }
1166  arg->valid &=
1167  FUSE_SET_ATTR_MODE |
1168  FUSE_SET_ATTR_UID |
1169  FUSE_SET_ATTR_GID |
1170  FUSE_SET_ATTR_SIZE |
1171  FUSE_SET_ATTR_ATIME |
1172  FUSE_SET_ATTR_MTIME |
1173  FUSE_SET_ATTR_ATIME_NOW |
1174  FUSE_SET_ATTR_MTIME_NOW |
1175  FUSE_SET_ATTR_CTIME;
1176 
1177  req->se->op.setattr(req, nodeid, &stbuf, arg->valid, fi);
1178  } else
1179  fuse_reply_err(req, ENOSYS);
1180 }
1181 
1182 static void do_access(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1183 {
1184  struct fuse_access_in *arg = (struct fuse_access_in *) inarg;
1185 
1186  if (req->se->op.access)
1187  req->se->op.access(req, nodeid, arg->mask);
1188  else
1189  fuse_reply_err(req, ENOSYS);
1190 }
1191 
1192 static void do_readlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1193 {
1194  (void) inarg;
1195 
1196  if (req->se->op.readlink)
1197  req->se->op.readlink(req, nodeid);
1198  else
1199  fuse_reply_err(req, ENOSYS);
1200 }
1201 
1202 static void do_mknod(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1203 {
1204  struct fuse_mknod_in *arg = (struct fuse_mknod_in *) inarg;
1205  char *name = PARAM(arg);
1206 
1207  if (req->se->conn.proto_minor >= 12)
1208  req->ctx.umask = arg->umask;
1209  else
1210  name = (char *) inarg + FUSE_COMPAT_MKNOD_IN_SIZE;
1211 
1212  if (req->se->op.mknod)
1213  req->se->op.mknod(req, nodeid, name, arg->mode, arg->rdev);
1214  else
1215  fuse_reply_err(req, ENOSYS);
1216 }
1217 
1218 static void do_mkdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1219 {
1220  struct fuse_mkdir_in *arg = (struct fuse_mkdir_in *) inarg;
1221 
1222  if (req->se->conn.proto_minor >= 12)
1223  req->ctx.umask = arg->umask;
1224 
1225  if (req->se->op.mkdir)
1226  req->se->op.mkdir(req, nodeid, PARAM(arg), arg->mode);
1227  else
1228  fuse_reply_err(req, ENOSYS);
1229 }
1230 
1231 static void do_unlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1232 {
1233  char *name = (char *) inarg;
1234 
1235  if (req->se->op.unlink)
1236  req->se->op.unlink(req, nodeid, name);
1237  else
1238  fuse_reply_err(req, ENOSYS);
1239 }
1240 
1241 static void do_rmdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1242 {
1243  char *name = (char *) inarg;
1244 
1245  if (req->se->op.rmdir)
1246  req->se->op.rmdir(req, nodeid, name);
1247  else
1248  fuse_reply_err(req, ENOSYS);
1249 }
1250 
1251 static void do_symlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1252 {
1253  char *name = (char *) inarg;
1254  char *linkname = ((char *) inarg) + strlen((char *) inarg) + 1;
1255 
1256  if (req->se->op.symlink)
1257  req->se->op.symlink(req, linkname, nodeid, name);
1258  else
1259  fuse_reply_err(req, ENOSYS);
1260 }
1261 
1262 static void do_rename(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1263 {
1264  struct fuse_rename_in *arg = (struct fuse_rename_in *) inarg;
1265  char *oldname = PARAM(arg);
1266  char *newname = oldname + strlen(oldname) + 1;
1267 
1268  if (req->se->op.rename)
1269  req->se->op.rename(req, nodeid, oldname, arg->newdir, newname,
1270  0);
1271  else
1272  fuse_reply_err(req, ENOSYS);
1273 }
1274 
1275 static void do_rename2(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1276 {
1277  struct fuse_rename2_in *arg = (struct fuse_rename2_in *) inarg;
1278  char *oldname = PARAM(arg);
1279  char *newname = oldname + strlen(oldname) + 1;
1280 
1281  if (req->se->op.rename)
1282  req->se->op.rename(req, nodeid, oldname, arg->newdir, newname,
1283  arg->flags);
1284  else
1285  fuse_reply_err(req, ENOSYS);
1286 }
1287 
1288 static void do_link(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1289 {
1290  struct fuse_link_in *arg = (struct fuse_link_in *) inarg;
1291 
1292  if (req->se->op.link)
1293  req->se->op.link(req, arg->oldnodeid, nodeid, PARAM(arg));
1294  else
1295  fuse_reply_err(req, ENOSYS);
1296 }
1297 
1298 static void do_create(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1299 {
1300  struct fuse_create_in *arg = (struct fuse_create_in *) inarg;
1301 
1302  if (req->se->op.create) {
1303  struct fuse_file_info fi;
1304  char *name = PARAM(arg);
1305 
1306  memset(&fi, 0, sizeof(fi));
1307  fi.flags = arg->flags;
1308 
1309  if (req->se->conn.proto_minor >= 12)
1310  req->ctx.umask = arg->umask;
1311  else
1312  name = (char *) inarg + sizeof(struct fuse_open_in);
1313 
1314  req->se->op.create(req, nodeid, name, arg->mode, &fi);
1315  } else
1316  fuse_reply_err(req, ENOSYS);
1317 }
1318 
1319 static void do_open(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1320 {
1321  struct fuse_open_in *arg = (struct fuse_open_in *) inarg;
1322  struct fuse_file_info fi;
1323 
1324  memset(&fi, 0, sizeof(fi));
1325  fi.flags = arg->flags;
1326 
1327  if (req->se->op.open)
1328  req->se->op.open(req, nodeid, &fi);
1329  else
1330  fuse_reply_open(req, &fi);
1331 }
1332 
1333 static void do_read(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1334 {
1335  struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
1336 
1337  if (req->se->op.read) {
1338  struct fuse_file_info fi;
1339 
1340  memset(&fi, 0, sizeof(fi));
1341  fi.fh = arg->fh;
1342  if (req->se->conn.proto_minor >= 9) {
1343  fi.lock_owner = arg->lock_owner;
1344  fi.flags = arg->flags;
1345  }
1346  req->se->op.read(req, nodeid, arg->size, arg->offset, &fi);
1347  } else
1348  fuse_reply_err(req, ENOSYS);
1349 }
1350 
1351 static void do_write(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1352 {
1353  struct fuse_write_in *arg = (struct fuse_write_in *) inarg;
1354  struct fuse_file_info fi;
1355  char *param;
1356 
1357  memset(&fi, 0, sizeof(fi));
1358  fi.fh = arg->fh;
1359  fi.writepage = (arg->write_flags & FUSE_WRITE_CACHE) != 0;
1360 
1361  if (req->se->conn.proto_minor < 9) {
1362  param = ((char *) arg) + FUSE_COMPAT_WRITE_IN_SIZE;
1363  } else {
1364  fi.lock_owner = arg->lock_owner;
1365  fi.flags = arg->flags;
1366  param = PARAM(arg);
1367  }
1368 
1369  if (req->se->op.write)
1370  req->se->op.write(req, nodeid, param, arg->size,
1371  arg->offset, &fi);
1372  else
1373  fuse_reply_err(req, ENOSYS);
1374 }
1375 
1376 static void do_write_buf(fuse_req_t req, fuse_ino_t nodeid, const void *inarg,
1377  const struct fuse_buf *ibuf)
1378 {
1379  struct fuse_session *se = req->se;
1380  struct fuse_bufvec bufv = {
1381  .buf[0] = *ibuf,
1382  .count = 1,
1383  };
1384  struct fuse_write_in *arg = (struct fuse_write_in *) inarg;
1385  struct fuse_file_info fi;
1386 
1387  memset(&fi, 0, sizeof(fi));
1388  fi.fh = arg->fh;
1389  fi.writepage = arg->write_flags & FUSE_WRITE_CACHE;
1390 
1391  if (se->conn.proto_minor < 9) {
1392  bufv.buf[0].mem = ((char *) arg) + FUSE_COMPAT_WRITE_IN_SIZE;
1393  bufv.buf[0].size -= sizeof(struct fuse_in_header) +
1394  FUSE_COMPAT_WRITE_IN_SIZE;
1395  assert(!(bufv.buf[0].flags & FUSE_BUF_IS_FD));
1396  } else {
1397  fi.lock_owner = arg->lock_owner;
1398  fi.flags = arg->flags;
1399  if (!(bufv.buf[0].flags & FUSE_BUF_IS_FD))
1400  bufv.buf[0].mem = PARAM(arg);
1401 
1402  bufv.buf[0].size -= sizeof(struct fuse_in_header) +
1403  sizeof(struct fuse_write_in);
1404  }
1405  if (bufv.buf[0].size < arg->size) {
1406  fuse_log(FUSE_LOG_ERR, "fuse: do_write_buf: buffer size too small\n");
1407  fuse_reply_err(req, EIO);
1408  goto out;
1409  }
1410  bufv.buf[0].size = arg->size;
1411 
1412  se->op.write_buf(req, nodeid, &bufv, arg->offset, &fi);
1413 
1414 out:
1415  /* Need to reset the pipe if ->write_buf() didn't consume all data */
1416  if ((ibuf->flags & FUSE_BUF_IS_FD) && bufv.idx < bufv.count)
1417  fuse_ll_clear_pipe(se);
1418 }
1419 
1420 static void do_flush(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1421 {
1422  struct fuse_flush_in *arg = (struct fuse_flush_in *) inarg;
1423  struct fuse_file_info fi;
1424 
1425  memset(&fi, 0, sizeof(fi));
1426  fi.fh = arg->fh;
1427  fi.flush = 1;
1428  if (req->se->conn.proto_minor >= 7)
1429  fi.lock_owner = arg->lock_owner;
1430 
1431  if (req->se->op.flush)
1432  req->se->op.flush(req, nodeid, &fi);
1433  else
1434  fuse_reply_err(req, ENOSYS);
1435 }
1436 
1437 static void do_release(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1438 {
1439  struct fuse_release_in *arg = (struct fuse_release_in *) inarg;
1440  struct fuse_file_info fi;
1441 
1442  memset(&fi, 0, sizeof(fi));
1443  fi.flags = arg->flags;
1444  fi.fh = arg->fh;
1445  if (req->se->conn.proto_minor >= 8) {
1446  fi.flush = (arg->release_flags & FUSE_RELEASE_FLUSH) ? 1 : 0;
1447  fi.lock_owner = arg->lock_owner;
1448  }
1449  if (arg->release_flags & FUSE_RELEASE_FLOCK_UNLOCK) {
1450  fi.flock_release = 1;
1451  fi.lock_owner = arg->lock_owner;
1452  }
1453 
1454  if (req->se->op.release)
1455  req->se->op.release(req, nodeid, &fi);
1456  else
1457  fuse_reply_err(req, 0);
1458 }
1459 
1460 static void do_fsync(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1461 {
1462  struct fuse_fsync_in *arg = (struct fuse_fsync_in *) inarg;
1463  struct fuse_file_info fi;
1464  int datasync = arg->fsync_flags & 1;
1465 
1466  memset(&fi, 0, sizeof(fi));
1467  fi.fh = arg->fh;
1468 
1469  if (req->se->op.fsync)
1470  req->se->op.fsync(req, nodeid, datasync, &fi);
1471  else
1472  fuse_reply_err(req, ENOSYS);
1473 }
1474 
1475 static void do_opendir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1476 {
1477  struct fuse_open_in *arg = (struct fuse_open_in *) inarg;
1478  struct fuse_file_info fi;
1479 
1480  memset(&fi, 0, sizeof(fi));
1481  fi.flags = arg->flags;
1482 
1483  if (req->se->op.opendir)
1484  req->se->op.opendir(req, nodeid, &fi);
1485  else
1486  fuse_reply_open(req, &fi);
1487 }
1488 
1489 static void do_readdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1490 {
1491  struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
1492  struct fuse_file_info fi;
1493 
1494  memset(&fi, 0, sizeof(fi));
1495  fi.fh = arg->fh;
1496 
1497  if (req->se->op.readdir)
1498  req->se->op.readdir(req, nodeid, arg->size, arg->offset, &fi);
1499  else
1500  fuse_reply_err(req, ENOSYS);
1501 }
1502 
1503 static void do_readdirplus(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1504 {
1505  struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
1506  struct fuse_file_info fi;
1507 
1508  memset(&fi, 0, sizeof(fi));
1509  fi.fh = arg->fh;
1510 
1511  if (req->se->op.readdirplus)
1512  req->se->op.readdirplus(req, nodeid, arg->size, arg->offset, &fi);
1513  else
1514  fuse_reply_err(req, ENOSYS);
1515 }
1516 
1517 static void do_releasedir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1518 {
1519  struct fuse_release_in *arg = (struct fuse_release_in *) inarg;
1520  struct fuse_file_info fi;
1521 
1522  memset(&fi, 0, sizeof(fi));
1523  fi.flags = arg->flags;
1524  fi.fh = arg->fh;
1525 
1526  if (req->se->op.releasedir)
1527  req->se->op.releasedir(req, nodeid, &fi);
1528  else
1529  fuse_reply_err(req, 0);
1530 }
1531 
1532 static void do_fsyncdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1533 {
1534  struct fuse_fsync_in *arg = (struct fuse_fsync_in *) inarg;
1535  struct fuse_file_info fi;
1536  int datasync = arg->fsync_flags & 1;
1537 
1538  memset(&fi, 0, sizeof(fi));
1539  fi.fh = arg->fh;
1540 
1541  if (req->se->op.fsyncdir)
1542  req->se->op.fsyncdir(req, nodeid, datasync, &fi);
1543  else
1544  fuse_reply_err(req, ENOSYS);
1545 }
1546 
1547 static void do_statfs(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1548 {
1549  (void) nodeid;
1550  (void) inarg;
1551 
1552  if (req->se->op.statfs)
1553  req->se->op.statfs(req, nodeid);
1554  else {
1555  struct statvfs buf = {
1556  .f_namemax = 255,
1557  .f_bsize = 512,
1558  };
1559  fuse_reply_statfs(req, &buf);
1560  }
1561 }
1562 
1563 static void do_setxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1564 {
1565  struct fuse_setxattr_in *arg = (struct fuse_setxattr_in *) inarg;
1566  char *name = PARAM(arg);
1567  char *value = name + strlen(name) + 1;
1568 
1569  if (req->se->op.setxattr)
1570  req->se->op.setxattr(req, nodeid, name, value, arg->size,
1571  arg->flags);
1572  else
1573  fuse_reply_err(req, ENOSYS);
1574 }
1575 
1576 static void do_getxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1577 {
1578  struct fuse_getxattr_in *arg = (struct fuse_getxattr_in *) inarg;
1579 
1580  if (req->se->op.getxattr)
1581  req->se->op.getxattr(req, nodeid, PARAM(arg), arg->size);
1582  else
1583  fuse_reply_err(req, ENOSYS);
1584 }
1585 
1586 static void do_listxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1587 {
1588  struct fuse_getxattr_in *arg = (struct fuse_getxattr_in *) inarg;
1589 
1590  if (req->se->op.listxattr)
1591  req->se->op.listxattr(req, nodeid, arg->size);
1592  else
1593  fuse_reply_err(req, ENOSYS);
1594 }
1595 
1596 static void do_removexattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1597 {
1598  char *name = (char *) inarg;
1599 
1600  if (req->se->op.removexattr)
1601  req->se->op.removexattr(req, nodeid, name);
1602  else
1603  fuse_reply_err(req, ENOSYS);
1604 }
1605 
1606 static void convert_fuse_file_lock(struct fuse_file_lock *fl,
1607  struct flock *flock)
1608 {
1609  memset(flock, 0, sizeof(struct flock));
1610  flock->l_type = fl->type;
1611  flock->l_whence = SEEK_SET;
1612  flock->l_start = fl->start;
1613  if (fl->end == OFFSET_MAX)
1614  flock->l_len = 0;
1615  else
1616  flock->l_len = fl->end - fl->start + 1;
1617  flock->l_pid = fl->pid;
1618 }
1619 
1620 static void do_getlk(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1621 {
1622  struct fuse_lk_in *arg = (struct fuse_lk_in *) inarg;
1623  struct fuse_file_info fi;
1624  struct flock flock;
1625 
1626  memset(&fi, 0, sizeof(fi));
1627  fi.fh = arg->fh;
1628  fi.lock_owner = arg->owner;
1629 
1630  convert_fuse_file_lock(&arg->lk, &flock);
1631  if (req->se->op.getlk)
1632  req->se->op.getlk(req, nodeid, &fi, &flock);
1633  else
1634  fuse_reply_err(req, ENOSYS);
1635 }
1636 
1637 static void do_setlk_common(fuse_req_t req, fuse_ino_t nodeid,
1638  const void *inarg, int sleep)
1639 {
1640  struct fuse_lk_in *arg = (struct fuse_lk_in *) inarg;
1641  struct fuse_file_info fi;
1642  struct flock flock;
1643 
1644  memset(&fi, 0, sizeof(fi));
1645  fi.fh = arg->fh;
1646  fi.lock_owner = arg->owner;
1647 
1648  if (arg->lk_flags & FUSE_LK_FLOCK) {
1649  int op = 0;
1650 
1651  switch (arg->lk.type) {
1652  case F_RDLCK:
1653  op = LOCK_SH;
1654  break;
1655  case F_WRLCK:
1656  op = LOCK_EX;
1657  break;
1658  case F_UNLCK:
1659  op = LOCK_UN;
1660  break;
1661  }
1662  if (!sleep)
1663  op |= LOCK_NB;
1664 
1665  if (req->se->op.flock)
1666  req->se->op.flock(req, nodeid, &fi, op);
1667  else
1668  fuse_reply_err(req, ENOSYS);
1669  } else {
1670  convert_fuse_file_lock(&arg->lk, &flock);
1671  if (req->se->op.setlk)
1672  req->se->op.setlk(req, nodeid, &fi, &flock, sleep);
1673  else
1674  fuse_reply_err(req, ENOSYS);
1675  }
1676 }
1677 
1678 static void do_setlk(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1679 {
1680  do_setlk_common(req, nodeid, inarg, 0);
1681 }
1682 
1683 static void do_setlkw(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1684 {
1685  do_setlk_common(req, nodeid, inarg, 1);
1686 }
1687 
1688 static int find_interrupted(struct fuse_session *se, struct fuse_req *req)
1689 {
1690  struct fuse_req *curr;
1691 
1692  for (curr = se->list.next; curr != &se->list; curr = curr->next) {
1693  if (curr->unique == req->u.i.unique) {
1694  fuse_interrupt_func_t func;
1695  void *data;
1696 
1697  curr->ctr++;
1698  pthread_mutex_unlock(&se->lock);
1699 
1700  /* Ugh, ugly locking */
1701  pthread_mutex_lock(&curr->lock);
1702  pthread_mutex_lock(&se->lock);
1703  curr->interrupted = 1;
1704  func = curr->u.ni.func;
1705  data = curr->u.ni.data;
1706  pthread_mutex_unlock(&se->lock);
1707  if (func)
1708  func(curr, data);
1709  pthread_mutex_unlock(&curr->lock);
1710 
1711  pthread_mutex_lock(&se->lock);
1712  curr->ctr--;
1713  if (!curr->ctr)
1714  destroy_req(curr);
1715 
1716  return 1;
1717  }
1718  }
1719  for (curr = se->interrupts.next; curr != &se->interrupts;
1720  curr = curr->next) {
1721  if (curr->u.i.unique == req->u.i.unique)
1722  return 1;
1723  }
1724  return 0;
1725 }
1726 
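/*
 * INTERRUPT handling: do_interrupt() records the unique id of the
 * request to be interrupted. find_interrupted() looks for that request
 * on se->list and, if found, marks it interrupted and invokes its
 * registered callback; if the target has not arrived yet, the interrupt
 * is queued on se->interrupts and matched later via check_interrupt().
 */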
1727 static void do_interrupt(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1728 {
1729  struct fuse_interrupt_in *arg = (struct fuse_interrupt_in *) inarg;
1730  struct fuse_session *se = req->se;
1731 
1732  (void) nodeid;
1733  if (se->debug)
1734  fuse_log(FUSE_LOG_DEBUG, "INTERRUPT: %llu\n",
1735  (unsigned long long) arg->unique);
1736 
1737  req->u.i.unique = arg->unique;
1738 
1739  pthread_mutex_lock(&se->lock);
1740  if (find_interrupted(se, req))
1741  destroy_req(req);
1742  else
1743  list_add_req(req, &se->interrupts);
1744  pthread_mutex_unlock(&se->lock);
1745 }
1746 
1747 static struct fuse_req *check_interrupt(struct fuse_session *se,
1748  struct fuse_req *req)
1749 {
1750  struct fuse_req *curr;
1751 
1752  for (curr = se->interrupts.next; curr != &se->interrupts;
1753  curr = curr->next) {
1754  if (curr->u.i.unique == req->unique) {
1755  req->interrupted = 1;
1756  list_del_req(curr);
1757  free(curr);
1758  return NULL;
1759  }
1760  }
1761  curr = se->interrupts.next;
1762  if (curr != &se->interrupts) {
1763  list_del_req(curr);
1764  list_init_req(curr);
1765  return curr;
1766  } else
1767  return NULL;
1768 }
1769 
1770 static void do_bmap(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1771 {
1772  struct fuse_bmap_in *arg = (struct fuse_bmap_in *) inarg;
1773 
1774  if (req->se->op.bmap)
1775  req->se->op.bmap(req, nodeid, arg->blocksize, arg->block);
1776  else
1777  fuse_reply_err(req, ENOSYS);
1778 }
1779 
1780 static void do_ioctl(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1781 {
1782  struct fuse_ioctl_in *arg = (struct fuse_ioctl_in *) inarg;
1783  unsigned int flags = arg->flags;
1784  void *in_buf = arg->in_size ? PARAM(arg) : NULL;
1785  struct fuse_file_info fi;
1786 
1787  if (flags & FUSE_IOCTL_DIR &&
1788  !(req->se->conn.want & FUSE_CAP_IOCTL_DIR)) {
1789  fuse_reply_err(req, ENOTTY);
1790  return;
1791  }
1792 
1793  memset(&fi, 0, sizeof(fi));
1794  fi.fh = arg->fh;
1795 
1796  if (sizeof(void *) == 4 && req->se->conn.proto_minor >= 16 &&
1797  !(flags & FUSE_IOCTL_32BIT)) {
1798  req->ioctl_64bit = 1;
1799  }
1800 
1801  if (req->se->op.ioctl)
1802  req->se->op.ioctl(req, nodeid, arg->cmd,
1803  (void *)(uintptr_t)arg->arg, &fi, flags,
1804  in_buf, arg->in_size, arg->out_size);
1805  else
1806  fuse_reply_err(req, ENOSYS);
1807 }
1808 
1809 void fuse_pollhandle_destroy(struct fuse_pollhandle *ph)
1810 {
1811  free(ph);
1812 }
1813 
1814 static void do_poll(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1815 {
1816  struct fuse_poll_in *arg = (struct fuse_poll_in *) inarg;
1817  struct fuse_file_info fi;
1818 
1819  memset(&fi, 0, sizeof(fi));
1820  fi.fh = arg->fh;
1821  fi.poll_events = arg->events;
1822 
1823  if (req->se->op.poll) {
1824  struct fuse_pollhandle *ph = NULL;
1825 
1826  if (arg->flags & FUSE_POLL_SCHEDULE_NOTIFY) {
1827  ph = malloc(sizeof(struct fuse_pollhandle));
1828  if (ph == NULL) {
1829  fuse_reply_err(req, ENOMEM);
1830  return;
1831  }
1832  ph->kh = arg->kh;
1833  ph->se = req->se;
1834  }
1835 
1836  req->se->op.poll(req, nodeid, &fi, ph);
1837  } else {
1838  fuse_reply_err(req, ENOSYS);
1839  }
1840 }
1841 
1842 static void do_fallocate(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1843 {
1844  struct fuse_fallocate_in *arg = (struct fuse_fallocate_in *) inarg;
1845  struct fuse_file_info fi;
1846 
1847  memset(&fi, 0, sizeof(fi));
1848  fi.fh = arg->fh;
1849 
1850  if (req->se->op.fallocate)
1851  req->se->op.fallocate(req, nodeid, arg->mode, arg->offset, arg->length, &fi);
1852  else
1853  fuse_reply_err(req, ENOSYS);
1854 }
1855 
1856 static void do_copy_file_range(fuse_req_t req, fuse_ino_t nodeid_in, const void *inarg)
1857 {
1858  struct fuse_copy_file_range_in *arg = (struct fuse_copy_file_range_in *) inarg;
1859  struct fuse_file_info fi_in, fi_out;
1860 
1861  memset(&fi_in, 0, sizeof(fi_in));
1862  fi_in.fh = arg->fh_in;
1863 
1864  memset(&fi_out, 0, sizeof(fi_out));
1865  fi_out.fh = arg->fh_out;
1866 
1867 
1868  if (req->se->op.copy_file_range)
1869  req->se->op.copy_file_range(req, nodeid_in, arg->off_in,
1870  &fi_in, arg->nodeid_out,
1871  arg->off_out, &fi_out, arg->len,
1872  arg->flags);
1873  else
1874  fuse_reply_err(req, ENOSYS);
1875 }
1876 
1877 static void do_lseek(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1878 {
1879  struct fuse_lseek_in *arg = (struct fuse_lseek_in *) inarg;
1880  struct fuse_file_info fi;
1881 
1882  memset(&fi, 0, sizeof(fi));
1883  fi.fh = arg->fh;
1884 
1885  if (req->se->op.lseek)
1886  req->se->op.lseek(req, nodeid, arg->offset, arg->whence, &fi);
1887  else
1888  fuse_reply_err(req, ENOSYS);
1889 }
1890 
1891 /* Prevent bogus data races (bogus since "init" is called before
1892  * multi-threading becomes relevant) */
1893 static __attribute__((no_sanitize("thread")))
1894 void do_init(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1895 {
1896  struct fuse_init_in *arg = (struct fuse_init_in *) inarg;
1897  struct fuse_init_out outarg;
1898  struct fuse_session *se = req->se;
1899  size_t bufsize = se->bufsize;
1900  size_t outargsize = sizeof(outarg);
1901 
1902  (void) nodeid;
1903  if (se->debug) {
1904  fuse_log(FUSE_LOG_DEBUG, "INIT: %u.%u\n", arg->major, arg->minor);
1905  if (arg->major == 7 && arg->minor >= 6) {
1906  fuse_log(FUSE_LOG_DEBUG, "flags=0x%08x\n", arg->flags);
1907  fuse_log(FUSE_LOG_DEBUG, "max_readahead=0x%08x\n",
1908  arg->max_readahead);
1909  }
1910  }
1911  se->conn.proto_major = arg->major;
1912  se->conn.proto_minor = arg->minor;
1913  se->conn.capable = 0;
1914  se->conn.want = 0;
1915 
1916  memset(&outarg, 0, sizeof(outarg));
1917  outarg.major = FUSE_KERNEL_VERSION;
1918  outarg.minor = FUSE_KERNEL_MINOR_VERSION;
1919 
1920  if (arg->major < 7) {
1921  fuse_log(FUSE_LOG_ERR, "fuse: unsupported protocol version: %u.%u\n",
1922  arg->major, arg->minor);
1923  fuse_reply_err(req, EPROTO);
1924  return;
1925  }
1926 
1927  if (arg->major > 7) {
1928  /* Wait for a second INIT request with a 7.X version */
1929  send_reply_ok(req, &outarg, sizeof(outarg));
1930  return;
1931  }
1932 
1933  if (arg->minor >= 6) {
1934  if (arg->max_readahead < se->conn.max_readahead)
1935  se->conn.max_readahead = arg->max_readahead;
1936  if (arg->flags & FUSE_ASYNC_READ)
1937  se->conn.capable |= FUSE_CAP_ASYNC_READ;
1938  if (arg->flags & FUSE_POSIX_LOCKS)
1939  se->conn.capable |= FUSE_CAP_POSIX_LOCKS;
1940  if (arg->flags & FUSE_ATOMIC_O_TRUNC)
1941  se->conn.capable |= FUSE_CAP_ATOMIC_O_TRUNC;
1942  if (arg->flags & FUSE_EXPORT_SUPPORT)
1943  se->conn.capable |= FUSE_CAP_EXPORT_SUPPORT;
1944  if (arg->flags & FUSE_DONT_MASK)
1945  se->conn.capable |= FUSE_CAP_DONT_MASK;
1946  if (arg->flags & FUSE_FLOCK_LOCKS)
1947  se->conn.capable |= FUSE_CAP_FLOCK_LOCKS;
1948  if (arg->flags & FUSE_AUTO_INVAL_DATA)
1949  se->conn.capable |= FUSE_CAP_AUTO_INVAL_DATA;
1950  if (arg->flags & FUSE_DO_READDIRPLUS)
1951  se->conn.capable |= FUSE_CAP_READDIRPLUS;
1952  if (arg->flags & FUSE_READDIRPLUS_AUTO)
1953  se->conn.capable |= FUSE_CAP_READDIRPLUS_AUTO;
1954  if (arg->flags & FUSE_ASYNC_DIO)
1955  se->conn.capable |= FUSE_CAP_ASYNC_DIO;
1956  if (arg->flags & FUSE_WRITEBACK_CACHE)
1957  se->conn.capable |= FUSE_CAP_WRITEBACK_CACHE;
1958  if (arg->flags & FUSE_NO_OPEN_SUPPORT)
1959  se->conn.capable |= FUSE_CAP_NO_OPEN_SUPPORT;
1960  if (arg->flags & FUSE_PARALLEL_DIROPS)
1961  se->conn.capable |= FUSE_CAP_PARALLEL_DIROPS;
1962  if (arg->flags & FUSE_POSIX_ACL)
1963  se->conn.capable |= FUSE_CAP_POSIX_ACL;
1964  if (arg->flags & FUSE_HANDLE_KILLPRIV)
1965  se->conn.capable |= FUSE_CAP_HANDLE_KILLPRIV;
1966  if (arg->flags & FUSE_CACHE_SYMLINKS)
1967  se->conn.capable |= FUSE_CAP_CACHE_SYMLINKS;
1968  if (arg->flags & FUSE_NO_OPENDIR_SUPPORT)
1969  se->conn.capable |= FUSE_CAP_NO_OPENDIR_SUPPORT;
1970  if (arg->flags & FUSE_EXPLICIT_INVAL_DATA)
1971  se->conn.capable |= FUSE_CAP_EXPLICIT_INVAL_DATA;
1972  if (!(arg->flags & FUSE_MAX_PAGES)) {
1973  size_t max_bufsize =
1974  FUSE_DEFAULT_MAX_PAGES_PER_REQ * getpagesize()
1975  + FUSE_BUFFER_HEADER_SIZE;
1976  if (bufsize > max_bufsize) {
1977  bufsize = max_bufsize;
1978  }
1979  }
1980  } else {
1981  se->conn.max_readahead = 0;
1982  }
1983 
1984  if (se->conn.proto_minor >= 14) {
1985 #ifdef HAVE_SPLICE
1986 #ifdef HAVE_VMSPLICE
1987  se->conn.capable |= FUSE_CAP_SPLICE_WRITE | FUSE_CAP_SPLICE_MOVE;
1988 #endif
1989  se->conn.capable |= FUSE_CAP_SPLICE_READ;
1990 #endif
1991  }
1992  if (se->conn.proto_minor >= 18)
1993  se->conn.capable |= FUSE_CAP_IOCTL_DIR;
1994 
1995  /* Default settings for modern filesystems.
1996  *
1997  * Most of these capabilities were disabled by default in
1998  * libfuse2 for backwards compatibility reasons. In libfuse3,
1999  * we can finally enable them by default (as long as they're
2000  * supported by the kernel).
2001  */
2002 #define LL_SET_DEFAULT(cond, cap) \
2003  if ((cond) && (se->conn.capable & (cap))) \
2004  se->conn.want |= (cap)
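/*
 * For example, LL_SET_DEFAULT(se->op.flock, FUSE_CAP_FLOCK_LOCKS) enables
 * FUSE_CAP_FLOCK_LOCKS in se->conn.want only when the filesystem defines
 * a flock() handler and the kernel advertised the capability in
 * se->conn.capable.
 */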
2005  LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_READ);
2006  LL_SET_DEFAULT(1, FUSE_CAP_PARALLEL_DIROPS);
2007  LL_SET_DEFAULT(1, FUSE_CAP_AUTO_INVAL_DATA);
2008  LL_SET_DEFAULT(1, FUSE_CAP_HANDLE_KILLPRIV);
2009  LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_DIO);
2010  LL_SET_DEFAULT(1, FUSE_CAP_IOCTL_DIR);
2011  LL_SET_DEFAULT(1, FUSE_CAP_ATOMIC_O_TRUNC);
2012  LL_SET_DEFAULT(se->op.write_buf, FUSE_CAP_SPLICE_READ);
2013  LL_SET_DEFAULT(se->op.getlk && se->op.setlk,
2014  FUSE_CAP_POSIX_LOCKS);
2015  LL_SET_DEFAULT(se->op.flock, FUSE_CAP_FLOCK_LOCKS);
2016  LL_SET_DEFAULT(se->op.readdirplus, FUSE_CAP_READDIRPLUS);
2017  LL_SET_DEFAULT(se->op.readdirplus && se->op.readdir,
2018  FUSE_CAP_READDIRPLUS_AUTO);
2019  se->conn.time_gran = 1;
2020 
2021  if (bufsize < FUSE_MIN_READ_BUFFER) {
2022  fuse_log(FUSE_LOG_ERR, "fuse: warning: buffer size too small: %zu\n",
2023  bufsize);
2024  bufsize = FUSE_MIN_READ_BUFFER;
2025  }
2026  se->bufsize = bufsize;
2027 
2028  if (se->conn.max_write > bufsize - FUSE_BUFFER_HEADER_SIZE)
2029  se->conn.max_write = bufsize - FUSE_BUFFER_HEADER_SIZE;
2030 
2031  se->got_init = 1;
2032  if (se->op.init)
2033  se->op.init(se->userdata, &se->conn);
2034 
2035  if (se->conn.want & (~se->conn.capable)) {
2036  fuse_log(FUSE_LOG_ERR, "fuse: error: filesystem requested capabilities "
2037  "0x%x that are not supported by kernel, aborting.\n",
2038  se->conn.want & (~se->conn.capable));
2039  fuse_reply_err(req, EPROTO);
2040  se->error = -EPROTO;
2041  fuse_session_exit(se);
2042  return;
2043  }
2044 
2045  unsigned max_read_mo = get_max_read(se->mo);
2046  if (se->conn.max_read != max_read_mo) {
2047  fuse_log(FUSE_LOG_ERR, "fuse: error: init() and fuse_session_new() "
2048  "requested different maximum read size (%u vs %u)\n",
2049  se->conn.max_read, max_read_mo);
2050  fuse_reply_err(req, EPROTO);
2051  se->error = -EPROTO;
2052  fuse_session_exit(se);
2053  return;
2054  }
2055 
2056  if (se->conn.max_write < bufsize - FUSE_BUFFER_HEADER_SIZE) {
2057  se->bufsize = se->conn.max_write + FUSE_BUFFER_HEADER_SIZE;
2058  }
2059  if (arg->flags & FUSE_MAX_PAGES) {
2060  outarg.flags |= FUSE_MAX_PAGES;
2061  outarg.max_pages = (se->conn.max_write - 1) / getpagesize() + 1;
2062  }
2063 
2064  /* Always enable big writes; this is superseded
2065  by the max_write option */
2066  outarg.flags |= FUSE_BIG_WRITES;
2067 
2068  if (se->conn.want & FUSE_CAP_ASYNC_READ)
2069  outarg.flags |= FUSE_ASYNC_READ;
2070  if (se->conn.want & FUSE_CAP_POSIX_LOCKS)
2071  outarg.flags |= FUSE_POSIX_LOCKS;
2072  if (se->conn.want & FUSE_CAP_ATOMIC_O_TRUNC)
2073  outarg.flags |= FUSE_ATOMIC_O_TRUNC;
2074  if (se->conn.want & FUSE_CAP_EXPORT_SUPPORT)
2075  outarg.flags |= FUSE_EXPORT_SUPPORT;
2076  if (se->conn.want & FUSE_CAP_DONT_MASK)
2077  outarg.flags |= FUSE_DONT_MASK;
2078  if (se->conn.want & FUSE_CAP_FLOCK_LOCKS)
2079  outarg.flags |= FUSE_FLOCK_LOCKS;
2080  if (se->conn.want & FUSE_CAP_AUTO_INVAL_DATA)
2081  outarg.flags |= FUSE_AUTO_INVAL_DATA;
2082  if (se->conn.want & FUSE_CAP_READDIRPLUS)
2083  outarg.flags |= FUSE_DO_READDIRPLUS;
2084  if (se->conn.want & FUSE_CAP_READDIRPLUS_AUTO)
2085  outarg.flags |= FUSE_READDIRPLUS_AUTO;
2086  if (se->conn.want & FUSE_CAP_ASYNC_DIO)
2087  outarg.flags |= FUSE_ASYNC_DIO;
2088  if (se->conn.want & FUSE_CAP_WRITEBACK_CACHE)
2089  outarg.flags |= FUSE_WRITEBACK_CACHE;
2090  if (se->conn.want & FUSE_CAP_POSIX_ACL)
2091  outarg.flags |= FUSE_POSIX_ACL;
2092  if (se->conn.want & FUSE_CAP_CACHE_SYMLINKS)
2093  outarg.flags |= FUSE_CACHE_SYMLINKS;
2094  if (se->conn.want & FUSE_CAP_EXPLICIT_INVAL_DATA)
2095  outarg.flags |= FUSE_EXPLICIT_INVAL_DATA;
2096  outarg.max_readahead = se->conn.max_readahead;
2097  outarg.max_write = se->conn.max_write;
2098  if (se->conn.proto_minor >= 13) {
2099  if (se->conn.max_background >= (1 << 16))
2100  se->conn.max_background = (1 << 16) - 1;
2101  if (se->conn.congestion_threshold > se->conn.max_background)
2102  se->conn.congestion_threshold = se->conn.max_background;
2103  if (!se->conn.congestion_threshold) {
2104  se->conn.congestion_threshold =
2105  se->conn.max_background * 3 / 4;
2106  }
2107 
2108  outarg.max_background = se->conn.max_background;
2109  outarg.congestion_threshold = se->conn.congestion_threshold;
2110  }
2111  if (se->conn.proto_minor >= 23)
2112  outarg.time_gran = se->conn.time_gran;
2113 
2114  if (se->debug) {
2115  fuse_log(FUSE_LOG_DEBUG, " INIT: %u.%u\n", outarg.major, outarg.minor);
2116  fuse_log(FUSE_LOG_DEBUG, " flags=0x%08x\n", outarg.flags);
2117  fuse_log(FUSE_LOG_DEBUG, " max_readahead=0x%08x\n",
2118  outarg.max_readahead);
2119  fuse_log(FUSE_LOG_DEBUG, " max_write=0x%08x\n", outarg.max_write);
2120  fuse_log(FUSE_LOG_DEBUG, " max_background=%i\n",
2121  outarg.max_background);
2122  fuse_log(FUSE_LOG_DEBUG, " congestion_threshold=%i\n",
2123  outarg.congestion_threshold);
2124  fuse_log(FUSE_LOG_DEBUG, " time_gran=%u\n",
2125  outarg.time_gran);
2126  }
2127  if (arg->minor < 5)
2128  outargsize = FUSE_COMPAT_INIT_OUT_SIZE;
2129  else if (arg->minor < 23)
2130  outargsize = FUSE_COMPAT_22_INIT_OUT_SIZE;
2131 
2132  send_reply_ok(req, &outarg, outargsize);
2133 }
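
The capability negotiation above is completed by the filesystem's own init() handler: do_init() pre-populates se->conn.want from what the kernel advertised in se->conn.capable, hands the structure to se->op.init(), and then aborts the session if the handler requested bits the kernel did not offer. The following is a minimal sketch of such a handler; it is illustrative only and not part of fuse_lowlevel.c (the capability macros and struct fuse_conn_info come from fuse_common.h).

#define FUSE_USE_VERSION 31
#include <fuse_lowlevel.h>

static void example_init(void *userdata, struct fuse_conn_info *conn)
{
	(void) userdata;

	/* Only request capabilities the kernel advertised; do_init()
	   replies with EPROTO and exits the session otherwise. */
	if (conn->capable & FUSE_CAP_WRITEBACK_CACHE)
		conn->want |= FUSE_CAP_WRITEBACK_CACHE;

	/* Defaults chosen by LL_SET_DEFAULT() above may also be dropped. */
	conn->want &= ~FUSE_CAP_AUTO_INVAL_DATA;

	/* One-second timestamp granularity, sent to the kernel in
	   outarg.time_gran for protocol 7.23 and later. */
	conn->time_gran = 1000000000;
}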
2134 
2135 static void do_destroy(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
2136 {
2137  struct fuse_session *se = req->se;
2138 
2139  (void) nodeid;
2140  (void) inarg;
2141 
2142  se->got_destroy = 1;
2143  if (se->op.destroy)
2144  se->op.destroy(se->userdata);
2145 
2146  send_reply_ok(req, NULL, 0);
2147 }
2148 
2149 static void list_del_nreq(struct fuse_notify_req *nreq)
2150 {
2151  struct fuse_notify_req *prev = nreq->prev;
2152  struct fuse_notify_req *next = nreq->next;
2153  prev->next = next;
2154  next->prev = prev;
2155 }
2156 
2157 static void list_add_nreq(struct fuse_notify_req *nreq,
2158  struct fuse_notify_req *next)
2159 {
2160  struct fuse_notify_req *prev = next->prev;
2161  nreq->next = next;
2162  nreq->prev = prev;
2163  prev->next = nreq;
2164  next->prev = nreq;
2165 }
2166 
2167 static void list_init_nreq(struct fuse_notify_req *nreq)
2168 {
2169  nreq->next = nreq;
2170  nreq->prev = nreq;
2171 }
2172 
2173 static void do_notify_reply(fuse_req_t req, fuse_ino_t nodeid,
2174  const void *inarg, const struct fuse_buf *buf)
2175 {
2176  struct fuse_session *se = req->se;
2177  struct fuse_notify_req *nreq;
2178  struct fuse_notify_req *head;
2179 
2180  pthread_mutex_lock(&se->lock);
2181  head = &se->notify_list;
2182  for (nreq = head->next; nreq != head; nreq = nreq->next) {
2183  if (nreq->unique == req->unique) {
2184  list_del_nreq(nreq);
2185  break;
2186  }
2187  }
2188  pthread_mutex_unlock(&se->lock);
2189 
2190  if (nreq != head)
2191  nreq->reply(nreq, req, nodeid, inarg, buf);
2192 }
2193 
2194 static int send_notify_iov(struct fuse_session *se, int notify_code,
2195  struct iovec *iov, int count)
2196 {
2197  struct fuse_out_header out;
2198 
2199  if (!se->got_init)
2200  return -ENOTCONN;
2201 
2202  out.unique = 0;
2203  out.error = notify_code;
2204  iov[0].iov_base = &out;
2205  iov[0].iov_len = sizeof(struct fuse_out_header);
2206 
2207  return fuse_send_msg(se, NULL, iov, count);
2208 }
2209 
2210 int fuse_lowlevel_notify_poll(struct fuse_pollhandle *ph)
2211 {
2212  if (ph != NULL) {
2213  struct fuse_notify_poll_wakeup_out outarg;
2214  struct iovec iov[2];
2215 
2216  outarg.kh = ph->kh;
2217 
2218  iov[1].iov_base = &outarg;
2219  iov[1].iov_len = sizeof(outarg);
2220 
2221  return send_notify_iov(ph->se, FUSE_NOTIFY_POLL, iov, 2);
2222  } else {
2223  return 0;
2224  }
2225 }
2226 
2227 int fuse_lowlevel_notify_inval_inode(struct fuse_session *se, fuse_ino_t ino,
2228  off_t off, off_t len)
2229 {
2230  struct fuse_notify_inval_inode_out outarg;
2231  struct iovec iov[2];
2232 
2233  if (!se)
2234  return -EINVAL;
2235 
2236  if (se->conn.proto_minor < 12)
2237  return -ENOSYS;
2238 
2239  outarg.ino = ino;
2240  outarg.off = off;
2241  outarg.len = len;
2242 
2243  iov[1].iov_base = &outarg;
2244  iov[1].iov_len = sizeof(outarg);
2245 
2246  return send_notify_iov(se, FUSE_NOTIFY_INVAL_INODE, iov, 2);
2247 }
2248 
2249 int fuse_lowlevel_notify_inval_entry(struct fuse_session *se, fuse_ino_t parent,
2250  const char *name, size_t namelen)
2251 {
2252  struct fuse_notify_inval_entry_out outarg;
2253  struct iovec iov[3];
2254 
2255  if (!se)
2256  return -EINVAL;
2257 
2258  if (se->conn.proto_minor < 12)
2259  return -ENOSYS;
2260 
2261  outarg.parent = parent;
2262  outarg.namelen = namelen;
2263  outarg.padding = 0;
2264 
2265  iov[1].iov_base = &outarg;
2266  iov[1].iov_len = sizeof(outarg);
2267  iov[2].iov_base = (void *)name;
2268  iov[2].iov_len = namelen + 1;
2269 
2270  return send_notify_iov(se, FUSE_NOTIFY_INVAL_ENTRY, iov, 3);
2271 }
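
The two invalidation notifications are typically used together when the backing store changes without the kernel's involvement. A hedged usage sketch follows (not part of fuse_lowlevel.c; the function name and the origin of se, parent, ino and name are assumptions of the example).

#define FUSE_USE_VERSION 31
#include <fuse_lowlevel.h>
#include <errno.h>
#include <string.h>

static int backing_store_changed(struct fuse_session *se, fuse_ino_t parent,
				 fuse_ino_t ino, const char *name)
{
	int err;

	/* Drop all cached pages and attributes of the inode
	   (off = 0, len = 0 covers the whole file). */
	err = fuse_lowlevel_notify_inval_inode(se, ino, 0, 0);
	if (err && err != -ENOENT)
		return err;

	/* Drop the cached directory entry so the next access triggers
	   a fresh LOOKUP.  -ENOENT just means nothing was cached. */
	err = fuse_lowlevel_notify_inval_entry(se, parent, name, strlen(name));
	return err == -ENOENT ? 0 : err;
}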
2272 
2273 int fuse_lowlevel_notify_delete(struct fuse_session *se,
2274  fuse_ino_t parent, fuse_ino_t child,
2275  const char *name, size_t namelen)
2276 {
2277  struct fuse_notify_delete_out outarg;
2278  struct iovec iov[3];
2279 
2280  if (!se)
2281  return -EINVAL;
2282 
2283  if (se->conn.proto_minor < 18)
2284  return -ENOSYS;
2285 
2286  outarg.parent = parent;
2287  outarg.child = child;
2288  outarg.namelen = namelen;
2289  outarg.padding = 0;
2290 
2291  iov[1].iov_base = &outarg;
2292  iov[1].iov_len = sizeof(outarg);
2293  iov[2].iov_base = (void *)name;
2294  iov[2].iov_len = namelen + 1;
2295 
2296  return send_notify_iov(se, FUSE_NOTIFY_DELETE, iov, 3);
2297 }
2298 
2299 int fuse_lowlevel_notify_store(struct fuse_session *se, fuse_ino_t ino,
2300  off_t offset, struct fuse_bufvec *bufv,
2301  enum fuse_buf_copy_flags flags)
2302 {
2303  struct fuse_out_header out;
2304  struct fuse_notify_store_out outarg;
2305  struct iovec iov[3];
2306  size_t size = fuse_buf_size(bufv);
2307  int res;
2308 
2309  if (!se)
2310  return -EINVAL;
2311 
2312  if (se->conn.proto_minor < 15)
2313  return -ENOSYS;
2314 
2315  out.unique = 0;
2316  out.error = FUSE_NOTIFY_STORE;
2317 
2318  outarg.nodeid = ino;
2319  outarg.offset = offset;
2320  outarg.size = size;
2321  outarg.padding = 0;
2322 
2323  iov[0].iov_base = &out;
2324  iov[0].iov_len = sizeof(out);
2325  iov[1].iov_base = &outarg;
2326  iov[1].iov_len = sizeof(outarg);
2327 
2328  res = fuse_send_data_iov(se, NULL, iov, 2, bufv, flags);
2329  if (res > 0)
2330  res = -res;
2331 
2332  return res;
2333 }
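
fuse_lowlevel_notify_store() is the push counterpart of read: it copies data from the daemon straight into the kernel's cache for a given inode and offset. A small sketch, assuming the caller owns the data buffer (illustrative only, not part of fuse_lowlevel.c):

#define FUSE_USE_VERSION 31
#include <fuse_lowlevel.h>

static int push_into_cache(struct fuse_session *se, fuse_ino_t ino,
			   off_t offset, char *data, size_t len)
{
	/* Single in-memory buffer; FUSE_BUFVEC_INIT() is from fuse_common.h. */
	struct fuse_bufvec bufv = FUSE_BUFVEC_INIT(len);

	bufv.buf[0].mem = data;

	/* Returns -ENOSYS on kernels older than protocol 7.15 (see above). */
	return fuse_lowlevel_notify_store(se, ino, offset, &bufv, 0);
}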
2334 
2335 struct fuse_retrieve_req {
2336  struct fuse_notify_req nreq;
2337  void *cookie;
2338 };
2339 
2340 static void fuse_ll_retrieve_reply(struct fuse_notify_req *nreq,
2341  fuse_req_t req, fuse_ino_t ino,
2342  const void *inarg,
2343  const struct fuse_buf *ibuf)
2344 {
2345  struct fuse_session *se = req->se;
2346  struct fuse_retrieve_req *rreq =
2347  container_of(nreq, struct fuse_retrieve_req, nreq);
2348  const struct fuse_notify_retrieve_in *arg = inarg;
2349  struct fuse_bufvec bufv = {
2350  .buf[0] = *ibuf,
2351  .count = 1,
2352  };
2353 
2354  if (!(bufv.buf[0].flags & FUSE_BUF_IS_FD))
2355  bufv.buf[0].mem = PARAM(arg);
2356 
2357  bufv.buf[0].size -= sizeof(struct fuse_in_header) +
2358  sizeof(struct fuse_notify_retrieve_in);
2359 
2360  if (bufv.buf[0].size < arg->size) {
2361  fuse_log(FUSE_LOG_ERR, "fuse: retrieve reply: buffer size too small\n");
2362  fuse_reply_none(req);
2363  goto out;
2364  }
2365  bufv.buf[0].size = arg->size;
2366 
2367  if (se->op.retrieve_reply) {
2368  se->op.retrieve_reply(req, rreq->cookie, ino,
2369  arg->offset, &bufv);
2370  } else {
2371  fuse_reply_none(req);
2372  }
2373 out:
2374  free(rreq);
2375  if ((ibuf->flags & FUSE_BUF_IS_FD) && bufv.idx < bufv.count)
2376  fuse_ll_clear_pipe(se);
2377 }
2378 
2379 int fuse_lowlevel_notify_retrieve(struct fuse_session *se, fuse_ino_t ino,
2380  size_t size, off_t offset, void *cookie)
2381 {
2382  struct fuse_notify_retrieve_out outarg;
2383  struct iovec iov[2];
2384  struct fuse_retrieve_req *rreq;
2385  int err;
2386 
2387  if (!se)
2388  return -EINVAL;
2389 
2390  if (se->conn.proto_minor < 15)
2391  return -ENOSYS;
2392 
2393  rreq = malloc(sizeof(*rreq));
2394  if (rreq == NULL)
2395  return -ENOMEM;
2396 
2397  pthread_mutex_lock(&se->lock);
2398  rreq->cookie = cookie;
2399  rreq->nreq.unique = se->notify_ctr++;
2400  rreq->nreq.reply = fuse_ll_retrieve_reply;
2401  list_add_nreq(&rreq->nreq, &se->notify_list);
2402  pthread_mutex_unlock(&se->lock);
2403 
2404  outarg.notify_unique = rreq->nreq.unique;
2405  outarg.nodeid = ino;
2406  outarg.offset = offset;
2407  outarg.size = size;
2408  outarg.padding = 0;
2409 
2410  iov[1].iov_base = &outarg;
2411  iov[1].iov_len = sizeof(outarg);
2412 
2413  err = send_notify_iov(se, FUSE_NOTIFY_RETRIEVE, iov, 2);
2414  if (err) {
2415  pthread_mutex_lock(&se->lock);
2416  list_del_nreq(&rreq->nreq);
2417  pthread_mutex_unlock(&se->lock);
2418  free(rreq);
2419  }
2420 
2421  return err;
2422 }
2423 
2424 void *fuse_req_userdata(fuse_req_t req)
2425 {
2426  return req->se->userdata;
2427 }
2428 
2429 const struct fuse_ctx *fuse_req_ctx(fuse_req_t req)
2430 {
2431  return &req->ctx;
2432 }
2433 
2434 void fuse_req_interrupt_func(fuse_req_t req, fuse_interrupt_func_t func,
2435  void *data)
2436 {
2437  pthread_mutex_lock(&req->lock);
2438  pthread_mutex_lock(&req->se->lock);
2439  req->u.ni.func = func;
2440  req->u.ni.data = data;
2441  pthread_mutex_unlock(&req->se->lock);
2442  if (req->interrupted && func)
2443  func(req, data);
2444  pthread_mutex_unlock(&req->lock);
2445 }
2446 
2447 int fuse_req_interrupted(fuse_req_t req)
2448 {
2449  int interrupted;
2450 
2451  pthread_mutex_lock(&req->se->lock);
2452  interrupted = req->interrupted;
2453  pthread_mutex_unlock(&req->se->lock);
2454 
2455  return interrupted;
2456 }
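
fuse_req_interrupted(), together with the callback variant fuse_req_interrupt_func() above, lets a handler notice that the kernel has sent a FUSE_INTERRUPT for the request it is serving. Below is a sketch of a polling read handler; slow_backend_read() is a hypothetical blocking helper and not part of libfuse.

#define FUSE_USE_VERSION 31
#include <fuse_lowlevel.h>
#include <errno.h>
#include <sys/types.h>

/* Hypothetical backend: returns bytes read, 0 if not ready yet, -1 on error. */
extern ssize_t slow_backend_read(fuse_ino_t ino, off_t off, char *buf, size_t len);

static void example_read(fuse_req_t req, fuse_ino_t ino, size_t size,
			 off_t off, struct fuse_file_info *fi)
{
	char chunk[4096];
	ssize_t n;

	(void) fi;
	if (size > sizeof(chunk))
		size = sizeof(chunk);

	while ((n = slow_backend_read(ino, off, chunk, size)) == 0) {
		if (fuse_req_interrupted(req)) {
			/* Request was interrupted; the kernel expects EINTR. */
			fuse_reply_err(req, EINTR);
			return;
		}
	}

	if (n < 0)
		fuse_reply_err(req, EIO);
	else
		fuse_reply_buf(req, chunk, (size_t) n);
}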
2457 
2458 static struct {
2459  void (*func)(fuse_req_t, fuse_ino_t, const void *);
2460  const char *name;
2461 } fuse_ll_ops[] = {
2462  [FUSE_LOOKUP] = { do_lookup, "LOOKUP" },
2463  [FUSE_FORGET] = { do_forget, "FORGET" },
2464  [FUSE_GETATTR] = { do_getattr, "GETATTR" },
2465  [FUSE_SETATTR] = { do_setattr, "SETATTR" },
2466  [FUSE_READLINK] = { do_readlink, "READLINK" },
2467  [FUSE_SYMLINK] = { do_symlink, "SYMLINK" },
2468  [FUSE_MKNOD] = { do_mknod, "MKNOD" },
2469  [FUSE_MKDIR] = { do_mkdir, "MKDIR" },
2470  [FUSE_UNLINK] = { do_unlink, "UNLINK" },
2471  [FUSE_RMDIR] = { do_rmdir, "RMDIR" },
2472  [FUSE_RENAME] = { do_rename, "RENAME" },
2473  [FUSE_LINK] = { do_link, "LINK" },
2474  [FUSE_OPEN] = { do_open, "OPEN" },
2475  [FUSE_READ] = { do_read, "READ" },
2476  [FUSE_WRITE] = { do_write, "WRITE" },
2477  [FUSE_STATFS] = { do_statfs, "STATFS" },
2478  [FUSE_RELEASE] = { do_release, "RELEASE" },
2479  [FUSE_FSYNC] = { do_fsync, "FSYNC" },
2480  [FUSE_SETXATTR] = { do_setxattr, "SETXATTR" },
2481  [FUSE_GETXATTR] = { do_getxattr, "GETXATTR" },
2482  [FUSE_LISTXATTR] = { do_listxattr, "LISTXATTR" },
2483  [FUSE_REMOVEXATTR] = { do_removexattr, "REMOVEXATTR" },
2484  [FUSE_FLUSH] = { do_flush, "FLUSH" },
2485  [FUSE_INIT] = { do_init, "INIT" },
2486  [FUSE_OPENDIR] = { do_opendir, "OPENDIR" },
2487  [FUSE_READDIR] = { do_readdir, "READDIR" },
2488  [FUSE_RELEASEDIR] = { do_releasedir, "RELEASEDIR" },
2489  [FUSE_FSYNCDIR] = { do_fsyncdir, "FSYNCDIR" },
2490  [FUSE_GETLK] = { do_getlk, "GETLK" },
2491  [FUSE_SETLK] = { do_setlk, "SETLK" },
2492  [FUSE_SETLKW] = { do_setlkw, "SETLKW" },
2493  [FUSE_ACCESS] = { do_access, "ACCESS" },
2494  [FUSE_CREATE] = { do_create, "CREATE" },
2495  [FUSE_INTERRUPT] = { do_interrupt, "INTERRUPT" },
2496  [FUSE_BMAP] = { do_bmap, "BMAP" },
2497  [FUSE_IOCTL] = { do_ioctl, "IOCTL" },
2498  [FUSE_POLL] = { do_poll, "POLL" },
2499  [FUSE_FALLOCATE] = { do_fallocate, "FALLOCATE" },
2500  [FUSE_DESTROY] = { do_destroy, "DESTROY" },
2501  [FUSE_NOTIFY_REPLY] = { (void *) 1, "NOTIFY_REPLY" },
2502  [FUSE_BATCH_FORGET] = { do_batch_forget, "BATCH_FORGET" },
2503  [FUSE_READDIRPLUS] = { do_readdirplus, "READDIRPLUS"},
2504  [FUSE_RENAME2] = { do_rename2, "RENAME2" },
2505  [FUSE_COPY_FILE_RANGE] = { do_copy_file_range, "COPY_FILE_RANGE" },
2506  [FUSE_LSEEK] = { do_lseek, "LSEEK" },
2507  [CUSE_INIT] = { cuse_lowlevel_init, "CUSE_INIT" },
2508 };
2509 
2510 #define FUSE_MAXOP (sizeof(fuse_ll_ops) / sizeof(fuse_ll_ops[0]))
2511 
2512 static const char *opname(enum fuse_opcode opcode)
2513 {
2514  if (opcode >= FUSE_MAXOP || !fuse_ll_ops[opcode].name)
2515  return "???";
2516  else
2517  return fuse_ll_ops[opcode].name;
2518 }
2519 
2520 static int fuse_ll_copy_from_pipe(struct fuse_bufvec *dst,
2521  struct fuse_bufvec *src)
2522 {
2523  ssize_t res = fuse_buf_copy(dst, src, 0);
2524  if (res < 0) {
2525  fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: %s\n", strerror(-res));
2526  return res;
2527  }
2528  if ((size_t)res < fuse_buf_size(dst)) {
2529  fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: short read\n");
2530  return -1;
2531  }
2532  return 0;
2533 }
2534 
2535 void fuse_session_process_buf(struct fuse_session *se,
2536  const struct fuse_buf *buf)
2537 {
2538  fuse_session_process_buf_int(se, buf, NULL);
2539 }
2540 
2541 void fuse_session_process_buf_int(struct fuse_session *se,
2542  const struct fuse_buf *buf, struct fuse_chan *ch)
2543 {
2544  const size_t write_header_size = sizeof(struct fuse_in_header) +
2545  sizeof(struct fuse_write_in);
2546  struct fuse_bufvec bufv = { .buf[0] = *buf, .count = 1 };
2547  struct fuse_bufvec tmpbuf = FUSE_BUFVEC_INIT(write_header_size);
2548  struct fuse_in_header *in;
2549  const void *inarg;
2550  struct fuse_req *req;
2551  void *mbuf = NULL;
2552  int err;
2553  int res;
2554 
2555  if (buf->flags & FUSE_BUF_IS_FD) {
2556  if (buf->size < tmpbuf.buf[0].size)
2557  tmpbuf.buf[0].size = buf->size;
2558 
2559  mbuf = malloc(tmpbuf.buf[0].size);
2560  if (mbuf == NULL) {
2561  fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate header\n");
2562  goto clear_pipe;
2563  }
2564  tmpbuf.buf[0].mem = mbuf;
2565 
2566  res = fuse_ll_copy_from_pipe(&tmpbuf, &bufv);
2567  if (res < 0)
2568  goto clear_pipe;
2569 
2570  in = mbuf;
2571  } else {
2572  in = buf->mem;
2573  }
2574 
2575  if (se->debug) {
2576  fuse_log(FUSE_LOG_DEBUG,
2577  "unique: %llu, opcode: %s (%i), nodeid: %llu, insize: %zu, pid: %u\n",
2578  (unsigned long long) in->unique,
2579  opname((enum fuse_opcode) in->opcode), in->opcode,
2580  (unsigned long long) in->nodeid, buf->size, in->pid);
2581  }
2582 
2583  req = fuse_ll_alloc_req(se);
2584  if (req == NULL) {
2585  struct fuse_out_header out = {
2586  .unique = in->unique,
2587  .error = -ENOMEM,
2588  };
2589  struct iovec iov = {
2590  .iov_base = &out,
2591  .iov_len = sizeof(struct fuse_out_header),
2592  };
2593 
2594  fuse_send_msg(se, ch, &iov, 1);
2595  goto clear_pipe;
2596  }
2597 
2598  req->unique = in->unique;
2599  req->ctx.uid = in->uid;
2600  req->ctx.gid = in->gid;
2601  req->ctx.pid = in->pid;
2602  req->ch = ch ? fuse_chan_get(ch) : NULL;
2603 
2604  err = EIO;
2605  if (!se->got_init) {
2606  enum fuse_opcode expected;
2607 
2608  expected = se->cuse_data ? CUSE_INIT : FUSE_INIT;
2609  if (in->opcode != expected)
2610  goto reply_err;
2611  } else if (in->opcode == FUSE_INIT || in->opcode == CUSE_INIT)
2612  goto reply_err;
2613 
2614  err = EACCES;
2615  /* Implement -o allow_root */
2616  if (se->deny_others && in->uid != se->owner && in->uid != 0 &&
2617  in->opcode != FUSE_INIT && in->opcode != FUSE_READ &&
2618  in->opcode != FUSE_WRITE && in->opcode != FUSE_FSYNC &&
2619  in->opcode != FUSE_RELEASE && in->opcode != FUSE_READDIR &&
2620  in->opcode != FUSE_FSYNCDIR && in->opcode != FUSE_RELEASEDIR &&
2621  in->opcode != FUSE_NOTIFY_REPLY &&
2622  in->opcode != FUSE_READDIRPLUS)
2623  goto reply_err;
2624 
2625  err = ENOSYS;
2626  if (in->opcode >= FUSE_MAXOP || !fuse_ll_ops[in->opcode].func)
2627  goto reply_err;
2628  if (in->opcode != FUSE_INTERRUPT) {
2629  struct fuse_req *intr;
2630  pthread_mutex_lock(&se->lock);
2631  intr = check_interrupt(se, req);
2632  list_add_req(req, &se->list);
2633  pthread_mutex_unlock(&se->lock);
2634  if (intr)
2635  fuse_reply_err(intr, EAGAIN);
2636  }
2637 
2638  if ((buf->flags & FUSE_BUF_IS_FD) && write_header_size < buf->size &&
2639  (in->opcode != FUSE_WRITE || !se->op.write_buf) &&
2640  in->opcode != FUSE_NOTIFY_REPLY) {
2641  void *newmbuf;
2642 
2643  err = ENOMEM;
2644  newmbuf = realloc(mbuf, buf->size);
2645  if (newmbuf == NULL)
2646  goto reply_err;
2647  mbuf = newmbuf;
2648 
2649  tmpbuf = FUSE_BUFVEC_INIT(buf->size - write_header_size);
2650  tmpbuf.buf[0].mem = (char *)mbuf + write_header_size;
2651 
2652  res = fuse_ll_copy_from_pipe(&tmpbuf, &bufv);
2653  err = -res;
2654  if (res < 0)
2655  goto reply_err;
2656 
2657  in = mbuf;
2658  }
2659 
2660  inarg = (void *) &in[1];
2661  if (in->opcode == FUSE_WRITE && se->op.write_buf)
2662  do_write_buf(req, in->nodeid, inarg, buf);
2663  else if (in->opcode == FUSE_NOTIFY_REPLY)
2664  do_notify_reply(req, in->nodeid, inarg, buf);
2665  else
2666  fuse_ll_ops[in->opcode].func(req, in->nodeid, inarg);
2667 
2668 out_free:
2669  free(mbuf);
2670  return;
2671 
2672 reply_err:
2673  fuse_reply_err(req, err);
2674 clear_pipe:
2675  if (buf->flags & FUSE_BUF_IS_FD)
2676  fuse_ll_clear_pipe(se);
2677  goto out_free;
2678 }
2679 
2680 #define LL_OPTION(n,o,v) \
2681  { n, offsetof(struct fuse_session, o), v }
2682 
2683 static const struct fuse_opt fuse_ll_opts[] = {
2684  LL_OPTION("debug", debug, 1),
2685  LL_OPTION("-d", debug, 1),
2686  LL_OPTION("--debug", debug, 1),
2687  LL_OPTION("allow_root", deny_others, 1),
2688  FUSE_OPT_END
2689 };
2690 
2691 void fuse_lowlevel_version(void)
2692 {
2693  printf("using FUSE kernel interface version %i.%i\n",
2694  FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);
2695  fuse_mount_version();
2696 }
2697 
2698 void fuse_lowlevel_help(void)
2699 {
2700  /* These are not all options, but the ones that are
2701  potentially of interest to an end-user */
2702  printf(
2703 " -o allow_other allow access by all users\n"
2704 " -o allow_root allow access by root\n"
2705 " -o auto_unmount auto unmount on process termination\n");
2706 }
2707 
2708 void fuse_session_destroy(struct fuse_session *se)
2709 {
2710  struct fuse_ll_pipe *llp;
2711 
2712  if (se->got_init && !se->got_destroy) {
2713  if (se->op.destroy)
2714  se->op.destroy(se->userdata);
2715  }
2716  llp = pthread_getspecific(se->pipe_key);
2717  if (llp != NULL)
2718  fuse_ll_pipe_free(llp);
2719  pthread_key_delete(se->pipe_key);
2720  pthread_mutex_destroy(&se->lock);
2721  free(se->cuse_data);
2722  if (se->fd != -1)
2723  close(se->fd);
2724  destroy_mount_opts(se->mo);
2725  free(se);
2726 }
2727 
2728 
2729 static void fuse_ll_pipe_destructor(void *data)
2730 {
2731  struct fuse_ll_pipe *llp = data;
2732  fuse_ll_pipe_free(llp);
2733 }
2734 
2735 int fuse_session_receive_buf(struct fuse_session *se, struct fuse_buf *buf)
2736 {
2737  return fuse_session_receive_buf_int(se, buf, NULL);
2738 }
2739 
2740 int fuse_session_receive_buf_int(struct fuse_session *se, struct fuse_buf *buf,
2741  struct fuse_chan *ch)
2742 {
2743  int err;
2744  ssize_t res;
2745 #ifdef HAVE_SPLICE
2746  size_t bufsize = se->bufsize;
2747  struct fuse_ll_pipe *llp;
2748  struct fuse_buf tmpbuf;
2749 
2750  if (se->conn.proto_minor < 14 || !(se->conn.want & FUSE_CAP_SPLICE_READ))
2751  goto fallback;
2752 
2753  llp = fuse_ll_get_pipe(se);
2754  if (llp == NULL)
2755  goto fallback;
2756 
2757  if (llp->size < bufsize) {
2758  if (llp->can_grow) {
2759  res = fcntl(llp->pipe[0], F_SETPIPE_SZ, bufsize);
2760  if (res == -1) {
2761  llp->can_grow = 0;
2762  res = grow_pipe_to_max(llp->pipe[0]);
2763  if (res > 0)
2764  llp->size = res;
2765  goto fallback;
2766  }
2767  llp->size = res;
2768  }
2769  if (llp->size < bufsize)
2770  goto fallback;
2771  }
2772 
2773  res = splice(ch ? ch->fd : se->fd,
2774  NULL, llp->pipe[1], NULL, bufsize, 0);
2775  err = errno;
2776 
2777  if (fuse_session_exited(se))
2778  return 0;
2779 
2780  if (res == -1) {
2781  if (err == ENODEV) {
2782  /* Filesystem was unmounted, or connection was aborted
2783  via /sys/fs/fuse/connections */
2784  fuse_session_exit(se);
2785  return 0;
2786  }
2787  if (err != EINTR && err != EAGAIN)
2788  perror("fuse: splice from device");
2789  return -err;
2790  }
2791 
2792  if (res < sizeof(struct fuse_in_header)) {
2793  fuse_log(FUSE_LOG_ERR, "short splice from fuse device\n");
2794  return -EIO;
2795  }
2796 
2797  tmpbuf = (struct fuse_buf) {
2798  .size = res,
2799  .flags = FUSE_BUF_IS_FD,
2800  .fd = llp->pipe[0],
2801  };
2802 
2803  /*
2804  * Don't bother with zero copy for small requests.
2805  * fuse_loop_mt() needs to check for FORGET so this is more
2806  * than just an optimization.
2807  */
2808  if (res < sizeof(struct fuse_in_header) +
2809  sizeof(struct fuse_write_in) + pagesize) {
2810  struct fuse_bufvec src = { .buf[0] = tmpbuf, .count = 1 };
2811  struct fuse_bufvec dst = { .count = 1 };
2812 
2813  if (!buf->mem) {
2814  buf->mem = malloc(se->bufsize);
2815  if (!buf->mem) {
2816  fuse_log(FUSE_LOG_ERR,
2817  "fuse: failed to allocate read buffer\n");
2818  return -ENOMEM;
2819  }
2820  }
2821  buf->size = se->bufsize;
2822  buf->flags = 0;
2823  dst.buf[0] = *buf;
2824 
2825  res = fuse_buf_copy(&dst, &src, 0);
2826  if (res < 0) {
2827  fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: %s\n",
2828  strerror(-res));
2829  fuse_ll_clear_pipe(se);
2830  return res;
2831  }
2832  if (res < tmpbuf.size) {
2833  fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: short read\n");
2834  fuse_ll_clear_pipe(se);
2835  return -EIO;
2836  }
2837  assert(res == tmpbuf.size);
2838 
2839  } else {
2840  /* Don't overwrite buf->mem, as that would cause a leak */
2841  buf->fd = tmpbuf.fd;
2842  buf->flags = tmpbuf.flags;
2843  }
2844  buf->size = tmpbuf.size;
2845 
2846  return res;
2847 
2848 fallback:
2849 #endif
2850  if (!buf->mem) {
2851  buf->mem = malloc(se->bufsize);
2852  if (!buf->mem) {
2853  fuse_log(FUSE_LOG_ERR,
2854  "fuse: failed to allocate read buffer\n");
2855  return -ENOMEM;
2856  }
2857  }
2858 
2859 restart:
2860  res = read(ch ? ch->fd : se->fd, buf->mem, se->bufsize);
2861  err = errno;
2862 
2863  if (fuse_session_exited(se))
2864  return 0;
2865  if (res == -1) {
2866  /* ENOENT means the operation was interrupted; it's safe
2867  to restart */
2868  if (err == ENOENT)
2869  goto restart;
2870 
2871  if (err == ENODEV) {
2872  /* Filesystem was unmounted, or connection was aborted
2873  via /sys/fs/fuse/connections */
2874  fuse_session_exit(se);
2875  return 0;
2876  }
2877  /* Errors occurring during normal operation: EINTR (read
2878  interrupted), EAGAIN (nonblocking I/O), ENODEV (filesystem
2879  umounted) */
2880  if (err != EINTR && err != EAGAIN)
2881  perror("fuse: reading device");
2882  return -err;
2883  }
2884  if ((size_t) res < sizeof(struct fuse_in_header)) {
2885  fuse_log(FUSE_LOG_ERR, "short read on fuse device\n");
2886  return -EIO;
2887  }
2888 
2889  buf->size = res;
2890 
2891  return res;
2892 }
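
Together with fuse_session_process_buf() above, this function is the building block of the session loops: receive one request buffer, dispatch it, repeat until the session exits. A minimal single-threaded loop in the spirit of fuse_session_loop() (fuse_loop.c); sketch only, not part of fuse_lowlevel.c.

#define FUSE_USE_VERSION 31
#include <fuse_lowlevel.h>
#include <errno.h>
#include <stdlib.h>

static int example_loop(struct fuse_session *se)
{
	int res = 0;
	struct fuse_buf buf = { .mem = NULL };	/* reused across iterations */

	while (!fuse_session_exited(se)) {
		res = fuse_session_receive_buf(se, &buf);
		if (res == -EINTR)
			continue;		/* interrupted read, retry */
		if (res <= 0)
			break;			/* 0: unmounted, <0: error */
		fuse_session_process_buf(se, &buf);
	}

	free(buf.mem);
	return res < 0 ? res : 0;
}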
2893 
2894 struct fuse_session *fuse_session_new(struct fuse_args *args,
2895  const struct fuse_lowlevel_ops *op,
2896  size_t op_size, void *userdata)
2897 {
2898  int err;
2899  struct fuse_session *se;
2900  struct mount_opts *mo;
2901 
2902  if (sizeof(struct fuse_lowlevel_ops) < op_size) {
2903  fuse_log(FUSE_LOG_ERR, "fuse: warning: library too old, some operations may not work\n");
2904  op_size = sizeof(struct fuse_lowlevel_ops);
2905  }
2906 
2907  if (args->argc == 0) {
2908  fuse_log(FUSE_LOG_ERR, "fuse: empty argv passed to fuse_session_new().\n");
2909  return NULL;
2910  }
2911 
2912  se = (struct fuse_session *) calloc(1, sizeof(struct fuse_session));
2913  if (se == NULL) {
2914  fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate fuse object\n");
2915  goto out1;
2916  }
2917  se->fd = -1;
2918  se->conn.max_write = UINT_MAX;
2919  se->conn.max_readahead = UINT_MAX;
2920 
2921  /* Parse options */
2922  if(fuse_opt_parse(args, se, fuse_ll_opts, NULL) == -1)
2923  goto out2;
2924  if(se->deny_others) {
2925  /* Allowing access only by root is done by instructing the
2926  * kernel to allow access by everyone, and then restricting
2927  * access to root and the mountpoint owner in libfuse.
2928  */
2929  // We may be adding the option a second time, but
2930  // that doesn't hurt.
2931  if(fuse_opt_add_arg(args, "-oallow_other") == -1)
2932  goto out2;
2933  }
2934  mo = parse_mount_opts(args);
2935  if (mo == NULL)
2936  goto out3;
2937 
2938  if(args->argc == 1 &&
2939  args->argv[0][0] == '-') {
2940  fuse_log(FUSE_LOG_ERR, "fuse: warning: argv[0] looks like an option, but "
2941  "will be ignored\n");
2942  } else if (args->argc != 1) {
2943  int i;
2944  fuse_log(FUSE_LOG_ERR, "fuse: unknown option(s): `");
2945  for(i = 1; i < args->argc-1; i++)
2946  fuse_log(FUSE_LOG_ERR, "%s ", args->argv[i]);
2947  fuse_log(FUSE_LOG_ERR, "%s'\n", args->argv[i]);
2948  goto out4;
2949  }
2950 
2951  if (se->debug)
2952  fuse_log(FUSE_LOG_DEBUG, "FUSE library version: %s\n", PACKAGE_VERSION);
2953 
2954  se->bufsize = FUSE_MAX_MAX_PAGES * getpagesize() +
2955  FUSE_BUFFER_HEADER_SIZE;
2956 
2957  list_init_req(&se->list);
2958  list_init_req(&se->interrupts);
2959  list_init_nreq(&se->notify_list);
2960  se->notify_ctr = 1;
2961  pthread_mutex_init(&se->lock, NULL);
2962 
2963  err = pthread_key_create(&se->pipe_key, fuse_ll_pipe_destructor);
2964  if (err) {
2965  fuse_log(FUSE_LOG_ERR, "fuse: failed to create thread specific key: %s\n",
2966  strerror(err));
2967  goto out5;
2968  }
2969 
2970  memcpy(&se->op, op, op_size);
2971  se->owner = getuid();
2972  se->userdata = userdata;
2973 
2974  se->mo = mo;
2975  return se;
2976 
2977 out5:
2978  pthread_mutex_destroy(&se->lock);
2979 out4:
2980  fuse_opt_free_args(args);
2981 out3:
2982  if (mo != NULL)
2983  destroy_mount_opts(mo);
2984 out2:
2985  free(se);
2986 out1:
2987  return NULL;
2988 }
2989 
2990 int fuse_session_mount(struct fuse_session *se, const char *mountpoint)
2991 {
2992  int fd;
2993 
2994  /*
2995  * Make sure file descriptors 0, 1 and 2 are open, otherwise chaos
2996  * would ensue.
2997  */
2998  do {
2999  fd = open("/dev/null", O_RDWR);
3000  if (fd > 2)
3001  close(fd);
3002  } while (fd >= 0 && fd <= 2);
3003 
3004  /*
3005  * To allow FUSE daemons to run without privileges, the caller may open
3006  * /dev/fuse before launching the file system and pass on the file
3007  * descriptor by specifying /dev/fd/N as the mount point. Note that the
3008  * parent process takes care of performing the mount in this case.
3009  */
3010  fd = fuse_mnt_parse_fuse_fd(mountpoint);
3011  if (fd != -1) {
3012  if (fcntl(fd, F_GETFD) == -1) {
3013  fuse_log(FUSE_LOG_ERR,
3014  "fuse: Invalid file descriptor /dev/fd/%u\n",
3015  fd);
3016  return -1;
3017  }
3018  se->fd = fd;
3019  return 0;
3020  }
3021 
3022  /* Open channel */
3023  fd = fuse_kern_mount(mountpoint, se->mo);
3024  if (fd == -1)
3025  return -1;
3026  se->fd = fd;
3027 
3028  /* Save mountpoint */
3029  se->mountpoint = strdup(mountpoint);
3030  if (se->mountpoint == NULL)
3031  goto error_out;
3032 
3033  return 0;
3034 
3035 error_out:
3036  fuse_kern_unmount(mountpoint, fd);
3037  return -1;
3038 }
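
The /dev/fd/N convention checked above allows a privileged launcher to open /dev/fuse, perform the mount itself, and then start an unprivileged daemon that only talks to the already-open descriptor. A hedged sketch of the launcher side (illustrative; the actual mount(2) call and privilege handling are elided):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int launch_unprivileged_daemon(const char *daemon_path)
{
	char arg[32];
	int fd = open("/dev/fuse", O_RDWR);

	if (fd == -1)
		return -1;

	/* ... mount(2) the filesystem here, passing "fd=<fd>" in the mount
	   data string, then drop privileges ... */

	/* The daemon hands this string to fuse_session_mount(), which
	   detects it via fuse_mnt_parse_fuse_fd() and skips mounting. */
	snprintf(arg, sizeof(arg), "/dev/fd/%i", fd);
	execl(daemon_path, daemon_path, arg, (char *) NULL);
	return -1;	/* only reached if execl() fails */
}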
3039 
3040 int fuse_session_fd(struct fuse_session *se)
3041 {
3042  return se->fd;
3043 }
3044 
3045 void fuse_session_unmount(struct fuse_session *se)
3046 {
3047  if (se->mountpoint != NULL) {
3048  fuse_kern_unmount(se->mountpoint, se->fd);
3049  se->fd = -1;
3050  free(se->mountpoint);
3051  se->mountpoint = NULL;
3052  }
3053 }
3054 
3055 #ifdef linux
3056 int fuse_req_getgroups(fuse_req_t req, int size, gid_t list[])
3057 {
3058  char *buf;
3059  size_t bufsize = 1024;
3060  char path[128];
3061  int ret;
3062  int fd;
3063  unsigned long pid = req->ctx.pid;
3064  char *s;
3065 
3066  sprintf(path, "/proc/%lu/task/%lu/status", pid, pid);
3067 
3068 retry:
3069  buf = malloc(bufsize);
3070  if (buf == NULL)
3071  return -ENOMEM;
3072 
3073  ret = -EIO;
3074  fd = open(path, O_RDONLY);
3075  if (fd == -1)
3076  goto out_free;
3077 
3078  ret = read(fd, buf, bufsize);
3079  close(fd);
3080  if (ret < 0) {
3081  ret = -EIO;
3082  goto out_free;
3083  }
3084 
3085  if ((size_t)ret == bufsize) {
3086  free(buf);
3087  bufsize *= 4;
3088  goto retry;
3089  }
3090 
3091  ret = -EIO;
3092  s = strstr(buf, "\nGroups:");
3093  if (s == NULL)
3094  goto out_free;
3095 
3096  s += 8;
3097  ret = 0;
3098  while (1) {
3099  char *end;
3100  unsigned long val = strtoul(s, &end, 0);
3101  if (end == s)
3102  break;
3103 
3104  s = end;
3105  if (ret < size)
3106  list[ret] = val;
3107  ret++;
3108  }
3109 
3110 out_free:
3111  free(buf);
3112  return ret;
3113 }
3114 #else /* linux */
3115 /*
3116  * This is currently not implemented on platforms other than Linux...
3117  */
3118 int fuse_req_getgroups(fuse_req_t req, int size, gid_t list[])
3119 {
3120  (void) req; (void) size; (void) list;
3121  return -ENOSYS;
3122 }
3123 #endif
3124 
3125 /* Prevent spurious data race warning - we don't care
3126  * about races for this flag */
3127 __attribute__((no_sanitize_thread))
3128 void fuse_session_exit(struct fuse_session *se)
3129 {
3130  se->exited = 1;
3131 }
3132 
3133 __attribute__((no_sanitize_thread))
3134 void fuse_session_reset(struct fuse_session *se)
3135 {
3136  se->exited = 0;
3137  se->error = 0;
3138 }
3139 
3140 __attribute__((no_sanitize_thread))
3141 int fuse_session_exited(struct fuse_session *se)
3142 {
3143  return se->exited;
3144 }