"Fossies" - the Fresh Open Source Software Archive 
Member "cryptsetup-2.4.3/lib/utils_device.c" (13 Jan 2022, 25565 Bytes) of package /linux/misc/cryptsetup-2.4.3.tar.xz:
As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style:
standard) with prefixed line numbers and
code folding option.
Alternatively, you can view or download the uninterpreted source code file here.
For more information about "utils_device.c" see the
Fossies "Dox" file reference documentation and the last
Fossies "Diffs" side-by-side code changes report:
2.3.6_vs_2.4.0.
1 /*
2 * device backend utilities
3 *
4 * Copyright (C) 2004 Jana Saout <jana@saout.de>
5 * Copyright (C) 2004-2007 Clemens Fruhwirth <clemens@endorphin.org>
6 * Copyright (C) 2009-2021 Red Hat, Inc. All rights reserved.
7 * Copyright (C) 2009-2021 Milan Broz
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
22 */
23
24 #include <assert.h>
25 #include <string.h>
26 #include <stdlib.h>
27 #include <errno.h>
28 #include <sys/types.h>
29 #include <sys/stat.h>
30 #include <sys/ioctl.h>
31 #include <linux/fs.h>
32 #include <unistd.h>
33 #ifdef HAVE_SYS_SYSMACROS_H
34 # include <sys/sysmacros.h> /* for major, minor */
35 #endif
36 #ifdef HAVE_SYS_STATVFS_H
37 # include <sys/statvfs.h>
38 #endif
39 #include "internal.h"
40 #include "utils_device_locking.h"
41
/* Backend handle for a data/header device (block device or image file). */
struct device {
	char *path;		/* device path; after loop attach, the loop device path */

	char *file_path;	/* original file path when a loop device backs a regular file */
	int loop_fd;		/* fd keeping the autoclear loop device alive, -1 if none */

	int ro_dev_fd;		/* cached read-only fd, -1 when closed */
	int dev_fd;		/* cached read-write fd, -1 when closed */
	int dev_fd_excl;	/* fd held with O_EXCL to block other exclusive users, -1 when closed */

	struct crypt_lock_handle *lh;	/* metadata lock handle, NULL when unlocked */

	unsigned int o_direct:1;	/* device opened with O_DIRECT (verified by read test) */
	unsigned int init_done:1;	/* path is bdev or loop already initialized */

	/* cached values */
	size_t alignment;
	size_t block_size;
	size_t loop_block_size;
};
62
63 static size_t device_fs_block_size_fd(int fd)
64 {
65 size_t page_size = crypt_getpagesize();
66
67 #ifdef HAVE_SYS_STATVFS_H
68 struct statvfs buf;
69
70 /*
71 * NOTE: some filesystems (NFS) returns bogus blocksize (1MB).
72 * Page-size io should always work and avoids increasing IO beyond aligned LUKS header.
73 */
74 if (!fstatvfs(fd, &buf) && buf.f_bsize && buf.f_bsize <= page_size)
75 return (size_t)buf.f_bsize;
76 #endif
77 return page_size;
78 }
79
/*
 * Logical block size for the descriptor; optionally reports in *min_size
 * the smallest amount of data that can be read (0 for an empty file).
 * Returns 0 when fstat() fails.
 */
static size_t device_block_size_fd(int fd, size_t *min_size)
{
	struct stat st;
	size_t bsize;
	int ssize;

	if (fstat(fd, &st) < 0)
		return 0;

	if (S_ISREG(st.st_mode))
		bsize = device_fs_block_size_fd(fd);
	else if (ioctl(fd, BLKSSZGET, &ssize) >= 0)
		bsize = (size_t)ssize;
	else
		bsize = crypt_getpagesize();

	if (min_size) {
		if (!S_ISREG(st.st_mode)) {
			/* a block device always has at least one block */
			*min_size = bsize;
		} else if (st.st_size > (ssize_t)bsize) {
			*min_size = bsize;
		} else {
			/* a regular file may be shorter, even empty */
			*min_size = st.st_size;
		}
	}

	return bsize;
}
114
115 static size_t device_block_phys_size_fd(int fd)
116 {
117 struct stat st;
118 int arg;
119 size_t bsize = SECTOR_SIZE;
120
121 if (fstat(fd, &st) < 0)
122 return bsize;
123
124 if (S_ISREG(st.st_mode))
125 bsize = MAX_SECTOR_SIZE;
126 else if (ioctl(fd, BLKPBSZGET, &arg) >= 0)
127 bsize = (size_t)arg;
128
129 return bsize;
130 }
131
132 static size_t device_alignment_fd(int devfd)
133 {
134 long alignment = DEFAULT_MEM_ALIGNMENT;
135
136 #ifdef _PC_REC_XFER_ALIGN
137 alignment = fpathconf(devfd, _PC_REC_XFER_ALIGN);
138 if (alignment < 0)
139 alignment = DEFAULT_MEM_ALIGNMENT;
140 #endif
141 return (size_t)alignment;
142 }
143
/* Verify that the opened descriptor can really deliver data (detects
 * setups where O_DIRECT open succeeds but reads fail). 0 on success. */
static int device_read_test(int devfd)
{
	char buf[512];
	size_t minsize = 0, bsize, align;
	int r;

	bsize = device_block_size_fd(devfd, &minsize);
	align = device_alignment_fd(devfd);

	if (!bsize || !align)
		return -EINVAL;

	/* Nothing to read (empty file) still counts as success. */
	if (!minsize)
		return 0;

	if (minsize > sizeof(buf))
		minsize = sizeof(buf);

	r = (read_blockwise(devfd, bsize, align, buf, minsize) == (ssize_t)minsize) ? 0 : -EIO;

	crypt_safe_memzero(buf, sizeof(buf));
	return r;
}
168
/*
 * The direct-io is always preferred. The header is usually mapped to the same
 * device and can be accessed when the rest of device is mapped to data device.
 * Using direct-io ensures that we do not mess with data in cache.
 * (But proper alignment should prevent this in the first place.)
 * The read test is needed to detect broken configurations (seen with remote
 * block devices) that allow open with direct-io but then fails on read.
 *
 * Returns 0 for a usable block device, -ENOTBLK for a regular file
 * (caller may attach a loop device later), -EINVAL otherwise.
 * Side effect: updates device->o_direct, ->alignment and ->block_size.
 */
static int device_ready(struct crypt_device *cd, struct device *device)
{
	int devfd = -1, r = 0;
	struct stat st;
	size_t tmp_size;

	if (!device)
		return -EINVAL;

	/* Try O_DIRECT first and confirm with a real read; keep the flag
	 * cleared and fall back to a plain open when either step fails. */
	if (device->o_direct) {
		log_dbg(cd, "Trying to open and read device %s with direct-io.",
			device_path(device));
		device->o_direct = 0;
		devfd = open(device_path(device), O_RDONLY | O_DIRECT);
		if (devfd >= 0) {
			if (device_read_test(devfd) == 0) {
				device->o_direct = 1;
			} else {
				close(devfd);
				devfd = -1;
			}
		}
	}

	if (devfd < 0) {
		log_dbg(cd, "Trying to open device %s without direct-io.",
			device_path(device));
		devfd = open(device_path(device), O_RDONLY);
	}

	if (devfd < 0) {
		log_err(cd, _("Device %s does not exist or access denied."),
			device_path(device));
		return -EINVAL;
	}

	/* Regular file reports -ENOTBLK (not an error here); anything that is
	 * neither a block device nor a regular file is rejected. */
	if (fstat(devfd, &st) < 0)
		r = -EINVAL;
	else if (!S_ISBLK(st.st_mode))
		r = S_ISREG(st.st_mode) ? -ENOTBLK : -EINVAL;
	if (r == -EINVAL) {
		log_err(cd, _("Device %s is not compatible."),
			device_path(device));
		close(devfd);
		return r;
	}

	/* Allow only increase (loop device) */
	tmp_size = device_alignment_fd(devfd);
	if (tmp_size > device->alignment)
		device->alignment = tmp_size;

	tmp_size = device_block_size_fd(devfd, NULL);
	if (tmp_size > device->block_size)
		device->block_size = tmp_size;

	close(devfd);
	return r;
}
236
237 static int _open_locked(struct crypt_device *cd, struct device *device, int flags)
238 {
239 int fd;
240
241 if (!device)
242 return -EINVAL;
243
244 log_dbg(cd, "Opening locked device %s", device_path(device));
245
246 if ((flags & O_ACCMODE) != O_RDONLY && device_locked_readonly(device->lh)) {
247 log_dbg(cd, "Cannot open locked device %s in write mode. Read lock held.", device_path(device));
248 return -EAGAIN;
249 }
250
251 fd = open(device_path(device), flags);
252 if (fd < 0)
253 return -errno;
254
255 if (device_locked_verify(cd, fd, device->lh)) {
256 /* fd doesn't correspond to a locked resource */
257 close(fd);
258 log_dbg(cd, "Failed to verify lock resource for device %s.", device_path(device));
259 return -EINVAL;
260 }
261
262 return fd;
263 }
264
265 /*
266 * Common wrapper for device sync.
267 */
268 void device_sync(struct crypt_device *cd, struct device *device)
269 {
270 if (!device || device->dev_fd < 0)
271 return;
272
273 if (fsync(device->dev_fd) == -1)
274 log_dbg(cd, "Cannot sync device %s.", device_path(device));
275 }
276
277 /*
278 * in non-locked mode returns always fd or -1
279 *
280 * in locked mode:
281 * opened fd or one of:
282 * -EAGAIN : requested write mode while device being locked in via shared lock
283 * -EINVAL : invalid lock fd state
284 * -1 : all other errors
285 */
286 static int device_open_internal(struct crypt_device *cd, struct device *device, int flags)
287 {
288 int access, devfd;
289
290 if (device->o_direct)
291 flags |= O_DIRECT;
292
293 access = flags & O_ACCMODE;
294 if (access == O_WRONLY)
295 access = O_RDWR;
296
297 if (access == O_RDONLY && device->ro_dev_fd >= 0) {
298 log_dbg(cd, "Reusing open r%c fd on device %s", 'o', device_path(device));
299 return device->ro_dev_fd;
300 } else if (access == O_RDWR && device->dev_fd >= 0) {
301 log_dbg(cd, "Reusing open r%c fd on device %s", 'w', device_path(device));
302 return device->dev_fd;
303 }
304
305 if (device_locked(device->lh))
306 devfd = _open_locked(cd, device, flags);
307 else
308 devfd = open(device_path(device), flags);
309
310 if (devfd < 0) {
311 log_dbg(cd, "Cannot open device %s%s.",
312 device_path(device),
313 access != O_RDONLY ? " for write" : "");
314 return devfd;
315 }
316
317 if (access == O_RDONLY)
318 device->ro_dev_fd = devfd;
319 else
320 device->dev_fd = devfd;
321
322 return devfd;
323 }
324
325 int device_open(struct crypt_device *cd, struct device *device, int flags)
326 {
327 if (!device)
328 return -EINVAL;
329
330 assert(!device_locked(device->lh));
331 return device_open_internal(cd, device, flags);
332 }
333
/*
 * Open the device for regular use and, for block devices, additionally
 * acquire a second fd with O_EXCL so the kernel blocks other exclusive
 * users (e.g. mount, mkfs) until device_release_excl()/device_free().
 */
int device_open_excl(struct crypt_device *cd, struct device *device, int flags)
{
	const char *path;
	struct stat st;

	if (!device)
		return -EINVAL;

	assert(!device_locked(device->lh));

	if (device->dev_fd_excl < 0) {
		path = device_path(device);
		if (stat(path, &st))
			return -EINVAL;
		if (!S_ISBLK(st.st_mode))
			log_dbg(cd, "%s is not a block device. Can't open in exclusive mode.",
				path);
		else {
			/* open(2) with O_EXCL (w/o O_CREAT) on regular file is undefined behaviour according to man page */
			/* coverity[toctou] */
			device->dev_fd_excl = open(path, O_RDONLY | O_EXCL);
			if (device->dev_fd_excl < 0)
				return errno == EBUSY ? -EBUSY : device->dev_fd_excl;
			/* Re-check on the opened fd to close the stat/open race:
			 * the path may have been replaced by a non-block node. */
			if (fstat(device->dev_fd_excl, &st) || !S_ISBLK(st.st_mode)) {
				log_dbg(cd, "%s is not a block device. Can't open in exclusive mode.",
					path);
				close(device->dev_fd_excl);
				device->dev_fd_excl = -1;
			} else
				log_dbg(cd, "Device %s is blocked for exclusive open.", path);
		}
	}

	return device_open_internal(cd, device, flags);
}
369
370 void device_release_excl(struct crypt_device *cd, struct device *device)
371 {
372 if (device && device->dev_fd_excl >= 0) {
373 if (close(device->dev_fd_excl))
374 log_dbg(cd, "Failed to release exclusive handle on device %s.",
375 device_path(device));
376 else
377 log_dbg(cd, "Closed exclusive fd for %s.", device_path(device));
378 device->dev_fd_excl = -1;
379 }
380 }
381
382 int device_open_locked(struct crypt_device *cd, struct device *device, int flags)
383 {
384 if (!device)
385 return -EINVAL;
386
387 assert(!crypt_metadata_locking_enabled() || device_locked(device->lh));
388 return device_open_internal(cd, device, flags);
389 }
390
391 /* Avoid any read from device, expects direct-io to work. */
392 int device_alloc_no_check(struct device **device, const char *path)
393 {
394 struct device *dev;
395
396 if (!path) {
397 *device = NULL;
398 return 0;
399 }
400
401 dev = malloc(sizeof(struct device));
402 if (!dev)
403 return -ENOMEM;
404
405 memset(dev, 0, sizeof(struct device));
406 dev->path = strdup(path);
407 if (!dev->path) {
408 free(dev);
409 return -ENOMEM;
410 }
411 dev->loop_fd = -1;
412 dev->ro_dev_fd = -1;
413 dev->dev_fd = -1;
414 dev->dev_fd_excl = -1;
415 dev->o_direct = 1;
416
417 *device = dev;
418 return 0;
419 }
420
/*
 * Allocate a device handle and probe it. A regular file (-ENOTBLK from
 * device_ready) is kept un-initialized so a loop device can be attached
 * later; other probe failures free the handle.
 */
int device_alloc(struct crypt_device *cd, struct device **device, const char *path)
{
	struct device *dev;
	int r;

	r = device_alloc_no_check(&dev, path);
	if (r < 0)
		return r;

	if (dev) {
		r = device_ready(cd, dev);
		if (!r) {
			dev->init_done = 1;
		} else if (r == -ENOTBLK) {
			/* alloc loop later */
		} else if (r < 0) {
			free(dev->path);
			free(dev);
			/* NOTE(review): the original error code from device_ready
			 * is collapsed to -ENOTBLK here — confirm callers do not
			 * need to distinguish the failure cause. */
			return -ENOTBLK;
		}
	}

	*device = dev;
	return 0;
}
446
447 void device_free(struct crypt_device *cd, struct device *device)
448 {
449 if (!device)
450 return;
451
452 device_close(cd, device);
453
454 if (device->dev_fd_excl != -1) {
455 log_dbg(cd, "Closed exclusive fd for %s.", device_path(device));
456 close(device->dev_fd_excl);
457 }
458
459 if (device->loop_fd != -1) {
460 log_dbg(cd, "Closed loop %s (%s).", device->path, device->file_path);
461 close(device->loop_fd);
462 }
463
464 assert(!device_locked(device->lh));
465
466 free(device->file_path);
467 free(device->path);
468 free(device);
469 }
470
471 /* Get block device path */
472 const char *device_block_path(const struct device *device)
473 {
474 if (!device || !device->init_done)
475 return NULL;
476
477 return device->path;
478 }
479
480 /* Get device-mapper name of device (if possible) */
481 const char *device_dm_name(const struct device *device)
482 {
483 const char *dmdir = dm_get_dir();
484 size_t dmdir_len = strlen(dmdir);
485
486 if (!device || !device->init_done)
487 return NULL;
488
489 if (strncmp(device->path, dmdir, dmdir_len))
490 return NULL;
491
492 return &device->path[dmdir_len+1];
493 }
494
495 /* Get path to device / file */
496 const char *device_path(const struct device *device)
497 {
498 if (!device)
499 return NULL;
500
501 if (device->file_path)
502 return device->file_path;
503
504 return device->path;
505 }
506
507 /* block device topology ioctls, introduced in 2.6.32 */
508 #ifndef BLKIOMIN
509 #define BLKIOMIN _IO(0x12,120)
510 #define BLKIOOPT _IO(0x12,121)
511 #define BLKALIGNOFF _IO(0x12,122)
512 #endif
513
/*
 * Query block device topology (minimum/optimal IO size, alignment offset)
 * and derive the required data alignment. On any failure the defaults
 * (*required_alignment = default_alignment, *alignment_offset = 0) stand.
 */
void device_topology_alignment(struct crypt_device *cd,
			       struct device *device,
			       unsigned long *required_alignment, /* bytes */
			       unsigned long *alignment_offset,   /* bytes */
			       unsigned long default_alignment)
{
	int dev_alignment_offset = 0;
	unsigned int min_io_size = 0, opt_io_size = 0;
	unsigned long temp_alignment = 0;
	int fd;

	*required_alignment = default_alignment;
	*alignment_offset = 0;

	if (!device || !device->path) //FIXME
		return;

	fd = open(device->path, O_RDONLY);
	if (fd == -1)
		return;

	/* minimum io size */
	if (ioctl(fd, BLKIOMIN, &min_io_size) == -1) {
		log_dbg(cd, "Topology info for %s not supported, using default offset %lu bytes.",
			device->path, default_alignment);
		goto out;
	}

	/* optimal io size */
	if (ioctl(fd, BLKIOOPT, &opt_io_size) == -1)
		opt_io_size = min_io_size;

	/* alignment offset, bogus -1 means misaligned/unknown */
	if (ioctl(fd, BLKALIGNOFF, &dev_alignment_offset) == -1 || dev_alignment_offset < 0)
		dev_alignment_offset = 0;
	*alignment_offset = (unsigned long)dev_alignment_offset;

	temp_alignment = (unsigned long)min_io_size;

	/*
	 * Ignore bogus opt-io that could break alignment.
	 * Also real opt_io_size should be aligned to minimal page size (4k).
	 * Some bogus USB enclosures reports wrong data here.
	 */
	if ((temp_alignment < (unsigned long)opt_io_size) &&
	    !((unsigned long)opt_io_size % temp_alignment) && !MISALIGNED_4K(opt_io_size))
		temp_alignment = (unsigned long)opt_io_size;
	else if (opt_io_size && (opt_io_size != min_io_size))
		log_err(cd, _("Ignoring bogus optimal-io size for data device (%u bytes)."), opt_io_size);

	/* If calculated alignment is multiple of default, keep default */
	if (temp_alignment && (default_alignment % temp_alignment))
		*required_alignment = temp_alignment;

	log_dbg(cd, "Topology: IO (%u/%u), offset = %lu; Required alignment is %lu bytes.",
		min_io_size, opt_io_size, *alignment_offset, *required_alignment);
out:
	(void)close(fd);
}
573
574 size_t device_block_size(struct crypt_device *cd, struct device *device)
575 {
576 int fd;
577
578 if (!device)
579 return 0;
580
581 if (device->block_size)
582 return device->block_size;
583
584 fd = open(device->file_path ?: device->path, O_RDONLY);
585 if (fd >= 0) {
586 device->block_size = device_block_size_fd(fd, NULL);
587 close(fd);
588 }
589
590 if (!device->block_size)
591 log_dbg(cd, "Cannot get block size for device %s.", device_path(device));
592
593 return device->block_size;
594 }
595
/*
 * Pick the largest safe encryption sector size for the device: prefer the
 * physical block size when it exceeds the logical one and is sane
 * (> SECTOR_SIZE, <= MAX_SECTOR_SIZE, aligned to the logical size).
 * Falls back to SECTOR_SIZE on any probing error.
 */
size_t device_optimal_encryption_sector_size(struct crypt_device *cd, struct device *device)
{
	int fd;
	size_t phys_block_size;

	if (!device)
		return SECTOR_SIZE;

	fd = open(device->file_path ?: device->path, O_RDONLY);
	if (fd < 0) {
		log_dbg(cd, "Cannot get optimal encryption sector size for device %s.", device_path(device));
		return SECTOR_SIZE;
	}

	/* cache device block size */
	device->block_size = device_block_size_fd(fd, NULL);
	if (!device->block_size) {
		close(fd);
		log_dbg(cd, "Cannot get block size for device %s.", device_path(device));
		return SECTOR_SIZE;
	}

	/* Logical size already at (or beyond) the maximum; use it only when
	 * it is an exact multiple of MAX_SECTOR_SIZE. */
	if (device->block_size >= MAX_SECTOR_SIZE) {
		close(fd);
		return MISALIGNED(device->block_size, MAX_SECTOR_SIZE) ? SECTOR_SIZE : MAX_SECTOR_SIZE;
	}

	phys_block_size = device_block_phys_size_fd(fd);
	close(fd);

	/* Reject a physical size that is not strictly larger, out of range,
	 * or not aligned to the logical block size. */
	if (device->block_size >= phys_block_size || phys_block_size <= SECTOR_SIZE ||
	    phys_block_size > MAX_SECTOR_SIZE || MISALIGNED(phys_block_size, device->block_size))
		return device->block_size;

	return phys_block_size;
}
631
632 int device_read_ahead(struct device *device, uint32_t *read_ahead)
633 {
634 int fd, r = 0;
635 long read_ahead_long;
636
637 if (!device)
638 return 0;
639
640 if ((fd = open(device->path, O_RDONLY)) < 0)
641 return 0;
642
643 r = ioctl(fd, BLKRAGET, &read_ahead_long) ? 0 : 1;
644 close(fd);
645
646 if (r)
647 *read_ahead = (uint32_t) read_ahead_long;
648
649 return r;
650 }
651
652 /* Get data size in bytes */
653 int device_size(struct device *device, uint64_t *size)
654 {
655 struct stat st;
656 int devfd, r = -EINVAL;
657
658 if (!device)
659 return -EINVAL;
660
661 devfd = open(device->path, O_RDONLY);
662 if (devfd == -1)
663 return -EINVAL;
664
665 if (fstat(devfd, &st) < 0)
666 goto out;
667
668 if (S_ISREG(st.st_mode)) {
669 *size = (uint64_t)st.st_size;
670 r = 0;
671 } else if (ioctl(devfd, BLKGETSIZE64, size) >= 0)
672 r = 0;
673 out:
674 close(devfd);
675 return r;
676 }
677
678 /* For a file, allocate the required space */
679 int device_fallocate(struct device *device, uint64_t size)
680 {
681 struct stat st;
682 int devfd, r = -EINVAL;
683
684 if (!device)
685 return -EINVAL;
686
687 devfd = open(device_path(device), O_RDWR);
688 if (devfd == -1)
689 return -EINVAL;
690
691 if (!fstat(devfd, &st) && S_ISREG(st.st_mode) &&
692 ((uint64_t)st.st_size >= size || !posix_fallocate(devfd, 0, size))) {
693 r = 0;
694 if (device->file_path && crypt_loop_resize(device->path))
695 r = -EINVAL;
696 }
697
698 close(devfd);
699 return r;
700 }
701
/* Verify the device can hold data up to req_offset bytes; optionally
 * grow a header file (falloc) to make it fit. */
int device_check_size(struct crypt_device *cd,
		      struct device *device,
		      uint64_t req_offset, int falloc)
{
	uint64_t dev_size;

	if (device_size(device, &dev_size)) {
		log_dbg(cd, "Cannot get device size for device %s.", device_path(device));
		return -EIO;
	}

	log_dbg(cd, "Device size %" PRIu64 ", offset %" PRIu64 ".", dev_size, req_offset);

	if (req_offset <= dev_size)
		return 0;

	/* If it is header file, increase its size */
	if (falloc && !device_fallocate(device, req_offset))
		return 0;

	log_err(cd, _("Device %s is too small. Need at least %" PRIu64 " bytes."),
		device_path(device), req_offset);
	return -EINVAL;
}
727
/*
 * Probe device availability, read-only state and size (in 512B sectors).
 * With DEV_EXCL on a block device the open also uses O_EXCL, so -EBUSY
 * means the device is already mapped or mounted. readonly/size outputs
 * are optional (NULL to skip). Errors other than -EBUSY/-EACCES are
 * reported to the caller as -EINVAL after being logged.
 */
static int device_info(struct crypt_device *cd,
		       struct device *device,
		       enum devcheck device_check,
		       int *readonly, uint64_t *size)
{
	struct stat st;
	int fd = -1, r, flags = 0, real_readonly;
	uint64_t real_size;

	if (!device)
		return -ENOTBLK;

	real_readonly = 0;
	real_size = 0;

	if (stat(device->path, &st) < 0) {
		r = -EINVAL;
		goto out;
	}

	/* never wipe header on mounted device */
	if (device_check == DEV_EXCL && S_ISBLK(st.st_mode))
		flags |= O_EXCL;

	/* Try to open read-write to check whether it is a read-only device */
	/* coverity[toctou] */
	fd = open(device->path, O_RDWR | flags);
	if (fd == -1 && errno == EROFS) {
		real_readonly = 1;
		fd = open(device->path, O_RDONLY | flags);
	}

	if (fd == -1 && device_check == DEV_EXCL && errno == EBUSY) {
		r = -EBUSY;
		goto out;
	}

	if (fd == -1) {
		r = errno ? -errno : -EINVAL;
		goto out;
	}

	r = 0;
	if (S_ISREG(st.st_mode)) {
		//FIXME: add readonly check
		real_size = (uint64_t)st.st_size;
		real_size >>= SECTOR_SHIFT;
	} else {
		/* If the device can be opened read-write, i.e. readonly is still 0, then
		 * check whether BKROGET says that it is read-only. E.g. read-only loop
		 * devices may be opened read-write but are read-only according to BLKROGET
		 */
		if (real_readonly == 0 && (r = ioctl(fd, BLKROGET, &real_readonly)) < 0)
			goto out;

		r = ioctl(fd, BLKGETSIZE64, &real_size);
		if (r >= 0) {
			real_size >>= SECTOR_SHIFT;
			goto out;
		}
	}
out:
	if (fd != -1)
		close(fd);

	/* Report outputs only on success; translate and log failures. */
	switch (r) {
	case 0:
		if (readonly)
			*readonly = real_readonly;
		if (size)
			*size = real_size;
		break;
	case -EBUSY:
		log_err(cd, _("Cannot use device %s which is in use "
			      "(already mapped or mounted)."), device_path(device));
		break;
	case -EACCES:
		log_err(cd, _("Cannot use device %s, permission denied."), device_path(device));
		break;
	default:
		log_err(cd, _("Cannot get info about device %s."), device_path(device));
		r = -EINVAL;
	}

	return r;
}
814
815 int device_check_access(struct crypt_device *cd,
816 struct device *device,
817 enum devcheck device_check)
818 {
819 return device_info(cd, device, device_check, NULL, NULL);
820 }
821
/*
 * Lazily attach a loop device for a regular-file backend (root only).
 * On success device->path points to the loop device and the original
 * file path moves to device->file_path; on failure the path swap is
 * rolled back and the loop device detached.
 */
static int device_internal_prepare(struct crypt_device *cd, struct device *device)
{
	char *loop_device = NULL, *file_path = NULL;
	int r, loop_fd, readonly = 0;

	if (device->init_done)
		return 0;

	if (getuid() || geteuid()) {
		log_err(cd, _("Cannot use a loopback device, "
			      "running as non-root user."));
		return -ENOTSUP;
	}

	log_dbg(cd, "Allocating a free loop device (block size: %zu).",
		device->loop_block_size ?: SECTOR_SIZE);

	/* Keep the loop open, detached on last close. */
	loop_fd = crypt_loop_attach(&loop_device, device->path, 0, 1, &readonly, device->loop_block_size);
	if (loop_fd == -1) {
		log_err(cd, _("Attaching loopback device failed "
			      "(loop device with autoclear flag is required)."));
		free(loop_device);
		return -EINVAL;
	}

	/* Swap in the loop device path before re-probing the device. */
	file_path = device->path;
	device->path = loop_device;

	r = device_ready(cd, device);
	if (r < 0) {
		/* Roll back the swap and detach the loop on failure. */
		device->path = file_path;
		crypt_loop_detach(loop_device);
		free(loop_device);
		return r;
	}

	log_dbg(cd, "Attached loop device block size is %zu bytes.", device_block_size_fd(loop_fd, NULL));

	device->loop_fd = loop_fd;
	device->file_path = file_path;
	device->init_done = 1;

	return 0;
}
867
/*
 * Validate (and optionally compute) the usable size of a device region.
 * device_offset and *size are in 512B sectors. When *size is 0 on entry
 * it is filled with the remaining device size past the offset; otherwise
 * the requested size is checked against the real device size. Sets
 * CRYPT_ACTIVATE_READONLY in *flags for read-only devices.
 */
int device_block_adjust(struct crypt_device *cd,
			struct device *device,
			enum devcheck device_check,
			uint64_t device_offset,
			uint64_t *size,
			uint32_t *flags)
{
	int r, real_readonly;
	uint64_t real_size;

	if (!device)
		return -ENOTBLK;

	/* Attach a loop device first when the backend is a regular file. */
	r = device_internal_prepare(cd, device);
	if (r)
		return r;

	r = device_info(cd, device, device_check, &real_readonly, &real_size);
	if (r)
		return r;

	if (device_offset >= real_size) {
		log_err(cd, _("Requested offset is beyond real size of device %s."),
			device_path(device));
		return -EINVAL;
	}

	if (size && !*size) {
		*size = real_size;
		/* NOTE(review): real_size > device_offset is already guaranteed
		 * by the check above, so this zero check looks unreachable. */
		if (!*size) {
			log_err(cd, _("Device %s has zero size."), device_path(device));
			return -ENOTBLK;
		}
		*size -= device_offset;
	}

	/* in case of size is set by parameter */
	if (size && ((real_size - device_offset) < *size)) {
		log_dbg(cd, "Device %s: offset = %" PRIu64 " requested size = %" PRIu64
			", backing device size = %" PRIu64,
			device->path, device_offset, *size, real_size);
		log_err(cd, _("Device %s is too small."), device_path(device));
		return -EINVAL;
	}

	if (flags && real_readonly)
		*flags |= CRYPT_ACTIVATE_READONLY;

	if (size)
		log_dbg(cd, "Calculated device size is %" PRIu64" sectors (%s), offset %" PRIu64 ".",
			*size, real_readonly ? "RO" : "RW", device_offset);
	return 0;
}
921
/* Round size up to the nearest multiple of block. */
size_t size_round_up(size_t size, size_t block)
{
	size_t rem = size % block;

	return rem ? size + (block - rem) : size;
}
927
928 void device_disable_direct_io(struct device *device)
929 {
930 if (device)
931 device->o_direct = 0;
932 }
933
934 int device_direct_io(const struct device *device)
935 {
936 return device ? device->o_direct : 0;
937 }
938
/*
 * Compare two paths: 1 if both name the same block device or the same
 * regular file, 0 otherwise, -EINVAL when either path cannot be stat'ed.
 */
static int device_compare_path(const char *path1, const char *path2)
{
	struct stat st1, st2;

	if (stat(path1, &st1) < 0 || stat(path2, &st2) < 0)
		return -EINVAL;

	/* Block devices match on device number. */
	if (S_ISBLK(st1.st_mode) && S_ISBLK(st2.st_mode))
		return st1.st_rdev == st2.st_rdev ? 1 : 0;

	/* Regular files match on inode within the same filesystem. */
	if (S_ISREG(st1.st_mode) && S_ISREG(st2.st_mode))
		return (st1.st_ino == st2.st_ino && st1.st_dev == st2.st_dev) ? 1 : 0;

	return 0;
}
955
/* 1 when both handles refer to the same underlying object, 0 otherwise
 * (or a negative error from the path comparison). */
int device_is_identical(struct device *device1, struct device *device2)
{
	if (!device1 || !device2)
		return 0;

	/* Same handle, or identical path strings. */
	if (device1 == device2 ||
	    !strcmp(device_path(device1), device_path(device2)))
		return 1;

	/* Different strings may still name the same object (links, aliases). */
	return device_compare_path(device_path(device1), device_path(device2));
}
969
/* Rotational flag for the backing block device; 0 for non-block backends,
 * -EINVAL on stat failure. */
int device_is_rotational(struct device *device)
{
	struct stat st;

	if (!device || stat(device_path(device), &st) < 0)
		return -EINVAL;

	if (!S_ISBLK(st.st_mode))
		return 0;

	return crypt_dev_is_rotational(major(st.st_rdev), minor(st.st_rdev));
}
985
/* Memory alignment required for IO on the device, probed once and cached. */
size_t device_alignment(struct device *device)
{
	int devfd;

	/* NOTE(review): -EINVAL is implicitly converted to a huge size_t here;
	 * callers presumably never pass NULL — confirm before relying on it. */
	if (!device)
		return -EINVAL;

	/* Probe and cache on first use; stays 0 if the open fails. */
	if (!device->alignment) {
		devfd = open(device_path(device), O_RDONLY);
		if (devfd != -1) {
			device->alignment = device_alignment_fd(devfd);
			close(devfd);
		}
	}

	return device->alignment;
}
1003
1004 void device_set_lock_handle(struct device *device, struct crypt_lock_handle *h)
1005 {
1006 if (device)
1007 device->lh = h;
1008 }
1009
1010 struct crypt_lock_handle *device_get_lock_handle(struct device *device)
1011 {
1012 return device ? device->lh : NULL;
1013 }
1014
/* Take a shared (read) metadata lock; no-op when locking is disabled. */
int device_read_lock(struct crypt_device *cd, struct device *device)
{
	if (!device || !crypt_metadata_locking_enabled())
		return 0;

	return device_read_lock_internal(cd, device) ? -EBUSY : 0;
}
1025
1026 int device_write_lock(struct crypt_device *cd, struct device *device)
1027 {
1028 if (!device || !crypt_metadata_locking_enabled())
1029 return 0;
1030
1031 assert(!device_locked(device->lh) || !device_locked_readonly(device->lh));
1032
1033 return device_write_lock_internal(cd, device);
1034 }
1035
1036 void device_read_unlock(struct crypt_device *cd, struct device *device)
1037 {
1038 if (!device || !crypt_metadata_locking_enabled())
1039 return;
1040
1041 assert(device_locked(device->lh));
1042
1043 device_unlock_internal(cd, device);
1044 }
1045
1046 void device_write_unlock(struct crypt_device *cd, struct device *device)
1047 {
1048 if (!device || !crypt_metadata_locking_enabled())
1049 return;
1050
1051 assert(device_locked(device->lh) && !device_locked_readonly(device->lh));
1052
1053 device_unlock_internal(cd, device);
1054 }
1055
1056 bool device_is_locked(struct device *device)
1057 {
1058 return device ? device_locked(device->lh) : 0;
1059 }
1060
1061 void device_close(struct crypt_device *cd, struct device *device)
1062 {
1063 if (!device)
1064 return;
1065
1066 if (device->ro_dev_fd != -1) {
1067 log_dbg(cd, "Closing read only fd for %s.", device_path(device));
1068 if (close(device->ro_dev_fd))
1069 log_dbg(cd, "Failed to close read only fd for %s.", device_path(device));
1070 device->ro_dev_fd = -1;
1071 }
1072
1073 if (device->dev_fd != -1) {
1074 log_dbg(cd, "Closing read write fd for %s.", device_path(device));
1075 if (close(device->dev_fd))
1076 log_dbg(cd, "Failed to close read write fd for %s.", device_path(device));
1077 device->dev_fd = -1;
1078 }
1079 }
1080
1081 void device_set_block_size(struct device *device, size_t size)
1082 {
1083 if (!device)
1084 return;
1085
1086 device->loop_block_size = size;
1087 }