"Fossies" - the Fresh Open Source Software Archive 
Member "cryptsetup-2.4.3/lib/setup.c" (13 Jan 2022, 162953 Bytes) of package /linux/misc/cryptsetup-2.4.3.tar.xz:
As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style:
standard) with prefixed line numbers and
code folding option.
Alternatively you can here
view or
download the uninterpreted source code file.
For more information about "setup.c" see the
Fossies "Dox" file reference documentation and the latest
Fossies "Diffs" side-by-side code changes report:
2.4.2_vs_2.4.3.
1 /*
2 * libcryptsetup - cryptsetup library
3 *
4 * Copyright (C) 2004 Jana Saout <jana@saout.de>
5 * Copyright (C) 2004-2007 Clemens Fruhwirth <clemens@endorphin.org>
6 * Copyright (C) 2009-2021 Red Hat, Inc. All rights reserved.
7 * Copyright (C) 2009-2021 Milan Broz
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
22 */
23
24 #include <string.h>
25 #include <stdio.h>
26 #include <stdlib.h>
27 #include <stdarg.h>
28 #include <sys/utsname.h>
29 #include <errno.h>
30
31 #include "libcryptsetup.h"
32 #include "luks1/luks.h"
33 #include "luks2/luks2.h"
34 #include "loopaes/loopaes.h"
35 #include "verity/verity.h"
36 #include "tcrypt/tcrypt.h"
37 #include "integrity/integrity.h"
38 #include "bitlk/bitlk.h"
39 #include "utils_device_locking.h"
40 #include "internal.h"
41
42 #define CRYPT_CD_UNRESTRICTED (1 << 0)
43 #define CRYPT_CD_QUIET (1 << 1)
44
45 struct crypt_device {
46 char *type;
47
48 struct device *device;
49 struct device *metadata_device;
50
51 struct volume_key *volume_key;
52 int rng_type;
53 uint32_t compatibility;
54 struct crypt_pbkdf_type pbkdf;
55
56 /* global context scope settings */
57 unsigned key_in_keyring:1;
58
59 uint64_t data_offset;
60 uint64_t metadata_size; /* Used in LUKS2 format */
61 uint64_t keyslots_size; /* Used in LUKS2 format */
62
63 /* Workaround for OOM during parallel activation (like in systemd) */
64 bool memory_hard_pbkdf_lock_enabled;
65 struct crypt_lock_handle *pbkdf_memory_hard_lock;
66
67 union {
68 struct { /* used in CRYPT_LUKS1 */
69 struct luks_phdr hdr;
70 char *cipher_spec;
71 } luks1;
72 struct { /* used in CRYPT_LUKS2 */
73 struct luks2_hdr hdr;
74 char cipher[MAX_CIPHER_LEN]; /* only for compatibility */
75 char cipher_mode[MAX_CIPHER_LEN]; /* only for compatibility */
76 char *keyslot_cipher;
77 unsigned int keyslot_key_size;
78 struct luks2_reencrypt *rh;
79 } luks2;
80 struct { /* used in CRYPT_PLAIN */
81 struct crypt_params_plain hdr;
82 char *cipher_spec;
83 char *cipher;
84 const char *cipher_mode;
85 unsigned int key_size;
86 } plain;
87 struct { /* used in CRYPT_LOOPAES */
88 struct crypt_params_loopaes hdr;
89 char *cipher_spec;
90 char *cipher;
91 const char *cipher_mode;
92 unsigned int key_size;
93 } loopaes;
94 struct { /* used in CRYPT_VERITY */
95 struct crypt_params_verity hdr;
96 const char *root_hash;
97 unsigned int root_hash_size;
98 char *uuid;
99 struct device *fec_device;
100 } verity;
101 struct { /* used in CRYPT_TCRYPT */
102 struct crypt_params_tcrypt params;
103 struct tcrypt_phdr hdr;
104 } tcrypt;
105 struct { /* used in CRYPT_INTEGRITY */
106 struct crypt_params_integrity params;
107 struct volume_key *journal_mac_key;
108 struct volume_key *journal_crypt_key;
109 uint32_t sb_flags;
110 } integrity;
111 struct { /* used in CRYPT_BITLK */
112 struct bitlk_metadata params;
113 char *cipher_spec;
114 } bitlk;
115 struct { /* used if initialized without header by name */
116 char *active_name;
117 /* buffers, must refresh from kernel on every query */
118 char cipher_spec[MAX_CIPHER_LEN*2+1];
119 char cipher[MAX_CIPHER_LEN];
120 const char *cipher_mode;
121 unsigned int key_size;
122 } none;
123 } u;
124
125 /* callbacks definitions */
126 void (*log)(int level, const char *msg, void *usrptr);
127 void *log_usrptr;
128 int (*confirm)(const char *msg, void *usrptr);
129 void *confirm_usrptr;
130 };
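
/*
 * Illustrative note (documentation only, not part of the upstream source):
 * the union above is discriminated by cd->type. Only the member matching
 * the current type string may be touched, e.g. cd->u.luks2.hdr is valid
 * only while isLUKS2(cd->type) holds, and cd->u.none is used when the
 * context was initialized from an active mapping without a header.
 */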
131
132 /* Just to suppress redundant messages about crypto backend */
133 static int _crypto_logged = 0;
134
135 /* Log helper */
136 static void (*_default_log)(int level, const char *msg, void *usrptr) = NULL;
137 static void *_default_log_usrptr = NULL;
138 static int _debug_level = 0;
139
140 /* Library can do metadata locking */
141 static int _metadata_locking = 1;
142
143 /* Library scope detection for kernel keyring support */
144 static int _kernel_keyring_supported;
145
146 /* Library allowed to use kernel keyring for loading VK in kernel crypto layer */
147 static int _vk_via_keyring = 1;
148
149 void crypt_set_debug_level(int level)
150 {
151 _debug_level = level;
152 }
153
154 int crypt_get_debug_level(void)
155 {
156 return _debug_level;
157 }
158
159 void crypt_log(struct crypt_device *cd, int level, const char *msg)
160 {
161 if (!msg)
162 return;
163
164 if (level < _debug_level)
165 return;
166
167 if (cd && cd->log)
168 cd->log(level, msg, cd->log_usrptr);
169 else if (_default_log)
170 _default_log(level, msg, _default_log_usrptr);
171 /* Default to stdout/stderr if there is no callback. */
172 else
173 fprintf(level == CRYPT_LOG_ERROR ? stderr : stdout, "%s", msg);
174 }
175
176 __attribute__((format(printf, 3, 4)))
177 void crypt_logf(struct crypt_device *cd, int level, const char *format, ...)
178 {
179 va_list argp;
180 char target[LOG_MAX_LEN + 2];
181 int len;
182
183 va_start(argp, format);
184
185 len = vsnprintf(&target[0], LOG_MAX_LEN, format, argp);
186 if (len > 0 && len < LOG_MAX_LEN) {
187 /* All verbose and error messages in tools end with EOL. */
188 if (level == CRYPT_LOG_VERBOSE || level == CRYPT_LOG_ERROR ||
189 level == CRYPT_LOG_DEBUG || level == CRYPT_LOG_DEBUG_JSON)
190 strncat(target, "\n", LOG_MAX_LEN);
191
192 crypt_log(cd, level, target);
193 }
194
195 va_end(argp);
196 }
197
198 static const char *mdata_device_path(struct crypt_device *cd)
199 {
200 return device_path(cd->metadata_device ?: cd->device);
201 }
202
203 static const char *data_device_path(struct crypt_device *cd)
204 {
205 return device_path(cd->device);
206 }
207
208 /* internal only */
209 struct device *crypt_metadata_device(struct crypt_device *cd)
210 {
211 return cd->metadata_device ?: cd->device;
212 }
213
214 struct device *crypt_data_device(struct crypt_device *cd)
215 {
216 return cd->device;
217 }
218
219 int init_crypto(struct crypt_device *ctx)
220 {
221 struct utsname uts;
222 int r;
223
224 r = crypt_random_init(ctx);
225 if (r < 0) {
226 log_err(ctx, _("Cannot initialize crypto RNG backend."));
227 return r;
228 }
229
230 r = crypt_backend_init(crypt_fips_mode());
231 if (r < 0)
232 log_err(ctx, _("Cannot initialize crypto backend."));
233
234 if (!r && !_crypto_logged) {
235 log_dbg(ctx, "Crypto backend (%s) initialized in cryptsetup library version %s.",
236 crypt_backend_version(), PACKAGE_VERSION);
237 if (!uname(&uts))
238 log_dbg(ctx, "Detected kernel %s %s %s.",
239 uts.sysname, uts.release, uts.machine);
240 _crypto_logged = 1;
241 }
242
243 return r;
244 }
245
246 static int process_key(struct crypt_device *cd, const char *hash_name,
247 size_t key_size, const char *pass, size_t passLen,
248 struct volume_key **vk)
249 {
250 int r;
251
252 if (!key_size)
253 return -EINVAL;
254
255 *vk = crypt_alloc_volume_key(key_size, NULL);
256 if (!*vk)
257 return -ENOMEM;
258
259 if (hash_name) {
260 r = crypt_plain_hash(cd, hash_name, (*vk)->key, key_size, pass, passLen);
261 if (r < 0) {
262 if (r == -ENOENT)
263 log_err(cd, _("Hash algorithm %s not supported."),
264 hash_name);
265 else
266 log_err(cd, _("Key processing error (using hash %s)."),
267 hash_name);
268 crypt_free_volume_key(*vk);
269 *vk = NULL;
270 return -EINVAL;
271 }
272 } else if (passLen > key_size) {
273 memcpy((*vk)->key, pass, key_size);
274 } else {
275 memcpy((*vk)->key, pass, passLen);
276 }
277
278 return 0;
279 }
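
/*
 * Illustrative note on the helper above (documentation only): for plain and
 * loop-AES mappings the volume key is derived directly from the passphrase.
 * With a hash name, key_size bytes are produced via crypt_plain_hash();
 * without one, the passphrase bytes are copied as-is and truncated to
 * key_size when the passphrase is longer. Hypothetical call:
 *
 *   struct volume_key *vk = NULL;
 *   int r = process_key(cd, "sha256", 32, "secret", 6, &vk); // 256-bit key
 */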
280
281 static int isPLAIN(const char *type)
282 {
283 return (type && !strcmp(CRYPT_PLAIN, type));
284 }
285
286 static int isLUKS1(const char *type)
287 {
288 return (type && !strcmp(CRYPT_LUKS1, type));
289 }
290
291 static int isLUKS2(const char *type)
292 {
293 return (type && !strcmp(CRYPT_LUKS2, type));
294 }
295
296 static int isLUKS(const char *type)
297 {
298 return (isLUKS2(type) || isLUKS1(type));
299 }
300
301 static int isLOOPAES(const char *type)
302 {
303 return (type && !strcmp(CRYPT_LOOPAES, type));
304 }
305
306 static int isVERITY(const char *type)
307 {
308 return (type && !strcmp(CRYPT_VERITY, type));
309 }
310
311 static int isTCRYPT(const char *type)
312 {
313 return (type && !strcmp(CRYPT_TCRYPT, type));
314 }
315
316 static int isINTEGRITY(const char *type)
317 {
318 return (type && !strcmp(CRYPT_INTEGRITY, type));
319 }
320
321 static int isBITLK(const char *type)
322 {
323 return (type && !strcmp(CRYPT_BITLK, type));
324 }
325
326 static int _onlyLUKS(struct crypt_device *cd, uint32_t cdflags)
327 {
328 int r = 0;
329
330 if (cd && !cd->type) {
331 if (!(cdflags & CRYPT_CD_QUIET))
332 log_err(cd, _("Cannot determine device type. Incompatible activation of device?"));
333 r = -EINVAL;
334 }
335
336 if (!cd || !isLUKS(cd->type)) {
337 if (!(cdflags & CRYPT_CD_QUIET))
338 log_err(cd, _("This operation is supported only for LUKS device."));
339 r = -EINVAL;
340 }
341
342 if (r || (cdflags & CRYPT_CD_UNRESTRICTED) || isLUKS1(cd->type))
343 return r;
344
345 return LUKS2_unmet_requirements(cd, &cd->u.luks2.hdr, 0, cdflags & CRYPT_CD_QUIET);
346 }
347
348 static int onlyLUKS(struct crypt_device *cd)
349 {
350 return _onlyLUKS(cd, 0);
351 }
352
353 static int _onlyLUKS2(struct crypt_device *cd, uint32_t cdflags, uint32_t mask)
354 {
355 int r = 0;
356
357 if (cd && !cd->type) {
358 if (!(cdflags & CRYPT_CD_QUIET))
359 log_err(cd, _("Cannot determine device type. Incompatible activation of device?"));
360 r = -EINVAL;
361 }
362
363 if (!cd || !isLUKS2(cd->type)) {
364 if (!(cdflags & CRYPT_CD_QUIET))
365 log_err(cd, _("This operation is supported only for LUKS2 device."));
366 r = -EINVAL;
367 }
368
369 if (r || (cdflags & CRYPT_CD_UNRESTRICTED))
370 return r;
371
372 return LUKS2_unmet_requirements(cd, &cd->u.luks2.hdr, mask, cdflags & CRYPT_CD_QUIET);
373 }
374
375 /* Internal only */
376 int onlyLUKS2(struct crypt_device *cd)
377 {
378 return _onlyLUKS2(cd, 0, 0);
379 }
380
381 /* Internal only */
382 int onlyLUKS2mask(struct crypt_device *cd, uint32_t mask)
383 {
384 return _onlyLUKS2(cd, 0, mask);
385 }
386
387 static void crypt_set_null_type(struct crypt_device *cd)
388 {
389 if (!cd->type)
390 return;
391
392 free(cd->type);
393 cd->type = NULL;
394 cd->u.none.active_name = NULL;
395 cd->data_offset = 0;
396 cd->metadata_size = 0;
397 cd->keyslots_size = 0;
398 }
399
400 static void crypt_reset_null_type(struct crypt_device *cd)
401 {
402 if (cd->type)
403 return;
404
405 free(cd->u.none.active_name);
406 cd->u.none.active_name = NULL;
407 }
408
409 /* keyslot helpers */
410 static int keyslot_verify_or_find_empty(struct crypt_device *cd, int *keyslot)
411 {
412 crypt_keyslot_info ki;
413
414 if (*keyslot == CRYPT_ANY_SLOT) {
415 if (isLUKS1(cd->type))
416 *keyslot = LUKS_keyslot_find_empty(&cd->u.luks1.hdr);
417 else
418 *keyslot = LUKS2_keyslot_find_empty(cd, &cd->u.luks2.hdr, 0);
419 if (*keyslot < 0) {
420 log_err(cd, _("All key slots full."));
421 return -EINVAL;
422 }
423 }
424
425 if (isLUKS1(cd->type))
426 ki = LUKS_keyslot_info(&cd->u.luks1.hdr, *keyslot);
427 else
428 ki = LUKS2_keyslot_info(&cd->u.luks2.hdr, *keyslot);
429 switch (ki) {
430 case CRYPT_SLOT_INVALID:
431 log_err(cd, _("Key slot %d is invalid, please select between 0 and %d."),
432 *keyslot, crypt_keyslot_max(cd->type) - 1);
433 return -EINVAL;
434 case CRYPT_SLOT_INACTIVE:
435 break;
436 default:
437 log_err(cd, _("Key slot %d is full, please select another one."),
438 *keyslot);
439 return -EINVAL;
440 }
441
442 log_dbg(cd, "Selected keyslot %d.", *keyslot);
443 return 0;
444 }
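
/*
 * Illustrative note (documentation only): keyslot-add calls such as
 * crypt_keyslot_add_by_volume_key() may pass CRYPT_ANY_SLOT, in which case
 * this helper picks the first free slot. Hedged sketch with a hypothetical
 * passphrase; NULL volume key means the key already stored in the context:
 *
 *   int slot = crypt_keyslot_add_by_volume_key(cd, CRYPT_ANY_SLOT,
 *                                              NULL, 0, "passphrase", 10);
 *   // slot >= 0 is the keyslot that was selected and filled
 */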
445
446 /*
447 * compares UUIDs returned by device-mapper (stripped by cryptsetup) and the UUID in the header
448 */
449 int crypt_uuid_cmp(const char *dm_uuid, const char *hdr_uuid)
450 {
451 int i, j;
452 char *str;
453
454 if (!dm_uuid || !hdr_uuid)
455 return -EINVAL;
456
457 str = strchr(dm_uuid, '-');
458 if (!str)
459 return -EINVAL;
460
461 for (i = 0, j = 1; hdr_uuid[i]; i++) {
462 if (hdr_uuid[i] == '-')
463 continue;
464
465 if (!str[j] || str[j] == '-')
466 return -EINVAL;
467
468 if (str[j] != hdr_uuid[i])
469 return -EINVAL;
470 j++;
471 }
472
473 return 0;
474 }
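
/*
 * Illustrative note (documentation only): dm_uuid is expected with its type
 * prefix already stripped, i.e. "<TYPE>-<32 hex digits>-<name>", for example
 * "LUKS2-0123456789abcdef0123456789abcdef-data" (hypothetical value), while
 * hdr_uuid is the canonical dashed form "01234567-89ab-cdef-0123-456789abcdef".
 * The loop above starts right after the first '-' in dm_uuid and skips the
 * dashes in hdr_uuid, so the two compare equal when the hex digits agree.
 */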
475
476 /*
477 * compares type of active device to provided string (only if there is no explicit type)
478 */
479 static int crypt_uuid_type_cmp(struct crypt_device *cd, const char *type)
480 {
481 struct crypt_dm_active_device dmd;
482 size_t len;
483 int r;
484
485 /* Must use the on-disk header if we already know the type here */
486 if (cd->type || !cd->u.none.active_name)
487 return -EINVAL;
488
489 log_dbg(cd, "Checking if active device %s without header has UUID type %s.",
490 cd->u.none.active_name, type);
491
492 r = dm_query_device(cd, cd->u.none.active_name, DM_ACTIVE_UUID, &dmd);
493 if (r < 0)
494 return r;
495
496 r = -ENODEV;
497 len = strlen(type);
498 if (dmd.uuid && strlen(dmd.uuid) > len &&
499 !strncmp(dmd.uuid, type, len) && dmd.uuid[len] == '-')
500 r = 0;
501
502 free(CONST_CAST(void*)dmd.uuid);
503 return r;
504 }
505
506 int PLAIN_activate(struct crypt_device *cd,
507 const char *name,
508 struct volume_key *vk,
509 uint64_t size,
510 uint32_t flags)
511 {
512 int r;
513 struct crypt_dm_active_device dmd = {
514 .flags = flags,
515 .size = size,
516 };
517
518 log_dbg(cd, "Trying to activate PLAIN device %s using cipher %s.",
519 name, crypt_get_cipher_spec(cd));
520
521 if (MISALIGNED(size, device_block_size(cd, crypt_data_device(cd)) >> SECTOR_SHIFT)) {
522 log_err(cd, _("Device size is not aligned to device logical block size."));
523 return -EINVAL;
524 }
525
526 r = dm_crypt_target_set(&dmd.segment, 0, dmd.size, crypt_data_device(cd),
527 vk, crypt_get_cipher_spec(cd), crypt_get_iv_offset(cd),
528 crypt_get_data_offset(cd), crypt_get_integrity(cd),
529 crypt_get_integrity_tag_size(cd), crypt_get_sector_size(cd));
530 if (r < 0)
531 return r;
532
533 r = create_or_reload_device(cd, name, CRYPT_PLAIN, &dmd);
534
535 dm_targets_free(cd, &dmd);
536 return r;
537 }
538
539 int crypt_confirm(struct crypt_device *cd, const char *msg)
540 {
541 if (!cd || !cd->confirm)
542 return 1;
543 else
544 return cd->confirm(msg, cd->confirm_usrptr);
545 }
546
547 void crypt_set_log_callback(struct crypt_device *cd,
548 void (*log)(int level, const char *msg, void *usrptr),
549 void *usrptr)
550 {
551 if (!cd) {
552 _default_log = log;
553 _default_log_usrptr = usrptr;
554 } else {
555 cd->log = log;
556 cd->log_usrptr = usrptr;
557 }
558 }
559
560 void crypt_set_confirm_callback(struct crypt_device *cd,
561 int (*confirm)(const char *msg, void *usrptr),
562 void *usrptr)
563 {
564 if (cd) {
565 cd->confirm = confirm;
566 cd->confirm_usrptr = usrptr;
567 }
568 }
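
/*
 * Illustrative usage sketch (documentation only; function names below are
 * hypothetical): registering per-context log and confirm callbacks.
 *
 *   static void my_log(int level, const char *msg, void *usrptr)
 *   {
 *       fprintf(level == CRYPT_LOG_ERROR ? stderr : stdout, "%s", msg);
 *   }
 *
 *   static int my_confirm(const char *msg, void *usrptr)
 *   {
 *       return 1;  // non-zero means "confirmed"
 *   }
 *
 *   crypt_set_log_callback(cd, my_log, NULL);
 *   crypt_set_confirm_callback(cd, my_confirm, NULL);
 *   // passing cd == NULL to crypt_set_log_callback installs the global default
 */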
569
570 const char *crypt_get_dir(void)
571 {
572 return dm_get_dir();
573 }
574
575 int crypt_init(struct crypt_device **cd, const char *device)
576 {
577 struct crypt_device *h = NULL;
578 int r;
579
580 if (!cd)
581 return -EINVAL;
582
583 log_dbg(NULL, "Allocating context for crypt device %s.", device ?: "(none)");
584 #if !HAVE_DECL_O_CLOEXEC
585 log_dbg(NULL, "Running without O_CLOEXEC.");
586 #endif
587
588 if (!(h = malloc(sizeof(struct crypt_device))))
589 return -ENOMEM;
590
591 memset(h, 0, sizeof(*h));
592
593 r = device_alloc(NULL, &h->device, device);
594 if (r < 0) {
595 free(h);
596 return r;
597 }
598
599 dm_backend_init(NULL);
600
601 h->rng_type = crypt_random_default_key_rng();
602
603 *cd = h;
604 return 0;
605 }
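
/*
 * Illustrative usage sketch (documentation only; the device path is
 * hypothetical): typical context lifecycle around crypt_init().
 *
 *   struct crypt_device *cd = NULL;
 *   int r = crypt_init(&cd, "/dev/sdb1");
 *   if (r < 0)
 *       return r;
 *   r = crypt_load(cd, CRYPT_LUKS2, NULL);   // read and parse the on-disk header
 *   ...
 *   crypt_free(cd);                          // release the context
 */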
606
607 static int crypt_check_data_device_size(struct crypt_device *cd)
608 {
609 int r;
610 uint64_t size, size_min;
611
612 /* Check data device size, require at least header or one sector */
613 size_min = crypt_get_data_offset(cd) << SECTOR_SHIFT ?: SECTOR_SIZE;
614
615 r = device_size(cd->device, &size);
616 if (r < 0)
617 return r;
618
619 if (size < size_min) {
620 log_err(cd, _("Header detected but device %s is too small."),
621 device_path(cd->device));
622 return -EINVAL;
623 }
624
625 return r;
626 }
627
628 static int _crypt_set_data_device(struct crypt_device *cd, const char *device)
629 {
630 struct device *dev = NULL;
631 int r;
632
633 r = device_alloc(cd, &dev, device);
634 if (r < 0)
635 return r;
636
637 if (!cd->metadata_device) {
638 cd->metadata_device = cd->device;
639 } else
640 device_free(cd, cd->device);
641
642 cd->device = dev;
643
644 r = crypt_check_data_device_size(cd);
645 if (!r && isLUKS2(cd->type))
646 device_set_block_size(crypt_data_device(cd), LUKS2_get_sector_size(&cd->u.luks2.hdr));
647
648 return r;
649 }
650
651 int crypt_set_data_device(struct crypt_device *cd, const char *device)
652 {
653 /* metadata device must be set */
654 if (!cd || !cd->device || !device)
655 return -EINVAL;
656
657 log_dbg(cd, "Setting ciphertext data device to %s.", device ?: "(none)");
658
659 if (!isLUKS1(cd->type) && !isLUKS2(cd->type) && !isVERITY(cd->type) &&
660 !isINTEGRITY(cd->type) && !isTCRYPT(cd->type)) {
661 log_err(cd, _("This operation is not supported for this device type."));
662 return -EINVAL;
663 }
664
665 if (isLUKS2(cd->type) && crypt_get_luks2_reencrypt(cd)) {
666 log_err(cd, _("Illegal operation with reencryption in-progress."));
667 return -EINVAL;
668 }
669
670 return _crypt_set_data_device(cd, device);
671 }
672
673 int crypt_init_data_device(struct crypt_device **cd, const char *device, const char *data_device)
674 {
675 int r;
676
677 if (!cd)
678 return -EINVAL;
679
680 r = crypt_init(cd, device);
681 if (r || !data_device || !strcmp(device, data_device))
682 return r;
683
684 log_dbg(NULL, "Setting ciphertext data device to %s.", data_device);
685 r = _crypt_set_data_device(*cd, data_device);
686 if (r) {
687 crypt_free(*cd);
688 *cd = NULL;
689 }
690
691 return r;
692 }
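
/*
 * Illustrative usage sketch (documentation only; paths are hypothetical):
 * detached LUKS header, i.e. metadata on one device, ciphertext data on
 * another. Roughly equivalent to crypt_init() on the header device followed
 * by setting the data device.
 *
 *   struct crypt_device *cd = NULL;
 *   int r = crypt_init_data_device(&cd, "/path/to/header.img", "/dev/sdb1");
 *   if (!r)
 *       r = crypt_load(cd, CRYPT_LUKS2, NULL);
 */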
693
694
695 /* internal only */
696 struct crypt_pbkdf_type *crypt_get_pbkdf(struct crypt_device *cd)
697 {
698 return &cd->pbkdf;
699 }
700
701 /*
702 * crypt_load() helpers
703 */
704 static int _crypt_load_luks2(struct crypt_device *cd, int reload, int repair)
705 {
706 int r;
707 char *type = NULL;
708 struct luks2_hdr hdr2 = {};
709
710 log_dbg(cd, "%soading LUKS2 header (repair %sabled).", reload ? "Rel" : "L", repair ? "en" : "dis");
711
712 r = LUKS2_hdr_read(cd, &hdr2, repair);
713 if (r)
714 return r;
715
716 if (!reload && !(type = strdup(CRYPT_LUKS2))) {
717 r = -ENOMEM;
718 goto out;
719 }
720
721 if (verify_pbkdf_params(cd, &cd->pbkdf)) {
722 r = init_pbkdf_type(cd, NULL, CRYPT_LUKS2);
723 if (r)
724 goto out;
725 }
726
727 if (reload) {
728 LUKS2_hdr_free(cd, &cd->u.luks2.hdr);
729 free(cd->u.luks2.keyslot_cipher);
730 } else
731 cd->type = type;
732
733 r = 0;
734 memcpy(&cd->u.luks2.hdr, &hdr2, sizeof(hdr2));
735 cd->u.luks2.keyslot_cipher = NULL;
736 cd->u.luks2.rh = NULL;
737
738 out:
739 if (r) {
740 free(type);
741 LUKS2_hdr_free(cd, &hdr2);
742 }
743 return r;
744 }
745
746 static void _luks2_reload(struct crypt_device *cd)
747 {
748 if (!cd || !isLUKS2(cd->type))
749 return;
750
751 (void) _crypt_load_luks2(cd, 1, 0);
752 }
753
754 static int _crypt_load_luks(struct crypt_device *cd, const char *requested_type,
755 int require_header, int repair)
756 {
757 char *cipher_spec;
758 struct luks_phdr hdr = {};
759 int r, version;
760
761 r = init_crypto(cd);
762 if (r < 0)
763 return r;
764
765 /* This will return 0 if primary LUKS2 header is damaged */
766 version = LUKS2_hdr_version_unlocked(cd, NULL);
767
768 if ((isLUKS1(requested_type) && version == 2) ||
769 (isLUKS2(requested_type) && version == 1))
770 return -EINVAL;
771
772 if (requested_type)
773 version = 0;
774
775 if (isLUKS1(requested_type) || version == 1) {
776 if (isLUKS2(cd->type)) {
777 log_dbg(cd, "Context is already initialized to type %s", cd->type);
778 return -EINVAL;
779 }
780
781 if (verify_pbkdf_params(cd, &cd->pbkdf)) {
782 r = init_pbkdf_type(cd, NULL, CRYPT_LUKS1);
783 if (r)
784 return r;
785 }
786
787 r = LUKS_read_phdr(&hdr, require_header, repair, cd);
788 if (r)
789 goto out;
790
791 if (!cd->type && !(cd->type = strdup(CRYPT_LUKS1))) {
792 r = -ENOMEM;
793 goto out;
794 }
795
796 /* Set hash to the same as in the loaded header */
797 if (!cd->pbkdf.hash || strcmp(cd->pbkdf.hash, hdr.hashSpec)) {
798 free(CONST_CAST(void*)cd->pbkdf.hash);
799 cd->pbkdf.hash = strdup(hdr.hashSpec);
800 if (!cd->pbkdf.hash) {
801 r = -ENOMEM;
802 goto out;
803 }
804 }
805
806 if (asprintf(&cipher_spec, "%s-%s", hdr.cipherName, hdr.cipherMode) < 0) {
807 r = -ENOMEM;
808 goto out;
809 }
810
811 free(cd->u.luks1.cipher_spec);
812 cd->u.luks1.cipher_spec = cipher_spec;
813
814 memcpy(&cd->u.luks1.hdr, &hdr, sizeof(hdr));
815 } else if (isLUKS2(requested_type) || version == 2 || version == 0) {
816 if (isLUKS1(cd->type)) {
817 log_dbg(cd, "Context is already initialized to type %s", cd->type);
818 return -EINVAL;
819 }
820
821 /*
822 * Current LUKS2 repair just overrides blkid probes
823 * and performs auto-recovery if possible. This is safe
824 * unless future LUKS2 repair code does something more
825 * sophisticated. In such a case we would need to check
826 * the LUKS2 requirements and decide whether it is safe
827 * to perform the repair.
828 */
829 r = _crypt_load_luks2(cd, cd->type != NULL, repair);
830 if (!r)
831 device_set_block_size(crypt_data_device(cd), LUKS2_get_sector_size(&cd->u.luks2.hdr));
832 } else {
833 if (version > 2)
834 log_err(cd, _("Unsupported LUKS version %d."), version);
835 r = -EINVAL;
836 }
837 out:
838 crypt_safe_memzero(&hdr, sizeof(hdr));
839
840 return r;
841 }
842
843 static int _crypt_load_tcrypt(struct crypt_device *cd, struct crypt_params_tcrypt *params)
844 {
845 int r;
846
847 if (!params)
848 return -EINVAL;
849
850 r = init_crypto(cd);
851 if (r < 0)
852 return r;
853
854 memcpy(&cd->u.tcrypt.params, params, sizeof(*params));
855
856 r = TCRYPT_read_phdr(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params);
857
858 cd->u.tcrypt.params.passphrase = NULL;
859 cd->u.tcrypt.params.passphrase_size = 0;
860 cd->u.tcrypt.params.keyfiles = NULL;
861 cd->u.tcrypt.params.keyfiles_count = 0;
862 cd->u.tcrypt.params.veracrypt_pim = 0;
863
864 if (r < 0)
865 return r;
866
867 if (!cd->type && !(cd->type = strdup(CRYPT_TCRYPT)))
868 return -ENOMEM;
869
870 return r;
871 }
872
873 static int _crypt_load_verity(struct crypt_device *cd, struct crypt_params_verity *params)
874 {
875 int r;
876 size_t sb_offset = 0;
877
878 r = init_crypto(cd);
879 if (r < 0)
880 return r;
881
882 if (params && params->flags & CRYPT_VERITY_NO_HEADER)
883 return -EINVAL;
884
885 if (params)
886 sb_offset = params->hash_area_offset;
887
888 r = VERITY_read_sb(cd, sb_offset, &cd->u.verity.uuid, &cd->u.verity.hdr);
889 if (r < 0)
890 return r;
891
892 if (!cd->type && !(cd->type = strdup(CRYPT_VERITY))) {
893 free(CONST_CAST(void*)cd->u.verity.hdr.hash_name);
894 free(CONST_CAST(void*)cd->u.verity.hdr.salt);
895 free(cd->u.verity.uuid);
896 crypt_safe_memzero(&cd->u.verity.hdr, sizeof(cd->u.verity.hdr));
897 return -ENOMEM;
898 }
899
900 if (params)
901 cd->u.verity.hdr.flags = params->flags;
902
903 /* Hash availability checked in sb load */
904 cd->u.verity.root_hash_size = crypt_hash_size(cd->u.verity.hdr.hash_name);
905 if (cd->u.verity.root_hash_size > 4096)
906 return -EINVAL;
907
908 if (params && params->data_device &&
909 (r = crypt_set_data_device(cd, params->data_device)) < 0)
910 return r;
911
912 if (params && params->fec_device) {
913 r = device_alloc(cd, &cd->u.verity.fec_device, params->fec_device);
914 if (r < 0)
915 return r;
916 cd->u.verity.hdr.fec_area_offset = params->fec_area_offset;
917 cd->u.verity.hdr.fec_roots = params->fec_roots;
918 }
919
920 return r;
921 }
922
923 static int _crypt_load_integrity(struct crypt_device *cd,
924 struct crypt_params_integrity *params)
925 {
926 int r;
927
928 r = init_crypto(cd);
929 if (r < 0)
930 return r;
931
932 r = INTEGRITY_read_sb(cd, &cd->u.integrity.params, &cd->u.integrity.sb_flags);
933 if (r < 0)
934 return r;
935
936 // FIXME: add checks for fields in integrity sb vs params
937
938 if (params) {
939 cd->u.integrity.params.journal_watermark = params->journal_watermark;
940 cd->u.integrity.params.journal_commit_time = params->journal_commit_time;
941 cd->u.integrity.params.buffer_sectors = params->buffer_sectors;
942 // FIXME: check ENOMEM
943 if (params->integrity)
944 cd->u.integrity.params.integrity = strdup(params->integrity);
945 cd->u.integrity.params.integrity_key_size = params->integrity_key_size;
946 if (params->journal_integrity)
947 cd->u.integrity.params.journal_integrity = strdup(params->journal_integrity);
948 if (params->journal_crypt)
949 cd->u.integrity.params.journal_crypt = strdup(params->journal_crypt);
950
951 if (params->journal_crypt_key) {
952 cd->u.integrity.journal_crypt_key =
953 crypt_alloc_volume_key(params->journal_crypt_key_size,
954 params->journal_crypt_key);
955 if (!cd->u.integrity.journal_crypt_key)
956 return -ENOMEM;
957 }
958 if (params->journal_integrity_key) {
959 cd->u.integrity.journal_mac_key =
960 crypt_alloc_volume_key(params->journal_integrity_key_size,
961 params->journal_integrity_key);
962 if (!cd->u.integrity.journal_mac_key)
963 return -ENOMEM;
964 }
965 }
966
967 if (!cd->type && !(cd->type = strdup(CRYPT_INTEGRITY))) {
968 free(CONST_CAST(void*)cd->u.integrity.params.integrity);
969 return -ENOMEM;
970 }
971
972 return 0;
973 }
974
975 static int _crypt_load_bitlk(struct crypt_device *cd,
976 struct bitlk_metadata *params __attribute__((unused)))
977 {
978 int r;
979
980 r = init_crypto(cd);
981 if (r < 0)
982 return r;
983
984 r = BITLK_read_sb(cd, &cd->u.bitlk.params);
985 if (r < 0)
986 return r;
987
988 if (asprintf(&cd->u.bitlk.cipher_spec, "%s-%s",
989 cd->u.bitlk.params.cipher, cd->u.bitlk.params.cipher_mode) < 0) {
990 cd->u.bitlk.cipher_spec = NULL;
991 return -ENOMEM;
992 }
993
994 if (!cd->type && !(cd->type = strdup(CRYPT_BITLK)))
995 return -ENOMEM;
996
997 device_set_block_size(crypt_data_device(cd), cd->u.bitlk.params.sector_size);
998
999 return 0;
1000 }
1001
1002 int crypt_load(struct crypt_device *cd,
1003 const char *requested_type,
1004 void *params)
1005 {
1006 int r;
1007
1008 if (!cd)
1009 return -EINVAL;
1010
1011 log_dbg(cd, "Trying to load %s crypt type from device %s.",
1012 requested_type ?: "any", mdata_device_path(cd) ?: "(none)");
1013
1014 if (!crypt_metadata_device(cd))
1015 return -EINVAL;
1016
1017 crypt_reset_null_type(cd);
1018 cd->data_offset = 0;
1019 cd->metadata_size = 0;
1020 cd->keyslots_size = 0;
1021
1022 if (!requested_type || isLUKS1(requested_type) || isLUKS2(requested_type)) {
1023 if (cd->type && !isLUKS1(cd->type) && !isLUKS2(cd->type)) {
1024 log_dbg(cd, "Context is already initialized to type %s", cd->type);
1025 return -EINVAL;
1026 }
1027
1028 r = _crypt_load_luks(cd, requested_type, 1, 0);
1029 } else if (isVERITY(requested_type)) {
1030 if (cd->type && !isVERITY(cd->type)) {
1031 log_dbg(cd, "Context is already initialized to type %s", cd->type);
1032 return -EINVAL;
1033 }
1034 r = _crypt_load_verity(cd, params);
1035 } else if (isTCRYPT(requested_type)) {
1036 if (cd->type && !isTCRYPT(cd->type)) {
1037 log_dbg(cd, "Context is already initialized to type %s", cd->type);
1038 return -EINVAL;
1039 }
1040 r = _crypt_load_tcrypt(cd, params);
1041 } else if (isINTEGRITY(requested_type)) {
1042 if (cd->type && !isINTEGRITY(cd->type)) {
1043 log_dbg(cd, "Context is already initialized to type %s", cd->type);
1044 return -EINVAL;
1045 }
1046 r = _crypt_load_integrity(cd, params);
1047 } else if (isBITLK(requested_type)) {
1048 if (cd->type && !isBITLK(cd->type)) {
1049 log_dbg(cd, "Context is already initialized to type %s", cd->type);
1050 return -EINVAL;
1051 }
1052 r = _crypt_load_bitlk(cd, params);
1053 } else
1054 return -EINVAL;
1055
1056 return r;
1057 }
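
/*
 * Illustrative usage sketch (documentation only; name and passphrase are
 * hypothetical): loading the on-disk header and then activating the device.
 *
 *   r = crypt_load(cd, NULL, NULL);          // NULL type autodetects LUKS1/LUKS2
 *   if (!r)
 *       r = crypt_activate_by_passphrase(cd, "data", CRYPT_ANY_SLOT,
 *                                        "passphrase", 10, 0);
 */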
1058
1059 /*
1060 * crypt_init() helpers
1061 */
1062 static int _init_by_name_crypt_none(struct crypt_device *cd)
1063 {
1064 int r;
1065 char _mode[MAX_CIPHER_LEN];
1066 struct crypt_dm_active_device dmd;
1067 struct dm_target *tgt = &dmd.segment;
1068
1069 if (cd->type || !cd->u.none.active_name)
1070 return -EINVAL;
1071
1072 r = dm_query_device(cd, cd->u.none.active_name,
1073 DM_ACTIVE_CRYPT_CIPHER |
1074 DM_ACTIVE_CRYPT_KEYSIZE, &dmd);
1075 if (r < 0)
1076 return r;
1077 if (!single_segment(&dmd) || tgt->type != DM_CRYPT)
1078 r = -EINVAL;
1079 if (r >= 0)
1080 r = crypt_parse_name_and_mode(tgt->u.crypt.cipher,
1081 cd->u.none.cipher, NULL,
1082 _mode);
1083
1084 if (!r) {
1085 r = snprintf(cd->u.none.cipher_spec, sizeof(cd->u.none.cipher_spec),
1086 "%s-%s", cd->u.none.cipher, _mode);
1087 if (r < 0 || (size_t)r >= sizeof(cd->u.none.cipher_spec))
1088 r = -EINVAL;
1089 else {
1090 cd->u.none.cipher_mode = cd->u.none.cipher_spec + strlen(cd->u.none.cipher) + 1;
1091 cd->u.none.key_size = tgt->u.crypt.vk->keylength;
1092 r = 0;
1093 }
1094 }
1095
1096 dm_targets_free(cd, &dmd);
1097 return r;
1098 }
1099
1100 static const char *LUKS_UUID(struct crypt_device *cd)
1101 {
1102 if (!cd)
1103 return NULL;
1104 else if (isLUKS1(cd->type))
1105 return cd->u.luks1.hdr.uuid;
1106 else if (isLUKS2(cd->type))
1107 return cd->u.luks2.hdr.uuid;
1108
1109 return NULL;
1110 }
1111
1112 static void crypt_free_type(struct crypt_device *cd)
1113 {
1114 if (isPLAIN(cd->type)) {
1115 free(CONST_CAST(void*)cd->u.plain.hdr.hash);
1116 free(cd->u.plain.cipher);
1117 free(cd->u.plain.cipher_spec);
1118 } else if (isLUKS2(cd->type)) {
1119 LUKS2_reencrypt_free(cd, cd->u.luks2.rh);
1120 LUKS2_hdr_free(cd, &cd->u.luks2.hdr);
1121 free(cd->u.luks2.keyslot_cipher);
1122 } else if (isLUKS1(cd->type)) {
1123 free(cd->u.luks1.cipher_spec);
1124 } else if (isLOOPAES(cd->type)) {
1125 free(CONST_CAST(void*)cd->u.loopaes.hdr.hash);
1126 free(cd->u.loopaes.cipher);
1127 free(cd->u.loopaes.cipher_spec);
1128 } else if (isVERITY(cd->type)) {
1129 free(CONST_CAST(void*)cd->u.verity.hdr.hash_name);
1130 free(CONST_CAST(void*)cd->u.verity.hdr.data_device);
1131 free(CONST_CAST(void*)cd->u.verity.hdr.hash_device);
1132 free(CONST_CAST(void*)cd->u.verity.hdr.fec_device);
1133 free(CONST_CAST(void*)cd->u.verity.hdr.salt);
1134 free(CONST_CAST(void*)cd->u.verity.root_hash);
1135 free(cd->u.verity.uuid);
1136 device_free(cd, cd->u.verity.fec_device);
1137 } else if (isINTEGRITY(cd->type)) {
1138 free(CONST_CAST(void*)cd->u.integrity.params.integrity);
1139 free(CONST_CAST(void*)cd->u.integrity.params.journal_integrity);
1140 free(CONST_CAST(void*)cd->u.integrity.params.journal_crypt);
1141 crypt_free_volume_key(cd->u.integrity.journal_crypt_key);
1142 crypt_free_volume_key(cd->u.integrity.journal_mac_key);
1143 } else if (isBITLK(cd->type)) {
1144 free(cd->u.bitlk.cipher_spec);
1145 BITLK_bitlk_metadata_free(&cd->u.bitlk.params);
1146 } else if (!cd->type) {
1147 free(cd->u.none.active_name);
1148 cd->u.none.active_name = NULL;
1149 }
1150
1151 crypt_set_null_type(cd);
1152 }
1153
1154 static int _init_by_name_crypt(struct crypt_device *cd, const char *name)
1155 {
1156 bool found = false;
1157 char **dep, *cipher_spec = NULL, cipher[MAX_CIPHER_LEN], cipher_mode[MAX_CIPHER_LEN], deps_uuid_prefix[40], *deps[MAX_DM_DEPS+1] = {};
1158 const char *dev, *namei;
1159 int key_nums, r;
1160 struct crypt_dm_active_device dmd, dmdi = {}, dmdep = {};
1161 struct dm_target *tgt = &dmd.segment, *tgti = &dmdi.segment;
1162
1163 r = dm_query_device(cd, name,
1164 DM_ACTIVE_DEVICE |
1165 DM_ACTIVE_UUID |
1166 DM_ACTIVE_CRYPT_CIPHER |
1167 DM_ACTIVE_CRYPT_KEYSIZE, &dmd);
1168 if (r < 0)
1169 return r;
1170
1171 if (tgt->type != DM_CRYPT && tgt->type != DM_LINEAR) {
1172 log_dbg(cd, "Unsupported device table detected in %s.", name);
1173 r = -EINVAL;
1174 goto out;
1175 }
1176
1177 r = -EINVAL;
1178
1179 if (dmd.uuid) {
1180 r = snprintf(deps_uuid_prefix, sizeof(deps_uuid_prefix), CRYPT_SUBDEV "-%.32s", dmd.uuid + 6);
1181 if (r < 0 || (size_t)r != (sizeof(deps_uuid_prefix) - 1))
1182 r = -EINVAL;
1183 }
1184
1185 if (r >= 0) {
1186 r = dm_device_deps(cd, name, deps_uuid_prefix, deps, ARRAY_SIZE(deps));
1187 if (r)
1188 goto out;
1189 }
1190
1191 r = crypt_parse_name_and_mode(tgt->type == DM_LINEAR ? "null" : tgt->u.crypt.cipher, cipher,
1192 &key_nums, cipher_mode);
1193 if (r < 0) {
1194 log_dbg(cd, "Cannot parse cipher and mode from active device.");
1195 goto out;
1196 }
1197
1198 dep = deps;
1199
1200 if (tgt->type == DM_CRYPT && tgt->u.crypt.integrity && (namei = device_dm_name(tgt->data_device))) {
1201 r = dm_query_device(cd, namei, DM_ACTIVE_DEVICE, &dmdi);
1202 if (r < 0)
1203 goto out;
1204 if (!single_segment(&dmdi) || tgti->type != DM_INTEGRITY) {
1205 log_dbg(cd, "Unsupported device table detected in %s.", namei);
1206 r = -EINVAL;
1207 goto out;
1208 }
1209 if (!cd->metadata_device) {
1210 device_free(cd, cd->device);
1211 MOVE_REF(cd->device, tgti->data_device);
1212 }
1213 }
1214
1215 /* do not try to look up the LUKS2 header in detached-header mode */
1216 if (dmd.uuid && !cd->metadata_device && !found) {
1217 while (*dep && !found) {
1218 r = dm_query_device(cd, *dep, DM_ACTIVE_DEVICE, &dmdep);
1219 if (r < 0)
1220 goto out;
1221
1222 tgt = &dmdep.segment;
1223
1224 while (tgt && !found) {
1225 dev = device_path(tgt->data_device);
1226 if (!dev) {
1227 tgt = tgt->next;
1228 continue;
1229 }
1230 if (!strstr(dev, dm_get_dir()) ||
1231 !crypt_string_in(dev + strlen(dm_get_dir()) + 1, deps, ARRAY_SIZE(deps))) {
1232 device_free(cd, cd->device);
1233 MOVE_REF(cd->device, tgt->data_device);
1234 found = true;
1235 }
1236 tgt = tgt->next;
1237 }
1238 dep++;
1239 dm_targets_free(cd, &dmdep);
1240 }
1241 }
1242
1243 if (asprintf(&cipher_spec, "%s-%s", cipher, cipher_mode) < 0) {
1244 cipher_spec = NULL;
1245 r = -ENOMEM;
1246 goto out;
1247 }
1248
1249 tgt = &dmd.segment;
1250 r = 0;
1251
1252 if (isPLAIN(cd->type) && single_segment(&dmd) && tgt->type == DM_CRYPT) {
1253 cd->u.plain.hdr.hash = NULL; /* no way to get this */
1254 cd->u.plain.hdr.offset = tgt->u.crypt.offset;
1255 cd->u.plain.hdr.skip = tgt->u.crypt.iv_offset;
1256 cd->u.plain.hdr.sector_size = tgt->u.crypt.sector_size;
1257 cd->u.plain.key_size = tgt->u.crypt.vk->keylength;
1258 cd->u.plain.cipher = strdup(cipher);
1259 MOVE_REF(cd->u.plain.cipher_spec, cipher_spec);
1260 cd->u.plain.cipher_mode = cd->u.plain.cipher_spec + strlen(cipher) + 1;
1261 } else if (isLOOPAES(cd->type) && single_segment(&dmd) && tgt->type == DM_CRYPT) {
1262 cd->u.loopaes.hdr.offset = tgt->u.crypt.offset;
1263 cd->u.loopaes.cipher = strdup(cipher);
1264 MOVE_REF(cd->u.loopaes.cipher_spec, cipher_spec);
1265 cd->u.loopaes.cipher_mode = cd->u.loopaes.cipher_spec + strlen(cipher) + 1;
1266 /* version 3 uses last key for IV */
1267 if (tgt->u.crypt.vk->keylength % key_nums)
1268 key_nums++;
1269 cd->u.loopaes.key_size = tgt->u.crypt.vk->keylength / key_nums;
1270 } else if (isLUKS1(cd->type) || isLUKS2(cd->type)) {
1271 if (crypt_metadata_device(cd)) {
1272 r = _crypt_load_luks(cd, cd->type, 0, 0);
1273 if (r < 0) {
1274 log_dbg(cd, "LUKS device header does not match active device.");
1275 crypt_set_null_type(cd);
1276 device_close(cd, cd->metadata_device);
1277 device_close(cd, cd->device);
1278 r = 0;
1279 goto out;
1280 }
1281 /* check whether UUIDs match each other */
1282 r = crypt_uuid_cmp(dmd.uuid, LUKS_UUID(cd));
1283 if (r < 0) {
1284 log_dbg(cd, "LUKS device header uuid: %s mismatches DM returned uuid %s",
1285 LUKS_UUID(cd), dmd.uuid);
1286 crypt_free_type(cd);
1287 r = 0;
1288 goto out;
1289 }
1290 } else {
1291 log_dbg(cd, "LUKS device header not available.");
1292 crypt_set_null_type(cd);
1293 r = 0;
1294 }
1295 } else if (isTCRYPT(cd->type) && single_segment(&dmd) && tgt->type == DM_CRYPT) {
1296 r = TCRYPT_init_by_name(cd, name, dmd.uuid, tgt, &cd->device,
1297 &cd->u.tcrypt.params, &cd->u.tcrypt.hdr);
1298 } else if (isBITLK(cd->type)) {
1299 r = _crypt_load_bitlk(cd, NULL);
1300 if (r < 0) {
1301 log_dbg(cd, "BITLK device header not available.");
1302 crypt_set_null_type(cd);
1303 r = 0;
1304 }
1305 }
1306 out:
1307 dm_targets_free(cd, &dmd);
1308 dm_targets_free(cd, &dmdi);
1309 dm_targets_free(cd, &dmdep);
1310 free(CONST_CAST(void*)dmd.uuid);
1311 free(cipher_spec);
1312 dep = deps;
1313 while (*dep)
1314 free(*dep++);
1315 return r;
1316 }
1317
1318 static int _init_by_name_verity(struct crypt_device *cd, const char *name)
1319 {
1320 struct crypt_dm_active_device dmd;
1321 struct dm_target *tgt = &dmd.segment;
1322 int r;
1323
1324 r = dm_query_device(cd, name,
1325 DM_ACTIVE_DEVICE |
1326 DM_ACTIVE_VERITY_HASH_DEVICE |
1327 DM_ACTIVE_VERITY_ROOT_HASH |
1328 DM_ACTIVE_VERITY_PARAMS, &dmd);
1329 if (r < 0)
1330 return r;
1331 if (!single_segment(&dmd) || tgt->type != DM_VERITY) {
1332 log_dbg(cd, "Unsupported device table detected in %s.", name);
1333 r = -EINVAL;
1334 goto out;
1335 }
1336 if (r > 0)
1337 r = 0;
1338
1339 if (isVERITY(cd->type)) {
1340 cd->u.verity.uuid = NULL; // FIXME
1341 cd->u.verity.hdr.flags = CRYPT_VERITY_NO_HEADER; //FIXME
1342 cd->u.verity.hdr.data_size = tgt->u.verity.vp->data_size;
1343 cd->u.verity.root_hash_size = tgt->u.verity.root_hash_size;
1344 MOVE_REF(cd->u.verity.hdr.hash_name, tgt->u.verity.vp->hash_name);
1345 cd->u.verity.hdr.data_device = NULL;
1346 cd->u.verity.hdr.hash_device = NULL;
1347 cd->u.verity.hdr.data_block_size = tgt->u.verity.vp->data_block_size;
1348 cd->u.verity.hdr.hash_block_size = tgt->u.verity.vp->hash_block_size;
1349 cd->u.verity.hdr.hash_area_offset = tgt->u.verity.hash_offset;
1350 cd->u.verity.hdr.fec_area_offset = tgt->u.verity.fec_offset;
1351 cd->u.verity.hdr.hash_type = tgt->u.verity.vp->hash_type;
1352 cd->u.verity.hdr.flags = tgt->u.verity.vp->flags;
1353 cd->u.verity.hdr.salt_size = tgt->u.verity.vp->salt_size;
1354 MOVE_REF(cd->u.verity.hdr.salt, tgt->u.verity.vp->salt);
1355 MOVE_REF(cd->u.verity.hdr.fec_device, tgt->u.verity.vp->fec_device);
1356 cd->u.verity.hdr.fec_roots = tgt->u.verity.vp->fec_roots;
1357 MOVE_REF(cd->u.verity.fec_device, tgt->u.verity.fec_device);
1358 MOVE_REF(cd->metadata_device, tgt->u.verity.hash_device);
1359 MOVE_REF(cd->u.verity.root_hash, tgt->u.verity.root_hash);
1360 }
1361 out:
1362 dm_targets_free(cd, &dmd);
1363 return r;
1364 }
1365
1366 static int _init_by_name_integrity(struct crypt_device *cd, const char *name)
1367 {
1368 struct crypt_dm_active_device dmd;
1369 struct dm_target *tgt = &dmd.segment;
1370 int r;
1371
1372 r = dm_query_device(cd, name, DM_ACTIVE_DEVICE |
1373 DM_ACTIVE_CRYPT_KEY |
1374 DM_ACTIVE_CRYPT_KEYSIZE |
1375 DM_ACTIVE_INTEGRITY_PARAMS, &dmd);
1376 if (r < 0)
1377 return r;
1378 if (!single_segment(&dmd) || tgt->type != DM_INTEGRITY) {
1379 log_dbg(cd, "Unsupported device table detected in %s.", name);
1380 r = -EINVAL;
1381 goto out;
1382 }
1383 if (r > 0)
1384 r = 0;
1385
1386 if (isINTEGRITY(cd->type)) {
1387 cd->u.integrity.params.tag_size = tgt->u.integrity.tag_size;
1388 cd->u.integrity.params.sector_size = tgt->u.integrity.sector_size;
1389 cd->u.integrity.params.journal_size = tgt->u.integrity.journal_size;
1390 cd->u.integrity.params.journal_watermark = tgt->u.integrity.journal_watermark;
1391 cd->u.integrity.params.journal_commit_time = tgt->u.integrity.journal_commit_time;
1392 cd->u.integrity.params.interleave_sectors = tgt->u.integrity.interleave_sectors;
1393 cd->u.integrity.params.buffer_sectors = tgt->u.integrity.buffer_sectors;
1394 MOVE_REF(cd->u.integrity.params.integrity, tgt->u.integrity.integrity);
1395 MOVE_REF(cd->u.integrity.params.journal_integrity, tgt->u.integrity.journal_integrity);
1396 MOVE_REF(cd->u.integrity.params.journal_crypt, tgt->u.integrity.journal_crypt);
1397
1398 if (tgt->u.integrity.vk)
1399 cd->u.integrity.params.integrity_key_size = tgt->u.integrity.vk->keylength;
1400 if (tgt->u.integrity.journal_integrity_key)
1401 cd->u.integrity.params.journal_integrity_key_size = tgt->u.integrity.journal_integrity_key->keylength;
1402 if (tgt->u.integrity.journal_crypt_key)
1403 cd->u.integrity.params.integrity_key_size = tgt->u.integrity.journal_crypt_key->keylength;
1404 MOVE_REF(cd->metadata_device, tgt->u.integrity.meta_device);
1405 }
1406 out:
1407 dm_targets_free(cd, &dmd);
1408 return r;
1409 }
1410
1411 int crypt_init_by_name_and_header(struct crypt_device **cd,
1412 const char *name,
1413 const char *header_device)
1414 {
1415 crypt_status_info ci;
1416 struct crypt_dm_active_device dmd;
1417 struct dm_target *tgt = &dmd.segment;
1418 int r;
1419
1420 if (!cd || !name)
1421 return -EINVAL;
1422
1423 log_dbg(NULL, "Allocating crypt device context by device %s.", name);
1424
1425 ci = crypt_status(NULL, name);
1426 if (ci == CRYPT_INVALID)
1427 return -ENODEV;
1428
1429 if (ci < CRYPT_ACTIVE) {
1430 log_err(NULL, _("Device %s is not active."), name);
1431 return -ENODEV;
1432 }
1433
1434 r = dm_query_device(NULL, name, DM_ACTIVE_DEVICE | DM_ACTIVE_UUID, &dmd);
1435 if (r < 0)
1436 return r;
1437
1438 *cd = NULL;
1439
1440 if (header_device) {
1441 r = crypt_init(cd, header_device);
1442 } else {
1443 r = crypt_init(cd, device_path(tgt->data_device));
1444
1445 /* Underlying device disappeared but mapping still active */
1446 if (!tgt->data_device || r == -ENOTBLK)
1447 log_verbose(NULL, _("Underlying device for crypt device %s disappeared."),
1448 name);
1449
1450 /* Underlying device is not readable but crypt mapping exists */
1451 if (r == -ENOTBLK)
1452 r = crypt_init(cd, NULL);
1453 }
1454
1455 if (r < 0)
1456 goto out;
1457
1458 if (dmd.uuid) {
1459 if (!strncmp(CRYPT_PLAIN, dmd.uuid, sizeof(CRYPT_PLAIN)-1))
1460 (*cd)->type = strdup(CRYPT_PLAIN);
1461 else if (!strncmp(CRYPT_LOOPAES, dmd.uuid, sizeof(CRYPT_LOOPAES)-1))
1462 (*cd)->type = strdup(CRYPT_LOOPAES);
1463 else if (!strncmp(CRYPT_LUKS1, dmd.uuid, sizeof(CRYPT_LUKS1)-1))
1464 (*cd)->type = strdup(CRYPT_LUKS1);
1465 else if (!strncmp(CRYPT_LUKS2, dmd.uuid, sizeof(CRYPT_LUKS2)-1))
1466 (*cd)->type = strdup(CRYPT_LUKS2);
1467 else if (!strncmp(CRYPT_VERITY, dmd.uuid, sizeof(CRYPT_VERITY)-1))
1468 (*cd)->type = strdup(CRYPT_VERITY);
1469 else if (!strncmp(CRYPT_TCRYPT, dmd.uuid, sizeof(CRYPT_TCRYPT)-1))
1470 (*cd)->type = strdup(CRYPT_TCRYPT);
1471 else if (!strncmp(CRYPT_INTEGRITY, dmd.uuid, sizeof(CRYPT_INTEGRITY)-1))
1472 (*cd)->type = strdup(CRYPT_INTEGRITY);
1473 else if (!strncmp(CRYPT_BITLK, dmd.uuid, sizeof(CRYPT_BITLK)-1))
1474 (*cd)->type = strdup(CRYPT_BITLK);
1475 else
1476 log_dbg(NULL, "Unknown UUID set, some parameters are not set.");
1477 } else
1478 log_dbg(NULL, "Active device has no UUID set, some parameters are not set.");
1479
1480 if (header_device) {
1481 r = crypt_set_data_device(*cd, device_path(tgt->data_device));
1482 if (r < 0)
1483 goto out;
1484 }
1485
1486 /* Try to initialize basic parameters from active device */
1487
1488 if (tgt->type == DM_CRYPT || tgt->type == DM_LINEAR)
1489 r = _init_by_name_crypt(*cd, name);
1490 else if (tgt->type == DM_VERITY)
1491 r = _init_by_name_verity(*cd, name);
1492 else if (tgt->type == DM_INTEGRITY)
1493 r = _init_by_name_integrity(*cd, name);
1494 out:
1495 if (r < 0) {
1496 crypt_free(*cd);
1497 *cd = NULL;
1498 } else if (!(*cd)->type) {
1499 /* For an anonymous device (no header found), remember the initialized name */
1500 (*cd)->u.none.active_name = strdup(name);
1501 }
1502
1503 free(CONST_CAST(void*)dmd.uuid);
1504 dm_targets_free(NULL, &dmd);
1505 return r;
1506 }
1507
1508 int crypt_init_by_name(struct crypt_device **cd, const char *name)
1509 {
1510 return crypt_init_by_name_and_header(cd, name, NULL);
1511 }
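
/*
 * Illustrative usage sketch (documentation only; the mapping name is
 * hypothetical): building a context from an already active mapping.
 *
 *   struct crypt_device *cd = NULL;
 *   int r = crypt_init_by_name(&cd, "data");
 *   if (!r) {
 *       printf("type: %s, cipher: %s-%s\n", crypt_get_type(cd),
 *              crypt_get_cipher(cd), crypt_get_cipher_mode(cd));
 *       crypt_free(cd);
 *   }
 */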
1512
1513 /*
1514 * crypt_format() helpers
1515 */
1516 static int _crypt_format_plain(struct crypt_device *cd,
1517 const char *cipher,
1518 const char *cipher_mode,
1519 const char *uuid,
1520 size_t volume_key_size,
1521 struct crypt_params_plain *params)
1522 {
1523 unsigned int sector_size = params ? params->sector_size : SECTOR_SIZE;
1524 uint64_t dev_size;
1525
1526 if (!cipher || !cipher_mode) {
1527 log_err(cd, _("Invalid plain crypt parameters."));
1528 return -EINVAL;
1529 }
1530
1531 if (volume_key_size > 1024) {
1532 log_err(cd, _("Invalid key size."));
1533 return -EINVAL;
1534 }
1535
1536 if (uuid) {
1537 log_err(cd, _("UUID is not supported for this crypt type."));
1538 return -EINVAL;
1539 }
1540
1541 if (cd->metadata_device) {
1542 log_err(cd, _("Detached metadata device is not supported for this crypt type."));
1543 return -EINVAL;
1544 }
1545
1546 /* For compatibility with old params structure */
1547 if (!sector_size)
1548 sector_size = SECTOR_SIZE;
1549
1550 if (sector_size < SECTOR_SIZE || sector_size > MAX_SECTOR_SIZE ||
1551 NOTPOW2(sector_size)) {
1552 log_err(cd, _("Unsupported encryption sector size."));
1553 return -EINVAL;
1554 }
1555
1556 if (sector_size > SECTOR_SIZE && !device_size(cd->device, &dev_size)) {
1557 if (params && params->offset)
1558 dev_size -= (params->offset * SECTOR_SIZE);
1559 if (dev_size % sector_size) {
1560 log_err(cd, _("Device size is not aligned to requested sector size."));
1561 return -EINVAL;
1562 }
1563 device_set_block_size(crypt_data_device(cd), sector_size);
1564 }
1565
1566 if (!(cd->type = strdup(CRYPT_PLAIN)))
1567 return -ENOMEM;
1568
1569 cd->u.plain.key_size = volume_key_size;
1570 cd->volume_key = crypt_alloc_volume_key(volume_key_size, NULL);
1571 if (!cd->volume_key)
1572 return -ENOMEM;
1573
1574 if (asprintf(&cd->u.plain.cipher_spec, "%s-%s", cipher, cipher_mode) < 0) {
1575 cd->u.plain.cipher_spec = NULL;
1576 return -ENOMEM;
1577 }
1578 cd->u.plain.cipher = strdup(cipher);
1579 cd->u.plain.cipher_mode = cd->u.plain.cipher_spec + strlen(cipher) + 1;
1580
1581 if (params && params->hash)
1582 cd->u.plain.hdr.hash = strdup(params->hash);
1583
1584 cd->u.plain.hdr.offset = params ? params->offset : 0;
1585 cd->u.plain.hdr.skip = params ? params->skip : 0;
1586 cd->u.plain.hdr.size = params ? params->size : 0;
1587 cd->u.plain.hdr.sector_size = sector_size;
1588
1589 if (!cd->u.plain.cipher)
1590 return -ENOMEM;
1591
1592 return 0;
1593 }
1594
1595 static int _crypt_format_luks1(struct crypt_device *cd,
1596 const char *cipher,
1597 const char *cipher_mode,
1598 const char *uuid,
1599 const char *volume_key,
1600 size_t volume_key_size,
1601 struct crypt_params_luks1 *params)
1602 {
1603 int r;
1604 unsigned long required_alignment = DEFAULT_DISK_ALIGNMENT;
1605 unsigned long alignment_offset = 0;
1606 uint64_t dev_size;
1607
1608 if (!cipher || !cipher_mode)
1609 return -EINVAL;
1610
1611 if (!crypt_metadata_device(cd)) {
1612 log_err(cd, _("Can't format LUKS without device."));
1613 return -EINVAL;
1614 }
1615
1616 if (params && cd->data_offset && params->data_alignment &&
1617 (cd->data_offset % params->data_alignment)) {
1618 log_err(cd, _("Requested data alignment is not compatible with data offset."));
1619 return -EINVAL;
1620 }
1621
1622 if (!(cd->type = strdup(CRYPT_LUKS1)))
1623 return -ENOMEM;
1624
1625 if (volume_key)
1626 cd->volume_key = crypt_alloc_volume_key(volume_key_size,
1627 volume_key);
1628 else
1629 cd->volume_key = crypt_generate_volume_key(cd, volume_key_size);
1630
1631 if (!cd->volume_key)
1632 return -ENOMEM;
1633
1634 if (verify_pbkdf_params(cd, &cd->pbkdf)) {
1635 r = init_pbkdf_type(cd, NULL, CRYPT_LUKS1);
1636 if (r)
1637 return r;
1638 }
1639
1640 if (params && params->hash && strcmp(params->hash, cd->pbkdf.hash)) {
1641 free(CONST_CAST(void*)cd->pbkdf.hash);
1642 cd->pbkdf.hash = strdup(params->hash);
1643 if (!cd->pbkdf.hash)
1644 return -ENOMEM;
1645 }
1646
1647 if (params && params->data_device) {
1648 if (!cd->metadata_device)
1649 cd->metadata_device = cd->device;
1650 else
1651 device_free(cd, cd->device);
1652 cd->device = NULL;
1653 if (device_alloc(cd, &cd->device, params->data_device) < 0)
1654 return -ENOMEM;
1655 }
1656
1657 if (params && cd->metadata_device) {
1658 /* For a detached header the alignment is used directly as the data offset */
1659 if (!cd->data_offset)
1660 cd->data_offset = params->data_alignment;
1661 required_alignment = params->data_alignment * SECTOR_SIZE;
1662 } else if (params && params->data_alignment) {
1663 required_alignment = params->data_alignment * SECTOR_SIZE;
1664 } else
1665 device_topology_alignment(cd, cd->device,
1666 &required_alignment,
1667 &alignment_offset, DEFAULT_DISK_ALIGNMENT);
1668
1669 r = LUKS_check_cipher(cd, volume_key_size, cipher, cipher_mode);
1670 if (r < 0)
1671 return r;
1672
1673 r = LUKS_generate_phdr(&cd->u.luks1.hdr, cd->volume_key, cipher, cipher_mode,
1674 cd->pbkdf.hash, uuid,
1675 cd->data_offset * SECTOR_SIZE,
1676 alignment_offset, required_alignment, cd);
1677 if (r < 0)
1678 return r;
1679
1680 r = device_check_access(cd, crypt_metadata_device(cd), DEV_EXCL);
1681 if (r < 0)
1682 return r;
1683
1684 if (!device_size(crypt_data_device(cd), &dev_size) &&
1685 dev_size < (crypt_get_data_offset(cd) * SECTOR_SIZE))
1686 log_std(cd, _("WARNING: Data offset is outside of currently available data device.\n"));
1687
1688 if (asprintf(&cd->u.luks1.cipher_spec, "%s-%s", cipher, cipher_mode) < 0) {
1689 cd->u.luks1.cipher_spec = NULL;
1690 return -ENOMEM;
1691 }
1692
1693 r = LUKS_wipe_header_areas(&cd->u.luks1.hdr, cd);
1694 if (r < 0) {
1695 free(cd->u.luks1.cipher_spec);
1696 log_err(cd, _("Cannot wipe header on device %s."),
1697 mdata_device_path(cd));
1698 return r;
1699 }
1700
1701 r = LUKS_write_phdr(&cd->u.luks1.hdr, cd);
1702 if (r)
1703 free(cd->u.luks1.cipher_spec);
1704
1705 return r;
1706 }
1707
1708 static int _crypt_format_luks2(struct crypt_device *cd,
1709 const char *cipher,
1710 const char *cipher_mode,
1711 const char *uuid,
1712 const char *volume_key,
1713 size_t volume_key_size,
1714 struct crypt_params_luks2 *params,
1715 bool sector_size_autodetect)
1716 {
1717 int r, integrity_key_size = 0;
1718 unsigned long required_alignment = DEFAULT_DISK_ALIGNMENT;
1719 unsigned long alignment_offset = 0;
1720 unsigned int sector_size;
1721 const char *integrity = params ? params->integrity : NULL;
1722 uint64_t dev_size;
1723 uint32_t dmc_flags;
1724
1725 cd->u.luks2.hdr.jobj = NULL;
1726 cd->u.luks2.keyslot_cipher = NULL;
1727
1728 if (!cipher || !cipher_mode)
1729 return -EINVAL;
1730
1731 if (!crypt_metadata_device(cd)) {
1732 log_err(cd, _("Can't format LUKS without device."));
1733 return -EINVAL;
1734 }
1735
1736 if (params && cd->data_offset && params->data_alignment &&
1737 (cd->data_offset % params->data_alignment)) {
1738 log_err(cd, _("Requested data alignment is not compatible with data offset."));
1739 return -EINVAL;
1740 }
1741
1742 if (params && params->sector_size)
1743 sector_size_autodetect = false;
1744
1745 if (sector_size_autodetect) {
1746 sector_size = device_optimal_encryption_sector_size(cd, crypt_data_device(cd));
1747 log_dbg(cd, "Auto-detected optimal encryption sector size for device %s is %d bytes.",
1748 device_path(crypt_data_device(cd)), sector_size);
1749 } else
1750 sector_size = params ? params->sector_size : SECTOR_SIZE;
1751
1752 if (sector_size < SECTOR_SIZE || sector_size > MAX_SECTOR_SIZE ||
1753 NOTPOW2(sector_size)) {
1754 log_err(cd, _("Unsupported encryption sector size."));
1755 return -EINVAL;
1756 }
1757 if (sector_size != SECTOR_SIZE && !dm_flags(cd, DM_CRYPT, &dmc_flags) &&
1758 !(dmc_flags & DM_SECTOR_SIZE_SUPPORTED)) {
1759 if (sector_size_autodetect) {
1760 log_dbg(cd, "dm-crypt does not support encryption sector size option. Reverting to 512 bytes.");
1761 sector_size = SECTOR_SIZE;
1762 } else
1763 log_std(cd, _("WARNING: The device activation will fail, dm-crypt is missing "
1764 "support for requested encryption sector size.\n"));
1765 }
1766
1767 if (integrity) {
1768 if (params->integrity_params) {
1769 /* Standalone dm-integrity must not be used */
1770 if (params->integrity_params->integrity ||
1771 params->integrity_params->integrity_key_size)
1772 return -EINVAL;
1773 /* FIXME: journal encryption and MAC are not yet supported here */
1774 if (params->integrity_params->journal_crypt ||
1775 params->integrity_params->journal_integrity)
1776 return -ENOTSUP;
1777 }
1778 if (!INTEGRITY_tag_size(cd, integrity, cipher, cipher_mode)) {
1779 if (!strcmp(integrity, "none"))
1780 integrity = NULL;
1781 else
1782 return -EINVAL;
1783 }
1784 integrity_key_size = INTEGRITY_key_size(cd, integrity);
1785 if ((integrity_key_size < 0) || (integrity_key_size >= (int)volume_key_size)) {
1786 log_err(cd, _("Volume key is too small for encryption with integrity extensions."));
1787 return -EINVAL;
1788 }
1789 }
1790
1791 r = device_check_access(cd, crypt_metadata_device(cd), DEV_EXCL);
1792 if (r < 0)
1793 return r;
1794
1795 if (!(cd->type = strdup(CRYPT_LUKS2)))
1796 return -ENOMEM;
1797
1798 if (volume_key)
1799 cd->volume_key = crypt_alloc_volume_key(volume_key_size,
1800 volume_key);
1801 else
1802 cd->volume_key = crypt_generate_volume_key(cd, volume_key_size);
1803
1804 if (!cd->volume_key)
1805 return -ENOMEM;
1806
1807 if (params && params->pbkdf)
1808 r = crypt_set_pbkdf_type(cd, params->pbkdf);
1809 else if (verify_pbkdf_params(cd, &cd->pbkdf))
1810 r = init_pbkdf_type(cd, NULL, CRYPT_LUKS2);
1811
1812 if (r < 0)
1813 return r;
1814
1815 if (params && params->data_device) {
1816 if (!cd->metadata_device)
1817 cd->metadata_device = cd->device;
1818 else
1819 device_free(cd, cd->device);
1820 cd->device = NULL;
1821 if (device_alloc(cd, &cd->device, params->data_device) < 0)
1822 return -ENOMEM;
1823 }
1824
1825 if (params && cd->metadata_device) {
1826 /* For a detached header the alignment is used directly as the data offset */
1827 if (!cd->data_offset)
1828 cd->data_offset = params->data_alignment;
1829 required_alignment = params->data_alignment * SECTOR_SIZE;
1830 } else if (params && params->data_alignment) {
1831 required_alignment = params->data_alignment * SECTOR_SIZE;
1832 } else
1833 device_topology_alignment(cd, cd->device,
1834 &required_alignment,
1835 &alignment_offset, DEFAULT_DISK_ALIGNMENT);
1836
1837 r = device_size(crypt_data_device(cd), &dev_size);
1838 if (r < 0)
1839 goto out;
1840
1841 if (sector_size_autodetect) {
1842 if (cd->data_offset && MISALIGNED(cd->data_offset, sector_size)) {
1843 log_dbg(cd, "Data offset not aligned to sector size. Reverting to 512 bytes.");
1844 sector_size = SECTOR_SIZE;
1845 } else if (MISALIGNED(dev_size - (uint64_t)required_alignment - (uint64_t)alignment_offset, sector_size)) {
1846 /* underflow does not affect misalignment checks */
1847 log_dbg(cd, "Device size is not aligned to sector size. Reverting to 512 bytes.");
1848 sector_size = SECTOR_SIZE;
1849 }
1850 }
1851
1852 /* FIXME: allow this later also for normal ciphers (check AF_ALG availability). */
1853 if (integrity && !integrity_key_size) {
1854 r = crypt_cipher_check_kernel(cipher, cipher_mode, integrity, volume_key_size);
1855 if (r < 0) {
1856 log_err(cd, _("Cipher %s-%s (key size %zd bits) is not available."),
1857 cipher, cipher_mode, volume_key_size * 8);
1858 goto out;
1859 }
1860 }
1861
1862 if ((!integrity || integrity_key_size) && !crypt_cipher_wrapped_key(cipher, cipher_mode) &&
1863 !INTEGRITY_tag_size(cd, NULL, cipher, cipher_mode)) {
1864 r = LUKS_check_cipher(cd, volume_key_size - integrity_key_size,
1865 cipher, cipher_mode);
1866 if (r < 0)
1867 goto out;
1868 }
1869
1870 r = LUKS2_generate_hdr(cd, &cd->u.luks2.hdr, cd->volume_key,
1871 cipher, cipher_mode,
1872 integrity, uuid,
1873 sector_size,
1874 cd->data_offset * SECTOR_SIZE,
1875 alignment_offset,
1876 required_alignment,
1877 cd->metadata_size, cd->keyslots_size);
1878 if (r < 0)
1879 goto out;
1880
1881 if (dev_size < (crypt_get_data_offset(cd) * SECTOR_SIZE))
1882 log_std(cd, _("WARNING: Data offset is outside of currently available data device.\n"));
1883
1884 if (cd->metadata_size && (cd->metadata_size != LUKS2_metadata_size(&cd->u.luks2.hdr)))
1885 log_std(cd, _("WARNING: LUKS2 metadata size changed to %" PRIu64 " bytes.\n"),
1886 LUKS2_metadata_size(&cd->u.luks2.hdr));
1887
1888 if (cd->keyslots_size && (cd->keyslots_size != LUKS2_keyslots_size(&cd->u.luks2.hdr)))
1889 log_std(cd, _("WARNING: LUKS2 keyslots area size changed to %" PRIu64 " bytes.\n"),
1890 LUKS2_keyslots_size(&cd->u.luks2.hdr));
1891
1892 if (!integrity && sector_size > SECTOR_SIZE) {
1893 dev_size -= (crypt_get_data_offset(cd) * SECTOR_SIZE);
1894 if (dev_size % sector_size) {
1895 log_err(cd, _("Device size is not aligned to requested sector size."));
1896 r = -EINVAL;
1897 goto out;
1898 }
1899 }
1900
1901 if (params && (params->label || params->subsystem)) {
1902 r = LUKS2_hdr_labels(cd, &cd->u.luks2.hdr,
1903 params->label, params->subsystem, 0);
1904 if (r < 0)
1905 goto out;
1906 }
1907
1908 device_set_block_size(crypt_data_device(cd), sector_size);
1909
1910 r = LUKS2_wipe_header_areas(cd, &cd->u.luks2.hdr, cd->metadata_device != NULL);
1911 if (r < 0) {
1912 log_err(cd, _("Cannot wipe header on device %s."),
1913 mdata_device_path(cd));
1914 if (dev_size < LUKS2_hdr_and_areas_size(&cd->u.luks2.hdr))
1915 log_err(cd, _("Device %s is too small."), device_path(crypt_metadata_device(cd)));
1916 goto out;
1917 }
1918
1919 /* Wipe the old integrity superblock and create a new one */
1920 if (crypt_get_integrity_tag_size(cd)) {
1921 r = crypt_wipe_device(cd, crypt_data_device(cd), CRYPT_WIPE_ZERO,
1922 crypt_get_data_offset(cd) * SECTOR_SIZE,
1923 8 * SECTOR_SIZE, 8 * SECTOR_SIZE, NULL, NULL);
1924 if (r < 0) {
1925 if (r == -EBUSY)
1926 log_err(cd, _("Cannot format device %s in use."),
1927 data_device_path(cd));
1928 else if (r == -EACCES) {
1929 log_err(cd, _("Cannot format device %s, permission denied."),
1930 data_device_path(cd));
1931 r = -EINVAL;
1932 } else
1933 log_err(cd, _("Cannot wipe header on device %s."),
1934 data_device_path(cd));
1935
1936 goto out;
1937 }
1938
1939 r = INTEGRITY_format(cd, params ? params->integrity_params : NULL, NULL, NULL);
1940 if (r)
1941 log_err(cd, _("Cannot format integrity for device %s."),
1942 data_device_path(cd));
1943 }
1944
1945 if (r < 0)
1946 goto out;
1947
1948 /* override sequence id check with format */
1949 r = LUKS2_hdr_write_force(cd, &cd->u.luks2.hdr);
1950 if (r < 0) {
1951 if (r == -EBUSY)
1952 log_err(cd, _("Cannot format device %s in use."),
1953 mdata_device_path(cd));
1954 else if (r == -EACCES) {
1955 log_err(cd, _("Cannot format device %s, permission denied."),
1956 mdata_device_path(cd));
1957 r = -EINVAL;
1958 } else
1959 log_err(cd, _("Cannot format device %s."),
1960 mdata_device_path(cd));
1961 }
1962
1963 out:
1964 if (r)
1965 LUKS2_hdr_free(cd, &cd->u.luks2.hdr);
1966
1967 return r;
1968 }
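/*
 * Illustrative caller-side sketch (not part of setup.c): formatting a device
 * as LUKS2 through the public API ends up in _crypt_format_luks2() above.
 * The device path, passphrase, cipher spec, key size and sector size below
 * are placeholder choices for the example, not defaults mandated by this file.
 *
 *   #include <libcryptsetup.h>
 *
 *   static int example_format_luks2(const char *dev_path)
 *   {
 *       struct crypt_device *cd = NULL;
 *       struct crypt_params_luks2 params = {
 *           .sector_size = 4096,     // explicit encryption sector size (placeholder)
 *       };
 *       int r = crypt_init(&cd, dev_path);
 *       if (r < 0)
 *           return r;
 *       // NULL volume key: a new key is generated (crypt_generate_volume_key path)
 *       r = crypt_format(cd, CRYPT_LUKS2, "aes", "xts-plain64",
 *                        NULL, NULL, 64, &params);
 *       if (r >= 0)
 *           r = crypt_keyslot_add_by_volume_key(cd, CRYPT_ANY_SLOT, NULL, 0,
 *                                               "placeholder passphrase", 22);
 *       crypt_free(cd);
 *       return r < 0 ? r : 0;
 *   }
 */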
1969
1970 static int _crypt_format_loopaes(struct crypt_device *cd,
1971 const char *cipher,
1972 const char *uuid,
1973 size_t volume_key_size,
1974 struct crypt_params_loopaes *params)
1975 {
1976 if (!crypt_metadata_device(cd)) {
1977 log_err(cd, _("Can't format LOOPAES without device."));
1978 return -EINVAL;
1979 }
1980
1981 if (volume_key_size > 1024) {
1982 log_err(cd, _("Invalid key size."));
1983 return -EINVAL;
1984 }
1985
1986 if (uuid) {
1987 log_err(cd, _("UUID is not supported for this crypt type."));
1988 return -EINVAL;
1989 }
1990
1991 if (cd->metadata_device) {
1992 log_err(cd, _("Detached metadata device is not supported for this crypt type."));
1993 return -EINVAL;
1994 }
1995
1996 if (!(cd->type = strdup(CRYPT_LOOPAES)))
1997 return -ENOMEM;
1998
1999 cd->u.loopaes.key_size = volume_key_size;
2000
2001 cd->u.loopaes.cipher = strdup(cipher ?: DEFAULT_LOOPAES_CIPHER);
2002
2003 if (params && params->hash)
2004 cd->u.loopaes.hdr.hash = strdup(params->hash);
2005
2006 cd->u.loopaes.hdr.offset = params ? params->offset : 0;
2007 cd->u.loopaes.hdr.skip = params ? params->skip : 0;
2008
2009 return 0;
2010 }
2011
2012 static int _crypt_format_verity(struct crypt_device *cd,
2013 const char *uuid,
2014 struct crypt_params_verity *params)
2015 {
2016 int r = 0, hash_size;
2017 uint64_t data_device_size, hash_blocks_size;
2018 struct device *fec_device = NULL;
2019 char *fec_device_path = NULL, *hash_name = NULL, *root_hash = NULL, *salt = NULL;
2020
2021 if (!crypt_metadata_device(cd)) {
2022 log_err(cd, _("Can't format VERITY without device."));
2023 return -EINVAL;
2024 }
2025
2026 if (!params)
2027 return -EINVAL;
2028
2029 if (!params->data_device && !cd->metadata_device)
2030 return -EINVAL;
2031
2032 if (params->hash_type > VERITY_MAX_HASH_TYPE) {
2033 log_err(cd, _("Unsupported VERITY hash type %d."), params->hash_type);
2034 return -EINVAL;
2035 }
2036
2037 if (VERITY_BLOCK_SIZE_OK(params->data_block_size) ||
2038 VERITY_BLOCK_SIZE_OK(params->hash_block_size)) {
2039 log_err(cd, _("Unsupported VERITY block size."));
2040 return -EINVAL;
2041 }
2042
2043 if (MISALIGNED_512(params->hash_area_offset)) {
2044 log_err(cd, _("Unsupported VERITY hash offset."));
2045 return -EINVAL;
2046 }
2047
2048 if (MISALIGNED_512(params->fec_area_offset)) {
2049 log_err(cd, _("Unsupported VERITY FEC offset."));
2050 return -EINVAL;
2051 }
2052
2053 if (!(cd->type = strdup(CRYPT_VERITY)))
2054 return -ENOMEM;
2055
2056 if (params->data_device) {
2057 r = crypt_set_data_device(cd, params->data_device);
2058 if (r)
2059 return r;
2060 }
2061
2062 if (!params->data_size) {
2063 r = device_size(cd->device, &data_device_size);
2064 if (r < 0)
2065 return r;
2066
2067 cd->u.verity.hdr.data_size = data_device_size / params->data_block_size;
2068 } else
2069 cd->u.verity.hdr.data_size = params->data_size;
2070
2071 if (device_is_identical(crypt_metadata_device(cd), crypt_data_device(cd)) > 0 &&
2072 (cd->u.verity.hdr.data_size * params->data_block_size) > params->hash_area_offset) {
2073 log_err(cd, _("Data area overlaps with hash area."));
2074 return -EINVAL;
2075 }
2076
2077 hash_size = crypt_hash_size(params->hash_name);
2078 if (hash_size <= 0) {
2079 log_err(cd, _("Hash algorithm %s not supported."),
2080 params->hash_name);
2081 return -EINVAL;
2082 }
2083 cd->u.verity.root_hash_size = hash_size;
2084
2085 if (params->fec_device) {
2086 fec_device_path = strdup(params->fec_device);
2087 if (!fec_device_path)
2088 return -ENOMEM;
2089 r = device_alloc(cd, &fec_device, params->fec_device);
2090 if (r < 0) {
2091 r = -ENOMEM;
2092 goto out;
2093 }
2094
2095 hash_blocks_size = VERITY_hash_blocks(cd, params) * params->hash_block_size;
2096 if (device_is_identical(crypt_metadata_device(cd), fec_device) > 0 &&
2097 (params->hash_area_offset + hash_blocks_size) > params->fec_area_offset) {
2098 log_err(cd, _("Hash area overlaps with FEC area."));
2099 r = -EINVAL;
2100 goto out;
2101 }
2102
2103 if (device_is_identical(crypt_data_device(cd), fec_device) > 0 &&
2104 (cd->u.verity.hdr.data_size * params->data_block_size) > params->fec_area_offset) {
2105 log_err(cd, _("Data area overlaps with FEC area."));
2106 r = -EINVAL;
2107 goto out;
2108 }
2109 }
2110
2111 root_hash = malloc(cd->u.verity.root_hash_size);
2112 hash_name = strdup(params->hash_name);
2113 salt = malloc(params->salt_size);
2114
2115 if (!root_hash || !hash_name || !salt) {
2116 r = -ENOMEM;
2117 goto out;
2118 }
2119
2120 cd->u.verity.hdr.flags = params->flags;
2121 cd->u.verity.root_hash = root_hash;
2122 cd->u.verity.hdr.hash_name = hash_name;
2123 cd->u.verity.hdr.data_device = NULL;
2124 cd->u.verity.fec_device = fec_device;
2125 cd->u.verity.hdr.fec_device = fec_device_path;
2126 cd->u.verity.hdr.fec_roots = params->fec_roots;
2127 cd->u.verity.hdr.data_block_size = params->data_block_size;
2128 cd->u.verity.hdr.hash_block_size = params->hash_block_size;
2129 cd->u.verity.hdr.hash_area_offset = params->hash_area_offset;
2130 cd->u.verity.hdr.fec_area_offset = params->fec_area_offset;
2131 cd->u.verity.hdr.hash_type = params->hash_type;
2132 cd->u.verity.hdr.flags = params->flags;
2133 cd->u.verity.hdr.salt_size = params->salt_size;
2134 cd->u.verity.hdr.salt = salt;
2135
2136 if (params->salt)
2137 memcpy(salt, params->salt, params->salt_size);
2138 else
2139 r = crypt_random_get(cd, salt, params->salt_size, CRYPT_RND_SALT);
2140 if (r)
2141 goto out;
2142
2143 if (params->flags & CRYPT_VERITY_CREATE_HASH) {
2144 r = VERITY_create(cd, &cd->u.verity.hdr,
2145 cd->u.verity.root_hash, cd->u.verity.root_hash_size);
2146 if (!r && params->fec_device)
2147 r = VERITY_FEC_process(cd, &cd->u.verity.hdr, cd->u.verity.fec_device, 0, NULL);
2148 if (r)
2149 goto out;
2150 }
2151
2152 if (!(params->flags & CRYPT_VERITY_NO_HEADER)) {
2153 if (uuid) {
2154 if (!(cd->u.verity.uuid = strdup(uuid)))
2155 r = -ENOMEM;
2156 } else
2157 r = VERITY_UUID_generate(cd, &cd->u.verity.uuid);
2158
2159 if (!r)
2160 r = VERITY_write_sb(cd, cd->u.verity.hdr.hash_area_offset,
2161 cd->u.verity.uuid,
2162 &cd->u.verity.hdr);
2163 }
2164
2165 out:
2166 if (r) {
2167 device_free(cd, fec_device);
2168 free(root_hash);
2169 free(hash_name);
2170 free(fec_device_path);
2171 free(salt);
2172 }
2173
2174 return r;
2175 }
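/*
 * Illustrative caller-side sketch (not part of setup.c): _crypt_format_verity()
 * above is reached via crypt_format() with CRYPT_VERITY. The devices and the
 * parameter values below are placeholders and may differ from veritysetup
 * defaults.
 *
 *   #include <libcryptsetup.h>
 *
 *   static int example_format_verity(const char *hash_dev, const char *data_dev)
 *   {
 *       struct crypt_device *cd = NULL;
 *       struct crypt_params_verity params = {
 *           .hash_name = "sha256",
 *           .data_device = data_dev,
 *           .salt_size = 32,            // salt == NULL: a random salt is generated
 *           .hash_type = 1,
 *           .data_block_size = 4096,
 *           .hash_block_size = 4096,
 *           .flags = CRYPT_VERITY_CREATE_HASH,
 *       };
 *       int r = crypt_init(&cd, hash_dev);      // hash (metadata) device
 *       if (r < 0)
 *           return r;
 *       r = crypt_format(cd, CRYPT_VERITY, NULL, NULL, NULL, NULL, 0, &params);
 *       crypt_free(cd);
 *       return r;
 *   }
 */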
2176
2177 static int _crypt_format_integrity(struct crypt_device *cd,
2178 const char *uuid,
2179 struct crypt_params_integrity *params)
2180 {
2181 int r;
2182 uint32_t integrity_tag_size;
2183 char *integrity = NULL, *journal_integrity = NULL, *journal_crypt = NULL;
2184 struct volume_key *journal_crypt_key = NULL, *journal_mac_key = NULL;
2185
2186 if (!params)
2187 return -EINVAL;
2188
2189 if (uuid) {
2190 log_err(cd, _("UUID is not supported for this crypt type."));
2191 return -EINVAL;
2192 }
2193
2194 r = device_check_access(cd, crypt_metadata_device(cd), DEV_EXCL);
2195 if (r < 0)
2196 return r;
2197
2198 /* Wipe first 8 sectors - fs magic numbers etc. */
2199 r = crypt_wipe_device(cd, crypt_metadata_device(cd), CRYPT_WIPE_ZERO, 0,
2200 8 * SECTOR_SIZE, 8 * SECTOR_SIZE, NULL, NULL);
2201 if (r < 0) {
2202 log_err(cd, _("Cannot wipe header on device %s."),
2203 mdata_device_path(cd));
2204 return r;
2205 }
2206
2207 if (!(cd->type = strdup(CRYPT_INTEGRITY)))
2208 return -ENOMEM;
2209
2210 if (params->journal_crypt_key) {
2211 journal_crypt_key = crypt_alloc_volume_key(params->journal_crypt_key_size,
2212 params->journal_crypt_key);
2213 if (!journal_crypt_key)
2214 return -ENOMEM;
2215 }
2216
2217 if (params->journal_integrity_key) {
2218 journal_mac_key = crypt_alloc_volume_key(params->journal_integrity_key_size,
2219 params->journal_integrity_key);
2220 if (!journal_mac_key) {
2221 r = -ENOMEM;
2222 goto out;
2223 }
2224 }
2225
2226 if (params->integrity && !(integrity = strdup(params->integrity))) {
2227 r = -ENOMEM;
2228 goto out;
2229 }
2230 if (params->journal_integrity && !(journal_integrity = strdup(params->journal_integrity))) {
2231 r = -ENOMEM;
2232 goto out;
2233 }
2234 if (params->journal_crypt && !(journal_crypt = strdup(params->journal_crypt))) {
2235 r = -ENOMEM;
2236 goto out;
2237 }
2238
2239 integrity_tag_size = INTEGRITY_hash_tag_size(integrity);
2240 if (integrity_tag_size > 0 && params->tag_size && integrity_tag_size != params->tag_size)
2241 log_std(cd, _("WARNING: Requested tag size %d bytes differs from %s size output (%d bytes).\n"),
2242 params->tag_size, integrity, integrity_tag_size);
2243
2244 if (params->tag_size)
2245 integrity_tag_size = params->tag_size;
2246
2247 cd->u.integrity.journal_crypt_key = journal_crypt_key;
2248 cd->u.integrity.journal_mac_key = journal_mac_key;
2249 cd->u.integrity.params.journal_size = params->journal_size;
2250 cd->u.integrity.params.journal_watermark = params->journal_watermark;
2251 cd->u.integrity.params.journal_commit_time = params->journal_commit_time;
2252 cd->u.integrity.params.interleave_sectors = params->interleave_sectors;
2253 cd->u.integrity.params.buffer_sectors = params->buffer_sectors;
2254 cd->u.integrity.params.sector_size = params->sector_size;
2255 cd->u.integrity.params.tag_size = integrity_tag_size;
2256 cd->u.integrity.params.integrity = integrity;
2257 cd->u.integrity.params.journal_integrity = journal_integrity;
2258 cd->u.integrity.params.journal_crypt = journal_crypt;
2259
2260 r = INTEGRITY_format(cd, params, cd->u.integrity.journal_crypt_key, cd->u.integrity.journal_mac_key);
2261 if (r)
2262 log_err(cd, _("Cannot format integrity for device %s."),
2263 mdata_device_path(cd));
2264 out:
2265 if (r) {
2266 crypt_free_volume_key(journal_crypt_key);
2267 crypt_free_volume_key(journal_mac_key);
2268 free(integrity);
2269 free(journal_integrity);
2270 free(journal_crypt);
2271 }
2272
2273 return r;
2274 }
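/*
 * Illustrative caller-side sketch (not part of setup.c): standalone
 * dm-integrity formatting goes through crypt_format() with CRYPT_INTEGRITY
 * and ends up in _crypt_format_integrity() above. The device path and
 * parameter values are placeholders.
 *
 *   #include <libcryptsetup.h>
 *
 *   static int example_format_integrity(const char *dev_path)
 *   {
 *       struct crypt_device *cd = NULL;
 *       struct crypt_params_integrity params = {
 *           .integrity = "crc32c",      // standalone checksum, no journal keys
 *           .tag_size = 4,              // matches crc32c output size
 *           .sector_size = 512,
 *       };
 *       int r = crypt_init(&cd, dev_path);
 *       if (r < 0)
 *           return r;
 *       r = crypt_format(cd, CRYPT_INTEGRITY, NULL, NULL, NULL, NULL, 0, &params);
 *       crypt_free(cd);
 *       return r;
 *   }
 */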
2275
2276 static int _crypt_format(struct crypt_device *cd,
2277 const char *type,
2278 const char *cipher,
2279 const char *cipher_mode,
2280 const char *uuid,
2281 const char *volume_key,
2282 size_t volume_key_size,
2283 void *params,
2284 bool sector_size_autodetect)
2285 {
2286 int r;
2287
2288 if (!cd || !type)
2289 return -EINVAL;
2290
2291 if (cd->type) {
2292 log_dbg(cd, "Context already formatted as %s.", cd->type);
2293 return -EINVAL;
2294 }
2295
2296 log_dbg(cd, "Formatting device %s as type %s.", mdata_device_path(cd) ?: "(none)", type);
2297
2298 crypt_reset_null_type(cd);
2299
2300 r = init_crypto(cd);
2301 if (r < 0)
2302 return r;
2303
2304 if (isPLAIN(type))
2305 r = _crypt_format_plain(cd, cipher, cipher_mode,
2306 uuid, volume_key_size, params);
2307 else if (isLUKS1(type))
2308 r = _crypt_format_luks1(cd, cipher, cipher_mode,
2309 uuid, volume_key, volume_key_size, params);
2310 else if (isLUKS2(type))
2311 r = _crypt_format_luks2(cd, cipher, cipher_mode,
2312 uuid, volume_key, volume_key_size, params, sector_size_autodetect);
2313 else if (isLOOPAES(type))
2314 r = _crypt_format_loopaes(cd, cipher, uuid, volume_key_size, params);
2315 else if (isVERITY(type))
2316 r = _crypt_format_verity(cd, uuid, params);
2317 else if (isINTEGRITY(type))
2318 r = _crypt_format_integrity(cd, uuid, params);
2319 else {
2320 log_err(cd, _("Unknown crypt device type %s requested."), type);
2321 r = -EINVAL;
2322 }
2323
2324 if (r < 0) {
2325 crypt_set_null_type(cd);
2326 crypt_free_volume_key(cd->volume_key);
2327 cd->volume_key = NULL;
2328 }
2329
2330 return r;
2331 }
2332
2333 CRYPT_SYMBOL_EXPORT_NEW(int, crypt_format, 2, 4,
2334 /* crypt_format parameters follow */
2335 struct crypt_device *cd,
2336 const char *type,
2337 const char *cipher,
2338 const char *cipher_mode,
2339 const char *uuid,
2340 const char *volume_key,
2341 size_t volume_key_size,
2342 void *params)
2343 {
2344 return _crypt_format(cd, type, cipher, cipher_mode, uuid, volume_key, volume_key_size, params, true);
2345 }
2346
2347
2348 CRYPT_SYMBOL_EXPORT_OLD(int, crypt_format, 2, 0,
2349 /* crypt_format parameters follow */
2350 struct crypt_device *cd,
2351 const char *type,
2352 const char *cipher,
2353 const char *cipher_mode,
2354 const char *uuid,
2355 const char *volume_key,
2356 size_t volume_key_size,
2357 void *params)
2358 {
2359 return _crypt_format(cd, type, cipher, cipher_mode, uuid, volume_key, volume_key_size, params, false);
2360 }
2361
2362 int crypt_repair(struct crypt_device *cd,
2363 const char *requested_type,
2364 void *params __attribute__((unused)))
2365 {
2366 int r;
2367
2368 if (!cd)
2369 return -EINVAL;
2370
2371 log_dbg(cd, "Trying to repair %s crypt type from device %s.",
2372 requested_type ?: "any", mdata_device_path(cd) ?: "(none)");
2373
2374 if (!crypt_metadata_device(cd))
2375 return -EINVAL;
2376
2377 if (requested_type && !isLUKS(requested_type))
2378 return -EINVAL;
2379
2380 /* Load with repair */
2381 r = _crypt_load_luks(cd, requested_type, 1, 1);
2382 if (r < 0)
2383 return r;
2384
2385 /* cd->type and header must be set in context */
2386 r = crypt_check_data_device_size(cd);
2387 if (r < 0)
2388 crypt_set_null_type(cd);
2389
2390 return r;
2391 }
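/*
 * Illustrative caller-side sketch (not part of setup.c): repairing on-disk
 * metadata with crypt_repair(). The device path is a placeholder; passing
 * NULL as the requested type lets the library detect LUKS1 vs LUKS2.
 *
 *   #include <libcryptsetup.h>
 *
 *   static int example_repair(const char *dev_path)
 *   {
 *       struct crypt_device *cd = NULL;
 *       int r = crypt_init(&cd, dev_path);
 *       if (r < 0)
 *           return r;
 *       r = crypt_repair(cd, NULL, NULL);    // any LUKS type, params unused
 *       crypt_free(cd);
 *       return r;
 *   }
 */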
2392
2393 /* compare volume keys */
2394 static int _compare_volume_keys(struct volume_key *svk, unsigned skeyring_only, struct volume_key *tvk, unsigned tkeyring_only)
2395 {
2396 if (!svk && !tvk)
2397 return 0;
2398 else if (!svk || !tvk)
2399 return 1;
2400
2401 if (svk->keylength != tvk->keylength)
2402 return 1;
2403
2404 if (!skeyring_only && !tkeyring_only)
2405 return memcmp(svk->key, tvk->key, svk->keylength);
2406
2407 if (svk->key_description && tvk->key_description)
2408 return strcmp(svk->key_description, tvk->key_description);
2409
2410 return 0;
2411 }
2412
2413 static int _compare_device_types(struct crypt_device *cd,
2414 const struct crypt_dm_active_device *src,
2415 const struct crypt_dm_active_device *tgt)
2416 {
2417 if (!tgt->uuid) {
2418 log_dbg(cd, "Missing device uuid in target device.");
2419 return -EINVAL;
2420 }
2421
2422 if (isLUKS2(cd->type) && !strncmp("INTEGRITY-", tgt->uuid, strlen("INTEGRITY-"))) {
2423 if (crypt_uuid_cmp(tgt->uuid, src->uuid)) {
2424 log_dbg(cd, "LUKS UUID mismatch.");
2425 return -EINVAL;
2426 }
2427 } else if (isLUKS(cd->type)) {
2428 if (!src->uuid || strncmp(cd->type, tgt->uuid, strlen(cd->type)) ||
2429 crypt_uuid_cmp(tgt->uuid, src->uuid)) {
2430 log_dbg(cd, "LUKS UUID mismatch.");
2431 return -EINVAL;
2432 }
2433 } else if (isPLAIN(cd->type) || isLOOPAES(cd->type)) {
2434 if (strncmp(cd->type, tgt->uuid, strlen(cd->type))) {
2435 log_dbg(cd, "Unexpected uuid prefix %s in target device.", tgt->uuid);
2436 return -EINVAL;
2437 }
2438 } else {
2439 log_dbg(cd, "Unsupported device type %s for reload.", cd->type ?: "<empty>");
2440 return -ENOTSUP;
2441 }
2442
2443 return 0;
2444 }
2445
2446 static int _compare_crypt_devices(struct crypt_device *cd,
2447 const struct dm_target *src,
2448 const struct dm_target *tgt)
2449 {
2450 /* for crypt devices keys are mandatory */
2451 if (!src->u.crypt.vk || !tgt->u.crypt.vk)
2452 return -EINVAL;
2453
2454 /* CIPHER checks */
2455 if (!src->u.crypt.cipher || !tgt->u.crypt.cipher)
2456 return -EINVAL;
2457 if (strcmp(src->u.crypt.cipher, tgt->u.crypt.cipher)) {
2458 log_dbg(cd, "Cipher specs do not match.");
2459 return -EINVAL;
2460 }
2461
2462 if (tgt->u.crypt.vk->keylength == 0 && crypt_is_cipher_null(tgt->u.crypt.cipher))
2463 log_dbg(cd, "Existing device uses cipher null. Skipping key comparison.");
2464 else if (_compare_volume_keys(src->u.crypt.vk, 0, tgt->u.crypt.vk, tgt->u.crypt.vk->key_description != NULL)) {
2465 log_dbg(cd, "Keys in context and target device do not match.");
2466 return -EINVAL;
2467 }
2468
2469 if (crypt_strcmp(src->u.crypt.integrity, tgt->u.crypt.integrity)) {
2470 log_dbg(cd, "Integrity parameters do not match.");
2471 return -EINVAL;
2472 }
2473
2474 if (src->u.crypt.offset != tgt->u.crypt.offset ||
2475 src->u.crypt.sector_size != tgt->u.crypt.sector_size ||
2476 src->u.crypt.iv_offset != tgt->u.crypt.iv_offset ||
2477 src->u.crypt.tag_size != tgt->u.crypt.tag_size) {
2478 log_dbg(cd, "Integer parameters do not match.");
2479 return -EINVAL;
2480 }
2481
2482 if (device_is_identical(src->data_device, tgt->data_device) <= 0) {
2483 log_dbg(cd, "Data devices do not match.");
2484 return -EINVAL;
2485 }
2486
2487 return 0;
2488 }
2489
2490 static int _compare_integrity_devices(struct crypt_device *cd,
2491 const struct dm_target *src,
2492 const struct dm_target *tgt)
2493 {
2494 /*
2495 * some parameters may be implicit (and set in dm-integrity ctor)
2496 *
2497 * journal_size
2498 * journal_watermark
2499 * journal_commit_time
2500 * buffer_sectors
2501 * interleave_sectors
2502 */
2503
2504 /* check remaining integer values that make sense */
2505 if (src->u.integrity.tag_size != tgt->u.integrity.tag_size ||
2506 src->u.integrity.offset != tgt->u.integrity.offset ||
2507 src->u.integrity.sector_size != tgt->u.integrity.sector_size) {
2508 log_dbg(cd, "Integer parameters do not match.");
2509 return -EINVAL;
2510 }
2511
2512 if (crypt_strcmp(src->u.integrity.integrity, tgt->u.integrity.integrity) ||
2513 crypt_strcmp(src->u.integrity.journal_integrity, tgt->u.integrity.journal_integrity) ||
2514 crypt_strcmp(src->u.integrity.journal_crypt, tgt->u.integrity.journal_crypt)) {
2515 log_dbg(cd, "Journal parameters do not match.");
2516 return -EINVAL;
2517 }
2518
2519 /* unfortunately dm-integrity doesn't support keyring */
2520 if (_compare_volume_keys(src->u.integrity.vk, 0, tgt->u.integrity.vk, 0) ||
2521 _compare_volume_keys(src->u.integrity.journal_integrity_key, 0, tgt->u.integrity.journal_integrity_key, 0) ||
2522 _compare_volume_keys(src->u.integrity.journal_crypt_key, 0, tgt->u.integrity.journal_crypt_key, 0)) {
2523 log_dbg(cd, "Journal keys do not match.");
2524 return -EINVAL;
2525 }
2526
2527 /* unsupported underneath dm-crypt with auth. encryption */
2528 if (src->u.integrity.meta_device || tgt->u.integrity.meta_device)
2529 return -ENOTSUP;
2530
2531 if (src->size != tgt->size) {
2532 log_dbg(cd, "Device size parameters do not match.");
2533 return -EINVAL;
2534 }
2535
2536 if (device_is_identical(src->data_device, tgt->data_device) <= 0) {
2537 log_dbg(cd, "Data devices do not match.");
2538 return -EINVAL;
2539 }
2540
2541 return 0;
2542 }
2543
2544 int crypt_compare_dm_devices(struct crypt_device *cd,
2545 const struct crypt_dm_active_device *src,
2546 const struct crypt_dm_active_device *tgt)
2547 {
2548 int r;
2549 const struct dm_target *s, *t;
2550
2551 if (!src || !tgt)
2552 return -EINVAL;
2553
2554 r = _compare_device_types(cd, src, tgt);
2555 if (r)
2556 return r;
2557
2558 s = &src->segment;
2559 t = &tgt->segment;
2560
2561 while (s || t) {
2562 if (!s || !t) {
2563 log_dbg(cd, "segments count mismatch.");
2564 return -EINVAL;
2565 }
2566 if (s->type != t->type) {
2567 log_dbg(cd, "segment type mismatch.");
2568 r = -EINVAL;
2569 break;
2570 }
2571
2572 switch (s->type) {
2573 case DM_CRYPT:
2574 r = _compare_crypt_devices(cd, s, t);
2575 break;
2576 case DM_INTEGRITY:
2577 r = _compare_integrity_devices(cd, s, t);
2578 break;
2579 case DM_LINEAR:
2580 r = (s->u.linear.offset == t->u.linear.offset) ? 0 : -EINVAL;
2581 break;
2582 default:
2583 r = -ENOTSUP;
2584 }
2585
2586 if (r)
2587 break;
2588
2589 s = s->next;
2590 t = t->next;
2591 }
2592
2593 return r;
2594 }
2595
2596 static int _reload_device(struct crypt_device *cd, const char *name,
2597 struct crypt_dm_active_device *sdmd)
2598 {
2599 int r;
2600 struct crypt_dm_active_device tdmd;
2601 struct dm_target *src, *tgt = &tdmd.segment;
2602
2603 if (!cd || !cd->type || !name || !(sdmd->flags & CRYPT_ACTIVATE_REFRESH))
2604 return -EINVAL;
2605
2606 r = dm_query_device(cd, name, DM_ACTIVE_DEVICE | DM_ACTIVE_CRYPT_CIPHER |
2607 DM_ACTIVE_UUID | DM_ACTIVE_CRYPT_KEYSIZE |
2608 DM_ACTIVE_CRYPT_KEY, &tdmd);
2609 if (r < 0) {
2610 log_err(cd, _("Device %s is not active."), name);
2611 return -EINVAL;
2612 }
2613
2614 if (!single_segment(&tdmd) || tgt->type != DM_CRYPT || tgt->u.crypt.tag_size) {
2615 r = -ENOTSUP;
2616 log_err(cd, _("Unsupported parameters on device %s."), name);
2617 goto out;
2618 }
2619
2620 r = crypt_compare_dm_devices(cd, sdmd, &tdmd);
2621 if (r) {
2622 log_err(cd, _("Mismatching parameters on device %s."), name);
2623 goto out;
2624 }
2625
2626 src = &sdmd->segment;
2627
2628 /* Changing the read-only flag for an active device makes no sense */
2629 if (tdmd.flags & CRYPT_ACTIVATE_READONLY)
2630 sdmd->flags |= CRYPT_ACTIVATE_READONLY;
2631 else
2632 sdmd->flags &= ~CRYPT_ACTIVATE_READONLY;
2633
2634 if (sdmd->flags & CRYPT_ACTIVATE_KEYRING_KEY) {
2635 r = crypt_volume_key_set_description(tgt->u.crypt.vk, src->u.crypt.vk->key_description);
2636 if (r)
2637 goto out;
2638 } else {
2639 crypt_free_volume_key(tgt->u.crypt.vk);
2640 tgt->u.crypt.vk = crypt_alloc_volume_key(src->u.crypt.vk->keylength, src->u.crypt.vk->key);
2641 if (!tgt->u.crypt.vk) {
2642 r = -ENOMEM;
2643 goto out;
2644 }
2645 }
2646
2647 r = device_block_adjust(cd, src->data_device, DEV_OK,
2648 src->u.crypt.offset, &sdmd->size, NULL);
2649 if (r)
2650 goto out;
2651
2652 tdmd.flags = sdmd->flags;
2653 tgt->size = tdmd.size = sdmd->size;
2654
2655 r = dm_reload_device(cd, name, &tdmd, 0, 1);
2656 out:
2657 dm_targets_free(cd, &tdmd);
2658 free(CONST_CAST(void*)tdmd.uuid);
2659
2660 return r;
2661 }
2662
2663 static int _reload_device_with_integrity(struct crypt_device *cd,
2664 const char *name,
2665 const char *iname,
2666 const char *ipath,
2667 struct crypt_dm_active_device *sdmd,
2668 struct crypt_dm_active_device *sdmdi)
2669 {
2670 int r;
2671 struct crypt_dm_active_device tdmd, tdmdi = {};
2672 struct dm_target *src, *srci, *tgt = &tdmd.segment, *tgti = &tdmdi.segment;
2673 struct device *data_device = NULL;
2674 bool clear = false;
2675
2676 if (!cd || !cd->type || !name || !iname || !(sdmd->flags & CRYPT_ACTIVATE_REFRESH))
2677 return -EINVAL;
2678
2679 r = dm_query_device(cd, name, DM_ACTIVE_DEVICE | DM_ACTIVE_CRYPT_CIPHER |
2680 DM_ACTIVE_UUID | DM_ACTIVE_CRYPT_KEYSIZE |
2681 DM_ACTIVE_CRYPT_KEY, &tdmd);
2682 if (r < 0) {
2683 log_err(cd, _("Device %s is not active."), name);
2684 return -EINVAL;
2685 }
2686
2687 if (!single_segment(&tdmd) || tgt->type != DM_CRYPT || !tgt->u.crypt.tag_size) {
2688 log_err(cd, _("Unsupported parameters on device %s."), name);
2689 r = -ENOTSUP;
2690 goto out;
2691 }
2692
2693 r = dm_query_device(cd, iname, DM_ACTIVE_DEVICE | DM_ACTIVE_UUID, &tdmdi);
2694 if (r < 0) {
2695 log_err(cd, _("Device %s is not active."), iname);
2696 r = -EINVAL;
2697 goto out;
2698 }
2699
2700 if (!single_segment(&tdmdi) || tgti->type != DM_INTEGRITY) {
2701 log_err(cd, _("Unsupported parameters on device %s."), iname);
2702 r = -ENOTSUP;
2703 goto out;
2704 }
2705
2706 r = crypt_compare_dm_devices(cd, sdmdi, &tdmdi);
2707 if (r) {
2708 log_err(cd, _("Mismatching parameters on device %s."), iname);
2709 goto out;
2710 }
2711
2712 src = &sdmd->segment;
2713 srci = &sdmdi->segment;
2714
2715 r = device_alloc(cd, &data_device, ipath);
2716 if (r < 0)
2717 goto out;
2718
2719 r = device_block_adjust(cd, srci->data_device, DEV_OK,
2720 srci->u.integrity.offset, &sdmdi->size, NULL);
2721 if (r)
2722 goto out;
2723
2724 src->data_device = data_device;
2725
2726 r = crypt_compare_dm_devices(cd, sdmd, &tdmd);
2727 if (r) {
2728 log_err(cd, _("Crypt devices mismatch."));
2729 goto out;
2730 }
2731
2732 /* Changing the read-only flag for an active device makes no sense */
2733 if (tdmd.flags & CRYPT_ACTIVATE_READONLY)
2734 sdmd->flags |= CRYPT_ACTIVATE_READONLY;
2735 else
2736 sdmd->flags &= ~CRYPT_ACTIVATE_READONLY;
2737
2738 if (tdmdi.flags & CRYPT_ACTIVATE_READONLY)
2739 sdmdi->flags |= CRYPT_ACTIVATE_READONLY;
2740 else
2741 sdmdi->flags &= ~CRYPT_ACTIVATE_READONLY;
2742
2743 if (sdmd->flags & CRYPT_ACTIVATE_KEYRING_KEY) {
2744 r = crypt_volume_key_set_description(tgt->u.crypt.vk, src->u.crypt.vk->key_description);
2745 if (r)
2746 goto out;
2747 } else {
2748 crypt_free_volume_key(tgt->u.crypt.vk);
2749 tgt->u.crypt.vk = crypt_alloc_volume_key(src->u.crypt.vk->keylength, src->u.crypt.vk->key);
2750 if (!tgt->u.crypt.vk) {
2751 r = -ENOMEM;
2752 goto out;
2753 }
2754 }
2755
2756 r = device_block_adjust(cd, src->data_device, DEV_OK,
2757 src->u.crypt.offset, &sdmd->size, NULL);
2758 if (r)
2759 goto out;
2760
2761 tdmd.flags = sdmd->flags;
2762 tdmd.size = sdmd->size;
2763
2764 if ((r = dm_reload_device(cd, iname, sdmdi, 0, 0))) {
2765 log_err(cd, _("Failed to reload device %s."), iname);
2766 goto out;
2767 }
2768
2769 if ((r = dm_reload_device(cd, name, &tdmd, 0, 0))) {
2770 log_err(cd, _("Failed to reload device %s."), name);
2771 clear = true;
2772 goto out;
2773 }
2774
2775 if ((r = dm_suspend_device(cd, name, 0))) {
2776 log_err(cd, _("Failed to suspend device %s."), name);
2777 clear = true;
2778 goto out;
2779 }
2780
2781 if ((r = dm_suspend_device(cd, iname, 0))) {
2782 log_err(cd, _("Failed to suspend device %s."), iname);
2783 clear = true;
2784 goto out;
2785 }
2786
2787 if ((r = dm_resume_device(cd, iname, act2dmflags(sdmdi->flags)))) {
2788 log_err(cd, _("Failed to resume device %s."), iname);
2789 clear = true;
2790 goto out;
2791 }
2792
2793 r = dm_resume_device(cd, name, act2dmflags(tdmd.flags));
2794 if (!r)
2795 goto out;
2796
2797 /*
2798 * This is the worst case scenario. We have an active underlying dm-integrity device with
2799 * new table but dm-crypt resume failed for some reason. Tear everything down and
2800 * burn it for good.
2801 */
2802
2803 log_err(cd, _("Fatal error while reloading device %s (on top of device %s)."), name, iname);
2804
2805 if (dm_error_device(cd, name))
2806 log_err(cd, _("Failed to switch device %s to dm-error."), name);
2807 if (dm_error_device(cd, iname))
2808 log_err(cd, _("Failed to switch device %s to dm-error."), iname);
2809 out:
2810 if (clear) {
2811 dm_clear_device(cd, name);
2812 dm_clear_device(cd, iname);
2813
2814 if (dm_status_suspended(cd, name) > 0)
2815 dm_resume_device(cd, name, 0);
2816 if (dm_status_suspended(cd, iname) > 0)
2817 dm_resume_device(cd, iname, 0);
2818 }
2819
2820 dm_targets_free(cd, &tdmd);
2821 dm_targets_free(cd, &tdmdi);
2822 free(CONST_CAST(void*)tdmdi.uuid);
2823 free(CONST_CAST(void*)tdmd.uuid);
2824 device_free(cd, data_device);
2825
2826 return r;
2827 }
2828
2829 int crypt_resize(struct crypt_device *cd, const char *name, uint64_t new_size)
2830 {
2831 struct crypt_dm_active_device dmdq, dmd = {};
2832 struct dm_target *tgt = &dmdq.segment;
2833 int r;
2834
2835 /*
2836 * FIXME: Also with LUKS2 we must not allow resize when there's
2837 * explicit size stored in metadata (length != "dynamic")
2838 */
2839
2840 /* Device context type must be initialized */
2841 if (!cd || !cd->type || !name)
2842 return -EINVAL;
2843
2844 if (isTCRYPT(cd->type) || isBITLK(cd->type)) {
2845 log_err(cd, _("This operation is not supported for this device type."));
2846 return -ENOTSUP;
2847 }
2848
2849 log_dbg(cd, "Resizing device %s to %" PRIu64 " sectors.", name, new_size);
2850
2851 r = dm_query_device(cd, name, DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY, &dmdq);
2852 if (r < 0) {
2853 log_err(cd, _("Device %s is not active."), name);
2854 return -EINVAL;
2855 }
2856 if (!single_segment(&dmdq) || tgt->type != DM_CRYPT) {
2857 log_dbg(cd, "Unsupported device table detected in %s.", name);
2858 r = -EINVAL;
2859 goto out;
2860 }
2861
2862 if ((dmdq.flags & CRYPT_ACTIVATE_KEYRING_KEY) && !crypt_key_in_keyring(cd)) {
2863 r = -EPERM;
2864 goto out;
2865 }
2866
2867 if (crypt_key_in_keyring(cd)) {
2868 if (!isLUKS2(cd->type)) {
2869 r = -EINVAL;
2870 goto out;
2871 }
2872 r = LUKS2_key_description_by_segment(cd, &cd->u.luks2.hdr,
2873 tgt->u.crypt.vk, CRYPT_DEFAULT_SEGMENT);
2874 if (r)
2875 goto out;
2876
2877 dmdq.flags |= CRYPT_ACTIVATE_KEYRING_KEY;
2878 }
2879
2880 if (crypt_loop_device(crypt_get_device_name(cd))) {
2881 log_dbg(cd, "Trying to resize underlying loop device %s.",
2882 crypt_get_device_name(cd));
2883 /* Here we always use the default size, not new_size */
2884 if (crypt_loop_resize(crypt_get_device_name(cd)))
2885 log_err(cd, _("Cannot resize loop device."));
2886 }
2887
2888 r = device_block_adjust(cd, crypt_data_device(cd), DEV_OK,
2889 crypt_get_data_offset(cd), &new_size, &dmdq.flags);
2890 if (r)
2891 goto out;
2892
2893 if (MISALIGNED(new_size, tgt->u.crypt.sector_size >> SECTOR_SHIFT)) {
2894 log_err(cd, _("Device size is not aligned to requested sector size."));
2895 r = -EINVAL;
2896 goto out;
2897 }
2898
2899 if (MISALIGNED(new_size, device_block_size(cd, crypt_data_device(cd)) >> SECTOR_SHIFT)) {
2900 log_err(cd, _("Device size is not aligned to device logical block size."));
2901 r = -EINVAL;
2902 goto out;
2903 }
2904
2905 dmd.uuid = crypt_get_uuid(cd);
2906 dmd.size = new_size;
2907 dmd.flags = dmdq.flags | CRYPT_ACTIVATE_REFRESH;
2908 r = dm_crypt_target_set(&dmd.segment, 0, new_size, crypt_data_device(cd),
2909 tgt->u.crypt.vk, crypt_get_cipher_spec(cd),
2910 crypt_get_iv_offset(cd), crypt_get_data_offset(cd),
2911 crypt_get_integrity(cd), crypt_get_integrity_tag_size(cd),
2912 crypt_get_sector_size(cd));
2913 if (r < 0)
2914 goto out;
2915
2916 if (new_size == dmdq.size) {
2917 log_dbg(cd, "Device has already requested size %" PRIu64
2918 " sectors.", dmdq.size);
2919 r = 0;
2920 } else {
2921 if (isTCRYPT(cd->type))
2922 r = -ENOTSUP;
2923 else if (isLUKS2(cd->type))
2924 r = LUKS2_unmet_requirements(cd, &cd->u.luks2.hdr, 0, 0);
2925 if (!r)
2926 r = _reload_device(cd, name, &dmd);
2927 }
2928 out:
2929 dm_targets_free(cd, &dmd);
2930 dm_targets_free(cd, &dmdq);
2931
2932 return r;
2933 }
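/*
 * Illustrative caller-side sketch (not part of setup.c): growing an active
 * mapping after the underlying device was enlarged. The mapping name is a
 * placeholder. Note the -EPERM path above: when the active device uses a
 * kernel keyring key that is not available in this context, the volume key
 * has to be provided again (e.g. by unlocking the device) before resizing.
 *
 *   #include <libcryptsetup.h>
 *
 *   static int example_resize(const char *name)
 *   {
 *       struct crypt_device *cd = NULL;
 *       int r = crypt_init_by_name(&cd, name);
 *       if (r < 0)
 *           return r;
 *       r = crypt_resize(cd, name, 0);   // new_size 0: use all available space
 *       crypt_free(cd);
 *       return r;
 *   }
 */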
2934
2935 int crypt_set_uuid(struct crypt_device *cd, const char *uuid)
2936 {
2937 const char *active_uuid;
2938 int r;
2939
2940 log_dbg(cd, "%s device uuid.", uuid ? "Setting new" : "Refreshing");
2941
2942 if ((r = onlyLUKS(cd)))
2943 return r;
2944
2945 active_uuid = crypt_get_uuid(cd);
2946
2947 if (uuid && active_uuid && !strncmp(uuid, active_uuid, UUID_STRING_L)) {
2948 log_dbg(cd, "UUID is the same as requested (%s) for device %s.",
2949 uuid, mdata_device_path(cd));
2950 return 0;
2951 }
2952
2953 if (uuid)
2954 log_dbg(cd, "Requested new UUID change to %s for %s.", uuid, mdata_device_path(cd));
2955 else
2956 log_dbg(cd, "Requested new UUID refresh for %s.", mdata_device_path(cd));
2957
2958 if (!crypt_confirm(cd, _("Do you really want to change UUID of device?")))
2959 return -EPERM;
2960
2961 if (isLUKS1(cd->type))
2962 return LUKS_hdr_uuid_set(&cd->u.luks1.hdr, uuid, cd);
2963 else
2964 return LUKS2_hdr_uuid(cd, &cd->u.luks2.hdr, uuid);
2965 }
2966
2967 int crypt_set_label(struct crypt_device *cd, const char *label, const char *subsystem)
2968 {
2969 int r;
2970
2971 log_dbg(cd, "Setting new labels.");
2972
2973 if ((r = onlyLUKS2(cd)))
2974 return r;
2975
2976 return LUKS2_hdr_labels(cd, &cd->u.luks2.hdr, label, subsystem, 1);
2977 }
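/*
 * Illustrative caller-side sketch (not part of setup.c): changing the LUKS2
 * label and regenerating the UUID. The device path and label text are
 * placeholders.
 *
 *   #include <libcryptsetup.h>
 *
 *   static int example_relabel(const char *dev_path)
 *   {
 *       struct crypt_device *cd = NULL;
 *       int r = crypt_init(&cd, dev_path);
 *       if (r < 0)
 *           return r;
 *       r = crypt_load(cd, CRYPT_LUKS2, NULL);
 *       if (!r)
 *           r = crypt_set_label(cd, "backup-disk", NULL);
 *       if (!r)
 *           r = crypt_set_uuid(cd, NULL);    // NULL: refresh with a fresh UUID
 *       crypt_free(cd);
 *       return r;
 *   }
 */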
2978
2979 int crypt_header_backup(struct crypt_device *cd,
2980 const char *requested_type,
2981 const char *backup_file)
2982 {
2983 int r;
2984
2985 if (requested_type && !isLUKS(requested_type))
2986 return -EINVAL;
2987
2988 if (!backup_file)
2989 return -EINVAL;
2990
2991 /* Load the header, do not attempt repair */
2992 r = _crypt_load_luks(cd, requested_type, 1, 0);
2993 if (r < 0)
2994 return r;
2995
2996 log_dbg(cd, "Requested header backup of device %s (%s) to "
2997 "file %s.", mdata_device_path(cd), requested_type ?: "any type", backup_file);
2998
2999 if (isLUKS1(cd->type) && (!requested_type || isLUKS1(requested_type)))
3000 r = LUKS_hdr_backup(backup_file, cd);
3001 else if (isLUKS2(cd->type) && (!requested_type || isLUKS2(requested_type)))
3002 r = LUKS2_hdr_backup(cd, &cd->u.luks2.hdr, backup_file);
3003 else
3004 r = -EINVAL;
3005
3006 return r;
3007 }
3008
3009 int crypt_header_restore(struct crypt_device *cd,
3010 const char *requested_type,
3011 const char *backup_file)
3012 {
3013 struct luks_phdr hdr1;
3014 struct luks2_hdr hdr2;
3015 int r, version;
3016
3017 if (requested_type && !isLUKS(requested_type))
3018 return -EINVAL;
3019
3020 if (!cd || (cd->type && !isLUKS(cd->type)) || !backup_file)
3021 return -EINVAL;
3022
3023 r = init_crypto(cd);
3024 if (r < 0)
3025 return r;
3026
3027 log_dbg(cd, "Requested header restore to device %s (%s) from "
3028 "file %s.", mdata_device_path(cd), requested_type ?: "any type", backup_file);
3029
3030 version = LUKS2_hdr_version_unlocked(cd, backup_file);
3031 if (!version ||
3032 (requested_type && version == 1 && !isLUKS1(requested_type)) ||
3033 (requested_type && version == 2 && !isLUKS2(requested_type))) {
3034 log_err(cd, _("Header backup file does not contain compatible LUKS header."));
3035 return -EINVAL;
3036 }
3037
3038 memset(&hdr2, 0, sizeof(hdr2));
3039
3040 if (!cd->type) {
3041 if (version == 1)
3042 r = LUKS_hdr_restore(backup_file, &hdr1, cd);
3043 else
3044 r = LUKS2_hdr_restore(cd, &hdr2, backup_file);
3045
3046 crypt_safe_memzero(&hdr1, sizeof(hdr1));
3047 crypt_safe_memzero(&hdr2, sizeof(hdr2));
3048 } else if (isLUKS2(cd->type) && (!requested_type || isLUKS2(requested_type))) {
3049 r = LUKS2_hdr_restore(cd, &cd->u.luks2.hdr, backup_file);
3050 if (r)
3051 _luks2_reload(cd);
3052 } else if (isLUKS1(cd->type) && (!requested_type || isLUKS1(requested_type)))
3053 r = LUKS_hdr_restore(backup_file, &cd->u.luks1.hdr, cd);
3054 else
3055 r = -EINVAL;
3056
3057 if (!r)
3058 r = _crypt_load_luks(cd, version == 1 ? CRYPT_LUKS1 : CRYPT_LUKS2, 1, 1);
3059
3060 return r;
3061 }
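/*
 * Illustrative caller-side sketch (not part of setup.c): header backup and
 * restore. The file and device paths are placeholders; crypt_header_backup()
 * normally refuses to overwrite an existing backup file.
 *
 *   #include <libcryptsetup.h>
 *
 *   static int example_header_backup_restore(const char *dev_path, const char *file)
 *   {
 *       struct crypt_device *cd = NULL;
 *       int r = crypt_init(&cd, dev_path);
 *       if (r < 0)
 *           return r;
 *       r = crypt_header_backup(cd, NULL, file);      // NULL: any LUKS type
 *       if (!r)
 *           r = crypt_header_restore(cd, NULL, file);
 *       crypt_free(cd);
 *       return r;
 *   }
 */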
3062
3063 int crypt_header_is_detached(struct crypt_device *cd)
3064 {
3065 int r;
3066
3067 if (!cd || !isLUKS(cd->type))
3068 return -EINVAL;
3069
3070 r = device_is_identical(crypt_data_device(cd), crypt_metadata_device(cd));
3071 if (r < 0) {
3072 log_dbg(cd, "Failed to compare data and metadata devices path.");
3073 return r;
3074 }
3075
3076 return r ? 0 : 1;
3077 }
3078
3079 void crypt_free(struct crypt_device *cd)
3080 {
3081 if (!cd)
3082 return;
3083
3084 log_dbg(cd, "Releasing crypt device %s context.", mdata_device_path(cd));
3085
3086 dm_backend_exit(cd);
3087 crypt_free_volume_key(cd->volume_key);
3088
3089 crypt_free_type(cd);
3090
3091 device_free(cd, cd->device);
3092 device_free(cd, cd->metadata_device);
3093
3094 free(CONST_CAST(void*)cd->pbkdf.type);
3095 free(CONST_CAST(void*)cd->pbkdf.hash);
3096
3097 /* Some structures can contain keys (TCRYPT), wipe them */
3098 crypt_safe_memzero(cd, sizeof(*cd));
3099 free(cd);
3100 }
3101
3102 static char *crypt_get_device_key_description(struct crypt_device *cd, const char *name)
3103 {
3104 char *desc = NULL;
3105 struct crypt_dm_active_device dmd;
3106 struct dm_target *tgt = &dmd.segment;
3107
3108 if (dm_query_device(cd, name, DM_ACTIVE_CRYPT_KEY | DM_ACTIVE_CRYPT_KEYSIZE, &dmd) < 0)
3109 return NULL;
3110
3111 if (single_segment(&dmd) && tgt->type == DM_CRYPT &&
3112 (dmd.flags & CRYPT_ACTIVATE_KEYRING_KEY) && tgt->u.crypt.vk->key_description)
3113 desc = strdup(tgt->u.crypt.vk->key_description);
3114
3115 dm_targets_free(cd, &dmd);
3116
3117 return desc;
3118 }
3119
3120 int crypt_suspend(struct crypt_device *cd,
3121 const char *name)
3122 {
3123 char *key_desc;
3124 crypt_status_info ci;
3125 int r;
3126 uint32_t dmflags = DM_SUSPEND_WIPE_KEY;
3127
3128 /* FIXME: check context uuid matches the dm-crypt device uuid (onlyLUKS branching) */
3129
3130 if (!cd || !name)
3131 return -EINVAL;
3132
3133 log_dbg(cd, "Suspending volume %s.", name);
3134
3135 if (cd->type)
3136 r = onlyLUKS(cd);
3137 else {
3138 r = crypt_uuid_type_cmp(cd, CRYPT_LUKS1);
3139 if (r < 0)
3140 r = crypt_uuid_type_cmp(cd, CRYPT_LUKS2);
3141 if (r < 0)
3142 log_err(cd, _("This operation is supported only for LUKS device."));
3143 }
3144
3145 if (r < 0)
3146 return r;
3147
3148 ci = crypt_status(NULL, name);
3149 if (ci < CRYPT_ACTIVE) {
3150 log_err(cd, _("Volume %s is not active."), name);
3151 return -EINVAL;
3152 }
3153
3154 dm_backend_init(cd);
3155
3156 r = dm_status_suspended(cd, name);
3157 if (r < 0)
3158 goto out;
3159
3160 if (r) {
3161 log_err(cd, _("Volume %s is already suspended."), name);
3162 r = -EINVAL;
3163 goto out;
3164 }
3165
3166 key_desc = crypt_get_device_key_description(cd, name);
3167
3168 /* we can't simply wipe wrapped keys */
3169 if (crypt_cipher_wrapped_key(crypt_get_cipher(cd), crypt_get_cipher_mode(cd)))
3170 dmflags &= ~DM_SUSPEND_WIPE_KEY;
3171
3172 r = dm_suspend_device(cd, name, dmflags);
3173 if (r == -ENOTSUP)
3174 log_err(cd, _("Suspend is not supported for device %s."), name);
3175 else if (r)
3176 log_err(cd, _("Error during suspending device %s."), name);
3177 else
3178 crypt_drop_keyring_key_by_description(cd, key_desc, LOGON_KEY);
3179 free(key_desc);
3180 out:
3181 dm_backend_exit(cd);
3182 return r;
3183 }
3184
3185 /* key must be properly verified */
3186 static int resume_by_volume_key(struct crypt_device *cd,
3187 struct volume_key *vk,
3188 const char *name)
3189 {
3190 int digest, r;
3191 struct volume_key *zerokey = NULL;
3192
3193 if (crypt_is_cipher_null(crypt_get_cipher_spec(cd))) {
3194 zerokey = crypt_alloc_volume_key(0, NULL);
3195 if (!zerokey)
3196 return -ENOMEM;
3197 vk = zerokey;
3198 } else if (crypt_use_keyring_for_vk(cd)) {
3199 /* LUKS2 path only */
3200 digest = LUKS2_digest_by_segment(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT);
3201 if (digest < 0)
3202 return -EINVAL;
3203 r = LUKS2_volume_key_load_in_keyring_by_digest(cd,
3204 &cd->u.luks2.hdr, vk, digest);
3205 if (r < 0)
3206 return r;
3207 }
3208
3209 r = dm_resume_and_reinstate_key(cd, name, vk);
3210
3211 if (r == -ENOTSUP)
3212 log_err(cd, _("Resume is not supported for device %s."), name);
3213 else if (r)
3214 log_err(cd, _("Error during resuming device %s."), name);
3215
3216 if (r < 0)
3217 crypt_drop_keyring_key(cd, vk);
3218
3219 crypt_free_volume_key(zerokey);
3220
3221 return r;
3222 }
3223
3224 int crypt_resume_by_passphrase(struct crypt_device *cd,
3225 const char *name,
3226 int keyslot,
3227 const char *passphrase,
3228 size_t passphrase_size)
3229 {
3230 struct volume_key *vk = NULL;
3231 int r;
3232
3233 /* FIXME: check context uuid matches the dm-crypt device uuid */
3234
3235 if (!passphrase || !name)
3236 return -EINVAL;
3237
3238 log_dbg(cd, "Resuming volume %s.", name);
3239
3240 if ((r = onlyLUKS(cd)))
3241 return r;
3242
3243 r = dm_status_suspended(cd, name);
3244 if (r < 0)
3245 return r;
3246
3247 if (!r) {
3248 log_err(cd, _("Volume %s is not suspended."), name);
3249 return -EINVAL;
3250 }
3251
3252 if (isLUKS1(cd->type))
3253 r = LUKS_open_key_with_hdr(keyslot, passphrase, passphrase_size,
3254 &cd->u.luks1.hdr, &vk, cd);
3255 else
3256 r = LUKS2_keyslot_open(cd, keyslot, CRYPT_DEFAULT_SEGMENT, passphrase, passphrase_size, &vk);
3257
3258 if (r < 0)
3259 return r;
3260
3261 keyslot = r;
3262
3263 r = resume_by_volume_key(cd, vk, name);
3264
3265 crypt_free_volume_key(vk);
3266 return r < 0 ? r : keyslot;
3267 }
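/*
 * Illustrative caller-side sketch (not part of setup.c): suspend wipes the
 * volume key from dm-crypt and freezes I/O; resume reinstates it once the
 * passphrase is verified. The mapping name and passphrase are placeholders.
 *
 *   #include <string.h>
 *   #include <libcryptsetup.h>
 *
 *   static int example_suspend_resume(const char *name, const char *pass)
 *   {
 *       struct crypt_device *cd = NULL;
 *       int r = crypt_init_by_name(&cd, name);
 *       if (r < 0)
 *           return r;
 *       r = crypt_suspend(cd, name);
 *       if (!r)
 *           r = crypt_resume_by_passphrase(cd, name, CRYPT_ANY_SLOT,
 *                                          pass, strlen(pass));
 *       crypt_free(cd);
 *       return r < 0 ? r : 0;    // resume returns the unlocking keyslot number
 *   }
 */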
3268
3269 int crypt_resume_by_keyfile_device_offset(struct crypt_device *cd,
3270 const char *name,
3271 int keyslot,
3272 const char *keyfile,
3273 size_t keyfile_size,
3274 uint64_t keyfile_offset)
3275 {
3276 struct volume_key *vk = NULL;
3277 char *passphrase_read = NULL;
3278 size_t passphrase_size_read;
3279 int r;
3280
3281 /* FIXME: check context uuid matches the dm-crypt device uuid */
3282
3283 if (!name || !keyfile)
3284 return -EINVAL;
3285
3286 log_dbg(cd, "Resuming volume %s.", name);
3287
3288 if ((r = onlyLUKS(cd)))
3289 return r;
3290
3291 r = dm_status_suspended(cd, name);
3292 if (r < 0)
3293 return r;
3294
3295 if (!r) {
3296 log_err(cd, _("Volume %s is not suspended."), name);
3297 return -EINVAL;
3298 }
3299
3300 r = crypt_keyfile_device_read(cd, keyfile,
3301 &passphrase_read, &passphrase_size_read,
3302 keyfile_offset, keyfile_size, 0);
3303 if (r < 0)
3304 return r;
3305
3306 if (isLUKS1(cd->type))
3307 r = LUKS_open_key_with_hdr(keyslot, passphrase_read, passphrase_size_read,
3308 &cd->u.luks1.hdr, &vk, cd);
3309 else
3310 r = LUKS2_keyslot_open(cd, keyslot, CRYPT_DEFAULT_SEGMENT, passphrase_read, passphrase_size_read, &vk);
3311
3312 crypt_safe_free(passphrase_read);
3313 if (r < 0)
3314 return r;
3315
3316 keyslot = r;
3317
3318 r = resume_by_volume_key(cd, vk, name);
3319
3320 crypt_free_volume_key(vk);
3321 return r < 0 ? r : keyslot;
3322 }
3323
3324 int crypt_resume_by_keyfile(struct crypt_device *cd,
3325 const char *name,
3326 int keyslot,
3327 const char *keyfile,
3328 size_t keyfile_size)
3329 {
3330 return crypt_resume_by_keyfile_device_offset(cd, name, keyslot,
3331 keyfile, keyfile_size, 0);
3332 }
3333
3334 int crypt_resume_by_keyfile_offset(struct crypt_device *cd,
3335 const char *name,
3336 int keyslot,
3337 const char *keyfile,
3338 size_t keyfile_size,
3339 size_t keyfile_offset)
3340 {
3341 return crypt_resume_by_keyfile_device_offset(cd, name, keyslot,
3342 keyfile, keyfile_size, keyfile_offset);
3343 }
3344
3345 int crypt_resume_by_volume_key(struct crypt_device *cd,
3346 const char *name,
3347 const char *volume_key,
3348 size_t volume_key_size)
3349 {
3350 struct volume_key *vk = NULL;
3351 int r;
3352
3353 if (!name || !volume_key)
3354 return -EINVAL;
3355
3356 log_dbg(cd, "Resuming volume %s by volume key.", name);
3357
3358 if ((r = onlyLUKS(cd)))
3359 return r;
3360
3361 r = dm_status_suspended(cd, name);
3362 if (r < 0)
3363 return r;
3364
3365 if (!r) {
3366 log_err(cd, _("Volume %s is not suspended."), name);
3367 return -EINVAL;
3368 }
3369
3370 vk = crypt_alloc_volume_key(volume_key_size, volume_key);
3371 if (!vk)
3372 return -ENOMEM;
3373
3374 if (isLUKS1(cd->type))
3375 r = LUKS_verify_volume_key(&cd->u.luks1.hdr, vk);
3376 else if (isLUKS2(cd->type))
3377 r = LUKS2_digest_verify_by_segment(cd, &cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT, vk);
3378 else
3379 r = -EINVAL;
3380 if (r == -EPERM || r == -ENOENT)
3381 log_err(cd, _("Volume key does not match the volume."));
3382
3383 if (r >= 0)
3384 r = resume_by_volume_key(cd, vk, name);
3385
3386 crypt_free_volume_key(vk);
3387 return r;
3388 }
3389
3390 /*
3391 * Keyslot manipulation
3392 */
3393 int crypt_keyslot_add_by_passphrase(struct crypt_device *cd,
3394 int keyslot, // -1 any
3395 const char *passphrase,
3396 size_t passphrase_size,
3397 const char *new_passphrase,
3398 size_t new_passphrase_size)
3399 {
3400 int digest, r, active_slots;
3401 struct luks2_keyslot_params params;
3402 struct volume_key *vk = NULL;
3403
3404 log_dbg(cd, "Adding new keyslot, existing passphrase %sprovided, "
3405 "new passphrase %sprovided.",
3406 passphrase ? "" : "not ", new_passphrase ? "" : "not ");
3407
3408 if ((r = onlyLUKS(cd)))
3409 return r;
3410
3411 if (!passphrase || !new_passphrase)
3412 return -EINVAL;
3413
3414 r = keyslot_verify_or_find_empty(cd, &keyslot);
3415 if (r)
3416 return r;
3417
3418 if (isLUKS1(cd->type))
3419 active_slots = LUKS_keyslot_active_count(&cd->u.luks1.hdr);
3420 else
3421 active_slots = LUKS2_keyslot_active_count(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT);
3422 if (active_slots == 0) {
3423 /* No slots used, try to use pre-generated key in header */
3424 if (cd->volume_key) {
3425 vk = crypt_alloc_volume_key(cd->volume_key->keylength, cd->volume_key->key);
3426 r = vk ? 0 : -ENOMEM;
3427 } else {
3428 log_err(cd, _("Cannot add key slot, all slots disabled and no volume key provided."));
3429 return -EINVAL;
3430 }
3431 } else if (active_slots < 0)
3432 return -EINVAL;
3433 else {
3434 /* Passphrase provided, use it to unlock existing keyslot */
3435 if (isLUKS1(cd->type))
3436 r = LUKS_open_key_with_hdr(CRYPT_ANY_SLOT, passphrase,
3437 passphrase_size, &cd->u.luks1.hdr, &vk, cd);
3438 else
3439 r = LUKS2_keyslot_open(cd, CRYPT_ANY_SLOT, CRYPT_DEFAULT_SEGMENT, passphrase,
3440 passphrase_size, &vk);
3441 }
3442
3443 if (r < 0)
3444 goto out;
3445
3446 if (isLUKS1(cd->type))
3447 r = LUKS_set_key(keyslot, CONST_CAST(char*)new_passphrase,
3448 new_passphrase_size, &cd->u.luks1.hdr, vk, cd);
3449 else {
3450 r = LUKS2_digest_verify_by_segment(cd, &cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT, vk);
3451 digest = r;
3452
3453 if (r >= 0)
3454 r = LUKS2_keyslot_params_default(cd, &cd->u.luks2.hdr, ¶ms);
3455
3456 if (r >= 0)
3457 r = LUKS2_digest_assign(cd, &cd->u.luks2.hdr, keyslot, digest, 1, 0);
3458
3459 if (r >= 0)
3460 r = LUKS2_keyslot_store(cd, &cd->u.luks2.hdr, keyslot,
3461 CONST_CAST(char*)new_passphrase,
3462 new_passphrase_size, vk, ¶ms);
3463 }
3464
3465 if (r < 0)
3466 goto out;
3467
3468 r = 0;
3469 out:
3470 crypt_free_volume_key(vk);
3471 if (r < 0) {
3472 _luks2_reload(cd);
3473 return r;
3474 }
3475 return keyslot;
3476 }
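/*
 * Illustrative caller-side sketch (not part of setup.c): adding a second
 * passphrase to an existing LUKS device. Paths and passphrases are
 * placeholders.
 *
 *   #include <string.h>
 *   #include <libcryptsetup.h>
 *
 *   static int example_add_passphrase(const char *dev_path,
 *                                     const char *old_pass, const char *new_pass)
 *   {
 *       struct crypt_device *cd = NULL;
 *       int r = crypt_init(&cd, dev_path);
 *       if (r < 0)
 *           return r;
 *       r = crypt_load(cd, CRYPT_LUKS, NULL);        // LUKS1 or LUKS2
 *       if (!r)
 *           r = crypt_keyslot_add_by_passphrase(cd, CRYPT_ANY_SLOT,
 *                                               old_pass, strlen(old_pass),
 *                                               new_pass, strlen(new_pass));
 *       crypt_free(cd);
 *       return r;    // >= 0: number of the newly allocated keyslot
 *   }
 */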
3477
3478 int crypt_keyslot_change_by_passphrase(struct crypt_device *cd,
3479 int keyslot_old,
3480 int keyslot_new,
3481 const char *passphrase,
3482 size_t passphrase_size,
3483 const char *new_passphrase,
3484 size_t new_passphrase_size)
3485 {
3486 int digest = -1, r, keyslot_new_orig = keyslot_new;
3487 struct luks2_keyslot_params params;
3488 struct volume_key *vk = NULL;
3489
3490 if (!passphrase || !new_passphrase)
3491 return -EINVAL;
3492
3493 log_dbg(cd, "Changing passphrase from old keyslot %d to new %d.",
3494 keyslot_old, keyslot_new);
3495
3496 if ((r = onlyLUKS(cd)))
3497 return r;
3498
3499 if (isLUKS1(cd->type))
3500 r = LUKS_open_key_with_hdr(keyslot_old, passphrase, passphrase_size,
3501 &cd->u.luks1.hdr, &vk, cd);
3502 else if (isLUKS2(cd->type)) {
3503 r = LUKS2_keyslot_open(cd, keyslot_old, CRYPT_ANY_SEGMENT, passphrase, passphrase_size, &vk);
3504 /* will fail for keyslots w/o digest. fix if supported in the future */
3505 if (r >= 0) {
3506 digest = LUKS2_digest_by_keyslot(&cd->u.luks2.hdr, r);
3507 if (digest < 0)
3508 r = -EINVAL;
3509 }
3510 } else
3511 r = -EINVAL;
3512 if (r < 0)
3513 goto out;
3514
3515 if (keyslot_old != CRYPT_ANY_SLOT && keyslot_old != r) {
3516 log_dbg(cd, "Keyslot mismatch.");
3517 goto out;
3518 }
3519 keyslot_old = r;
3520
3521 if (keyslot_new == CRYPT_ANY_SLOT) {
3522 if (isLUKS1(cd->type))
3523 keyslot_new = LUKS_keyslot_find_empty(&cd->u.luks1.hdr);
3524 else if (isLUKS2(cd->type))
3525 keyslot_new = LUKS2_keyslot_find_empty(cd, &cd->u.luks2.hdr, vk->keylength);
3526 if (keyslot_new < 0)
3527 keyslot_new = keyslot_old;
3528 }
3529 log_dbg(cd, "Key change, old slot %d, new slot %d.", keyslot_old, keyslot_new);
3530
3531 if (isLUKS1(cd->type)) {
3532 if (keyslot_old == keyslot_new) {
3533 log_dbg(cd, "Key slot %d is going to be overwritten.", keyslot_old);
3534 (void)crypt_keyslot_destroy(cd, keyslot_old);
3535 }
3536 r = LUKS_set_key(keyslot_new, new_passphrase, new_passphrase_size,
3537 &cd->u.luks1.hdr, vk, cd);
3538 } else if (isLUKS2(cd->type)) {
3539 r = LUKS2_keyslot_params_default(cd, &cd->u.luks2.hdr, ¶ms);
3540 if (r)
3541 goto out;
3542
3543 if (keyslot_old != keyslot_new) {
3544 r = LUKS2_digest_assign(cd, &cd->u.luks2.hdr, keyslot_new, digest, 1, 0);
3545 if (r < 0)
3546 goto out;
3547 r = LUKS2_token_assignment_copy(cd, &cd->u.luks2.hdr, keyslot_old, keyslot_new, 0);
3548 if (r < 0)
3549 goto out;
3550 } else {
3551 log_dbg(cd, "Key slot %d is going to be overwritten.", keyslot_old);
3552 /* FIXME: improve return code so that we can detect area is damaged */
3553 r = LUKS2_keyslot_wipe(cd, &cd->u.luks2.hdr, keyslot_old, 1);
3554 if (r) {
3555 /* (void)crypt_keyslot_destroy(cd, keyslot_old); */
3556 r = -EINVAL;
3557 goto out;
3558 }
3559 }
3560
3561 r = LUKS2_keyslot_store(cd, &cd->u.luks2.hdr,
3562 keyslot_new, new_passphrase,
3563 new_passphrase_size, vk, ¶ms);
3564 if (r < 0)
3565 goto out;
3566
3567 /* Swap old & new so the new passphrase ends up in the original keyslot number */
3568 if (keyslot_new_orig == CRYPT_ANY_SLOT && keyslot_old != keyslot_new) {
3569 r = LUKS2_keyslot_swap(cd, &cd->u.luks2.hdr, keyslot_old, keyslot_new);
3570 if (r < 0)
3571 goto out;
3572
3573 /* Swap slot id */
3574 r = keyslot_old;
3575 keyslot_old = keyslot_new;
3576 keyslot_new = r;
3577 }
3578 } else
3579 r = -EINVAL;
3580
3581 if (r >= 0 && keyslot_old != keyslot_new)
3582 r = crypt_keyslot_destroy(cd, keyslot_old);
3583
3584 if (r < 0)
3585 log_err(cd, _("Failed to swap new key slot."));
3586 out:
3587 crypt_free_volume_key(vk);
3588 if (r < 0) {
3589 _luks2_reload(cd);
3590 return r;
3591 }
3592 return keyslot_new;
3593 }
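/*
 * Illustrative caller-side sketch (not part of setup.c): replacing a
 * passphrase in place; with CRYPT_ANY_SLOT for both slots, the LUKS2 swap
 * logic above keeps the original keyslot number. Inputs are placeholders.
 *
 *   #include <string.h>
 *   #include <libcryptsetup.h>
 *
 *   static int example_change_passphrase(const char *dev_path,
 *                                        const char *old_pass, const char *new_pass)
 *   {
 *       struct crypt_device *cd = NULL;
 *       int r = crypt_init(&cd, dev_path);
 *       if (r < 0)
 *           return r;
 *       r = crypt_load(cd, CRYPT_LUKS, NULL);
 *       if (!r)
 *           r = crypt_keyslot_change_by_passphrase(cd, CRYPT_ANY_SLOT,
 *                                                  CRYPT_ANY_SLOT,
 *                                                  old_pass, strlen(old_pass),
 *                                                  new_pass, strlen(new_pass));
 *       crypt_free(cd);
 *       return r;    // >= 0: keyslot now holding the new passphrase
 *   }
 */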
3594
3595 int crypt_keyslot_add_by_keyfile_device_offset(struct crypt_device *cd,
3596 int keyslot,
3597 const char *keyfile,
3598 size_t keyfile_size,
3599 uint64_t keyfile_offset,
3600 const char *new_keyfile,
3601 size_t new_keyfile_size,
3602 uint64_t new_keyfile_offset)
3603 {
3604 int digest, r, active_slots;
3605 size_t passwordLen, new_passwordLen;
3606 struct luks2_keyslot_params params;
3607 char *password = NULL, *new_password = NULL;
3608 struct volume_key *vk = NULL;
3609
3610 if (!keyfile || !new_keyfile)
3611 return -EINVAL;
3612
3613 log_dbg(cd, "Adding new keyslot, existing keyfile %s, new keyfile %s.",
3614 keyfile, new_keyfile);
3615
3616 if ((r = onlyLUKS(cd)))
3617 return r;
3618
3619 r = keyslot_verify_or_find_empty(cd, &keyslot);
3620 if (r)
3621 return r;
3622
3623 if (isLUKS1(cd->type))
3624 active_slots = LUKS_keyslot_active_count(&cd->u.luks1.hdr);
3625 else
3626 active_slots = LUKS2_keyslot_active_count(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT);
3627 if (active_slots == 0) {
3628 /* No slots used, try to use pre-generated key in header */
3629 if (cd->volume_key) {
3630 vk = crypt_alloc_volume_key(cd->volume_key->keylength, cd->volume_key->key);
3631 r = vk ? 0 : -ENOMEM;
3632 } else {
3633 log_err(cd, _("Cannot add key slot, all slots disabled and no volume key provided."));
3634 return -EINVAL;
3635 }
3636 } else {
3637 r = crypt_keyfile_device_read(cd, keyfile,
3638 &password, &passwordLen,
3639 keyfile_offset, keyfile_size, 0);
3640 if (r < 0)
3641 goto out;
3642
3643 if (isLUKS1(cd->type))
3644 r = LUKS_open_key_with_hdr(CRYPT_ANY_SLOT, password, passwordLen,
3645 &cd->u.luks1.hdr, &vk, cd);
3646 else
3647 r = LUKS2_keyslot_open(cd, CRYPT_ANY_SLOT, CRYPT_DEFAULT_SEGMENT, password, passwordLen, &vk);
3648 }
3649
3650 if (r < 0)
3651 goto out;
3652
3653 r = crypt_keyfile_device_read(cd, new_keyfile,
3654 &new_password, &new_passwordLen,
3655 new_keyfile_offset, new_keyfile_size, 0);
3656 if (r < 0)
3657 goto out;
3658
3659 if (isLUKS1(cd->type))
3660 r = LUKS_set_key(keyslot, new_password, new_passwordLen,
3661 &cd->u.luks1.hdr, vk, cd);
3662 else {
3663 r = LUKS2_digest_verify_by_segment(cd, &cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT, vk);
3664 digest = r;
3665
3666 if (r >= 0)
3667 r = LUKS2_keyslot_params_default(cd, &cd->u.luks2.hdr, ¶ms);
3668
3669 if (r >= 0)
3670 r = LUKS2_digest_assign(cd, &cd->u.luks2.hdr, keyslot, digest, 1, 0);
3671
3672 if (r >= 0)
3673 r = LUKS2_keyslot_store(cd, &cd->u.luks2.hdr, keyslot,
3674 new_password, new_passwordLen, vk, ¶ms);
3675 }
3676 out:
3677 crypt_safe_free(password);
3678 crypt_safe_free(new_password);
3679 crypt_free_volume_key(vk);
3680 if (r < 0) {
3681 _luks2_reload(cd);
3682 return r;
3683 }
3684 return keyslot;
3685 }
3686
3687 int crypt_keyslot_add_by_keyfile(struct crypt_device *cd,
3688 int keyslot,
3689 const char *keyfile,
3690 size_t keyfile_size,
3691 const char *new_keyfile,
3692 size_t new_keyfile_size)
3693 {
3694 return crypt_keyslot_add_by_keyfile_device_offset(cd, keyslot,
3695 keyfile, keyfile_size, 0,
3696 new_keyfile, new_keyfile_size, 0);
3697 }
3698
3699 int crypt_keyslot_add_by_keyfile_offset(struct crypt_device *cd,
3700 int keyslot,
3701 const char *keyfile,
3702 size_t keyfile_size,
3703 size_t keyfile_offset,
3704 const char *new_keyfile,
3705 size_t new_keyfile_size,
3706 size_t new_keyfile_offset)
3707 {
3708 return crypt_keyslot_add_by_keyfile_device_offset(cd, keyslot,
3709 keyfile, keyfile_size, keyfile_offset,
3710 new_keyfile, new_keyfile_size, new_keyfile_offset);
3711 }
3712
3713 int crypt_keyslot_add_by_volume_key(struct crypt_device *cd,
3714 int keyslot,
3715 const char *volume_key,
3716 size_t volume_key_size,
3717 const char *passphrase,
3718 size_t passphrase_size)
3719 {
3720 struct volume_key *vk = NULL;
3721 int r;
3722
3723 if (!passphrase)
3724 return -EINVAL;
3725
3726 log_dbg(cd, "Adding new keyslot %d using volume key.", keyslot);
3727
3728 if ((r = onlyLUKS(cd)))
3729 return r;
3730
3731 if (isLUKS2(cd->type))
3732 return crypt_keyslot_add_by_key(cd, keyslot,
3733 volume_key, volume_key_size, passphrase,
3734 passphrase_size, 0);
3735
3736 r = keyslot_verify_or_find_empty(cd, &keyslot);
3737 if (r < 0)
3738 return r;
3739
3740 if (volume_key)
3741 vk = crypt_alloc_volume_key(volume_key_size, volume_key);
3742 else if (cd->volume_key)
3743 vk = crypt_alloc_volume_key(cd->volume_key->keylength, cd->volume_key->key);
3744
3745 if (!vk)
3746 return -ENOMEM;
3747
3748 r = LUKS_verify_volume_key(&cd->u.luks1.hdr, vk);
3749 if (r < 0)
3750 log_err(cd, _("Volume key does not match the volume."));
3751 else
3752 r = LUKS_set_key(keyslot, passphrase, passphrase_size,
3753 &cd->u.luks1.hdr, vk, cd);
3754
3755 crypt_free_volume_key(vk);
3756 return (r < 0) ? r : keyslot;
3757 }
3758
3759 int crypt_keyslot_destroy(struct crypt_device *cd, int keyslot)
3760 {
3761 crypt_keyslot_info ki;
3762 int r;
3763
3764 log_dbg(cd, "Destroying keyslot %d.", keyslot);
3765
3766 if ((r = _onlyLUKS(cd, CRYPT_CD_UNRESTRICTED)))
3767 return r;
3768
3769 ki = crypt_keyslot_status(cd, keyslot);
3770 if (ki == CRYPT_SLOT_INVALID) {
3771 log_err(cd, _("Key slot %d is invalid."), keyslot);
3772 return -EINVAL;
3773 }
3774
3775 if (isLUKS1(cd->type)) {
3776 if (ki == CRYPT_SLOT_INACTIVE) {
3777 log_err(cd, _("Keyslot %d is not active."), keyslot);
3778 return -EINVAL;
3779 }
3780 return LUKS_del_key(keyslot, &cd->u.luks1.hdr, cd);
3781 }
3782
3783 return LUKS2_keyslot_wipe(cd, &cd->u.luks2.hdr, keyslot, 0);
3784 }
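/*
 * Illustrative caller-side sketch (not part of setup.c): removing a keyslot
 * by number. The device path and slot number are placeholders; destroying
 * the last usable keyslot makes the data permanently inaccessible unless a
 * header backup or the volume key is available elsewhere.
 *
 *   #include <libcryptsetup.h>
 *
 *   static int example_kill_slot(const char *dev_path, int slot)
 *   {
 *       struct crypt_device *cd = NULL;
 *       int r = crypt_init(&cd, dev_path);
 *       if (r < 0)
 *           return r;
 *       r = crypt_load(cd, CRYPT_LUKS, NULL);
 *       if (!r)
 *           r = crypt_keyslot_destroy(cd, slot);
 *       crypt_free(cd);
 *       return r;
 *   }
 */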
3785
3786 static int _check_header_data_overlap(struct crypt_device *cd, const char *name)
3787 {
3788 if (!name || !isLUKS(cd->type))
3789 return 0;
3790
3791 if (device_is_identical(crypt_data_device(cd), crypt_metadata_device(cd)) <= 0)
3792 return 0;
3793
3794 /* FIXME: check real header size */
3795 if (crypt_get_data_offset(cd) == 0) {
3796 log_err(cd, _("Device header overlaps with data area."));
3797 return -EINVAL;
3798 }
3799
3800 return 0;
3801 }
3802
3803 static int check_devices(struct crypt_device *cd, const char *name, const char *iname, uint32_t *flags)
3804 {
3805 int r;
3806
3807 if (!flags || !name)
3808 return -EINVAL;
3809
3810 if (iname) {
3811 r = dm_status_device(cd, iname);
3812 if (r >= 0 && !(*flags & CRYPT_ACTIVATE_REFRESH))
3813 return -EBUSY;
3814 if (r < 0 && r != -ENODEV)
3815 return r;
3816 if (r == -ENODEV)
3817 *flags &= ~CRYPT_ACTIVATE_REFRESH;
3818 }
3819
3820 r = dm_status_device(cd, name);
3821 if (r >= 0 && !(*flags & CRYPT_ACTIVATE_REFRESH))
3822 return -EBUSY;
3823 if (r < 0 && r != -ENODEV)
3824 return r;
3825 if (r == -ENODEV)
3826 *flags &= ~CRYPT_ACTIVATE_REFRESH;
3827
3828 return 0;
3829 }
3830
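/*
 * Activate the dm-integrity device first, then stack the dm-crypt segment
 * on top of it; if the crypt device cannot be created, the integrity
 * mapping is removed again.
 */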
3831 static int _create_device_with_integrity(struct crypt_device *cd,
3832 const char *type, const char *name, const char *iname,
3833 const char *ipath, struct crypt_dm_active_device *dmd,
3834 struct crypt_dm_active_device *dmdi)
3835 {
3836 int r;
3837 enum devcheck device_check;
3838 struct dm_target *tgt;
3839 struct device *device = NULL;
3840
3841 if (!single_segment(dmd))
3842 return -EINVAL;
3843
3844 tgt = &dmd->segment;
3845 if (tgt->type != DM_CRYPT)
3846 return -EINVAL;
3847
3848 device_check = dmd->flags & CRYPT_ACTIVATE_SHARED ? DEV_OK : DEV_EXCL;
3849
3850 r = INTEGRITY_activate_dmd_device(cd, iname, CRYPT_INTEGRITY, dmdi, 0);
3851 if (r)
3852 return r;
3853
3854 r = device_alloc(cd, &device, ipath);
3855 if (r < 0)
3856 goto out;
3857 tgt->data_device = device;
3858
3859 r = device_block_adjust(cd, tgt->data_device, device_check,
3860 tgt->u.crypt.offset, &dmd->size, &dmd->flags);
3861
3862 if (!r)
3863 r = dm_create_device(cd, name, type, dmd);
3864 out:
3865 if (r < 0)
3866 dm_remove_device(cd, iname, 0);
3867
3868 device_free(cd, device);
3869 return r;
3870 }
3871
3872 static int kernel_keyring_support(void)
3873 {
3874 static unsigned _checked = 0;
3875
3876 if (!_checked) {
3877 _kernel_keyring_supported = keyring_check();
3878 _checked = 1;
3879 }
3880
3881 return _kernel_keyring_supported;
3882 }
3883
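/*
 * Kernels older than 4.15 are treated as affected by the dm-crypt keyring
 * bug; if the running kernel version cannot be determined, assume the worst
 * and report the bug as present.
 */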
3884 static int dmcrypt_keyring_bug(void)
3885 {
3886 uint64_t kversion;
3887
3888 if (kernel_version(&kversion))
3889 return 1;
3890 return kversion < compact_version(4,15,0,0);
3891 }
3892
3893 int create_or_reload_device(struct crypt_device *cd, const char *name,
3894 const char *type, struct crypt_dm_active_device *dmd)
3895 {
3896 int r;
3897 enum devcheck device_check;
3898 struct dm_target *tgt;
3899
3900 if (!type || !name || !single_segment(dmd))
3901 return -EINVAL;
3902
3903 tgt = &dmd->segment;
3904 if (tgt->type != DM_CRYPT)
3905 return -EINVAL;
3906
3907 /* drop CRYPT_ACTIVATE_REFRESH flag if any device is inactive */
3908 r = check_devices(cd, name, NULL, &dmd->flags);
3909 if (r)
3910 return r;
3911
3912 if (dmd->flags & CRYPT_ACTIVATE_REFRESH)
3913 r = _reload_device(cd, name, dmd);
3914 else {
3915 device_check = dmd->flags & CRYPT_ACTIVATE_SHARED ? DEV_OK : DEV_EXCL;
3916
3917 r = device_block_adjust(cd, tgt->data_device, device_check,
3918 tgt->u.crypt.offset, &dmd->size, &dmd->flags);
3919 if (!r) {
3920 tgt->size = dmd->size;
3921 r = dm_create_device(cd, name, type, dmd);
3922 }
3923 }
3924
3925 return r;
3926 }
3927
3928 int create_or_reload_device_with_integrity(struct crypt_device *cd, const char *name,
3929 const char *type, struct crypt_dm_active_device *dmd,
3930 struct crypt_dm_active_device *dmdi)
3931 {
3932 int r;
3933 const char *iname = NULL;
3934 char *ipath = NULL;
3935
3936 if (!type || !name || !dmd || !dmdi)
3937 return -EINVAL;
3938
3939 if (asprintf(&ipath, "%s/%s_dif", dm_get_dir(), name) < 0)
3940 return -ENOMEM;
3941 iname = ipath + strlen(dm_get_dir()) + 1;
3942
3943 /* drop CRYPT_ACTIVATE_REFRESH flag if any device is inactive */
3944 r = check_devices(cd, name, iname, &dmd->flags);
3945 if (r)
3946 goto out;
3947
3948 if (dmd->flags & CRYPT_ACTIVATE_REFRESH)
3949 r = _reload_device_with_integrity(cd, name, iname, ipath, dmd, dmdi);
3950 else
3951 r = _create_device_with_integrity(cd, type, name, iname, ipath, dmd, dmdi);
3952 out:
3953 free(ipath);
3954
3955 return r;
3956 }
3957
3958 /* See fixmes in _open_and_activate_luks2 */
3959 int update_reencryption_flag(struct crypt_device *cd, int enable, bool commit);
3960
3961 /* TODO: This function should be 1:1 with the pre-reencryption code */
3962 static int _open_and_activate(struct crypt_device *cd,
3963 int keyslot,
3964 const char *name,
3965 const char *passphrase,
3966 size_t passphrase_size,
3967 uint32_t flags)
3968 {
3969 bool use_keyring;
3970 int r;
3971 struct volume_key *vk = NULL;
3972
3973 r = LUKS2_keyslot_open(cd, keyslot,
3974 (flags & CRYPT_ACTIVATE_ALLOW_UNBOUND_KEY) ?
3975 CRYPT_ANY_SEGMENT : CRYPT_DEFAULT_SEGMENT,
3976 passphrase, passphrase_size, &vk);
3977 if (r < 0)
3978 return r;
3979 keyslot = r;
3980
3981 if (!crypt_use_keyring_for_vk(cd))
3982 use_keyring = false;
3983 else
3984 use_keyring = ((name && !crypt_is_cipher_null(crypt_get_cipher(cd))) ||
3985 (flags & CRYPT_ACTIVATE_KEYRING_KEY));
3986
3987 if (use_keyring) {
3988 r = LUKS2_volume_key_load_in_keyring_by_keyslot(cd,
3989 &cd->u.luks2.hdr, vk, keyslot);
3990 if (r < 0)
3991 goto out;
3992 flags |= CRYPT_ACTIVATE_KEYRING_KEY;
3993 }
3994
3995 if (name)
3996 r = LUKS2_activate(cd, name, vk, flags);
3997 out:
3998 if (r < 0)
3999 crypt_drop_keyring_key(cd, vk);
4000 crypt_free_volume_key(vk);
4001
4002 return r < 0 ? r : keyslot;
4003 }
4004
4005 #if USE_LUKS2_REENCRYPTION
4006 static int load_all_keys(struct crypt_device *cd, struct luks2_hdr *hdr, struct volume_key *vks)
4007 {
4008 int r;
4009 struct volume_key *vk = vks;
4010
4011 while (vk) {
4012 r = LUKS2_volume_key_load_in_keyring_by_digest(cd, hdr, vk, crypt_volume_key_get_id(vk));
4013 if (r < 0)
4014 return r;
4015 vk = crypt_volume_key_next(vk);
4016 }
4017
4018 return 0;
4019 }
4020
4021 static int _open_all_keys(struct crypt_device *cd,
4022 struct luks2_hdr *hdr,
4023 int keyslot,
4024 const char *passphrase,
4025 size_t passphrase_size,
4026 uint32_t flags,
4027 struct volume_key **vks)
4028 {
4029 int r, segment;
4030 struct volume_key *_vks = NULL;
4031 crypt_reencrypt_info ri = LUKS2_reencrypt_status(hdr);
4032
4033 segment = (flags & CRYPT_ACTIVATE_ALLOW_UNBOUND_KEY) ? CRYPT_ANY_SEGMENT : CRYPT_DEFAULT_SEGMENT;
4034
4035 switch (ri) {
4036 case CRYPT_REENCRYPT_NONE:
4037 r = LUKS2_keyslot_open(cd, keyslot, segment, passphrase, passphrase_size, &_vks);
4038 break;
4039 case CRYPT_REENCRYPT_CLEAN:
4040 case CRYPT_REENCRYPT_CRASH:
4041 if (segment == CRYPT_ANY_SEGMENT)
4042 r = LUKS2_keyslot_open(cd, keyslot, segment, passphrase,
4043 passphrase_size, &_vks);
4044 else
4045 r = LUKS2_keyslot_open_all_segments(cd, keyslot,
4046 keyslot, passphrase, passphrase_size,
4047 &_vks);
4048 break;
4049 default:
4050 r = -EINVAL;
4051 }
4052
4053 if (keyslot == CRYPT_ANY_SLOT)
4054 keyslot = r;
4055
4056 if (r >= 0 && (flags & CRYPT_ACTIVATE_KEYRING_KEY))
4057 r = load_all_keys(cd, hdr, _vks);
4058
4059 if (r >= 0 && vks)
4060 MOVE_REF(*vks, _vks);
4061
4062 if (r < 0)
4063 crypt_drop_keyring_key(cd, _vks);
4064 crypt_free_volume_key(_vks);
4065
4066 return r < 0 ? r : keyslot;
4067 }
4068
4069 static int _open_and_activate_reencrypt_device(struct crypt_device *cd,
4070 struct luks2_hdr *hdr,
4071 int keyslot,
4072 const char *name,
4073 const char *passphrase,
4074 size_t passphrase_size,
4075 uint32_t flags)
4076 {
4077 bool dynamic_size;
4078 crypt_reencrypt_info ri;
4079 uint64_t minimal_size, device_size;
4080 struct volume_key *vks = NULL;
4081 int r = 0;
4082 struct crypt_lock_handle *reencrypt_lock = NULL;
4083
4084 if (crypt_use_keyring_for_vk(cd))
4085 flags |= CRYPT_ACTIVATE_KEYRING_KEY;
4086
4087 r = LUKS2_reencrypt_lock(cd, &reencrypt_lock);
4088 if (r) {
4089 if (r == -EBUSY)
4090 log_err(cd, _("Reencryption in-progress. Cannot activate device."));
4091 else
4092 log_err(cd, _("Failed to get reencryption lock."));
4093 return r;
4094 }
4095
4096 if ((r = crypt_load(cd, CRYPT_LUKS2, NULL)))
4097 goto out;
4098
4099 ri = LUKS2_reencrypt_status(hdr);
4100
4101 if (ri == CRYPT_REENCRYPT_CRASH) {
4102 r = LUKS2_reencrypt_locked_recovery_by_passphrase(cd, keyslot,
4103 keyslot, passphrase, passphrase_size, flags, &vks);
4104 if (r < 0) {
4105 log_err(cd, _("LUKS2 reencryption recovery failed."));
4106 goto out;
4107 }
4108 keyslot = r;
4109
4110 ri = LUKS2_reencrypt_status(hdr);
4111 }
4112
4113 /* recovery finished the reencryption, or it had already finished */
4114 if (ri == CRYPT_REENCRYPT_NONE) {
4115 crypt_drop_keyring_key(cd, vks);
4116 crypt_free_volume_key(vks);
4117 LUKS2_reencrypt_unlock(cd, reencrypt_lock);
4118 return _open_and_activate(cd, keyslot, name, passphrase, passphrase_size, flags);
4119 }
4120
4121 if (ri > CRYPT_REENCRYPT_CLEAN) {
4122 r = -EINVAL;
4123 goto out;
4124 }
4125
4126 if (LUKS2_get_data_size(hdr, &minimal_size, &dynamic_size))
4127 goto out;
4128
4129 if (!vks) {
4130 r = _open_all_keys(cd, hdr, keyslot, passphrase, passphrase_size, flags, &vks);
4131 if (r >= 0)
4132 keyslot = r;
4133 }
4134
4135 if (r >= 0) {
4136 r = LUKS2_reencrypt_digest_verify(cd, hdr, vks);
4137 if (r < 0)
4138 goto out;
4139 }
4140
4141 log_dbg(cd, "Entering clean reencryption state mode.");
4142
4143 if (r >= 0)
4144 r = LUKS2_reencrypt_check_device_size(cd, hdr, minimal_size, &device_size, true, dynamic_size);
4145
4146 if (r >= 0)
4147 r = LUKS2_activate_multi(cd, name, vks, device_size >> SECTOR_SHIFT, flags);
4148 out:
4149 LUKS2_reencrypt_unlock(cd, reencrypt_lock);
4150 if (r < 0)
4151 crypt_drop_keyring_key(cd, vks);
4152 crypt_free_volume_key(vks);
4153
4154 return r < 0 ? r : keyslot;
4155 }
4156
4157 /*
4158 * Activation/deactivation of a device
4159 */
4160 static int _open_and_activate_luks2(struct crypt_device *cd,
4161 int keyslot,
4162 const char *name,
4163 const char *passphrase,
4164 size_t passphrase_size,
4165 uint32_t flags)
4166 {
4167 crypt_reencrypt_info ri;
4168 int r, rv;
4169 struct luks2_hdr *hdr = &cd->u.luks2.hdr;
4170 struct volume_key *vks = NULL;
4171
4172 ri = LUKS2_reencrypt_status(hdr);
4173 if (ri == CRYPT_REENCRYPT_INVALID)
4174 return -EINVAL;
4175
4176 if (ri > CRYPT_REENCRYPT_NONE) {
4177 if (name)
4178 r = _open_and_activate_reencrypt_device(cd, hdr, keyslot, name, passphrase,
4179 passphrase_size, flags);
4180 else {
4181 r = _open_all_keys(cd, hdr, keyslot, passphrase,
4182 passphrase_size, flags, &vks);
4183 if (r < 0)
4184 return r;
4185
4186 rv = LUKS2_reencrypt_digest_verify(cd, hdr, vks);
4187 crypt_free_volume_key(vks);
4188 if (rv < 0)
4189 return rv;
4190 }
4191 } else
4192 r = _open_and_activate(cd, keyslot, name, passphrase,
4193 passphrase_size, flags);
4194
4195 return r;
4196 }
4197 #else
4198 static int _open_and_activate_luks2(struct crypt_device *cd,
4199 int keyslot,
4200 const char *name,
4201 const char *passphrase,
4202 size_t passphrase_size,
4203 uint32_t flags)
4204 {
4205 crypt_reencrypt_info ri;
4206
4207 ri = LUKS2_reencrypt_status(&cd->u.luks2.hdr);
4208 if (ri == CRYPT_REENCRYPT_INVALID)
4209 return -EINVAL;
4210
4211 if (ri > CRYPT_REENCRYPT_NONE) {
4212 log_err(cd, _("This operation is not supported for this device type."));
4213 return -ENOTSUP;
4214 }
4215
4216 return _open_and_activate(cd, keyslot, name, passphrase, passphrase_size, flags);
4217 }
4218 #endif
4219
4220 static int _activate_by_passphrase(struct crypt_device *cd,
4221 const char *name,
4222 int keyslot,
4223 const char *passphrase,
4224 size_t passphrase_size,
4225 uint32_t flags)
4226 {
4227 int r;
4228 struct volume_key *vk = NULL;
4229
4230 if ((flags & CRYPT_ACTIVATE_KEYRING_KEY) && !crypt_use_keyring_for_vk(cd))
4231 return -EINVAL;
4232
4233 if ((flags & CRYPT_ACTIVATE_ALLOW_UNBOUND_KEY) && name)
4234 return -EINVAL;
4235
4236 r = _check_header_data_overlap(cd, name);
4237 if (r < 0)
4238 return r;
4239
4240 if (flags & CRYPT_ACTIVATE_SERIALIZE_MEMORY_HARD_PBKDF)
4241 cd->memory_hard_pbkdf_lock_enabled = true;
4242
4243 /* plain, use hashed passphrase */
4244 if (isPLAIN(cd->type)) {
4245 r = -EINVAL;
4246 if (!name)
4247 goto out;
4248
4249 r = process_key(cd, cd->u.plain.hdr.hash,
4250 cd->u.plain.key_size,
4251 passphrase, passphrase_size, &vk);
4252 if (r < 0)
4253 goto out;
4254
4255 r = PLAIN_activate(cd, name, vk, cd->u.plain.hdr.size, flags);
4256 keyslot = 0;
4257 } else if (isLUKS1(cd->type)) {
4258 r = LUKS_open_key_with_hdr(keyslot, passphrase,
4259 passphrase_size, &cd->u.luks1.hdr, &vk, cd);
4260 if (r >= 0) {
4261 keyslot = r;
4262 if (name)
4263 r = LUKS1_activate(cd, name, vk, flags);
4264 }
4265 } else if (isLUKS2(cd->type)) {
4266 r = _open_and_activate_luks2(cd, keyslot, name, passphrase, passphrase_size, flags);
4267 keyslot = r;
4268 } else if (isBITLK(cd->type)) {
4269 r = BITLK_activate_by_passphrase(cd, name, passphrase, passphrase_size,
4270 &cd->u.bitlk.params, flags);
4271 keyslot = 0;
4272 } else {
4273 log_err(cd, _("Device type is not properly initialized."));
4274 r = -EINVAL;
4275 }
4276 out:
4277 if (r < 0)
4278 crypt_drop_keyring_key(cd, vk);
4279 crypt_free_volume_key(vk);
4280
4281 cd->memory_hard_pbkdf_lock_enabled = false;
4282
4283 return r < 0 ? r : keyslot;
4284 }
4285
4286 static int _activate_loopaes(struct crypt_device *cd,
4287 const char *name,
4288 char *buffer,
4289 size_t buffer_size,
4290 uint32_t flags)
4291 {
4292 int r;
4293 unsigned int key_count = 0;
4294 struct volume_key *vk = NULL;
4295
4296 r = LOOPAES_parse_keyfile(cd, &vk, cd->u.loopaes.hdr.hash, &key_count,
4297 buffer, buffer_size);
4298
4299 if (!r && name)
4300 r = LOOPAES_activate(cd, name, cd->u.loopaes.cipher, key_count,
4301 vk, flags);
4302
4303 crypt_free_volume_key(vk);
4304
4305 return r;
4306 }
4307
4308 static int _activate_check_status(struct crypt_device *cd, const char *name, unsigned reload)
4309 {
4310 int r;
4311
4312 if (!name)
4313 return 0;
4314
4315 r = dm_status_device(cd, name);
4316
4317 if (r >= 0 && reload)
4318 return 0;
4319
4320 if (r >= 0 || r == -EEXIST) {
4321 log_err(cd, _("Device %s already exists."), name);
4322 return -EEXIST;
4323 }
4324
4325 if (r == -ENODEV)
4326 return 0;
4327
4328 log_err(cd, _("Cannot use device %s, name is invalid or still in use."), name);
4329 return r;
4330 }
4331
4332 /* activation/deactivation of device mapping */
4333 int crypt_activate_by_passphrase(struct crypt_device *cd,
4334 const char *name,
4335 int keyslot,
4336 const char *passphrase,
4337 size_t passphrase_size,
4338 uint32_t flags)
4339 {
4340 int r;
4341
4342 if (!cd || !passphrase || (!name && (flags & CRYPT_ACTIVATE_REFRESH)))
4343 return -EINVAL;
4344
4345 log_dbg(cd, "%s volume %s [keyslot %d] using passphrase.",
4346 name ? "Activating" : "Checking", name ?: "passphrase",
4347 keyslot);
4348
4349 r = _activate_check_status(cd, name, flags & CRYPT_ACTIVATE_REFRESH);
4350 if (r < 0)
4351 return r;
4352
4353 return _activate_by_passphrase(cd, name, keyslot, passphrase, passphrase_size, flags);
4354 }
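
/*
 * Illustrative caller-side sketch (editorial addition, not part of the
 * library sources): activating a LUKS device by passphrase. The device
 * path, mapping name and passphrase are hypothetical; with name == NULL
 * the call only verifies the passphrase and creates no mapping.
 *
 *   struct crypt_device *cd = NULL;
 *   int r = crypt_init(&cd, "/dev/sdb1");
 *   if (!r)
 *       r = crypt_load(cd, CRYPT_LUKS, NULL);
 *   if (!r)
 *       r = crypt_activate_by_passphrase(cd, "backup_crypt", CRYPT_ANY_SLOT,
 *                                        "secret", strlen("secret"), 0);
 *   crypt_free(cd);
 *
 * A non-negative return value is the keyslot that matched the passphrase.
 */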
4355
4356 int crypt_activate_by_keyfile_device_offset(struct crypt_device *cd,
4357 const char *name,
4358 int keyslot,
4359 const char *keyfile,
4360 size_t keyfile_size,
4361 uint64_t keyfile_offset,
4362 uint32_t flags)
4363 {
4364 char *passphrase_read = NULL;
4365 size_t passphrase_size_read;
4366 int r;
4367
4368 if (!cd || !keyfile ||
4369 ((flags & CRYPT_ACTIVATE_KEYRING_KEY) && !crypt_use_keyring_for_vk(cd)))
4370 return -EINVAL;
4371
4372 log_dbg(cd, "%s volume %s [keyslot %d] using keyfile %s.",
4373 name ? "Activating" : "Checking", name ?: "passphrase", keyslot, keyfile);
4374
4375 r = _activate_check_status(cd, name, flags & CRYPT_ACTIVATE_REFRESH);
4376 if (r < 0)
4377 return r;
4378
4379 r = crypt_keyfile_device_read(cd, keyfile,
4380 &passphrase_read, &passphrase_size_read,
4381 keyfile_offset, keyfile_size, 0);
4382 if (r < 0)
4383 goto out;
4384
4385 if (isLOOPAES(cd->type))
4386 r = _activate_loopaes(cd, name, passphrase_read, passphrase_size_read, flags);
4387 else
4388 r = _activate_by_passphrase(cd, name, keyslot, passphrase_read, passphrase_size_read, flags);
4389
4390 out:
4391 crypt_safe_free(passphrase_read);
4392 return r;
4393 }
4394
4395 int crypt_activate_by_keyfile(struct crypt_device *cd,
4396 const char *name,
4397 int keyslot,
4398 const char *keyfile,
4399 size_t keyfile_size,
4400 uint32_t flags)
4401 {
4402 return crypt_activate_by_keyfile_device_offset(cd, name, keyslot, keyfile,
4403 keyfile_size, 0, flags);
4404 }
4405
4406 int crypt_activate_by_keyfile_offset(struct crypt_device *cd,
4407 const char *name,
4408 int keyslot,
4409 const char *keyfile,
4410 size_t keyfile_size,
4411 size_t keyfile_offset,
4412 uint32_t flags)
4413 {
4414 return crypt_activate_by_keyfile_device_offset(cd, name, keyslot, keyfile,
4415 keyfile_size, keyfile_offset, flags);
4416 }
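
/*
 * Illustrative sketch (editorial addition): both wrappers above forward to
 * crypt_activate_by_keyfile_device_offset(). A hypothetical call reading a
 * whole keyfile (keyfile_size == 0 means "read up to the compiled-in
 * limit") could look like:
 *
 *   r = crypt_activate_by_keyfile(cd, "backup_crypt", CRYPT_ANY_SLOT,
 *                                 "/etc/keys/backup.key", 0, 0);
 */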
4417 int crypt_activate_by_volume_key(struct crypt_device *cd,
4418 const char *name,
4419 const char *volume_key,
4420 size_t volume_key_size,
4421 uint32_t flags)
4422 {
4423 bool use_keyring;
4424 struct volume_key *vk = NULL;
4425 int r;
4426
4427 if (!cd ||
4428 ((flags & CRYPT_ACTIVATE_KEYRING_KEY) && !crypt_use_keyring_for_vk(cd)))
4429 return -EINVAL;
4430
4431 log_dbg(cd, "%s volume %s by volume key.", name ? "Activating" : "Checking",
4432 name ?: "");
4433
4434 r = _activate_check_status(cd, name, flags & CRYPT_ACTIVATE_REFRESH);
4435 if (r < 0)
4436 return r;
4437
4438 r = _check_header_data_overlap(cd, name);
4439 if (r < 0)
4440 return r;
4441
4442 /* use key directly, no hash */
4443 if (isPLAIN(cd->type)) {
4444 if (!name)
4445 return -EINVAL;
4446
4447 if (!volume_key || !volume_key_size || volume_key_size != cd->u.plain.key_size) {
4448 log_err(cd, _("Incorrect volume key specified for plain device."));
4449 return -EINVAL;
4450 }
4451
4452 vk = crypt_alloc_volume_key(volume_key_size, volume_key);
4453 if (!vk)
4454 return -ENOMEM;
4455
4456 r = PLAIN_activate(cd, name, vk, cd->u.plain.hdr.size, flags);
4457 } else if (isLUKS1(cd->type)) {
4458 /* If key is not provided, try to use internal key */
4459 if (!volume_key) {
4460 if (!cd->volume_key) {
4461 log_err(cd, _("Volume key does not match the volume."));
4462 return -EINVAL;
4463 }
4464 volume_key_size = cd->volume_key->keylength;
4465 volume_key = cd->volume_key->key;
4466 }
4467
4468 vk = crypt_alloc_volume_key(volume_key_size, volume_key);
4469 if (!vk)
4470 return -ENOMEM;
4471 r = LUKS_verify_volume_key(&cd->u.luks1.hdr, vk);
4472
4473 if (r == -EPERM)
4474 log_err(cd, _("Volume key does not match the volume."));
4475
4476 if (!r && name)
4477 r = LUKS1_activate(cd, name, vk, flags);
4478 } else if (isLUKS2(cd->type)) {
4479 /* If key is not provided, try to use internal key */
4480 if (!volume_key) {
4481 if (!cd->volume_key) {
4482 log_err(cd, _("Volume key does not match the volume."));
4483 return -EINVAL;
4484 }
4485 volume_key_size = cd->volume_key->keylength;
4486 volume_key = cd->volume_key->key;
4487 }
4488
4489 vk = crypt_alloc_volume_key(volume_key_size, volume_key);
4490 if (!vk)
4491 return -ENOMEM;
4492
4493 r = LUKS2_digest_verify_by_segment(cd, &cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT, vk);
4494 if (r == -EPERM || r == -ENOENT)
4495 log_err(cd, _("Volume key does not match the volume."));
4496 if (r > 0)
4497 r = 0;
4498
4499 if (!crypt_use_keyring_for_vk(cd))
4500 use_keyring = false;
4501 else
4502 use_keyring = (name && !crypt_is_cipher_null(crypt_get_cipher(cd))) || (flags & CRYPT_ACTIVATE_KEYRING_KEY);
4503
4504 if (!r && use_keyring) {
4505 r = LUKS2_key_description_by_segment(cd,
4506 &cd->u.luks2.hdr, vk, CRYPT_DEFAULT_SEGMENT);
4507 if (!r)
4508 r = crypt_volume_key_load_in_keyring(cd, vk);
4509 if (!r)
4510 flags |= CRYPT_ACTIVATE_KEYRING_KEY;
4511 }
4512
4513 if (!r && name)
4514 r = LUKS2_activate(cd, name, vk, flags);
4515 } else if (isVERITY(cd->type)) {
4516 r = crypt_activate_by_signed_key(cd, name, volume_key, volume_key_size, NULL, 0, flags);
4517 } else if (isTCRYPT(cd->type)) {
4518 if (!name)
4519 return 0;
4520 r = TCRYPT_activate(cd, name, &cd->u.tcrypt.hdr,
4521 &cd->u.tcrypt.params, flags);
4522 } else if (isINTEGRITY(cd->type)) {
4523 if (!name)
4524 return 0;
4525 if (volume_key) {
4526 vk = crypt_alloc_volume_key(volume_key_size, volume_key);
4527 if (!vk)
4528 return -ENOMEM;
4529 }
4530 r = INTEGRITY_activate(cd, name, &cd->u.integrity.params, vk,
4531 cd->u.integrity.journal_crypt_key,
4532 cd->u.integrity.journal_mac_key, flags,
4533 cd->u.integrity.sb_flags);
4534 } else if (isBITLK(cd->type)) {
4535 r = BITLK_activate_by_volume_key(cd, name, volume_key, volume_key_size,
4536 &cd->u.bitlk.params, flags);
4537 } else {
4538 log_err(cd, _("Device type is not properly initialized."));
4539 r = -EINVAL;
4540 }
4541
4542 if (r < 0)
4543 crypt_drop_keyring_key(cd, vk);
4544 crypt_free_volume_key(vk);
4545
4546 return r;
4547 }
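
/*
 * Illustrative sketch (editorial addition): activating a loaded LUKS device
 * with a raw volume key, or merely verifying the key when name == NULL.
 * The key buffer and mapping name are hypothetical; the key length must
 * match crypt_get_volume_key_size(cd).
 *
 *   r = crypt_activate_by_volume_key(cd, "backup_crypt", key,
 *                                    crypt_get_volume_key_size(cd),
 *                                    CRYPT_ACTIVATE_READONLY);
 */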
4548
4549 int crypt_activate_by_signed_key(struct crypt_device *cd,
4550 const char *name,
4551 const char *volume_key,
4552 size_t volume_key_size,
4553 const char *signature,
4554 size_t signature_size,
4555 uint32_t flags)
4556 {
4557 char description[512];
4558 int r;
4559
4560 if (!cd || !isVERITY(cd->type))
4561 return -EINVAL;
4562
4563 if (!volume_key || !volume_key_size || (!name && signature)) {
4564 log_err(cd, _("Incorrect root hash specified for verity device."));
4565 return -EINVAL;
4566 }
4567
4568 log_dbg(cd, "%s volume %s by %skey.", name ? "Activating" : "Checking", name ?: "", signature ? "signed " : "");
4569
4570 if (cd->u.verity.hdr.flags & CRYPT_VERITY_ROOT_HASH_SIGNATURE && !signature) {
4571 log_err(cd, _("Root hash signature required."));
4572 return -EINVAL;
4573 }
4574
4575 r = _activate_check_status(cd, name, flags & CRYPT_ACTIVATE_REFRESH);
4576 if (r < 0)
4577 return r;
4578
4579 if (signature && !kernel_keyring_support()) {
4580 log_err(cd, _("Kernel keyring missing: required for passing signature to kernel."));
4581 return -EINVAL;
4582 }
4583
4584 /* volume_key == root hash */
4585 free(CONST_CAST(void*)cd->u.verity.root_hash);
4586 cd->u.verity.root_hash = NULL;
4587
4588 if (signature) {
4589 r = snprintf(description, sizeof(description)-1, "cryptsetup:%s%s%s",
4590 crypt_get_uuid(cd) ?: "", crypt_get_uuid(cd) ? "-" : "", name);
4591 if (r < 0)
4592 return -EINVAL;
4593
4594 log_dbg(cd, "Adding signature into keyring %s", description);
4595 r = keyring_add_key_in_thread_keyring(USER_KEY, description, signature, signature_size);
4596 if (r) {
4597 log_err(cd, _("Failed to load key in kernel keyring."));
4598 return r;
4599 }
4600 }
4601
4602 r = VERITY_activate(cd, name, volume_key, volume_key_size,
4603 signature ? description : NULL,
4604 cd->u.verity.fec_device,
4605 &cd->u.verity.hdr, flags | CRYPT_ACTIVATE_READONLY);
4606
4607 if (!r) {
4608 cd->u.verity.root_hash_size = volume_key_size;
4609 cd->u.verity.root_hash = malloc(volume_key_size);
4610 if (cd->u.verity.root_hash)
4611 memcpy(CONST_CAST(void*)cd->u.verity.root_hash, volume_key, volume_key_size);
4612 }
4613
4614 if (signature)
4615 crypt_drop_keyring_key_by_description(cd, description, USER_KEY);
4616
4617 return r;
4618 }
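
/*
 * Illustrative sketch (editorial addition): activating a verity device with
 * a signed root hash. The buffers and name are hypothetical; the verity
 * header is expected to have been loaded with
 * crypt_load(cd, CRYPT_VERITY, &params) beforehand, and the signing
 * certificate typically has to be trusted by the kernel already.
 *
 *   r = crypt_activate_by_signed_key(cd, "root_verity",
 *                                    root_hash, root_hash_size,
 *                                    signature, signature_size, 0);
 */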
4619
4620 int crypt_deactivate_by_name(struct crypt_device *cd, const char *name, uint32_t flags)
4621 {
4622 struct crypt_device *fake_cd = NULL;
4623 struct luks2_hdr *hdr2 = NULL;
4624 struct crypt_dm_active_device dmd = {};
4625 int r;
4626 uint32_t get_flags = DM_ACTIVE_DEVICE | DM_ACTIVE_UUID | DM_ACTIVE_HOLDERS;
4627
4628 if (!name)
4629 return -EINVAL;
4630
4631 if ((flags & CRYPT_DEACTIVATE_DEFERRED) && (flags & CRYPT_DEACTIVATE_DEFERRED_CANCEL))
4632 return -EINVAL;
4633
4634 log_dbg(cd, "Deactivating volume %s.", name);
4635
4636 if (!cd) {
4637 r = crypt_init_by_name(&fake_cd, name);
4638 if (r < 0)
4639 return r;
4640 cd = fake_cd;
4641 }
4642
4643 /* skip holders detection and abort early when certain flags are raised */
4644 if (flags & (CRYPT_DEACTIVATE_FORCE | CRYPT_DEACTIVATE_DEFERRED | CRYPT_DEACTIVATE_DEFERRED_CANCEL))
4645 get_flags &= ~DM_ACTIVE_HOLDERS;
4646
4647 switch (crypt_status(cd, name)) {
4648 case CRYPT_ACTIVE:
4649 case CRYPT_BUSY:
4650 if (flags & CRYPT_DEACTIVATE_DEFERRED_CANCEL) {
4651 r = dm_cancel_deferred_removal(name);
4652 if (r < 0)
4653 log_err(cd, _("Could not cancel deferred remove from device %s."), name);
4654 break;
4655 }
4656
4657 r = dm_query_device(cd, name, get_flags, &dmd);
4658 if (r >= 0) {
4659 if (dmd.holders) {
4660 log_err(cd, _("Device %s is still in use."), name);
4661 r = -EBUSY;
4662 break;
4663 }
4664 }
4665
4666 if (isLUKS2(cd->type))
4667 hdr2 = crypt_get_hdr(cd, CRYPT_LUKS2);
4668
4669 if ((dmd.uuid && !strncmp(CRYPT_LUKS2, dmd.uuid, sizeof(CRYPT_LUKS2)-1)) || hdr2)
4670 r = LUKS2_deactivate(cd, name, hdr2, &dmd, flags);
4671 else if (isTCRYPT(cd->type))
4672 r = TCRYPT_deactivate(cd, name, flags);
4673 else
4674 r = dm_remove_device(cd, name, flags);
4675 if (r < 0 && crypt_status(cd, name) == CRYPT_BUSY) {
4676 log_err(cd, _("Device %s is still in use."), name);
4677 r = -EBUSY;
4678 }
4679 break;
4680 case CRYPT_INACTIVE:
4681 log_err(cd, _("Device %s is not active."), name);
4682 r = -ENODEV;
4683 break;
4684 default:
4685 log_err(cd, _("Invalid device %s."), name);
4686 r = -EINVAL;
4687 }
4688
4689 dm_targets_free(cd, &dmd);
4690 free(CONST_CAST(void*)dmd.uuid);
4691 crypt_free(fake_cd);
4692
4693 return r;
4694 }
4695
4696 int crypt_deactivate(struct crypt_device *cd, const char *name)
4697 {
4698 return crypt_deactivate_by_name(cd, name, 0);
4699 }
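
/*
 * Illustrative sketch (editorial addition): deactivating a mapping by name.
 * Passing cd == NULL is allowed; the context is then created internally via
 * crypt_init_by_name(). The mapping name is hypothetical.
 *
 *   r = crypt_deactivate_by_name(NULL, "backup_crypt",
 *                                CRYPT_DEACTIVATE_DEFERRED);
 */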
4700
4701 int crypt_get_active_device(struct crypt_device *cd, const char *name,
4702 struct crypt_active_device *cad)
4703 {
4704 int r;
4705 struct crypt_dm_active_device dmd, dmdi = {};
4706 const char *namei = NULL;
4707 struct dm_target *tgt = &dmd.segment;
4708 uint64_t min_offset = UINT64_MAX;
4709
4710 if (!cd || !name || !cad)
4711 return -EINVAL;
4712
4713 r = dm_query_device(cd, name, DM_ACTIVE_DEVICE, &dmd);
4714 if (r < 0)
4715 return r;
4716
4717 /* For LUKS2 with integrity we need flags from underlying dm-integrity */
4718 if (isLUKS2(cd->type) && crypt_get_integrity_tag_size(cd) && single_segment(&dmd)) {
4719 namei = device_dm_name(tgt->data_device);
4720 if (namei && dm_query_device(cd, namei, 0, &dmdi) >= 0)
4721 dmd.flags |= dmdi.flags;
4722 }
4723
4724 if (cd && isTCRYPT(cd->type)) {
4725 cad->offset = TCRYPT_get_data_offset(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params);
4726 cad->iv_offset = TCRYPT_get_iv_offset(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params);
4727 } else {
4728 while (tgt) {
4729 if (tgt->type == DM_CRYPT && (min_offset > tgt->u.crypt.offset)) {
4730 min_offset = tgt->u.crypt.offset;
4731 cad->iv_offset = tgt->u.crypt.iv_offset;
4732 } else if (tgt->type == DM_INTEGRITY && (min_offset > tgt->u.integrity.offset)) {
4733 min_offset = tgt->u.integrity.offset;
4734 cad->iv_offset = 0;
4735 } else if (tgt->type == DM_LINEAR && (min_offset > tgt->u.linear.offset)) {
4736 min_offset = tgt->u.linear.offset;
4737 cad->iv_offset = 0;
4738 }
4739 tgt = tgt->next;
4740 }
4741 }
4742
4743 if (min_offset != UINT64_MAX)
4744 cad->offset = min_offset;
4745
4746 cad->size = dmd.size;
4747 cad->flags = dmd.flags;
4748
4749 r = 0;
4750 dm_targets_free(cd, &dmd);
4751 dm_targets_free(cd, &dmdi);
4752
4753 return r;
4754 }
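
/*
 * Illustrative sketch (editorial addition): reading back the parameters of
 * an active mapping. The mapping name is hypothetical.
 *
 *   struct crypt_active_device cad;
 *   if (!crypt_get_active_device(cd, "backup_crypt", &cad))
 *       printf("offset %llu, size %llu sectors\n",
 *              (unsigned long long)cad.offset,
 *              (unsigned long long)cad.size);
 */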
4755
4756 uint64_t crypt_get_active_integrity_failures(struct crypt_device *cd, const char *name)
4757 {
4758 struct crypt_dm_active_device dmd;
4759 uint64_t failures = 0;
4760
4761 if (!name)
4762 return 0;
4763
4764 /* LUKS2 / dm-crypt does not provide this count. */
4765 if (dm_query_device(cd, name, 0, &dmd) < 0)
4766 return 0;
4767
4768 if (single_segment(&dmd) && dmd.segment.type == DM_INTEGRITY)
4769 (void)dm_status_integrity_failures(cd, name, &failures);
4770
4771 dm_targets_free(cd, &dmd);
4772
4773 return failures;
4774 }
4775
4776 /*
4777 * Volume key handling
4778 */
4779 int crypt_volume_key_get(struct crypt_device *cd,
4780 int keyslot,
4781 char *volume_key,
4782 size_t *volume_key_size,
4783 const char *passphrase,
4784 size_t passphrase_size)
4785 {
4786 struct volume_key *vk = NULL;
4787 int key_len, r = -EINVAL;
4788
4789 if (!cd || !volume_key || !volume_key_size || (!isTCRYPT(cd->type) && !isVERITY(cd->type) && !passphrase))
4790 return -EINVAL;
4791
4792 if (isLUKS2(cd->type) && keyslot != CRYPT_ANY_SLOT)
4793 key_len = LUKS2_get_keyslot_stored_key_size(&cd->u.luks2.hdr, keyslot);
4794 else
4795 key_len = crypt_get_volume_key_size(cd);
4796
4797 if (key_len < 0)
4798 return -EINVAL;
4799
4800 if (key_len > (int)*volume_key_size) {
4801 log_err(cd, _("Volume key buffer too small."));
4802 return -ENOMEM;
4803 }
4804
4805 if (isPLAIN(cd->type) && cd->u.plain.hdr.hash) {
4806 r = process_key(cd, cd->u.plain.hdr.hash, key_len,
4807 passphrase, passphrase_size, &vk);
4808 if (r < 0)
4809 log_err(cd, _("Cannot retrieve volume key for plain device."));
4810 } else if (isLUKS1(cd->type)) {
4811 r = LUKS_open_key_with_hdr(keyslot, passphrase,
4812 passphrase_size, &cd->u.luks1.hdr, &vk, cd);
4813 } else if (isLUKS2(cd->type)) {
4814 r = LUKS2_keyslot_open(cd, keyslot,
4815 keyslot == CRYPT_ANY_SLOT ? CRYPT_DEFAULT_SEGMENT : CRYPT_ANY_SEGMENT,
4816 passphrase, passphrase_size, &vk);
4817 } else if (isTCRYPT(cd->type)) {
4818 r = TCRYPT_get_volume_key(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params, &vk);
4819 } else if (isVERITY(cd->type)) {
4820 /* volume_key == root hash */
4821 if (cd->u.verity.root_hash) {
4822 memcpy(volume_key, cd->u.verity.root_hash, cd->u.verity.root_hash_size);
4823 *volume_key_size = cd->u.verity.root_hash_size;
4824 r = 0;
4825 } else
4826 log_err(cd, _("Cannot retrieve root hash for verity device."));
4827 } else if (isBITLK(cd->type)) {
4828 r = BITLK_get_volume_key(cd, passphrase, passphrase_size, &cd->u.bitlk.params, &vk);
4829 } else
4830 log_err(cd, _("This operation is not supported for %s crypt device."), cd->type ?: "(none)");
4831
4832 if (r >= 0 && vk) {
4833 memcpy(volume_key, vk->key, vk->keylength);
4834 *volume_key_size = vk->keylength;
4835 }
4836
4837 crypt_free_volume_key(vk);
4838 return r;
4839 }
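
/*
 * Illustrative sketch (editorial addition): extracting the volume key with
 * a valid passphrase. The buffer and passphrase are hypothetical; the
 * buffer must hold at least crypt_get_volume_key_size(cd) bytes and the
 * call updates vk_size to the real key length.
 *
 *   char vk[256];
 *   size_t vk_size = sizeof(vk);
 *   r = crypt_volume_key_get(cd, CRYPT_ANY_SLOT, vk, &vk_size,
 *                            "secret", strlen("secret"));
 */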
4840
4841 int crypt_volume_key_verify(struct crypt_device *cd,
4842 const char *volume_key,
4843 size_t volume_key_size)
4844 {
4845 struct volume_key *vk;
4846 int r;
4847
4848 if ((r = _onlyLUKS(cd, CRYPT_CD_UNRESTRICTED)))
4849 return r;
4850
4851 vk = crypt_alloc_volume_key(volume_key_size, volume_key);
4852 if (!vk)
4853 return -ENOMEM;
4854
4855 if (isLUKS1(cd->type))
4856 r = LUKS_verify_volume_key(&cd->u.luks1.hdr, vk);
4857 else if (isLUKS2(cd->type))
4858 r = LUKS2_digest_verify_by_segment(cd, &cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT, vk);
4859 else
4860 r = -EINVAL;
4861
4862
4863 if (r == -EPERM)
4864 log_err(cd, _("Volume key does not match the volume."));
4865
4866 crypt_free_volume_key(vk);
4867
4868 return r >= 0 ? 0 : r;
4869 }
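
/*
 * Illustrative sketch (editorial addition): checking a candidate volume key
 * against the loaded LUKS header; 0 means the key matches, -EPERM that it
 * does not.
 *
 *   r = crypt_volume_key_verify(cd, vk, vk_size);
 */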
4870
4871 /*
4872 * RNG and memory locking
4873 */
4874 void crypt_set_rng_type(struct crypt_device *cd, int rng_type)
4875 {
4876 if (!cd)
4877 return;
4878
4879 switch (rng_type) {
4880 case CRYPT_RNG_URANDOM:
4881 case CRYPT_RNG_RANDOM:
4882 log_dbg(cd, "RNG set to %d (%s).", rng_type, rng_type ? "random" : "urandom");
4883 cd->rng_type = rng_type;
4884 }
4885 }
4886
4887 int crypt_get_rng_type(struct crypt_device *cd)
4888 {
4889 if (!cd)
4890 return -EINVAL;
4891
4892 return cd->rng_type;
4893 }
4894
4895 int crypt_memory_lock(struct crypt_device *cd, int lock)
4896 {
4897 return lock ? crypt_memlock_inc(cd) : crypt_memlock_dec(cd);
4898 }
4899
4900 void crypt_set_compatibility(struct crypt_device *cd, uint32_t flags)
4901 {
4902 if (cd)
4903 cd->compatibility = flags;
4904 }
4905
4906 uint32_t crypt_get_compatibility(struct crypt_device *cd)
4907 {
4908 if (cd)
4909 return cd->compatibility;
4910
4911 return 0;
4912 }
4913
4914 /*
4915 * Reporting
4916 */
4917 crypt_status_info crypt_status(struct crypt_device *cd, const char *name)
4918 {
4919 int r;
4920
4921 if (!name)
4922 return CRYPT_INVALID;
4923
4924 if (!cd)
4925 dm_backend_init(cd);
4926
4927 r = dm_status_device(cd, name);
4928
4929 if (!cd)
4930 dm_backend_exit(cd);
4931
4932 if (r < 0 && r != -ENODEV)
4933 return CRYPT_INVALID;
4934
4935 if (r == 0)
4936 return CRYPT_ACTIVE;
4937
4938 if (r > 0)
4939 return CRYPT_BUSY;
4940
4941 return CRYPT_INACTIVE;
4942 }
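
/*
 * Illustrative sketch (editorial addition): querying the state of a mapping
 * without an initialized context (cd == NULL is allowed here). The mapping
 * name is hypothetical.
 *
 *   switch (crypt_status(NULL, "backup_crypt")) {
 *   case CRYPT_ACTIVE:
 *       printf("active\n");
 *       break;
 *   case CRYPT_BUSY:
 *       printf("active and in use\n");
 *       break;
 *   case CRYPT_INACTIVE:
 *       printf("not active\n");
 *       break;
 *   default:
 *       printf("query failed\n");
 *   }
 */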
4943
4944