"Fossies" - the Fresh Open Source Software Archive

Member "cryptsetup-2.4.3/lib/luks2/luks2_reencrypt.c" (13 Jan 2022, 109379 Bytes) of package /linux/misc/cryptsetup-2.4.3.tar.xz:


As a special service, "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and a code folding option. Alternatively, you can view or download the uninterpreted source code file here. For more information about "luks2_reencrypt.c" see the Fossies "Dox" file reference documentation and the latest Fossies "Diffs" side-by-side code changes report: 2.4.2_vs_2.4.3.

    1 /*
    2  * LUKS - Linux Unified Key Setup v2, reencryption helpers
    3  *
    4  * Copyright (C) 2015-2021, Red Hat, Inc. All rights reserved.
    5  * Copyright (C) 2015-2021, Ondrej Kozina
    6  *
    7  * This program is free software; you can redistribute it and/or
    8  * modify it under the terms of the GNU General Public License
    9  * as published by the Free Software Foundation; either version 2
   10  * of the License, or (at your option) any later version.
   11  *
   12  * This program is distributed in the hope that it will be useful,
   13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
   14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   15  * GNU General Public License for more details.
   16  *
   17  * You should have received a copy of the GNU General Public License
   18  * along with this program; if not, write to the Free Software
   19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
   20  */
   21 
   22 #include "luks2_internal.h"
   23 #include "utils_device_locking.h"
   24 
/*
 * Resilience (crash-recovery) protection used while writing one
 * reencryption hotzone. The active union member is selected by 'type'.
 */
struct reenc_protection {
	enum { REENC_PROTECTION_NONE = 0, /* none should be 0 always */
	       REENC_PROTECTION_CHECKSUM,
	       REENC_PROTECTION_JOURNAL,
	       REENC_PROTECTION_DATASHIFT } type;

	union {
	struct {
	} none;
	struct {
		/* checksum hash algorithm name (see LUKS2_CHECKSUM_ALG_L; or include luks.h) */
		char hash[LUKS2_CHECKSUM_ALG_L];
		struct crypt_hash *ch;	/* hash context used to compute block checksums */
		size_t hash_size;	/* single checksum length in bytes */
		/* buffer for checksums */
		void *checksums;
		size_t checksums_len;	/* allocated size of 'checksums' buffer */
	} csum;
	struct {
	} ds;
	} p;
};
   46 
/* In-memory state of a LUKS2 reencryption operation. */
struct luks2_reencrypt {
	/* reencryption window attributes */
	uint64_t offset;	/* current hotzone offset (bytes, within data area) */
	uint64_t progress;
	uint64_t length;	/* current hotzone length in bytes */
	uint64_t data_shift;	/* data shift in bytes; 0 unless datashift mode */
	size_t alignment;	/* required IO alignment (stored as checksum "sector_size") */
	uint64_t device_size;	/* data device size used when length is fixed */
	bool online;
	bool fixed_length;	/* device size is explicit, not 'dynamic' */
	crypt_reencrypt_direction_info direction;
	crypt_reencrypt_mode_info mode;	/* encrypt, decrypt or reencrypt */

	char *device_name;
	char *hotzone_name;
	char *overlay_name;
	uint32_t flags;

	/* reencryption window persistence attributes */
	struct reenc_protection rp;

	int reenc_keyslot;	/* id of the "reencrypt" keyslot in the LUKS2 header */

	/* already running reencryption */
	json_object *jobj_segs_hot;	/* segment map while hotzone is in flight */
	struct json_object *jobj_segs_post;	/* segment map after hotzone is written */

	/* backup segments */
	json_object *jobj_segment_new;	/* "backup-final" segment (owned reference) */
	int digest_new;			/* digest id for new segment, or -ENOENT */
	json_object *jobj_segment_old;	/* "backup-previous" segment (owned reference) */
	int digest_old;			/* digest id for old segment, or -ENOENT */
	json_object *jobj_segment_moved;	/* "backup-moved-segment", or NULL */

	struct volume_key *vks;

	void *reenc_buffer;	/* hotzone bounce buffer; freed in LUKS2_reencrypt_free() */
	ssize_t read;		/* presumably bytes currently held in reenc_buffer — not used in this view */

	/* storage wrappers; exact read/write roles not visible here — see callers */
	struct crypt_storage_wrapper *cw1;
	struct crypt_storage_wrapper *cw2;

	uint32_t wflags1;
	uint32_t wflags2;

	struct crypt_lock_handle *reenc_lock;	/* released in LUKS2_reencrypt_free() */
};
   94 #if USE_LUKS2_REENCRYPTION
   95 static int reencrypt_keyslot_update(struct crypt_device *cd,
   96     const struct luks2_reencrypt *rh)
   97 {
   98     int r;
   99     json_object *jobj_keyslot, *jobj_area, *jobj_area_type;
  100     struct luks2_hdr *hdr;
  101 
  102     if (!(hdr = crypt_get_hdr(cd, CRYPT_LUKS2)))
  103         return -EINVAL;
  104 
  105     jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, rh->reenc_keyslot);
  106     if (!jobj_keyslot)
  107         return -EINVAL;
  108 
  109     json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
  110     json_object_object_get_ex(jobj_area, "type", &jobj_area_type);
  111 
  112     if (rh->rp.type == REENC_PROTECTION_CHECKSUM) {
  113         log_dbg(cd, "Updating reencrypt keyslot for checksum protection.");
  114         json_object_object_add(jobj_area, "type", json_object_new_string("checksum"));
  115         json_object_object_add(jobj_area, "hash", json_object_new_string(rh->rp.p.csum.hash));
  116         json_object_object_add(jobj_area, "sector_size", json_object_new_int64(rh->alignment));
  117     } else if (rh->rp.type == REENC_PROTECTION_NONE) {
  118         log_dbg(cd, "Updating reencrypt keyslot for none protection.");
  119         json_object_object_add(jobj_area, "type", json_object_new_string("none"));
  120         json_object_object_del(jobj_area, "hash");
  121     } else if (rh->rp.type == REENC_PROTECTION_JOURNAL) {
  122         log_dbg(cd, "Updating reencrypt keyslot for journal protection.");
  123         json_object_object_add(jobj_area, "type", json_object_new_string("journal"));
  124         json_object_object_del(jobj_area, "hash");
  125     } else
  126         log_dbg(cd, "No update of reencrypt keyslot needed.");
  127 
  128     r = LUKS2_keyslot_reencrypt_digest_create(cd, hdr, rh->vks);
  129     if (r < 0)
  130         log_err(cd, "Failed to refresh reencryption verification digest.");
  131 
  132     return r;
  133 }
  134 
  135 static json_object *reencrypt_segment(struct luks2_hdr *hdr, unsigned new)
  136 {
  137     return LUKS2_get_segment_by_flag(hdr, new ? "backup-final" : "backup-previous");
  138 }
  139 
/* Backup segment describing the target (new) data layout. */
static json_object *reencrypt_segment_new(struct luks2_hdr *hdr)
{
	return reencrypt_segment(hdr, 1);
}

/* Backup segment describing the source (old) data layout. */
static json_object *reencrypt_segment_old(struct luks2_hdr *hdr)
{
	return reencrypt_segment(hdr, 0);
}
  149 
/* Cipher specification string of the target (new) backup segment. */
static const char *reencrypt_segment_cipher_new(struct luks2_hdr *hdr)
{
	return json_segment_get_cipher(reencrypt_segment(hdr, 1));
}

/* Cipher specification string of the source (old) backup segment. */
static const char *reencrypt_segment_cipher_old(struct luks2_hdr *hdr)
{
	return json_segment_get_cipher(reencrypt_segment(hdr, 0));
}
  159 
/* Encryption sector size of the target (new) backup segment. */
static int reencrypt_get_sector_size_new(struct luks2_hdr *hdr)
{
	return json_segment_get_sector_size(reencrypt_segment(hdr, 1));
}

/* Encryption sector size of the source (old) backup segment. */
static int reencrypt_get_sector_size_old(struct luks2_hdr *hdr)
{
	return json_segment_get_sector_size(reencrypt_segment(hdr, 0));
}
  169 
  170 static uint64_t reencrypt_data_offset(struct luks2_hdr *hdr, unsigned new)
  171 {
  172     json_object *jobj = reencrypt_segment(hdr, new);
  173     if (jobj)
  174         return json_segment_get_offset(jobj, 0);
  175 
  176     return LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;
  177 }
  178 
  179 static uint64_t LUKS2_reencrypt_get_data_offset_moved(struct luks2_hdr *hdr)
  180 {
  181     json_object *jobj_segment = LUKS2_get_segment_by_flag(hdr, "backup-moved-segment");
  182 
  183     if (!jobj_segment)
  184         return 0;
  185 
  186     return json_segment_get_offset(jobj_segment, 0);
  187 }
  188 
/* Data offset (bytes) of the target (new) layout. */
static uint64_t reencrypt_get_data_offset_new(struct luks2_hdr *hdr)
{
	return reencrypt_data_offset(hdr, 1);
}

/* Data offset (bytes) of the source (old) layout. */
static uint64_t reencrypt_get_data_offset_old(struct luks2_hdr *hdr)
{
	return reencrypt_data_offset(hdr, 0);
}
  198 #endif
  199 static int reencrypt_digest(struct luks2_hdr *hdr, unsigned new)
  200 {
  201     int segment = LUKS2_get_segment_id_by_flag(hdr, new ? "backup-final" : "backup-previous");
  202 
  203     if (segment < 0)
  204         return segment;
  205 
  206     return LUKS2_digest_by_segment(hdr, segment);
  207 }
  208 
/* Digest id for the target (new) keys, or negative error. */
int LUKS2_reencrypt_digest_new(struct luks2_hdr *hdr)
{
	return reencrypt_digest(hdr, 1);
}

/* Digest id for the source (old) keys, or negative error. */
int LUKS2_reencrypt_digest_old(struct luks2_hdr *hdr)
{
	return reencrypt_digest(hdr, 0);
}
  218 
  219 /* none, checksums, journal or shift */
  220 static const char *reencrypt_resilience_type(struct luks2_hdr *hdr)
  221 {
  222     json_object *jobj_keyslot, *jobj_area, *jobj_type;
  223     int ks = LUKS2_find_keyslot(hdr, "reencrypt");
  224 
  225     if (ks < 0)
  226         return NULL;
  227 
  228     jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
  229 
  230     json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
  231     if (!json_object_object_get_ex(jobj_area, "type", &jobj_type))
  232         return NULL;
  233 
  234     return json_object_get_string(jobj_type);
  235 }
  236 
  237 static const char *reencrypt_resilience_hash(struct luks2_hdr *hdr)
  238 {
  239     json_object *jobj_keyslot, *jobj_area, *jobj_type, *jobj_hash;
  240     int ks = LUKS2_find_keyslot(hdr, "reencrypt");
  241 
  242     if (ks < 0)
  243         return NULL;
  244 
  245     jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
  246 
  247     json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
  248     if (!json_object_object_get_ex(jobj_area, "type", &jobj_type))
  249         return NULL;
  250     if (strcmp(json_object_get_string(jobj_type), "checksum"))
  251         return NULL;
  252     if (!json_object_object_get_ex(jobj_area, "hash", &jobj_hash))
  253         return NULL;
  254 
  255     return json_object_get_string(jobj_hash);
  256 }
  257 #if USE_LUKS2_REENCRYPTION
  258 static uint32_t reencrypt_alignment(struct luks2_hdr *hdr)
  259 {
  260     json_object *jobj_keyslot, *jobj_area, *jobj_type, *jobj_hash, *jobj_sector_size;
  261     int ks = LUKS2_find_keyslot(hdr, "reencrypt");
  262 
  263     if (ks < 0)
  264         return 0;
  265 
  266     jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
  267 
  268     json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
  269     if (!json_object_object_get_ex(jobj_area, "type", &jobj_type))
  270         return 0;
  271     if (strcmp(json_object_get_string(jobj_type), "checksum"))
  272         return 0;
  273     if (!json_object_object_get_ex(jobj_area, "hash", &jobj_hash))
  274         return 0;
  275     if (!json_object_object_get_ex(jobj_area, "sector_size", &jobj_sector_size))
  276         return 0;
  277 
  278     return crypt_jobj_get_uint32(jobj_sector_size);
  279 }
  280 
/*
 * Build the post-hotzone segment map for encryption with data shift:
 * segments before the one in reencryption are copied unchanged and the
 * just-encrypted hotzone is merged into the segment that follows it.
 *
 * Returns a new json object (caller owns the reference) or NULL on error.
 */
static json_object *_enc_create_segments_shift_after(struct luks2_reencrypt *rh, uint64_t data_offset)
{
	int reenc_seg, i = 0;
	json_object *jobj_copy, *jobj_seg_new = NULL, *jobj_segs_post = json_object_new_object();
	uint64_t tmp;

	if (!rh->jobj_segs_hot || !jobj_segs_post)
		goto err;

	/* empty hot map: nothing to merge, return the empty post map */
	if (json_segments_count(rh->jobj_segs_hot) == 0)
		return jobj_segs_post;

	reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
	if (reenc_seg < 0)
		goto err;

	/* segments preceding the hotzone are carried over unchanged */
	while (i < reenc_seg) {
		jobj_copy = json_segments_get_segment(rh->jobj_segs_hot, i);
		if (!jobj_copy)
			goto err;
		json_object_object_add_by_uint(jobj_segs_post, i++, json_object_get(jobj_copy));
	}

	/*
	 * Copy fails when there is no segment after the hotzone; in that
	 * case reuse the hotzone segment itself (dropping its flag).
	 */
	if (json_object_copy(json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1), &jobj_seg_new)) {
		if (json_object_copy(json_segments_get_segment(rh->jobj_segs_hot, reenc_seg), &jobj_seg_new))
			goto err;
		json_segment_remove_flag(jobj_seg_new, "in-reencryption");
		tmp = rh->length;
	} else {
		/* extend the following segment backwards over the hotzone */
		json_object_object_add(jobj_seg_new, "offset", crypt_jobj_new_uint64(rh->offset + data_offset));
		json_object_object_add(jobj_seg_new, "iv_tweak", crypt_jobj_new_uint64(rh->offset >> SECTOR_SHIFT));
		tmp = json_segment_get_size(jobj_seg_new, 0) + rh->length;
	}

	/* alter size of new segment, reenc_seg == 0 we're finished */
	json_object_object_add(jobj_seg_new, "size", reenc_seg > 0 ? crypt_jobj_new_uint64(tmp) : json_object_new_string("dynamic"));
	json_object_object_add_by_uint(jobj_segs_post, reenc_seg, jobj_seg_new);

	return jobj_segs_post;
err:
	json_object_put(jobj_segs_post);
	return NULL;
}
  324 
/*
 * Build the hot (in-flight) segment map for encryption with data move:
 * existing linear segments, a shrunk tail of the last linear segment if
 * the hotzone does not consume it fully, the in-reencryption crypt
 * segment, and any already-encrypted crypt segment after it.
 *
 * Returns a new json object (caller owns the reference) or NULL on error.
 */
static json_object *reencrypt_make_hot_segments_encrypt_shift(struct luks2_hdr *hdr,
	struct luks2_reencrypt *rh,
	uint64_t data_offset)
{
	int sg, crypt_seg, i = 0;
	uint64_t segment_size;
	json_object *jobj_seg_shrunk, *jobj_seg_new, *jobj_copy, *jobj_enc_seg = NULL,
		     *jobj_segs_hot = json_object_new_object();

	if (!jobj_segs_hot)
		return NULL;

	crypt_seg = LUKS2_segment_by_type(hdr, "crypt");

	/* FIXME: This is hack. Find proper way to fix it. */
	sg = LUKS2_last_segment_by_type(hdr, "linear");
	if (rh->offset && sg < 0)
		goto err;
	/* no linear segment left and nothing encrypted yet: empty hot map */
	if (sg < 0)
		return jobj_segs_hot;

	/* the hotzone itself, flagged in-reencryption */
	jobj_enc_seg = json_segment_create_crypt(data_offset + rh->offset,
						  rh->offset >> SECTOR_SHIFT,
						  &rh->length,
						  reencrypt_segment_cipher_new(hdr),
						  reencrypt_get_sector_size_new(hdr),
						  1);

	/* copy all segments preceding the last linear one unchanged */
	while (i < sg) {
		jobj_copy = LUKS2_get_segment_jobj(hdr, i);
		if (!jobj_copy)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, i++, json_object_get(jobj_copy));
	}

	/* hotzone only covers the tail of the linear segment: keep a shrunk head */
	segment_size = LUKS2_segment_size(hdr, sg, 0);
	if (segment_size > rh->length) {
		jobj_seg_shrunk = NULL;
		if (json_object_copy(LUKS2_get_segment_jobj(hdr, sg), &jobj_seg_shrunk))
			goto err;
		json_object_object_add(jobj_seg_shrunk, "size", crypt_jobj_new_uint64(segment_size - rh->length));
		json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_seg_shrunk);
	}

	json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_enc_seg);
	jobj_enc_seg = NULL; /* see err: label */

	/* first crypt segment after encryption ? */
	if (crypt_seg >= 0) {
		jobj_seg_new = LUKS2_get_segment_jobj(hdr, crypt_seg);
		if (!jobj_seg_new)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, sg, json_object_get(jobj_seg_new));
	}

	return jobj_segs_hot;
err:
	json_object_put(jobj_enc_seg);
	json_object_put(jobj_segs_hot);

	return NULL;
}
  387 
  388 static json_object *reencrypt_make_segment_new(struct crypt_device *cd,
  389         struct luks2_hdr *hdr,
  390         const struct luks2_reencrypt *rh,
  391         uint64_t data_offset,
  392         uint64_t segment_offset,
  393         uint64_t iv_offset,
  394         const uint64_t *segment_length)
  395 {
  396     switch (rh->mode) {
  397     case CRYPT_REENCRYPT_REENCRYPT:
  398     case CRYPT_REENCRYPT_ENCRYPT:
  399         return json_segment_create_crypt(data_offset + segment_offset,
  400                           crypt_get_iv_offset(cd) + (iv_offset >> SECTOR_SHIFT),
  401                           segment_length,
  402                           reencrypt_segment_cipher_new(hdr),
  403                           reencrypt_get_sector_size_new(hdr), 0);
  404     case CRYPT_REENCRYPT_DECRYPT:
  405         return json_segment_create_linear(data_offset + segment_offset, segment_length, 0);
  406     }
  407 
  408     return NULL;
  409 }
  410 
  411 static json_object *reencrypt_make_post_segments_forward(struct crypt_device *cd,
  412     struct luks2_hdr *hdr,
  413     struct luks2_reencrypt *rh,
  414     uint64_t data_offset)
  415 {
  416     int reenc_seg;
  417     json_object *jobj_new_seg_after, *jobj_old_seg, *jobj_old_seg_copy = NULL,
  418             *jobj_segs_post = json_object_new_object();
  419     uint64_t fixed_length = rh->offset + rh->length;
  420 
  421     if (!rh->jobj_segs_hot || !jobj_segs_post)
  422         goto err;
  423 
  424     reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
  425     if (reenc_seg < 0)
  426         return NULL;
  427 
  428     jobj_old_seg = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1);
  429 
  430     /*
  431      * if there's no old segment after reencryption, we're done.
  432      * Set size to 'dynamic' again.
  433      */
  434     jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, jobj_old_seg ? &fixed_length : NULL);
  435     if (!jobj_new_seg_after)
  436         goto err;
  437     json_object_object_add_by_uint(jobj_segs_post, 0, jobj_new_seg_after);
  438 
  439     if (jobj_old_seg) {
  440         if (rh->fixed_length) {
  441             if (json_object_copy(jobj_old_seg, &jobj_old_seg_copy))
  442                 goto err;
  443             jobj_old_seg = jobj_old_seg_copy;
  444             fixed_length = rh->device_size - fixed_length;
  445             json_object_object_add(jobj_old_seg, "size", crypt_jobj_new_uint64(fixed_length));
  446         } else
  447             json_object_get(jobj_old_seg);
  448         json_object_object_add_by_uint(jobj_segs_post, 1, jobj_old_seg);
  449     }
  450 
  451     return jobj_segs_post;
  452 err:
  453     json_object_put(jobj_segs_post);
  454     return NULL;
  455 }
  456 
  457 static json_object *reencrypt_make_post_segments_backward(struct crypt_device *cd,
  458     struct luks2_hdr *hdr,
  459     struct luks2_reencrypt *rh,
  460     uint64_t data_offset)
  461 {
  462     int reenc_seg;
  463     uint64_t fixed_length;
  464 
  465     json_object *jobj_new_seg_after, *jobj_old_seg,
  466             *jobj_segs_post = json_object_new_object();
  467 
  468     if (!rh->jobj_segs_hot || !jobj_segs_post)
  469         goto err;
  470 
  471     reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
  472     if (reenc_seg < 0)
  473         return NULL;
  474 
  475     jobj_old_seg = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg - 1);
  476     if (jobj_old_seg)
  477         json_object_object_add_by_uint(jobj_segs_post, reenc_seg - 1, json_object_get(jobj_old_seg));
  478     if (rh->fixed_length && rh->offset) {
  479         fixed_length = rh->device_size - rh->offset;
  480         jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset, rh->offset, &fixed_length);
  481     } else
  482         jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset, rh->offset, NULL);
  483     if (!jobj_new_seg_after)
  484         goto err;
  485     json_object_object_add_by_uint(jobj_segs_post, reenc_seg, jobj_new_seg_after);
  486 
  487     return jobj_segs_post;
  488 err:
  489     json_object_put(jobj_segs_post);
  490     return NULL;
  491 }
  492 
  493 static json_object *reencrypt_make_segment_reencrypt(struct crypt_device *cd,
  494         struct luks2_hdr *hdr,
  495         const struct luks2_reencrypt *rh,
  496         uint64_t data_offset,
  497         uint64_t segment_offset,
  498         uint64_t iv_offset,
  499         const uint64_t *segment_length)
  500 {
  501     switch (rh->mode) {
  502     case CRYPT_REENCRYPT_REENCRYPT:
  503     case CRYPT_REENCRYPT_ENCRYPT:
  504         return json_segment_create_crypt(data_offset + segment_offset,
  505                 crypt_get_iv_offset(cd) + (iv_offset >> SECTOR_SHIFT),
  506                 segment_length,
  507                 reencrypt_segment_cipher_new(hdr),
  508                 reencrypt_get_sector_size_new(hdr), 1);
  509     case CRYPT_REENCRYPT_DECRYPT:
  510         return json_segment_create_linear(data_offset + segment_offset, segment_length, 1);
  511     }
  512 
  513     return NULL;
  514 }
  515 
  516 static json_object *reencrypt_make_segment_old(struct crypt_device *cd,
  517         struct luks2_hdr *hdr,
  518         const struct luks2_reencrypt *rh,
  519         uint64_t data_offset,
  520         uint64_t segment_offset,
  521         const uint64_t *segment_length)
  522 {
  523     json_object *jobj_old_seg = NULL;
  524 
  525     switch (rh->mode) {
  526     case CRYPT_REENCRYPT_REENCRYPT:
  527     case CRYPT_REENCRYPT_DECRYPT:
  528         jobj_old_seg = json_segment_create_crypt(data_offset + segment_offset,
  529                             crypt_get_iv_offset(cd) + (segment_offset >> SECTOR_SHIFT),
  530                             segment_length,
  531                             reencrypt_segment_cipher_old(hdr),
  532                             reencrypt_get_sector_size_old(hdr),
  533                             0);
  534         break;
  535     case CRYPT_REENCRYPT_ENCRYPT:
  536         jobj_old_seg = json_segment_create_linear(data_offset + segment_offset, segment_length, 0);
  537     }
  538 
  539     return jobj_old_seg;
  540 }
  541 
/*
 * Build the hot (in-flight) segment map for forward direction:
 * [new-format head][hotzone in reencryption][old-format tail].
 *
 * Returns a new json object (caller owns the reference) or NULL on error.
 */
static json_object *reencrypt_make_hot_segments_forward(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		struct luks2_reencrypt *rh,
		uint64_t device_size,
		uint64_t data_offset)
{
	json_object *jobj_segs_hot, *jobj_reenc_seg, *jobj_old_seg, *jobj_new_seg;
	uint64_t fixed_length, tmp = rh->offset + rh->length;
	unsigned int sg = 0;

	jobj_segs_hot = json_object_new_object();
	if (!jobj_segs_hot)
		return NULL;

	/* everything before the hotzone is already in the new format */
	if (rh->offset) {
		jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, &rh->offset);
		if (!jobj_new_seg)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_new_seg);
	}

	jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset, rh->offset, rh->offset, &rh->length);
	if (!jobj_reenc_seg)
		goto err;

	json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_reenc_seg);

	/* old-format tail after the hotzone; note data_shift applied to its offset */
	if (tmp < device_size) {
		fixed_length = device_size - tmp;
		jobj_old_seg = reencrypt_make_segment_old(cd, hdr, rh, data_offset + rh->data_shift, rh->offset + rh->length, rh->fixed_length ? &fixed_length : NULL);
		if (!jobj_old_seg)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, sg, jobj_old_seg);
	}

	return jobj_segs_hot;
err:
	json_object_put(jobj_segs_hot);
	return NULL;
}
  582 
/*
 * Build the hot (in-flight) segment map for backward direction:
 * [old-format head][hotzone in reencryption][new-format tail].
 *
 * Returns a new json object (caller owns the reference) or NULL on error.
 */
static json_object *reencrypt_make_hot_segments_backward(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		struct luks2_reencrypt *rh,
		uint64_t device_size,
		uint64_t data_offset)
{
	json_object *jobj_reenc_seg, *jobj_new_seg, *jobj_old_seg = NULL,
		    *jobj_segs_hot = json_object_new_object();
	int sg = 0;
	uint64_t fixed_length, tmp = rh->offset + rh->length;

	if (!jobj_segs_hot)
		return NULL;

	/* old-format head: reuse segment 0 from the header, size-limited to the hotzone start */
	if (rh->offset) {
		if (json_object_copy(LUKS2_get_segment_jobj(hdr, 0), &jobj_old_seg))
			goto err;
		json_object_object_add(jobj_old_seg, "size", crypt_jobj_new_uint64(rh->offset));

		json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_old_seg);
	}

	jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset, rh->offset, rh->offset, &rh->length);
	if (!jobj_reenc_seg)
		goto err;

	json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_reenc_seg);

	/* new-format tail after the hotzone (already converted in earlier steps) */
	if (tmp < device_size) {
		fixed_length = device_size - tmp;
		jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset + rh->length, rh->offset + rh->length, rh->fixed_length ? &fixed_length : NULL);
		if (!jobj_new_seg)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, sg, jobj_new_seg);
	}

	return jobj_segs_hot;
err:
	json_object_put(jobj_segs_hot);
	return NULL;
}
  624 
  625 static int reencrypt_make_hot_segments(struct crypt_device *cd,
  626         struct luks2_hdr *hdr,
  627         struct luks2_reencrypt *rh,
  628         uint64_t device_size,
  629         uint64_t data_offset)
  630 {
  631     rh->jobj_segs_hot = NULL;
  632 
  633     if (rh->mode == CRYPT_REENCRYPT_ENCRYPT && rh->direction == CRYPT_REENCRYPT_BACKWARD &&
  634         rh->data_shift && rh->jobj_segment_moved) {
  635         log_dbg(cd, "Calculating hot segments for encryption with data move.");
  636         rh->jobj_segs_hot = reencrypt_make_hot_segments_encrypt_shift(hdr, rh, data_offset);
  637     } else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
  638         log_dbg(cd, "Calculating hot segments (forward direction).");
  639         rh->jobj_segs_hot = reencrypt_make_hot_segments_forward(cd, hdr, rh, device_size, data_offset);
  640     } else if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
  641         log_dbg(cd, "Calculating hot segments (backward direction).");
  642         rh->jobj_segs_hot = reencrypt_make_hot_segments_backward(cd, hdr, rh, device_size, data_offset);
  643     }
  644 
  645     return rh->jobj_segs_hot ? 0 : -EINVAL;
  646 }
  647 
  648 static int reencrypt_make_post_segments(struct crypt_device *cd,
  649         struct luks2_hdr *hdr,
  650         struct luks2_reencrypt *rh,
  651         uint64_t data_offset)
  652 {
  653     rh->jobj_segs_post = NULL;
  654 
  655     if (rh->mode == CRYPT_REENCRYPT_ENCRYPT && rh->direction == CRYPT_REENCRYPT_BACKWARD &&
  656         rh->data_shift && rh->jobj_segment_moved) {
  657         log_dbg(cd, "Calculating post segments for encryption with data move.");
  658         rh->jobj_segs_post = _enc_create_segments_shift_after(rh, data_offset);
  659     } else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
  660         log_dbg(cd, "Calculating post segments (forward direction).");
  661         rh->jobj_segs_post = reencrypt_make_post_segments_forward(cd, hdr, rh, data_offset);
  662     } else if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
  663         log_dbg(cd, "Calculating segments (backward direction).");
  664         rh->jobj_segs_post = reencrypt_make_post_segments_backward(cd, hdr, rh, data_offset);
  665     }
  666 
  667     return rh->jobj_segs_post ? 0 : -EINVAL;
  668 }
  669 #endif
  670 static uint64_t reencrypt_data_shift(struct luks2_hdr *hdr)
  671 {
  672     json_object *jobj_keyslot, *jobj_area, *jobj_data_shift;
  673     int ks = LUKS2_find_keyslot(hdr, "reencrypt");
  674 
  675     if (ks < 0)
  676         return 0;
  677 
  678     jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
  679 
  680     json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
  681     if (!json_object_object_get_ex(jobj_area, "shift_size", &jobj_data_shift))
  682         return 0;
  683 
  684     return crypt_jobj_get_uint64(jobj_data_shift);
  685 }
  686 
  687 static crypt_reencrypt_mode_info reencrypt_mode(struct luks2_hdr *hdr)
  688 {
  689     const char *mode;
  690     crypt_reencrypt_mode_info mi = CRYPT_REENCRYPT_REENCRYPT;
  691     json_object *jobj_keyslot, *jobj_mode;
  692 
  693     jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, LUKS2_find_keyslot(hdr, "reencrypt"));
  694     if (!jobj_keyslot)
  695         return mi;
  696 
  697     json_object_object_get_ex(jobj_keyslot, "mode", &jobj_mode);
  698     mode = json_object_get_string(jobj_mode);
  699 
  700     /* validation enforces allowed values */
  701     if (!strcmp(mode, "encrypt"))
  702         mi = CRYPT_REENCRYPT_ENCRYPT;
  703     else if (!strcmp(mode, "decrypt"))
  704         mi = CRYPT_REENCRYPT_DECRYPT;
  705 
  706     return mi;
  707 }
  708 
  709 static crypt_reencrypt_direction_info reencrypt_direction(struct luks2_hdr *hdr)
  710 {
  711     const char *value;
  712     json_object *jobj_keyslot, *jobj_mode;
  713     crypt_reencrypt_direction_info di = CRYPT_REENCRYPT_FORWARD;
  714 
  715     jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, LUKS2_find_keyslot(hdr, "reencrypt"));
  716     if (!jobj_keyslot)
  717         return di;
  718 
  719     json_object_object_get_ex(jobj_keyslot, "direction", &jobj_mode);
  720     value = json_object_get_string(jobj_mode);
  721 
  722     /* validation enforces allowed values */
  723     if (strcmp(value, "forward"))
  724         di = CRYPT_REENCRYPT_BACKWARD;
  725 
  726     return di;
  727 }
  728 
/* Status codes used by reencryption step handlers. */
typedef enum { REENC_OK = 0, REENC_ERR, REENC_ROLLBACK, REENC_FATAL } reenc_status_t;
  730 
/*
 * Release all resources held by a reencryption context: checksum state,
 * json segment references, IO buffers, storage wrappers, keys, the
 * exclusive data-device claim and the reencryption lock; finally the
 * context itself. Safe to call with rh == NULL.
 */
void LUKS2_reencrypt_free(struct crypt_device *cd, struct luks2_reencrypt *rh)
{
	if (!rh)
		return;

	if (rh->rp.type == REENC_PROTECTION_CHECKSUM) {
		if (rh->rp.p.csum.ch) {
			crypt_hash_destroy(rh->rp.p.csum.ch);
			rh->rp.p.csum.ch = NULL;
		}
		if (rh->rp.p.csum.checksums) {
			/* NOTE(review): plain memset before free may be elided by the
			 * compiler; if checksums are considered sensitive, a
			 * non-elidable zeroing helper would be safer — confirm intent */
			memset(rh->rp.p.csum.checksums, 0, rh->rp.p.csum.checksums_len);
			free(rh->rp.p.csum.checksums);
			rh->rp.p.csum.checksums = NULL;
		}
	}

	/* drop owned json references; NULL the pointers against reuse */
	json_object_put(rh->jobj_segs_hot);
	rh->jobj_segs_hot = NULL;
	json_object_put(rh->jobj_segs_post);
	rh->jobj_segs_post = NULL;
	json_object_put(rh->jobj_segment_old);
	rh->jobj_segment_old = NULL;
	json_object_put(rh->jobj_segment_new);
	rh->jobj_segment_new = NULL;
	json_object_put(rh->jobj_segment_moved);
	rh->jobj_segment_moved = NULL;

	free(rh->reenc_buffer);
	rh->reenc_buffer = NULL;
	crypt_storage_wrapper_destroy(rh->cw1);
	rh->cw1 = NULL;
	crypt_storage_wrapper_destroy(rh->cw2);
	rh->cw2 = NULL;

	free(rh->device_name);
	free(rh->overlay_name);
	free(rh->hotzone_name);
	crypt_drop_keyring_key(cd, rh->vks);
	crypt_free_volume_key(rh->vks);
	/* release device claim and lock last, after all users are gone */
	device_release_excl(cd, crypt_data_device(cd));
	crypt_unlock_internal(cd, rh->reenc_lock);
	free(rh);
}
  775 #if USE_LUKS2_REENCRYPTION
  776 static size_t reencrypt_get_alignment(struct crypt_device *cd,
  777         struct luks2_hdr *hdr)
  778 {
  779     int ss;
  780     size_t alignment = device_block_size(cd, crypt_data_device(cd));
  781 
  782     ss = reencrypt_get_sector_size_old(hdr);
  783     if (ss > 0 && (size_t)ss > alignment)
  784         alignment = ss;
  785     ss = reencrypt_get_sector_size_new(hdr);
  786     if (ss > 0 && (size_t)ss > alignment)
  787         alignment = (size_t)ss;
  788 
  789     return alignment;
  790 }
  791 
  792 /* returns void because it must not fail on valid LUKS2 header */
  793 static void _load_backup_segments(struct luks2_hdr *hdr,
  794         struct luks2_reencrypt *rh)
  795 {
  796     int segment = LUKS2_get_segment_id_by_flag(hdr, "backup-final");
  797 
  798     if (segment >= 0) {
  799         rh->jobj_segment_new = json_object_get(LUKS2_get_segment_jobj(hdr, segment));
  800         rh->digest_new = LUKS2_digest_by_segment(hdr, segment);
  801     } else {
  802         rh->jobj_segment_new = NULL;
  803         rh->digest_new = -ENOENT;
  804     }
  805 
  806     segment = LUKS2_get_segment_id_by_flag(hdr, "backup-previous");
  807     if (segment >= 0) {
  808         rh->jobj_segment_old = json_object_get(LUKS2_get_segment_jobj(hdr, segment));
  809         rh->digest_old = LUKS2_digest_by_segment(hdr, segment);
  810     } else {
  811         rh->jobj_segment_old = NULL;
  812         rh->digest_old = -ENOENT;
  813     }
  814 
  815     segment = LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment");
  816     if (segment >= 0)
  817         rh->jobj_segment_moved = json_object_get(LUKS2_get_segment_jobj(hdr, segment));
  818     else
  819         rh->jobj_segment_moved = NULL;
  820 }
  821 
  822 static int reencrypt_offset_backward_moved(struct luks2_hdr *hdr, json_object *jobj_segments, uint64_t *reencrypt_length, uint64_t data_shift, uint64_t *offset)
  823 {
  824     uint64_t tmp, linear_length = 0;
  825     int sg, segs = json_segments_count(jobj_segments);
  826 
  827     /* find reencrypt offset with data shift */
  828     for (sg = 0; sg < segs; sg++)
  829         if (LUKS2_segment_is_type(hdr, sg, "linear"))
  830             linear_length += LUKS2_segment_size(hdr, sg, 0);
  831 
  832     /* all active linear segments length */
  833     if (linear_length && segs > 1) {
  834         if (linear_length < data_shift)
  835             return -EINVAL;
  836         tmp = linear_length - data_shift;
  837         if (tmp && tmp < data_shift) {
  838             *offset = data_shift;
  839             *reencrypt_length = tmp;
  840         } else
  841             *offset = tmp;
  842         return 0;
  843     }
  844 
  845     if (segs == 1) {
  846         *offset = 0;
  847         return 0;
  848     }
  849 
  850     /* should be unreachable */
  851 
  852     return -EINVAL;
  853 }
  854 
  855 static int _offset_forward(json_object *jobj_segments, uint64_t *offset)
  856 {
  857     int segs = json_segments_count(jobj_segments);
  858 
  859     if (segs == 1)
  860         *offset = 0;
  861     else if (segs == 2) {
  862         *offset = json_segment_get_size(json_segments_get_segment(jobj_segments, 0), 0);
  863         if (!*offset)
  864             return -EINVAL;
  865     } else
  866         return -EINVAL;
  867 
  868     return 0;
  869 }
  870 
  871 static int _offset_backward(json_object *jobj_segments, uint64_t device_size, uint64_t *length, uint64_t *offset)
  872 {
  873     int segs = json_segments_count(jobj_segments);
  874     uint64_t tmp;
  875 
  876     if (segs == 1) {
  877         if (device_size < *length)
  878             *length = device_size;
  879         *offset = device_size - *length;
  880     } else if (segs == 2) {
  881         tmp = json_segment_get_size(json_segments_get_segment(jobj_segments, 0), 0);
  882         if (tmp < *length)
  883             *length = tmp;
  884         *offset =  tmp - *length;
  885     } else
  886         return -EINVAL;
  887 
  888     return 0;
  889 }
  890 
  891 /* must be always relative to data offset */
  892 /* the LUKS2 header MUST be valid */
  893 static int reencrypt_offset(struct luks2_hdr *hdr,
  894         crypt_reencrypt_direction_info di,
  895         uint64_t device_size,
  896         uint64_t *reencrypt_length,
  897         uint64_t *offset)
  898 {
  899     int sg;
  900     json_object *jobj_segments;
  901     uint64_t data_shift = reencrypt_data_shift(hdr);
  902 
  903     if (!offset)
  904         return -EINVAL;
  905 
  906     /* if there's segment in reencryption return directly offset of it */
  907     json_object_object_get_ex(hdr->jobj, "segments", &jobj_segments);
  908     sg = json_segments_segment_in_reencrypt(jobj_segments);
  909     if (sg >= 0) {
  910         *offset = LUKS2_segment_offset(hdr, sg, 0) - (reencrypt_get_data_offset_new(hdr));
  911         return 0;
  912     }
  913 
  914     if (di == CRYPT_REENCRYPT_FORWARD)
  915         return _offset_forward(jobj_segments, offset);
  916     else if (di == CRYPT_REENCRYPT_BACKWARD) {
  917         if (reencrypt_mode(hdr) == CRYPT_REENCRYPT_ENCRYPT &&
  918             LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0)
  919             return reencrypt_offset_backward_moved(hdr, jobj_segments, reencrypt_length, data_shift, offset);
  920         return _offset_backward(jobj_segments, device_size, reencrypt_length, offset);
  921     }
  922 
  923     return -EINVAL;
  924 }
  925 
/*
 * Compute hotzone length (bytes) for a single reencryption step.
 *
 * @cd: crypt device context (used for device topology hints)
 * @hdr: LUKS2 header
 * @rh: reencryption context (protection type, hash size, alignment)
 * @keyslot_area_length: size of the reencrypt keyslot area in bytes
 * @length_max: requested maximum hotzone size in bytes (0 means no request)
 *
 * Returns the computed length; 0 signals an invalid length and the caller
 * is expected to emit the error.
 */
static uint64_t reencrypt_length(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		struct luks2_reencrypt *rh,
		uint64_t keyslot_area_length,
		uint64_t length_max)
{
	unsigned long dummy, optimal_alignment;
	uint64_t length, soft_mem_limit;

	if (rh->rp.type == REENC_PROTECTION_NONE)
		length = length_max ?: LUKS2_DEFAULT_NONE_REENCRYPTION_LENGTH;
	else if (rh->rp.type == REENC_PROTECTION_CHECKSUM)
		/* one checksum per alignment-sized block must fit in keyslot area */
		length = (keyslot_area_length / rh->rp.p.csum.hash_size) * rh->alignment;
	else if (rh->rp.type == REENC_PROTECTION_DATASHIFT)
		/* datashift mode moves exactly data_shift bytes per step */
		return reencrypt_data_shift(hdr);
	else
		length = keyslot_area_length;

	/* hard limit */
	if (length > LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH)
		length = LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH;

	/* soft limit is 1/4 of system memory */
	soft_mem_limit = crypt_getphysmemory_kb() << 8; /* multiply by (1024/4) */

	if (soft_mem_limit && length > soft_mem_limit)
		length = soft_mem_limit;

	if (length_max && length > length_max)
		length = length_max;

	/* round down to zone alignment */
	length -= (length % rh->alignment);

	/* Emits error later */
	if (!length)
		return length;

	device_topology_alignment(cd, crypt_data_device(cd), &optimal_alignment, &dummy, length);

	/* we have to stick with encryption sector size alignment */
	if (optimal_alignment % rh->alignment)
		return length;

	/* align to opt-io size only if remaining size allows it */
	if (length > optimal_alignment)
		length -= (length % optimal_alignment);

	return length;
}
  975 
/*
 * Initialize a reencryption context from the LUKS2 header and requested
 * parameters: locate the reencrypt keyslot, compute zone alignment, select
 * the resilience (crash protection) mode, and compute hotzone length,
 * offset and initial progress.
 *
 * Returns 0 on success, negative errno otherwise. A final hotzone length
 * below 512 bytes is rejected.
 */
static int reencrypt_context_init(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reencrypt *rh, uint64_t device_size, const struct crypt_params_reencrypt *params)
{
	int r;
	uint64_t dummy, area_length;

	/* reencryption requires the special "reencrypt" keyslot and its area */
	rh->reenc_keyslot = LUKS2_find_keyslot(hdr, "reencrypt");
	if (rh->reenc_keyslot < 0)
		return -EINVAL;
	if (LUKS2_keyslot_area(hdr, rh->reenc_keyslot, &dummy, &area_length) < 0)
		return -EINVAL;

	rh->mode = reencrypt_mode(hdr);

	rh->alignment = reencrypt_get_alignment(cd, hdr);
	if (!rh->alignment)
		return -EINVAL;

	log_dbg(cd, "Hotzone size: %" PRIu64 ", device size: %" PRIu64 ", alignment: %zu.",
		params->max_hotzone_size << SECTOR_SHIFT,
		params->device_size << SECTOR_SHIFT, rh->alignment);

	/* both user-supplied sizes must be multiples of the zone alignment */
	if ((params->max_hotzone_size << SECTOR_SHIFT) % rh->alignment) {
		log_err(cd, _("Hotzone size must be multiple of calculated zone alignment (%zu bytes)."), rh->alignment);
		return -EINVAL;
	}

	if ((params->device_size << SECTOR_SHIFT) % rh->alignment) {
		log_err(cd, _("Device size must be multiple of calculated zone alignment (%zu bytes)."), rh->alignment);
		return -EINVAL;
	}

	rh->direction = reencrypt_direction(hdr);

	/* select resilience (crash protection) mode */
	if (!strcmp(params->resilience, "datashift")) {
		log_dbg(cd, "Initializing reencryption context with data_shift resilience.");
		rh->rp.type = REENC_PROTECTION_DATASHIFT;
		rh->data_shift = reencrypt_data_shift(hdr);
	} else if (!strcmp(params->resilience, "journal")) {
		log_dbg(cd, "Initializing reencryption context with journal resilience.");
		rh->rp.type = REENC_PROTECTION_JOURNAL;
	} else if (!strcmp(params->resilience, "checksum")) {
		log_dbg(cd, "Initializing reencryption context with checksum resilience.");
		rh->rp.type = REENC_PROTECTION_CHECKSUM;

		r = snprintf(rh->rp.p.csum.hash,
			sizeof(rh->rp.p.csum.hash), "%s", params->hash);
		if (r < 0 || (size_t)r >= sizeof(rh->rp.p.csum.hash)) {
			log_dbg(cd, "Invalid hash parameter");
			return -EINVAL;
		}

		if (crypt_hash_init(&rh->rp.p.csum.ch, params->hash)) {
			log_err(cd, _("Hash algorithm %s not supported."), params->hash);
			return -EINVAL;
		}

		r = crypt_hash_size(params->hash);
		if (r < 1) {
			log_dbg(cd, "Invalid hash size");
			return -EINVAL;
		}
		rh->rp.p.csum.hash_size = r;

		/* checksum buffer mirrors the whole keyslot area */
		rh->rp.p.csum.checksums_len = area_length;
		if (posix_memalign(&rh->rp.p.csum.checksums, device_alignment(crypt_metadata_device(cd)),
				   rh->rp.p.csum.checksums_len))
			return -ENOMEM;
	} else if (!strcmp(params->resilience, "none")) {
		log_dbg(cd, "Initializing reencryption context with none resilience.");
		rh->rp.type = REENC_PROTECTION_NONE;
	} else {
		log_err(cd, _("Unsupported resilience mode %s"), params->resilience);
		return -EINVAL;
	}

	/* non-zero requested device size switches to fixed size mode */
	if (params->device_size) {
		log_dbg(cd, "Switching reencryption to fixed size mode.");
		device_size = params->device_size << SECTOR_SHIFT;
		rh->fixed_length = true;
	} else
		rh->fixed_length = false;

	rh->length = reencrypt_length(cd, hdr, rh, area_length, params->max_hotzone_size << SECTOR_SHIFT);
	if (!rh->length) {
		log_dbg(cd, "Invalid reencryption length.");
		return -EINVAL;
	}

	if (reencrypt_offset(hdr, rh->direction, device_size, &rh->length, &rh->offset)) {
		log_dbg(cd, "Failed to get reencryption offset.");
		return -EINVAL;
	}

	/* clamp hotzone to device end */
	if (rh->offset > device_size)
		return -EINVAL;
	if (rh->length > device_size - rh->offset)
		rh->length = device_size - rh->offset;

	log_dbg(cd, "reencrypt-direction: %s", rh->direction == CRYPT_REENCRYPT_FORWARD ? "forward" : "backward");

	_load_backup_segments(hdr, rh);

	/* progress counts bytes already processed in the chosen direction */
	if (rh->direction == CRYPT_REENCRYPT_BACKWARD)
		rh->progress = device_size - rh->offset - rh->length;
	else
		rh->progress = rh->offset;

	log_dbg(cd, "backup-previous digest id: %d", rh->digest_old);
	log_dbg(cd, "backup-final digest id: %d", rh->digest_new);
	log_dbg(cd, "reencrypt length: %" PRIu64, rh->length);
	log_dbg(cd, "reencrypt offset: %" PRIu64, rh->offset);
	log_dbg(cd, "reencrypt shift: %s%" PRIu64, (rh->data_shift && rh->direction == CRYPT_REENCRYPT_BACKWARD ? "-" : ""), rh->data_shift);
	log_dbg(cd, "reencrypt alignment: %zu", rh->alignment);
	log_dbg(cd, "reencrypt progress: %" PRIu64, rh->progress);

	rh->device_size = device_size;

	return rh->length < 512 ? -EINVAL : 0;
}
 1095 
 1096 static size_t reencrypt_buffer_length(struct luks2_reencrypt *rh)
 1097 {
 1098     if (rh->data_shift)
 1099         return rh->data_shift;
 1100     return rh->length;
 1101 }
 1102 
 1103 static int reencrypt_load_clean(struct crypt_device *cd,
 1104     struct luks2_hdr *hdr,
 1105     uint64_t device_size,
 1106     struct luks2_reencrypt **rh,
 1107     const struct crypt_params_reencrypt *params)
 1108 {
 1109     int r;
 1110     const struct crypt_params_reencrypt hdr_reenc_params = {
 1111         .resilience = reencrypt_resilience_type(hdr),
 1112         .hash = reencrypt_resilience_hash(hdr),
 1113         .device_size = params ? params->device_size : 0
 1114     };
 1115     struct luks2_reencrypt *tmp = crypt_zalloc(sizeof (*tmp));
 1116 
 1117     if (!tmp)
 1118         return -ENOMEM;
 1119 
 1120     r = -EINVAL;
 1121     if (!hdr_reenc_params.resilience)
 1122         goto err;
 1123 
 1124     /* skip context update if data shift is detected in header */
 1125     if (!strcmp(hdr_reenc_params.resilience, "datashift"))
 1126         params = NULL;
 1127 
 1128     log_dbg(cd, "Initializing reencryption context (%s).", params ? "update" : "load");
 1129 
 1130     if (!params || !params->resilience)
 1131         params = &hdr_reenc_params;
 1132 
 1133     r = reencrypt_context_init(cd, hdr, tmp, device_size, params);
 1134     if (r)
 1135         goto err;
 1136 
 1137     if (posix_memalign(&tmp->reenc_buffer, device_alignment(crypt_data_device(cd)),
 1138                reencrypt_buffer_length(tmp))) {
 1139         r = -ENOMEM;
 1140         goto err;
 1141     }
 1142 
 1143     *rh = tmp;
 1144 
 1145     return 0;
 1146 err:
 1147     LUKS2_reencrypt_free(cd, tmp);
 1148 
 1149     return r;
 1150 }
 1151 
 1152 static int reencrypt_make_segments(struct crypt_device *cd,
 1153     struct luks2_hdr *hdr,
 1154     struct luks2_reencrypt *rh,
 1155     uint64_t device_size)
 1156 {
 1157     int r;
 1158     uint64_t data_offset = reencrypt_get_data_offset_new(hdr);
 1159 
 1160     log_dbg(cd, "Calculating segments.");
 1161 
 1162     r = reencrypt_make_hot_segments(cd, hdr, rh, device_size, data_offset);
 1163     if (!r) {
 1164         r = reencrypt_make_post_segments(cd, hdr, rh, data_offset);
 1165         if (r)
 1166             json_object_put(rh->jobj_segs_hot);
 1167     }
 1168 
 1169     if (r)
 1170         log_dbg(cd, "Failed to make reencryption segments.");
 1171 
 1172     return r;
 1173 }
 1174 
 1175 static int reencrypt_make_segments_crashed(struct crypt_device *cd,
 1176                 struct luks2_hdr *hdr,
 1177                     struct luks2_reencrypt *rh)
 1178 {
 1179     int r;
 1180     uint64_t data_offset = crypt_get_data_offset(cd) << SECTOR_SHIFT;
 1181 
 1182     if (!rh)
 1183         return -EINVAL;
 1184 
 1185     rh->jobj_segs_hot = json_object_new_object();
 1186     if (!rh->jobj_segs_hot)
 1187         return -ENOMEM;
 1188 
 1189     json_object_object_foreach(LUKS2_get_segments_jobj(hdr), key, val) {
 1190         if (json_segment_is_backup(val))
 1191             continue;
 1192         json_object_object_add(rh->jobj_segs_hot, key, json_object_get(val));
 1193     }
 1194 
 1195     r = reencrypt_make_post_segments(cd, hdr, rh, data_offset);
 1196     if (r) {
 1197         json_object_put(rh->jobj_segs_hot);
 1198         rh->jobj_segs_hot = NULL;
 1199     }
 1200 
 1201     return r;
 1202 }
 1203 
 1204 static int reencrypt_load_crashed(struct crypt_device *cd,
 1205     struct luks2_hdr *hdr, uint64_t device_size, struct luks2_reencrypt **rh)
 1206 {
 1207     bool dynamic;
 1208     uint64_t minimal_size;
 1209     int r, reenc_seg;
 1210     struct crypt_params_reencrypt params = {};
 1211 
 1212     if (LUKS2_get_data_size(hdr, &minimal_size, &dynamic))
 1213         return -EINVAL;
 1214 
 1215     if (!dynamic)
 1216         params.device_size = minimal_size >> SECTOR_SHIFT;
 1217 
 1218     r = reencrypt_load_clean(cd, hdr, device_size, rh, &params);
 1219 
 1220     if (!r) {
 1221         reenc_seg = json_segments_segment_in_reencrypt(LUKS2_get_segments_jobj(hdr));
 1222         if (reenc_seg < 0)
 1223             r = -EINVAL;
 1224         else
 1225             (*rh)->length = LUKS2_segment_size(hdr, reenc_seg, 0);
 1226     }
 1227 
 1228     if (!r && ((*rh)->rp.type == REENC_PROTECTION_CHECKSUM)) {
 1229         /* we have to override calculated alignment with value stored in mda */
 1230         (*rh)->alignment = reencrypt_alignment(hdr);
 1231         if (!(*rh)->alignment) {
 1232             log_dbg(cd, "Failed to get read resilience sector_size from metadata.");
 1233             r = -EINVAL;
 1234         }
 1235     }
 1236 
 1237     if (!r)
 1238         r = reencrypt_make_segments_crashed(cd, hdr, *rh);
 1239 
 1240     if (r) {
 1241         LUKS2_reencrypt_free(cd, *rh);
 1242         *rh = NULL;
 1243     }
 1244     return r;
 1245 }
 1246 
 1247 static int reencrypt_init_storage_wrappers(struct crypt_device *cd,
 1248         struct luks2_hdr *hdr,
 1249         struct luks2_reencrypt *rh,
 1250         struct volume_key *vks)
 1251 {
 1252     int r;
 1253     struct volume_key *vk;
 1254     uint32_t wrapper_flags = (getuid() || geteuid()) ? 0 : DISABLE_KCAPI;
 1255 
 1256     vk = crypt_volume_key_by_id(vks, rh->digest_old);
 1257     r = crypt_storage_wrapper_init(cd, &rh->cw1, crypt_data_device(cd),
 1258             reencrypt_get_data_offset_old(hdr),
 1259             crypt_get_iv_offset(cd),
 1260             reencrypt_get_sector_size_old(hdr),
 1261             reencrypt_segment_cipher_old(hdr),
 1262             vk, wrapper_flags | OPEN_READONLY);
 1263     if (r) {
 1264         log_err(cd, _("Failed to initialize old segment storage wrapper."));
 1265         return r;
 1266     }
 1267     rh->wflags1 = wrapper_flags | OPEN_READONLY;
 1268     log_dbg(cd, "Old cipher storage wrapper type: %d.", crypt_storage_wrapper_get_type(rh->cw1));
 1269 
 1270     vk = crypt_volume_key_by_id(vks, rh->digest_new);
 1271     r = crypt_storage_wrapper_init(cd, &rh->cw2, crypt_data_device(cd),
 1272             reencrypt_get_data_offset_new(hdr),
 1273             crypt_get_iv_offset(cd),
 1274             reencrypt_get_sector_size_new(hdr),
 1275             reencrypt_segment_cipher_new(hdr),
 1276             vk, wrapper_flags);
 1277     if (r) {
 1278         log_err(cd, _("Failed to initialize new segment storage wrapper."));
 1279         return r;
 1280     }
 1281     rh->wflags2 = wrapper_flags;
 1282     log_dbg(cd, "New cipher storage wrapper type: %d", crypt_storage_wrapper_get_type(rh->cw2));
 1283 
 1284     return 0;
 1285 }
 1286 
 1287 static int reencrypt_context_set_names(struct luks2_reencrypt *rh, const char *name)
 1288 {
 1289     if (!rh | !name)
 1290         return -EINVAL;
 1291 
 1292     if (*name == '/') {
 1293         if (!(rh->device_name = dm_device_name(name)))
 1294             return -EINVAL;
 1295     } else if (!(rh->device_name = strdup(name)))
 1296         return -ENOMEM;
 1297 
 1298     if (asprintf(&rh->hotzone_name, "%s-hotzone-%s", rh->device_name,
 1299              rh->direction == CRYPT_REENCRYPT_FORWARD ? "forward" : "backward") < 0) {
 1300         rh->hotzone_name = NULL;
 1301         return -ENOMEM;
 1302     }
 1303     if (asprintf(&rh->overlay_name, "%s-overlay", rh->device_name) < 0) {
 1304         rh->overlay_name = NULL;
 1305         return -ENOMEM;
 1306     }
 1307 
 1308     rh->online = true;
 1309     return 0;
 1310 }
 1311 
 1312 static int modify_offset(uint64_t *offset, uint64_t data_shift, crypt_reencrypt_direction_info di)
 1313 {
 1314     int r = -EINVAL;
 1315 
 1316     if (!offset)
 1317         return r;
 1318 
 1319     if (di == CRYPT_REENCRYPT_FORWARD) {
 1320         if (*offset >= data_shift) {
 1321             *offset -= data_shift;
 1322             r = 0;
 1323         }
 1324     } else if (di == CRYPT_REENCRYPT_BACKWARD) {
 1325         *offset += data_shift;
 1326         r = 0;
 1327     }
 1328 
 1329     return r;
 1330 }
 1331 
 1332 static int reencrypt_update_flag(struct crypt_device *cd, int enable, bool commit)
 1333 {
 1334     uint32_t reqs;
 1335     struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
 1336 
 1337     if (LUKS2_config_get_requirements(cd, hdr, &reqs))
 1338         return -EINVAL;
 1339 
 1340     /* nothing to do */
 1341     if (enable && (reqs & CRYPT_REQUIREMENT_ONLINE_REENCRYPT))
 1342         return -EINVAL;
 1343 
 1344     /* nothing to do */
 1345     if (!enable && !(reqs & CRYPT_REQUIREMENT_ONLINE_REENCRYPT))
 1346         return -EINVAL;
 1347 
 1348     if (enable)
 1349         reqs |= CRYPT_REQUIREMENT_ONLINE_REENCRYPT;
 1350     else
 1351         reqs &= ~CRYPT_REQUIREMENT_ONLINE_REENCRYPT;
 1352 
 1353     log_dbg(cd, "Going to %s reencryption requirement flag.", enable ? "store" : "wipe");
 1354 
 1355     return LUKS2_config_set_requirements(cd, hdr, reqs, commit);
 1356 }
 1357 
 1358 static int reencrypt_recover_segment(struct crypt_device *cd,
 1359     struct luks2_hdr *hdr,
 1360     struct luks2_reencrypt *rh,
 1361     struct volume_key *vks)
 1362 {
 1363     struct volume_key *vk_old, *vk_new;
 1364     size_t count, s;
 1365     ssize_t read, w;
 1366     unsigned resilience;
 1367     uint64_t area_offset, area_length, area_length_read, crash_iv_offset,
 1368          data_offset = crypt_get_data_offset(cd) << SECTOR_SHIFT;
 1369     int devfd, r, new_sector_size, old_sector_size, rseg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
 1370     char *checksum_tmp = NULL, *data_buffer = NULL;
 1371     struct crypt_storage_wrapper *cw1 = NULL, *cw2 = NULL;
 1372 
 1373     resilience = rh->rp.type;
 1374 
 1375     if (rseg < 0 || rh->length < 512)
 1376         return -EINVAL;
 1377 
 1378     vk_new = crypt_volume_key_by_id(vks, rh->digest_new);
 1379     if (!vk_new && rh->mode != CRYPT_REENCRYPT_DECRYPT)
 1380         return -EINVAL;
 1381     vk_old = crypt_volume_key_by_id(vks, rh->digest_old);
 1382     if (!vk_old && rh->mode != CRYPT_REENCRYPT_ENCRYPT)
 1383         return -EINVAL;
 1384     old_sector_size = json_segment_get_sector_size(reencrypt_segment_old(hdr));
 1385     new_sector_size = json_segment_get_sector_size(reencrypt_segment_new(hdr));
 1386     if (rh->mode == CRYPT_REENCRYPT_DECRYPT)
 1387         crash_iv_offset = rh->offset >> SECTOR_SHIFT; /* TODO: + old iv_tweak */
 1388     else
 1389         crash_iv_offset = json_segment_get_iv_offset(json_segments_get_segment(rh->jobj_segs_hot, rseg));
 1390 
 1391     log_dbg(cd, "crash_offset: %" PRIu64 ", crash_length: %" PRIu64 ",  crash_iv_offset: %" PRIu64, data_offset + rh->offset, rh->length, crash_iv_offset);
 1392 
 1393     r = crypt_storage_wrapper_init(cd, &cw2, crypt_data_device(cd),
 1394             data_offset + rh->offset, crash_iv_offset, new_sector_size,
 1395             reencrypt_segment_cipher_new(hdr), vk_new, 0);
 1396     if (r) {
 1397         log_err(cd, _("Failed to initialize new segment storage wrapper."));
 1398         return r;
 1399     }
 1400 
 1401     if (LUKS2_keyslot_area(hdr, rh->reenc_keyslot, &area_offset, &area_length)) {
 1402         r = -EINVAL;
 1403         goto out;
 1404     }
 1405 
 1406     if (posix_memalign((void**)&data_buffer, device_alignment(crypt_data_device(cd)), rh->length)) {
 1407         r = -ENOMEM;
 1408         goto out;
 1409     }
 1410 
 1411     switch (resilience) {
 1412     case  REENC_PROTECTION_CHECKSUM:
 1413         log_dbg(cd, "Checksums based recovery.");
 1414 
 1415         r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
 1416                 data_offset + rh->offset, crash_iv_offset, old_sector_size,
 1417                 reencrypt_segment_cipher_old(hdr), vk_old, 0);
 1418         if (r) {
 1419             log_err(cd, _("Failed to initialize old segment storage wrapper."));
 1420             goto out;
 1421         }
 1422 
 1423         count = rh->length / rh->alignment;
 1424         area_length_read = count * rh->rp.p.csum.hash_size;
 1425         if (area_length_read > area_length) {
 1426             log_dbg(cd, "Internal error in calculated area_length.");
 1427             r = -EINVAL;
 1428             goto out;
 1429         }
 1430 
 1431         checksum_tmp = malloc(rh->rp.p.csum.hash_size);
 1432         if (!checksum_tmp) {
 1433             r = -ENOMEM;
 1434             goto out;
 1435         }
 1436 
 1437         /* TODO: lock for read */
 1438         devfd = device_open(cd, crypt_metadata_device(cd), O_RDONLY);
 1439         if (devfd < 0)
 1440             goto out;
 1441 
 1442         /* read old data checksums */
 1443         read = read_lseek_blockwise(devfd, device_block_size(cd, crypt_metadata_device(cd)),
 1444                     device_alignment(crypt_metadata_device(cd)), rh->rp.p.csum.checksums, area_length_read, area_offset);
 1445         if (read < 0 || (size_t)read != area_length_read) {
 1446             log_err(cd, _("Failed to read checksums for current hotzone."));
 1447             r = -EINVAL;
 1448             goto out;
 1449         }
 1450 
 1451         read = crypt_storage_wrapper_read(cw2, 0, data_buffer, rh->length);
 1452         if (read < 0 || (size_t)read != rh->length) {
 1453             log_err(cd, _("Failed to read hotzone area starting at %" PRIu64 "."), rh->offset + data_offset);
 1454             r = -EINVAL;
 1455             goto out;
 1456         }
 1457 
 1458         for (s = 0; s < count; s++) {
 1459             if (crypt_hash_write(rh->rp.p.csum.ch, data_buffer + (s * rh->alignment), rh->alignment)) {
 1460                 log_dbg(cd, "Failed to write hash.");
 1461                 r = EINVAL;
 1462                 goto out;
 1463             }
 1464             if (crypt_hash_final(rh->rp.p.csum.ch, checksum_tmp, rh->rp.p.csum.hash_size)) {
 1465                 log_dbg(cd, "Failed to finalize hash.");
 1466                 r = EINVAL;
 1467                 goto out;
 1468             }
 1469             if (!memcmp(checksum_tmp, (char *)rh->rp.p.csum.checksums + (s * rh->rp.p.csum.hash_size), rh->rp.p.csum.hash_size)) {
 1470                 log_dbg(cd, "Sector %zu (size %zu, offset %zu) needs recovery", s, rh->alignment, s * rh->alignment);
 1471                 if (crypt_storage_wrapper_decrypt(cw1, s * rh->alignment, data_buffer + (s * rh->alignment), rh->alignment)) {
 1472                     log_err(cd, _("Failed to decrypt sector %zu."), s);
 1473                     r = -EINVAL;
 1474                     goto out;
 1475                 }
 1476                 w = crypt_storage_wrapper_encrypt_write(cw2, s * rh->alignment, data_buffer + (s * rh->alignment), rh->alignment);
 1477                 if (w < 0 || (size_t)w != rh->alignment) {
 1478                     log_err(cd, _("Failed to recover sector %zu."), s);
 1479                     r = -EINVAL;
 1480                     goto out;
 1481                 }
 1482             }
 1483         }
 1484 
 1485         r = 0;
 1486         break;
 1487     case  REENC_PROTECTION_JOURNAL:
 1488         log_dbg(cd, "Journal based recovery.");
 1489 
 1490         /* FIXME: validation candidate */
 1491         if (rh->length > area_length) {
 1492             r = -EINVAL;
 1493             log_dbg(cd, "Invalid journal size.");
 1494             goto out;
 1495         }
 1496 
 1497         /* TODO locking */
 1498         r = crypt_storage_wrapper_init(cd, &cw1, crypt_metadata_device(cd),
 1499                 area_offset, crash_iv_offset, old_sector_size,
 1500                 reencrypt_segment_cipher_old(hdr), vk_old, 0);
 1501         if (r) {
 1502             log_err(cd, _("Failed to initialize old segment storage wrapper."));
 1503             goto out;
 1504         }
 1505         read = crypt_storage_wrapper_read_decrypt(cw1, 0, data_buffer, rh->length);
 1506         if (read < 0 || (size_t)read != rh->length) {
 1507             log_dbg(cd, "Failed to read journaled data.");
 1508             r = -EIO;
 1509             /* may content plaintext */
 1510             crypt_safe_memzero(data_buffer, rh->length);
 1511             goto out;
 1512         }
 1513         read = crypt_storage_wrapper_encrypt_write(cw2, 0, data_buffer, rh->length);
 1514         /* may content plaintext */
 1515         crypt_safe_memzero(data_buffer, rh->length);
 1516         if (read < 0 || (size_t)read != rh->length) {
 1517             log_dbg(cd, "recovery write failed.");
 1518             r = -EINVAL;
 1519             goto out;
 1520         }
 1521 
 1522         r = 0;
 1523         break;
 1524     case  REENC_PROTECTION_DATASHIFT:
 1525         log_dbg(cd, "Data shift based recovery.");
 1526 
 1527         if (rseg == 0) {
 1528             r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
 1529                     json_segment_get_offset(rh->jobj_segment_moved, 0), 0, 0,
 1530                     reencrypt_segment_cipher_old(hdr), NULL, 0);
 1531         } else
 1532             r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
 1533                     data_offset + rh->offset - rh->data_shift, 0, 0,
 1534                     reencrypt_segment_cipher_old(hdr), NULL, 0);
 1535         if (r) {
 1536             log_err(cd, _("Failed to initialize old segment storage wrapper."));
 1537             goto out;
 1538         }
 1539 
 1540         read = crypt_storage_wrapper_read_decrypt(cw1, 0, data_buffer, rh->length);
 1541         if (read < 0 || (size_t)read != rh->length) {
 1542             log_dbg(cd, "Failed to read data.");
 1543             r = -EIO;
 1544             /* may content plaintext */
 1545             crypt_safe_memzero(data_buffer, rh->length);
 1546             goto out;
 1547         }
 1548 
 1549         read = crypt_storage_wrapper_encrypt_write(cw2, 0, data_buffer, rh->length);
 1550         /* may content plaintext */
 1551         crypt_safe_memzero(data_buffer, rh->length);
 1552         if (read < 0 || (size_t)read != rh->length) {
 1553             log_dbg(cd, "recovery write failed.");
 1554             r = -EINVAL;
 1555             goto out;
 1556         }
 1557         r = 0;
 1558         break;
 1559     default:
 1560         r = -EINVAL;
 1561     }
 1562 
 1563     if (!r)
 1564         rh->read = rh->length;
 1565 out:
 1566     free(data_buffer);
 1567     free(checksum_tmp);
 1568     crypt_storage_wrapper_destroy(cw1);
 1569     crypt_storage_wrapper_destroy(cw2);
 1570 
 1571     return r;
 1572 }
 1573 
 1574 static int reencrypt_add_moved_segment(struct luks2_hdr *hdr, struct luks2_reencrypt *rh)
 1575 {
 1576     int s = LUKS2_segment_first_unused_id(hdr);
 1577 
 1578     if (!rh->jobj_segment_moved)
 1579         return 0;
 1580 
 1581     if (s < 0)
 1582         return s;
 1583 
 1584     if (json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), s, json_object_get(rh->jobj_segment_moved))) {
 1585         json_object_put(rh->jobj_segment_moved);
 1586         return -EINVAL;
 1587     }
 1588 
 1589     return 0;
 1590 }
 1591 
 1592 static int reencrypt_add_backup_segment(struct crypt_device *cd,
 1593         struct luks2_hdr *hdr,
 1594         struct luks2_reencrypt *rh,
 1595         unsigned final)
 1596 {
 1597     int digest, s = LUKS2_segment_first_unused_id(hdr);
 1598     json_object *jobj;
 1599 
 1600     if (s < 0)
 1601         return s;
 1602 
 1603     digest = final ? rh->digest_new : rh->digest_old;
 1604     jobj = final ? rh->jobj_segment_new : rh->jobj_segment_old;
 1605 
 1606     if (json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), s, json_object_get(jobj))) {
 1607         json_object_put(jobj);
 1608         return -EINVAL;
 1609     }
 1610 
 1611     if (strcmp(json_segment_type(jobj), "crypt"))
 1612         return 0;
 1613 
 1614     return LUKS2_digest_segment_assign(cd, hdr, s, digest, 1, 0);
 1615 }
 1616 
 1617 static int reencrypt_assign_segments_simple(struct crypt_device *cd,
 1618     struct luks2_hdr *hdr,
 1619     struct luks2_reencrypt *rh,
 1620     unsigned hot,
 1621     unsigned commit)
 1622 {
 1623     int r, sg;
 1624 
 1625     if (hot && json_segments_count(rh->jobj_segs_hot) > 0) {
 1626         log_dbg(cd, "Setting 'hot' segments.");
 1627 
 1628         r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_hot, 0);
 1629         if (!r)
 1630             rh->jobj_segs_hot = NULL;
 1631     } else if (!hot && json_segments_count(rh->jobj_segs_post) > 0) {
 1632         log_dbg(cd, "Setting 'post' segments.");
 1633         r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_post, 0);
 1634         if (!r)
 1635             rh->jobj_segs_post = NULL;
 1636     } else {
 1637         log_dbg(cd, "No segments to set.");
 1638         return -EINVAL;
 1639     }
 1640 
 1641     if (r) {
 1642         log_dbg(cd, "Failed to assign new enc segments.");
 1643         return r;
 1644     }
 1645 
 1646     r = reencrypt_add_backup_segment(cd, hdr, rh, 0);
 1647     if (r) {
 1648         log_dbg(cd, "Failed to assign reencryption previous backup segment.");
 1649         return r;
 1650     }
 1651 
 1652     r = reencrypt_add_backup_segment(cd, hdr, rh, 1);
 1653     if (r) {
 1654         log_dbg(cd, "Failed to assign reencryption final backup segment.");
 1655         return r;
 1656     }
 1657 
 1658     r = reencrypt_add_moved_segment(hdr, rh);
 1659     if (r) {
 1660         log_dbg(cd, "Failed to assign reencryption moved backup segment.");
 1661         return r;
 1662     }
 1663 
 1664     for (sg = 0; sg < LUKS2_segments_count(hdr); sg++) {
 1665         if (LUKS2_segment_is_type(hdr, sg, "crypt") &&
 1666             LUKS2_digest_segment_assign(cd, hdr, sg, rh->mode == CRYPT_REENCRYPT_ENCRYPT ? rh->digest_new : rh->digest_old, 1, 0)) {
 1667             log_dbg(cd, "Failed to assign digest %u to segment %u.", rh->digest_new, sg);
 1668             return -EINVAL;
 1669         }
 1670     }
 1671 
 1672     return commit ? LUKS2_hdr_write(cd, hdr) : 0;
 1673 }
 1674 
/*
 * Install either the 'hot' (in-progress) or 'post' (after-hotzone) segment
 * layout into the LUKS2 header and (re)assign key digests to segments:
 * the segment being reencrypted references both digests, segments before/
 * after it reference the new or old digest depending on direction, and
 * the two backup segments are appended last. Optionally commits the header.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int reencrypt_assign_segments(struct crypt_device *cd,
        struct luks2_hdr *hdr,
        struct luks2_reencrypt *rh,
        unsigned hot,
        unsigned commit)
{
    bool forward;
    int rseg, scount, r = -EINVAL;

    /* FIXME: validate in reencrypt context load */
    if (rh->digest_new < 0 && rh->mode != CRYPT_REENCRYPT_DECRYPT)
        return -EINVAL;

    /* Drop all existing digest<->segment assignments before rebuilding them. */
    if (LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, CRYPT_ANY_DIGEST, 0, 0))
        return -EINVAL;

    /* Plain encryption/decryption use the simpler single-digest variant. */
    if (rh->mode == CRYPT_REENCRYPT_ENCRYPT || rh->mode == CRYPT_REENCRYPT_DECRYPT)
        return reencrypt_assign_segments_simple(cd, hdr, rh, hot, commit);

    if (hot && rh->jobj_segs_hot) {
        log_dbg(cd, "Setting 'hot' segments.");

        /* LUKS2_segments_set consumes the json object on success. */
        r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_hot, 0);
        if (!r)
            rh->jobj_segs_hot = NULL;
    } else if (!hot && rh->jobj_segs_post) {
        log_dbg(cd, "Setting 'post' segments.");
        r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_post, 0);
        if (!r)
            rh->jobj_segs_post = NULL;
    }

    if (r)
        return r;

    scount = LUKS2_segments_count(hdr);

    /* segment in reencryption has to hold reference on both digests */
    rseg = json_segments_segment_in_reencrypt(LUKS2_get_segments_jobj(hdr));
    if (rseg < 0 && hot)
        return -EINVAL;

    if (rseg >= 0) {
        LUKS2_digest_segment_assign(cd, hdr, rseg, rh->digest_new, 1, 0);
        LUKS2_digest_segment_assign(cd, hdr, rseg, rh->digest_old, 1, 0);
    }

    forward = (rh->direction == CRYPT_REENCRYPT_FORWARD);
    if (hot) {
        /* Forward: segment 0 is already new; the tail is still old (and vice versa). */
        if (rseg > 0)
            LUKS2_digest_segment_assign(cd, hdr, 0, forward ? rh->digest_new : rh->digest_old, 1, 0);
        if (scount > rseg + 1)
            LUKS2_digest_segment_assign(cd, hdr, rseg + 1, forward ? rh->digest_old : rh->digest_new, 1, 0);
    } else {
        /* Post layout has no in-reencryption segment; a single segment means done. */
        LUKS2_digest_segment_assign(cd, hdr, 0, forward || scount == 1 ? rh->digest_new : rh->digest_old, 1, 0);
        if (scount > 1)
            LUKS2_digest_segment_assign(cd, hdr, 1, forward ? rh->digest_old : rh->digest_new, 1, 0);
    }

    r = reencrypt_add_backup_segment(cd, hdr, rh, 0);
    if (r) {
        log_dbg(cd, "Failed to assign hot reencryption backup segment.");
        return r;
    }
    r = reencrypt_add_backup_segment(cd, hdr, rh, 1);
    if (r) {
        log_dbg(cd, "Failed to assign post reencryption backup segment.");
        return r;
    }

    return commit ? LUKS2_hdr_write(cd, hdr) : 0;
}
 1747 
 1748 static int reencrypt_set_encrypt_segments(struct crypt_device *cd, struct luks2_hdr *hdr, uint64_t dev_size, uint64_t data_shift, bool move_first_segment, crypt_reencrypt_direction_info di)
 1749 {
 1750     int r;
 1751     uint64_t first_segment_offset, first_segment_length,
 1752          second_segment_offset, second_segment_length,
 1753          data_offset = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT,
 1754          data_size = dev_size - data_shift;
 1755     json_object *jobj_segment_first = NULL, *jobj_segment_second = NULL, *jobj_segments;
 1756 
 1757     if (dev_size < data_shift)
 1758         return -EINVAL;
 1759 
 1760     if (data_shift && (di == CRYPT_REENCRYPT_FORWARD))
 1761         return -ENOTSUP;
 1762 
 1763     if (move_first_segment) {
 1764         /*
 1765          * future data_device layout:
 1766          * [future LUKS2 header (data shift size)][second data segment][gap (data shift size)][first data segment (data shift size)]
 1767          */
 1768         first_segment_offset = dev_size;
 1769         if (data_size < data_shift) {
 1770             first_segment_length = data_size;
 1771             second_segment_length = second_segment_offset = 0;
 1772         } else {
 1773             first_segment_length = data_shift;
 1774             second_segment_offset = data_shift;
 1775             second_segment_length = data_size - data_shift;
 1776         }
 1777     } else if (data_shift) {
 1778         first_segment_offset = data_offset;
 1779         first_segment_length = dev_size;
 1780     } else {
 1781         /* future data_device layout with detached header: [first data segment] */
 1782         first_segment_offset = data_offset;
 1783         first_segment_length = 0; /* dynamic */
 1784     }
 1785 
 1786     jobj_segments = json_object_new_object();
 1787     if (!jobj_segments)
 1788         return -ENOMEM;
 1789 
 1790     r = -EINVAL;
 1791     if (move_first_segment) {
 1792         jobj_segment_first =  json_segment_create_linear(first_segment_offset, &first_segment_length, 0);
 1793         if (second_segment_length &&
 1794             !(jobj_segment_second = json_segment_create_linear(second_segment_offset, &second_segment_length, 0))) {
 1795             log_dbg(cd, "Failed generate 2nd segment.");
 1796             return r;
 1797         }
 1798     } else
 1799         jobj_segment_first =  json_segment_create_linear(first_segment_offset, first_segment_length ? &first_segment_length : NULL, 0);
 1800 
 1801     if (!jobj_segment_first) {
 1802         log_dbg(cd, "Failed generate 1st segment.");
 1803         return r;
 1804     }
 1805 
 1806     json_object_object_add(jobj_segments, "0", jobj_segment_first);
 1807     if (jobj_segment_second)
 1808         json_object_object_add(jobj_segments, "1", jobj_segment_second);
 1809 
 1810     r = LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, CRYPT_ANY_DIGEST, 0, 0);
 1811 
 1812     return r ?: LUKS2_segments_set(cd, hdr, jobj_segments, 0);
 1813 }
 1814 
 1815 static int reencrypt_make_targets(struct crypt_device *cd,
 1816                 struct luks2_hdr *hdr,
 1817                 struct device *hz_device,
 1818                 struct volume_key *vks,
 1819                 struct dm_target *result,
 1820                 uint64_t size)
 1821 {
 1822     bool reenc_seg;
 1823     struct volume_key *vk;
 1824     uint64_t segment_size, segment_offset, segment_start = 0;
 1825     int r;
 1826     int s = 0;
 1827     json_object *jobj, *jobj_segments = LUKS2_get_segments_jobj(hdr);
 1828 
 1829     while (result) {
 1830         jobj = json_segments_get_segment(jobj_segments, s);
 1831         if (!jobj) {
 1832             log_dbg(cd, "Internal error. Segment %u is null.", s);
 1833             return -EINVAL;
 1834         }
 1835 
 1836         reenc_seg = (s == json_segments_segment_in_reencrypt(jobj_segments));
 1837 
 1838         segment_offset = json_segment_get_offset(jobj, 1);
 1839         segment_size = json_segment_get_size(jobj, 1);
 1840         /* 'dynamic' length allowed in last segment only */
 1841         if (!segment_size && !result->next)
 1842             segment_size = (size >> SECTOR_SHIFT) - segment_start;
 1843         if (!segment_size) {
 1844             log_dbg(cd, "Internal error. Wrong segment size %u", s);
 1845             return -EINVAL;
 1846         }
 1847 
 1848         if (reenc_seg)
 1849             segment_offset -= crypt_get_data_offset(cd);
 1850 
 1851         if (!strcmp(json_segment_type(jobj), "crypt")) {
 1852             vk = crypt_volume_key_by_id(vks, reenc_seg ? LUKS2_reencrypt_digest_new(hdr) : LUKS2_digest_by_segment(hdr, s));
 1853             if (!vk) {
 1854                 log_err(cd, _("Missing key for dm-crypt segment %u"), s);
 1855                 return -EINVAL;
 1856             }
 1857 
 1858             r = dm_crypt_target_set(result, segment_start, segment_size,
 1859                         reenc_seg ? hz_device : crypt_data_device(cd),
 1860                         vk,
 1861                         json_segment_get_cipher(jobj),
 1862                         json_segment_get_iv_offset(jobj),
 1863                         segment_offset,
 1864                         "none",
 1865                         0,
 1866                         json_segment_get_sector_size(jobj));
 1867             if (r) {
 1868                 log_err(cd, _("Failed to set dm-crypt segment."));
 1869                 return r;
 1870             }
 1871         } else if (!strcmp(json_segment_type(jobj), "linear")) {
 1872             r = dm_linear_target_set(result, segment_start, segment_size, reenc_seg ? hz_device : crypt_data_device(cd), segment_offset);
 1873             if (r) {
 1874                 log_err(cd, _("Failed to set dm-linear segment."));
 1875                 return r;
 1876             }
 1877         } else
 1878             return EINVAL;
 1879 
 1880         segment_start += segment_size;
 1881         s++;
 1882         result = result->next;
 1883     }
 1884 
 1885     return s;
 1886 }
 1887 
 1888 /* GLOBAL FIXME: audit function names and parameters names */
 1889 
 1890 /* FIXME:
 1891  *  1) audit log routines
 1892  *  2) can't we derive hotzone device name from crypt context? (unlocked name, device uuid, etc?)
 1893  */
 1894 static int reencrypt_load_overlay_device(struct crypt_device *cd, struct luks2_hdr *hdr,
 1895     const char *overlay, const char *hotzone, struct volume_key *vks, uint64_t size,
 1896     uint32_t flags)
 1897 {
 1898     char hz_path[PATH_MAX];
 1899     int r;
 1900 
 1901     struct device *hz_dev = NULL;
 1902     struct crypt_dm_active_device dmd = {
 1903         .flags = flags,
 1904     };
 1905 
 1906     log_dbg(cd, "Loading new table for overlay device %s.", overlay);
 1907 
 1908     r = snprintf(hz_path, PATH_MAX, "%s/%s", dm_get_dir(), hotzone);
 1909     if (r < 0 || r >= PATH_MAX) {
 1910         r = -EINVAL;
 1911         goto out;
 1912     }
 1913 
 1914     r = device_alloc(cd, &hz_dev, hz_path);
 1915     if (r)
 1916         goto out;
 1917 
 1918     r = dm_targets_allocate(&dmd.segment, LUKS2_segments_count(hdr));
 1919     if (r)
 1920         goto out;
 1921 
 1922     r = reencrypt_make_targets(cd, hdr, hz_dev, vks, &dmd.segment, size);
 1923     if (r < 0)
 1924         goto out;
 1925 
 1926     r = dm_reload_device(cd, overlay, &dmd, 0, 0);
 1927 
 1928     /* what else on error here ? */
 1929 out:
 1930     dm_targets_free(cd, &dmd);
 1931     device_free(cd, hz_dev);
 1932 
 1933     return r;
 1934 }
 1935 
/*
 * Replace the live table of 'target' device with the table currently active
 * in 'source' device (including cipher and key material). If 'target' does
 * not exist yet it is created as a CRYPT_SUBDEV instead. Extra activation
 * 'flags' are merged into the flags queried from 'source'.
 */
static int reencrypt_replace_device(struct crypt_device *cd, const char *target, const char *source, uint32_t flags)
{
    int r, exists = 1;
    struct crypt_dm_active_device dmd_source, dmd_target = {};
    uint32_t dmflags = DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH;

    log_dbg(cd, "Replacing table in device %s with table from device %s.", target, source);

    /* check only whether target device exists */
    r = dm_status_device(cd, target);
    if (r < 0) {
        if (r == -ENODEV)
            exists = 0;
        else
            return r;
    }

    /* Query the full source mapping: targets, cipher and key. */
    r = dm_query_device(cd, source, DM_ACTIVE_DEVICE | DM_ACTIVE_CRYPT_CIPHER |
                DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY, &dmd_source);

    if (r < 0)
        return r;

    if (exists && ((r = dm_query_device(cd, target, 0, &dmd_target)) < 0))
        goto out;

    dmd_source.flags |= flags;
    dmd_source.uuid = crypt_get_uuid(cd);

    if (exists) {
        /* Tables can only be swapped between devices of identical size. */
        if (dmd_target.size != dmd_source.size) {
            log_err(cd, _("Source and target device sizes don't match. Source %" PRIu64 ", target: %" PRIu64 "."),
                dmd_source.size, dmd_target.size);
            r = -EINVAL;
            goto out;
        }
        /* Load into the inactive table slot, then resume to make it live. */
        r = dm_reload_device(cd, target, &dmd_source, 0, 0);
        if (!r) {
            log_dbg(cd, "Resuming device %s", target);
            r = dm_resume_device(cd, target, dmflags | act2dmflags(dmd_source.flags));
        }
    } else
        r = dm_create_device(cd, target, CRYPT_SUBDEV, &dmd_source);
out:
    dm_targets_free(cd, &dmd_source);
    dm_targets_free(cd, &dmd_target);

    return r;
}
 1985 
 1986 static int reencrypt_swap_backing_device(struct crypt_device *cd, const char *name,
 1987                   const char *new_backend_name)
 1988 {
 1989     int r;
 1990     struct device *overlay_dev = NULL;
 1991     char overlay_path[PATH_MAX] = { 0 };
 1992     struct crypt_dm_active_device dmd = {};
 1993 
 1994     log_dbg(cd, "Redirecting %s mapping to new backing device: %s.", name, new_backend_name);
 1995 
 1996     r = snprintf(overlay_path, PATH_MAX, "%s/%s", dm_get_dir(), new_backend_name);
 1997     if (r < 0 || r >= PATH_MAX) {
 1998         r = -EINVAL;
 1999         goto out;
 2000     }
 2001 
 2002     r = device_alloc(cd, &overlay_dev, overlay_path);
 2003     if (r)
 2004         goto out;
 2005 
 2006     r = device_block_adjust(cd, overlay_dev, DEV_OK,
 2007                 0, &dmd.size, &dmd.flags);
 2008     if (r)
 2009         goto out;
 2010 
 2011     r = dm_linear_target_set(&dmd.segment, 0, dmd.size, overlay_dev, 0);
 2012     if (r)
 2013         goto out;
 2014 
 2015     r = dm_reload_device(cd, name, &dmd, 0, 0);
 2016     if (!r) {
 2017         log_dbg(cd, "Resuming device %s", name);
 2018         r = dm_resume_device(cd, name, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
 2019     }
 2020 
 2021 out:
 2022     dm_targets_free(cd, &dmd);
 2023     device_free(cd, overlay_dev);
 2024 
 2025     return r;
 2026 }
 2027 
 2028 static int reencrypt_activate_hotzone_device(struct crypt_device *cd, const char *name, uint64_t device_size, uint32_t flags)
 2029 {
 2030     int r;
 2031     uint64_t new_offset = reencrypt_get_data_offset_new(crypt_get_hdr(cd, CRYPT_LUKS2)) >> SECTOR_SHIFT;
 2032 
 2033     struct crypt_dm_active_device dmd = {
 2034         .flags = flags,
 2035         .uuid = crypt_get_uuid(cd),
 2036         .size = device_size >> SECTOR_SHIFT
 2037     };
 2038 
 2039     log_dbg(cd, "Activating hotzone device %s.", name);
 2040 
 2041     r = device_block_adjust(cd, crypt_data_device(cd), DEV_OK,
 2042                 new_offset, &dmd.size, &dmd.flags);
 2043     if (r)
 2044         goto out;
 2045 
 2046     r = dm_linear_target_set(&dmd.segment, 0, dmd.size, crypt_data_device(cd), new_offset);
 2047     if (r)
 2048         goto out;
 2049 
 2050     r = dm_create_device(cd, name, CRYPT_SUBDEV, &dmd);
 2051 out:
 2052     dm_targets_free(cd, &dmd);
 2053 
 2054     return r;
 2055 }
 2056 
/*
 * Build the online-reencryption device stack:
 *
 *   name (dm-linear) -> overlay (original table of 'name') -> data device
 *   hotzone (dm-linear) -> data device at the new data offset
 *
 * On any failure both helper devices are removed again.
 */
static int reencrypt_init_device_stack(struct crypt_device *cd,
                             const struct luks2_reencrypt *rh)
{
    int r;

    /* Activate hotzone device 1:1 linear mapping to data_device */
    r = reencrypt_activate_hotzone_device(cd, rh->hotzone_name, rh->device_size, CRYPT_ACTIVATE_PRIVATE);
    if (r) {
        log_err(cd, _("Failed to activate hotzone device %s."), rh->hotzone_name);
        return r;
    }

    /*
     * Activate overlay device with exactly same table as original 'name' mapping.
     * Note that within this step the 'name' device may already include a table
     * constructed from more than single dm-crypt segment. Therefore transfer
     * mapping as is.
     *
     * If we're about to resume reencryption orig mapping has to be already validated for
     * abrupt shutdown and rchunk_offset has to point on next chunk to reencrypt!
     *
     * TODO: in crypt_activate_by*
     */
    r = reencrypt_replace_device(cd, rh->overlay_name, rh->device_name, CRYPT_ACTIVATE_PRIVATE);
    if (r) {
        log_err(cd, _("Failed to activate overlay device %s with actual origin table."), rh->overlay_name);
        goto err;
    }

    /* swap origin mapping to overlay device */
    r = reencrypt_swap_backing_device(cd, rh->device_name, rh->overlay_name);
    if (r) {
        log_err(cd, _("Failed to load new mapping for device %s."), rh->device_name);
        goto err;
    }

    /*
     * Now the 'name' (unlocked luks) device is mapped via dm-linear to an overlay dev.
     * The overlay device has a original live table of 'name' device in-before the swap.
     */

    return 0;
err:
    /* TODO: force error helper devices on error path */
    dm_remove_device(cd, rh->overlay_name, 0);
    dm_remove_device(cd, rh->hotzone_name, 0);

    return r;
}
 2106 
 2107 /* TODO:
 2108  *  1) audit error path. any error in this routine is fatal and should be unlikely.
 2109  *     usually it would hint some collision with another userspace process touching
 2110  *     dm devices directly.
 2111  */
 2112 static int reenc_refresh_helper_devices(struct crypt_device *cd, const char *overlay, const char *hotzone)
 2113 {
 2114     int r;
 2115 
 2116     /*
 2117      * we have to explicitly suspend the overlay device before suspending
 2118      * the hotzone one. Resuming overlay device (aka switching tables) only
 2119      * after suspending the hotzone may lead to deadlock.
 2120      *
 2121      * In other words: always suspend the stack from top to bottom!
 2122      */
 2123     r = dm_suspend_device(cd, overlay, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
 2124     if (r) {
 2125         log_err(cd, _("Failed to suspend device %s."), overlay);
 2126         return r;
 2127     }
 2128 
 2129     /* suspend HZ device */
 2130     r = dm_suspend_device(cd, hotzone, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
 2131     if (r) {
 2132         log_err(cd, _("Failed to suspend device %s."), hotzone);
 2133         return r;
 2134     }
 2135 
 2136     /* resume overlay device: inactive table (with hotozne) -> live */
 2137     r = dm_resume_device(cd, overlay, DM_RESUME_PRIVATE);
 2138     if (r)
 2139         log_err(cd, _("Failed to resume device %s."), overlay);
 2140 
 2141     return r;
 2142 }
 2143 
 2144 static int reencrypt_refresh_overlay_devices(struct crypt_device *cd,
 2145         struct luks2_hdr *hdr,
 2146         const char *overlay,
 2147         const char *hotzone,
 2148         struct volume_key *vks,
 2149         uint64_t device_size,
 2150         uint32_t flags)
 2151 {
 2152     int r = reencrypt_load_overlay_device(cd, hdr, overlay, hotzone, vks, device_size, flags);
 2153     if (r) {
 2154         log_err(cd, _("Failed to reload device %s."), overlay);
 2155         return REENC_ERR;
 2156     }
 2157 
 2158     r = reenc_refresh_helper_devices(cd, overlay, hotzone);
 2159     if (r) {
 2160         log_err(cd, _("Failed to refresh reencryption devices stack."));
 2161         return REENC_ROLLBACK;
 2162     }
 2163 
 2164     return REENC_OK;
 2165 }
 2166 
 2167 static int reencrypt_move_data(struct crypt_device *cd, int devfd, uint64_t data_shift)
 2168 {
 2169     void *buffer;
 2170     int r;
 2171     ssize_t ret;
 2172     uint64_t buffer_len, offset;
 2173     struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
 2174 
 2175     log_dbg(cd, "Going to move data from head of data device.");
 2176 
 2177     offset = json_segment_get_offset(LUKS2_get_segment_jobj(hdr, 0), 0);
 2178     buffer_len = json_segment_get_size(LUKS2_get_segment_jobj(hdr, 0), 0);
 2179     if (!buffer_len || buffer_len > data_shift)
 2180         return -EINVAL;
 2181 
 2182     if (posix_memalign(&buffer, device_alignment(crypt_data_device(cd)), buffer_len))
 2183         return -ENOMEM;
 2184 
 2185     ret = read_lseek_blockwise(devfd,
 2186             device_block_size(cd, crypt_data_device(cd)),
 2187             device_alignment(crypt_data_device(cd)),
 2188             buffer, buffer_len, 0);
 2189     if (ret < 0 || (uint64_t)ret != buffer_len) {
 2190         r = -EIO;
 2191         goto out;
 2192     }
 2193 
 2194     log_dbg(cd, "Going to write %" PRIu64 " bytes at offset %" PRIu64, buffer_len, offset);
 2195     ret = write_lseek_blockwise(devfd,
 2196             device_block_size(cd, crypt_data_device(cd)),
 2197             device_alignment(crypt_data_device(cd)),
 2198             buffer, buffer_len, offset);
 2199     if (ret < 0 || (uint64_t)ret != buffer_len) {
 2200         r = -EIO;
 2201         goto out;
 2202     }
 2203 
 2204     r = 0;
 2205 out:
 2206     memset(buffer, 0, buffer_len);
 2207     free(buffer);
 2208     return r;
 2209 }
 2210 
 2211 static int reencrypt_make_backup_segments(struct crypt_device *cd,
 2212         struct luks2_hdr *hdr,
 2213         int keyslot_new,
 2214         const char *cipher,
 2215         uint64_t data_offset,
 2216         const struct crypt_params_reencrypt *params)
 2217 {
 2218     int r, segment, moved_segment = -1, digest_old = -1, digest_new = -1;
 2219     json_object *jobj_segment_new = NULL, *jobj_segment_old = NULL, *jobj_segment_bcp = NULL;
 2220     uint32_t sector_size = params->luks2 ? params->luks2->sector_size : SECTOR_SIZE;
 2221     uint64_t segment_offset, tmp, data_shift = params->data_shift << SECTOR_SHIFT;
 2222 
 2223     if (params->mode != CRYPT_REENCRYPT_DECRYPT) {
 2224         digest_new = LUKS2_digest_by_keyslot(hdr, keyslot_new);
 2225         if (digest_new < 0)
 2226             return -EINVAL;
 2227     }
 2228 
 2229     if (params->mode != CRYPT_REENCRYPT_ENCRYPT) {
 2230         digest_old = LUKS2_digest_by_segment(hdr, CRYPT_DEFAULT_SEGMENT);
 2231         if (digest_old < 0)
 2232             return -EINVAL;
 2233     }
 2234 
 2235     segment = LUKS2_segment_first_unused_id(hdr);
 2236     if (segment < 0)
 2237         return -EINVAL;
 2238 
 2239     if (params->mode == CRYPT_REENCRYPT_ENCRYPT &&
 2240         (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT)) {
 2241         json_object_copy(LUKS2_get_segment_jobj(hdr, 0), &jobj_segment_bcp);
 2242         r = LUKS2_segment_set_flag(jobj_segment_bcp, "backup-moved-segment");
 2243         if (r)
 2244             goto err;
 2245         moved_segment = segment++;
 2246         json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), moved_segment, jobj_segment_bcp);
 2247     }
 2248 
 2249     /* FIXME: Add detection for case (digest old == digest new && old segment == new segment) */
 2250     if (digest_old >= 0)
 2251         json_object_copy(LUKS2_get_segment_jobj(hdr, CRYPT_DEFAULT_SEGMENT), &jobj_segment_old);
 2252     else if (params->mode == CRYPT_REENCRYPT_ENCRYPT) {
 2253         r = LUKS2_get_data_size(hdr, &tmp, NULL);
 2254         if (r)
 2255             goto err;
 2256 
 2257         if (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT)
 2258             jobj_segment_old = json_segment_create_linear(0, tmp ? &tmp : NULL, 0);
 2259         else
 2260             jobj_segment_old = json_segment_create_linear(data_offset, tmp ? &tmp : NULL, 0);
 2261     }
 2262 
 2263     if (!jobj_segment_old) {
 2264         r = -EINVAL;
 2265         goto err;
 2266     }
 2267 
 2268     r = LUKS2_segment_set_flag(jobj_segment_old, "backup-previous");
 2269     if (r)
 2270         goto err;
 2271     json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), segment, jobj_segment_old);
 2272     jobj_segment_old = NULL;
 2273     if (digest_old >= 0)
 2274         LUKS2_digest_segment_assign(cd, hdr, segment, digest_old, 1, 0);
 2275     segment++;
 2276 
 2277     if (digest_new >= 0) {
 2278         segment_offset = data_offset;
 2279         if (params->mode != CRYPT_REENCRYPT_ENCRYPT &&
 2280             modify_offset(&segment_offset, data_shift, params->direction)) {
 2281             r = -EINVAL;
 2282             goto err;
 2283         }
 2284         jobj_segment_new = json_segment_create_crypt(segment_offset,
 2285                             crypt_get_iv_offset(cd),
 2286                             NULL, cipher, sector_size, 0);
 2287     } else if (params->mode == CRYPT_REENCRYPT_DECRYPT) {
 2288         segment_offset = data_offset;
 2289         if (modify_offset(&segment_offset, data_shift, params->direction)) {
 2290             r = -EINVAL;
 2291             goto err;
 2292         }
 2293         jobj_segment_new = json_segment_create_linear(segment_offset, NULL, 0);
 2294     }
 2295 
 2296     if (!jobj_segment_new) {
 2297         r = -EINVAL;
 2298         goto err;
 2299     }
 2300 
 2301     r = LUKS2_segment_set_flag(jobj_segment_new, "backup-final");
 2302     if (r)
 2303         goto err;
 2304     json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), segment, jobj_segment_new);
 2305     jobj_segment_new = NULL;
 2306     if (digest_new >= 0)
 2307         LUKS2_digest_segment_assign(cd, hdr, segment, digest_new, 1, 0);
 2308 
 2309     /* FIXME: also check occupied space by keyslot in shrunk area */
 2310     if (params->direction == CRYPT_REENCRYPT_FORWARD && data_shift &&
 2311         crypt_metadata_device(cd) == crypt_data_device(cd) &&
 2312         LUKS2_set_keyslots_size(cd, hdr, json_segment_get_offset(reencrypt_segment_new(hdr), 0))) {
 2313         log_err(cd, _("Failed to set new keyslots area size."));
 2314         r = -EINVAL;
 2315         goto err;
 2316     }
 2317 
 2318     return 0;
 2319 err:
 2320     json_object_put(jobj_segment_new);
 2321     json_object_put(jobj_segment_old);
 2322     return r;
 2323 }
 2324 
 2325 static int reencrypt_verify_and_upload_keys(struct crypt_device *cd, struct luks2_hdr *hdr, int digest_old, int digest_new, struct volume_key *vks)
 2326 {
 2327     int r;
 2328     struct volume_key *vk;
 2329 
 2330     if (digest_new >= 0) {
 2331         vk = crypt_volume_key_by_id(vks, digest_new);
 2332         if (!vk)
 2333             return -ENOENT;
 2334         else {
 2335             if (LUKS2_digest_verify_by_digest(cd, hdr, digest_new, vk) != digest_new)
 2336                 return -EINVAL;
 2337 
 2338             if (crypt_use_keyring_for_vk(cd) && !crypt_is_cipher_null(reencrypt_segment_cipher_new(hdr)) &&
 2339                 (r = LUKS2_volume_key_load_in_keyring_by_digest(cd, hdr, vk, crypt_volume_key_get_id(vk))))
 2340                 return r;
 2341         }
 2342     }
 2343 
 2344     if (digest_old >= 0 && digest_old != digest_new) {
 2345         vk = crypt_volume_key_by_id(vks, digest_old);
 2346         if (!vk) {
 2347             r = -ENOENT;
 2348             goto err;
 2349         } else {
 2350             if (LUKS2_digest_verify_by_digest(cd, hdr, digest_old, vk) != digest_old) {
 2351                 r = -EINVAL;
 2352                 goto err;
 2353             }
 2354             if (crypt_use_keyring_for_vk(cd) && !crypt_is_cipher_null(reencrypt_segment_cipher_old(hdr)) &&
 2355                 (r = LUKS2_volume_key_load_in_keyring_by_digest(cd, hdr, vk, crypt_volume_key_get_id(vk))))
 2356                 goto err;
 2357         }
 2358     }
 2359 
 2360     return 0;
 2361 err:
 2362     crypt_drop_keyring_key(cd, vks);
 2363     return r;
 2364 }
 2365 
 2366 /* This function must be called with metadata lock held */
 2367 static int reencrypt_init(struct crypt_device *cd,
 2368         const char *name,
 2369         struct luks2_hdr *hdr,
 2370         const char *passphrase,
 2371         size_t passphrase_size,
 2372         int keyslot_old,
 2373         int keyslot_new,
 2374         const char *cipher,
 2375         const char *cipher_mode,
 2376         const struct crypt_params_reencrypt *params,
 2377         struct volume_key **vks)
 2378 {
 2379     bool move_first_segment;
 2380     char _cipher[128];
 2381     uint32_t sector_size;
 2382     int r, reencrypt_keyslot, devfd = -1;
 2383     uint64_t data_offset, dev_size = 0;
 2384     struct crypt_dm_active_device dmd_target, dmd_source = {
 2385         .uuid = crypt_get_uuid(cd),
 2386         .flags = CRYPT_ACTIVATE_SHARED /* turn off exclusive open checks */
 2387     };
 2388 
 2389     if (!params || params->mode > CRYPT_REENCRYPT_DECRYPT)
 2390         return -EINVAL;
 2391 
 2392     if (params->mode != CRYPT_REENCRYPT_DECRYPT &&
 2393         (!params->luks2 || !(cipher && cipher_mode) || keyslot_new < 0))
 2394         return -EINVAL;
 2395 
 2396     log_dbg(cd, "Initializing reencryption (mode: %s) in LUKS2 metadata.",
 2397             crypt_reencrypt_mode_to_str(params->mode));
 2398 
 2399     move_first_segment = (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT);
 2400 
 2401     /* implicit sector size 512 for decryption */
 2402     sector_size = params->luks2 ? params->luks2->sector_size : SECTOR_SIZE;
 2403     if (sector_size < SECTOR_SIZE || sector_size > MAX_SECTOR_SIZE ||
 2404         NOTPOW2(sector_size)) {
 2405         log_err(cd, _("Unsupported encryption sector size."));
 2406         return -EINVAL;
 2407     }
 2408 
 2409     if (!cipher_mode || *cipher_mode == '\0')
 2410         r = snprintf(_cipher, sizeof(_cipher), "%s", cipher);
 2411     else
 2412         r = snprintf(_cipher, sizeof(_cipher), "%s-%s", cipher, cipher_mode);
 2413     if (r < 0 || (size_t)r >= sizeof(_cipher))
 2414         return -EINVAL;
 2415 
 2416     if (MISALIGNED(params->data_shift, sector_size >> SECTOR_SHIFT)) {
 2417         log_err(cd, _("Data shift is not aligned to requested encryption sector size (%" PRIu32 " bytes)."), sector_size);
 2418         return -EINVAL;
 2419     }
 2420 
 2421     data_offset = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;
 2422 
 2423     r = device_check_access(cd, crypt_data_device(cd), DEV_OK);
 2424     if (r)
 2425         return r;
 2426 
 2427     r = device_check_size(cd, crypt_data_device(cd), data_offset, 1);
 2428     if (r)
 2429         return r;
 2430 
 2431     r = device_size(crypt_data_device(cd), &dev_size);
 2432     if (r)
 2433         return r;
 2434 
 2435     dev_size -= data_offset;
 2436 
 2437     if (MISALIGNED(dev_size, sector_size)) {
 2438         log_err(cd, _("Data device is not aligned to requested encryption sector size (%" PRIu32 " bytes)."), sector_size);
 2439         return -EINVAL;
 2440     }
 2441 
 2442     reencrypt_keyslot = LUKS2_keyslot_find_empty(cd, hdr, 0);
 2443     if (reencrypt_keyslot < 0) {
 2444         log_err(cd, _("All key slots full."));
 2445         return -EINVAL;
 2446     }
 2447 
 2448     /*
 2449      * We must perform data move with exclusive open data device
 2450      * to exclude another cryptsetup process to colide with
 2451      * encryption initialization (or mount)
 2452      */
 2453     if (move_first_segment) {
 2454         if (dev_size < (params->data_shift << SECTOR_SHIFT)) {
 2455             log_err(cd, _("Device %s is too small."), device_path(crypt_data_device(cd)));
 2456             return -EINVAL;
 2457         }
 2458         if (params->data_shift < LUKS2_get_data_offset(hdr)) {
 2459             log_err(cd, _("Data shift (%" PRIu64 " sectors) is less than future data offset (%" PRIu64 " sectors)."), params->data_shift, LUKS2_get_data_offset(hdr));
 2460             return -EINVAL;
 2461         }
 2462         devfd = device_open_excl(cd, crypt_data_device(cd), O_RDWR);
 2463         if (devfd < 0) {
 2464             if (devfd == -EBUSY)
 2465                 log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."), device_path(crypt_data_device(cd)));
 2466             return -EINVAL;
 2467         }
 2468     }
 2469 
 2470     if (params->mode == CRYPT_REENCRYPT_ENCRYPT) {
 2471         /* in-memory only */
 2472         r = reencrypt_set_encrypt_segments(cd, hdr, dev_size, params->data_shift << SECTOR_SHIFT, move_first_segment, params->direction);
 2473         if (r)
 2474             goto out;
 2475     }
 2476 
 2477     r = LUKS2_keyslot_reencrypt_allocate(cd, hdr, reencrypt_keyslot,
 2478                        params);
 2479     if (r < 0)
 2480         goto out;
 2481 
 2482     r = reencrypt_make_backup_segments(cd, hdr, keyslot_new, _cipher, data_offset, params);
 2483     if (r) {
 2484         log_dbg(cd, "Failed to create reencryption backup device segments.");
 2485         goto out;
 2486     }
 2487 
 2488     r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new, passphrase, passphrase_size, vks);
 2489     if (r < 0)
 2490         goto out;
 2491 
 2492     r = LUKS2_keyslot_reencrypt_digest_create(cd, hdr, *vks);
 2493     if (r < 0)
 2494         goto out;
 2495 
 2496     if (name && params->mode != CRYPT_REENCRYPT_ENCRYPT) {
 2497         r = reencrypt_verify_and_upload_keys(cd, hdr, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
 2498         if (r)
 2499             goto out;
 2500 
 2501         r = dm_query_device(cd, name, DM_ACTIVE_UUID | DM_ACTIVE_DEVICE |
 2502                     DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY |
 2503                     DM_ACTIVE_CRYPT_CIPHER, &dmd_target);
 2504         if (r < 0)
 2505             goto out;
 2506 
 2507         r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, LUKS2_get_segments_jobj(hdr), &dmd_source);
 2508         if (!r) {
 2509             r = crypt_compare_dm_devices(cd, &dmd_source, &dmd_target);
 2510             if (r)
 2511                 log_err(cd, _("Mismatching parameters on device %s."), name);
 2512         }
 2513 
 2514         dm_targets_free(cd, &dmd_source);
 2515         dm_targets_free(cd, &dmd_target);
 2516         free(CONST_CAST(void*)dmd_target.uuid);
 2517 
 2518         if (r)
 2519             goto out;
 2520     }
 2521 
 2522     if (move_first_segment && reencrypt_move_data(cd, devfd, params->data_shift << SECTOR_SHIFT)) {
 2523         r = -EIO;
 2524         goto out;
 2525     }
 2526 
 2527     /* This must be first and only write in LUKS2 metadata during _reencrypt_init */
 2528     r = reencrypt_update_flag(cd, 1, true);
 2529     if (r) {
 2530         log_dbg(cd, "Failed to set online-reencryption requirement.");
 2531         r = -EINVAL;
 2532     } else
 2533         r = reencrypt_keyslot;
 2534 out:
 2535     device_release_excl(cd, crypt_data_device(cd));
 2536     if (r < 0)
 2537         crypt_load(cd, CRYPT_LUKS2, NULL);
 2538 
 2539     return r;
 2540 }
 2541 
 2542 static int reencrypt_hotzone_protect_final(struct crypt_device *cd,
 2543     struct luks2_hdr *hdr, struct luks2_reencrypt *rh,
 2544     const void *buffer, size_t buffer_len)
 2545 {
 2546     const void *pbuffer;
 2547     size_t data_offset, len;
 2548     int r;
 2549 
 2550     if (rh->rp.type == REENC_PROTECTION_NONE)
 2551         return 0;
 2552 
 2553     if (rh->rp.type == REENC_PROTECTION_CHECKSUM) {
 2554         log_dbg(cd, "Checksums hotzone resilience.");
 2555 
 2556         for (data_offset = 0, len = 0; data_offset < buffer_len; data_offset += rh->alignment, len += rh->rp.p.csum.hash_size) {
 2557             if (crypt_hash_write(rh->rp.p.csum.ch, (const char *)buffer + data_offset, rh->alignment)) {
 2558                 log_dbg(cd, "Failed to hash sector at offset %zu.", data_offset);
 2559                 return -EINVAL;
 2560             }
 2561             if (crypt_hash_final(rh->rp.p.csum.ch, (char *)rh->rp.p.csum.checksums + len, rh->rp.p.csum.hash_size)) {
 2562                 log_dbg(cd, "Failed to finalize hash.");
 2563                 return -EINVAL;
 2564             }
 2565         }
 2566         pbuffer = rh->rp.p.csum.checksums;
 2567     } else if (rh->rp.type == REENC_PROTECTION_JOURNAL) {
 2568         log_dbg(cd, "Journal hotzone resilience.");
 2569         len = buffer_len;
 2570         pbuffer = buffer;
 2571     } else if (rh->rp.type == REENC_PROTECTION_DATASHIFT) {
 2572         log_dbg(cd, "Data shift hotzone resilience.");
 2573         return LUKS2_hdr_write(cd, hdr);
 2574     } else
 2575         return -EINVAL;
 2576 
 2577     log_dbg(cd, "Going to store %zu bytes in reencrypt keyslot.", len);
 2578 
 2579     r = LUKS2_keyslot_reencrypt_store(cd, hdr, rh->reenc_keyslot, pbuffer, len);
 2580 
 2581     return r > 0 ? 0 : r;
 2582 }
 2583 
 2584 static int reencrypt_context_update(struct crypt_device *cd,
 2585     struct luks2_reencrypt *rh)
 2586 {
 2587     if (rh->read < 0)
 2588         return -EINVAL;
 2589 
 2590     if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
 2591         if (rh->data_shift && rh->mode == CRYPT_REENCRYPT_ENCRYPT) {
 2592             if (rh->offset)
 2593                 rh->offset -= rh->data_shift;
 2594             if (rh->offset && (rh->offset < rh->data_shift)) {
 2595                 rh->length = rh->offset;
 2596                 rh->offset = rh->data_shift;
 2597             }
 2598             if (!rh->offset)
 2599                 rh->length = rh->data_shift;
 2600         } else {
 2601             if (rh->offset < rh->length)
 2602                 rh->length = rh->offset;
 2603             rh->offset -= rh->length;
 2604         }
 2605     } else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
 2606         rh->offset += (uint64_t)rh->read;
 2607         /* it fails in-case of device_size < rh->offset later */
 2608         if (rh->device_size - rh->offset < rh->length)
 2609             rh->length = rh->device_size - rh->offset;
 2610     } else
 2611         return -EINVAL;
 2612 
 2613     if (rh->device_size < rh->offset) {
 2614         log_dbg(cd, "Calculated reencryption offset %" PRIu64 " is beyond device size %" PRIu64 ".", rh->offset, rh->device_size);
 2615         return -EINVAL;
 2616     }
 2617 
 2618     rh->progress += (uint64_t)rh->read;
 2619 
 2620     return 0;
 2621 }
 2622 
 2623 static int reencrypt_load(struct crypt_device *cd, struct luks2_hdr *hdr,
 2624         uint64_t device_size,
 2625         const struct crypt_params_reencrypt *params,
 2626         struct volume_key *vks,
 2627         struct luks2_reencrypt **rh)
 2628 {
 2629     int r;
 2630     struct luks2_reencrypt *tmp = NULL;
 2631     crypt_reencrypt_info ri = LUKS2_reencrypt_status(hdr);
 2632 
 2633     if (ri == CRYPT_REENCRYPT_NONE) {
 2634         log_err(cd, _("Device not marked for LUKS2 reencryption."));
 2635         return -EINVAL;
 2636     } else if (ri == CRYPT_REENCRYPT_INVALID)
 2637         return -EINVAL;
 2638 
 2639     r = LUKS2_reencrypt_digest_verify(cd, hdr, vks);
 2640     if (r < 0)
 2641         return r;
 2642 
 2643     if (ri == CRYPT_REENCRYPT_CLEAN)
 2644         r = reencrypt_load_clean(cd, hdr, device_size, &tmp, params);
 2645     else if (ri == CRYPT_REENCRYPT_CRASH)
 2646         r = reencrypt_load_crashed(cd, hdr, device_size, &tmp);
 2647     else
 2648         r = -EINVAL;
 2649 
 2650     if (r < 0 || !tmp) {
 2651         log_err(cd, _("Failed to load LUKS2 reencryption context."));
 2652         return r;
 2653     }
 2654 
 2655     *rh = tmp;
 2656 
 2657     return 0;
 2658 }
 2659 #endif
 2660 static int reencrypt_lock_internal(struct crypt_device *cd, const char *uuid, struct crypt_lock_handle **reencrypt_lock)
 2661 {
 2662     int r;
 2663     char *lock_resource;
 2664 
 2665     if (!crypt_metadata_locking_enabled()) {
 2666         *reencrypt_lock = NULL;
 2667         return 0;
 2668     }
 2669 
 2670     r = asprintf(&lock_resource, "LUKS2-reencryption-%s", uuid);
 2671     if (r < 0)
 2672         return -ENOMEM;
 2673     if (r < 20) {
 2674         free(lock_resource);
 2675         return -EINVAL;
 2676     }
 2677 
 2678     r = crypt_write_lock(cd, lock_resource, false, reencrypt_lock);
 2679 
 2680     free(lock_resource);
 2681 
 2682     return r;
 2683 }
 2684 
 2685 /* internal only */
 2686 int LUKS2_reencrypt_lock_by_dm_uuid(struct crypt_device *cd, const char *dm_uuid,
 2687     struct crypt_lock_handle **reencrypt_lock)
 2688 {
 2689     int r;
 2690     char hdr_uuid[37];
 2691     const char *uuid = crypt_get_uuid(cd);
 2692 
 2693     if (!dm_uuid)
 2694         return -EINVAL;
 2695 
 2696     if (!uuid) {
 2697         r = snprintf(hdr_uuid, sizeof(hdr_uuid), "%.8s-%.4s-%.4s-%.4s-%.12s",
 2698              dm_uuid + 6, dm_uuid + 14, dm_uuid + 18, dm_uuid + 22, dm_uuid + 26);
 2699         if (r < 0 || (size_t)r != (sizeof(hdr_uuid) - 1))
 2700             return -EINVAL;
 2701     } else if (crypt_uuid_cmp(dm_uuid, uuid))
 2702         return -EINVAL;
 2703 
 2704     return reencrypt_lock_internal(cd, uuid, reencrypt_lock);
 2705 }
 2706 
 2707 /* internal only */
 2708 int LUKS2_reencrypt_lock(struct crypt_device *cd, struct crypt_lock_handle **reencrypt_lock)
 2709 {
 2710     if (!cd || !crypt_get_type(cd) || strcmp(crypt_get_type(cd), CRYPT_LUKS2))
 2711         return -EINVAL;
 2712 
 2713     return reencrypt_lock_internal(cd, crypt_get_uuid(cd), reencrypt_lock);
 2714 }
 2715 
/* internal only */
/* Release a reencryption lock acquired via LUKS2_reencrypt_lock() or
 * LUKS2_reencrypt_lock_by_dm_uuid(). */
void LUKS2_reencrypt_unlock(struct crypt_device *cd, struct crypt_lock_handle *reencrypt_lock)
{
    crypt_unlock_internal(cd, reencrypt_lock);
}
 2721 #if USE_LUKS2_REENCRYPTION
 2722 static int reencrypt_lock_and_verify(struct crypt_device *cd, struct luks2_hdr *hdr,
 2723         struct crypt_lock_handle **reencrypt_lock)
 2724 {
 2725     int r;
 2726     crypt_reencrypt_info ri;
 2727     struct crypt_lock_handle *h;
 2728 
 2729     ri = LUKS2_reencrypt_status(hdr);
 2730     if (ri == CRYPT_REENCRYPT_INVALID) {
 2731         log_err(cd, _("Failed to get reencryption state."));
 2732         return -EINVAL;
 2733     }
 2734     if (ri < CRYPT_REENCRYPT_CLEAN) {
 2735         log_err(cd, _("Device is not in reencryption."));
 2736         return -EINVAL;
 2737     }
 2738 
 2739     r = LUKS2_reencrypt_lock(cd, &h);
 2740     if (r < 0) {
 2741         if (r == -EBUSY)
 2742             log_err(cd, _("Reencryption process is already running."));
 2743         else
 2744             log_err(cd, _("Failed to acquire reencryption lock."));
 2745         return r;
 2746     }
 2747 
 2748     /* With reencryption lock held, reload device context and verify metadata state */
 2749     r = crypt_load(cd, CRYPT_LUKS2, NULL);
 2750     if (r) {
 2751         LUKS2_reencrypt_unlock(cd, h);
 2752         return r;
 2753     }
 2754 
 2755     ri = LUKS2_reencrypt_status(hdr);
 2756     if (ri == CRYPT_REENCRYPT_CLEAN) {
 2757         *reencrypt_lock = h;
 2758         return 0;
 2759     }
 2760 
 2761     LUKS2_reencrypt_unlock(cd, h);
 2762     log_err(cd, _("Cannot proceed with reencryption. Run reencryption recovery first."));
 2763     return -EINVAL;
 2764 }
 2765 
 2766 static int reencrypt_load_by_passphrase(struct crypt_device *cd,
 2767         const char *name,
 2768         const char *passphrase,
 2769         size_t passphrase_size,
 2770         int keyslot_old,
 2771         int keyslot_new,
 2772         struct volume_key **vks,
 2773         const struct crypt_params_reencrypt *params)
 2774 {
 2775     int r, old_ss, new_ss;
 2776     struct luks2_hdr *hdr;
 2777     struct crypt_lock_handle *reencrypt_lock;
 2778     struct luks2_reencrypt *rh;
 2779     const struct volume_key *vk;
 2780     struct crypt_dm_active_device dmd_target, dmd_source = {
 2781         .uuid = crypt_get_uuid(cd),
 2782         .flags = CRYPT_ACTIVATE_SHARED /* turn off exclusive open checks */
 2783     };
 2784     uint64_t minimal_size, device_size, mapping_size = 0, required_size = 0;
 2785     bool dynamic;
 2786     struct crypt_params_reencrypt rparams = {};
 2787     uint32_t flags = 0;
 2788 
 2789     if (params) {
 2790         rparams = *params;
 2791         required_size = params->device_size;
 2792     }
 2793 
 2794     log_dbg(cd, "Loading LUKS2 reencryption context.");
 2795 
 2796     rh = crypt_get_luks2_reencrypt(cd);
 2797     if (rh) {
 2798         LUKS2_reencrypt_free(cd, rh);
 2799         crypt_set_luks2_reencrypt(cd, NULL);
 2800         rh = NULL;
 2801     }
 2802 
 2803     hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
 2804 
 2805     r = reencrypt_lock_and_verify(cd, hdr, &reencrypt_lock);
 2806     if (r)
 2807         return r;
 2808 
 2809     /* From now on we hold reencryption lock */
 2810 
 2811     if (LUKS2_get_data_size(hdr, &minimal_size, &dynamic))
 2812         return -EINVAL;
 2813 
 2814     /* some configurations provides fixed device size */
 2815     r = LUKS2_reencrypt_check_device_size(cd, hdr, minimal_size, &device_size, false, dynamic);
 2816     if (r) {
 2817         r = -EINVAL;
 2818         goto err;
 2819     }
 2820 
 2821     minimal_size >>= SECTOR_SHIFT;
 2822 
 2823     old_ss = reencrypt_get_sector_size_old(hdr);
 2824     new_ss = reencrypt_get_sector_size_new(hdr);
 2825 
 2826     r = reencrypt_verify_and_upload_keys(cd, hdr, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
 2827     if (r == -ENOENT) {
 2828         log_dbg(cd, "Keys are not ready. Unlocking all volume keys.");
 2829         r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new, passphrase, passphrase_size, vks);
 2830         if (r < 0)
 2831             goto err;
 2832         r = reencrypt_verify_and_upload_keys(cd, hdr, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
 2833     }
 2834 
 2835     if (r < 0)
 2836         goto err;
 2837 
 2838     if (name) {
 2839         r = dm_query_device(cd, name, DM_ACTIVE_UUID | DM_ACTIVE_DEVICE |
 2840                     DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY |
 2841                     DM_ACTIVE_CRYPT_CIPHER, &dmd_target);
 2842         if (r < 0)
 2843             goto err;
 2844         flags = dmd_target.flags;
 2845 
 2846         /*
 2847          * By default reencryption code aims to retain flags from existing dm device.
 2848          * The keyring activation flag can not be inherited if original cipher is null.
 2849          *
 2850          * In this case override the flag based on decision made in reencrypt_verify_and_upload_keys
 2851          * above. The code checks if new VK is eligible for keyring.
 2852          */
 2853         vk = crypt_volume_key_by_id(*vks, LUKS2_reencrypt_digest_new(hdr));
 2854         if (vk && vk->key_description && crypt_is_cipher_null(reencrypt_segment_cipher_old(hdr))) {
 2855             flags |= CRYPT_ACTIVATE_KEYRING_KEY;
 2856             dmd_source.flags |= CRYPT_ACTIVATE_KEYRING_KEY;
 2857         }
 2858 
 2859         r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, LUKS2_get_segments_jobj(hdr), &dmd_source);
 2860         if (!r) {
 2861             r = crypt_compare_dm_devices(cd, &dmd_source, &dmd_target);
 2862             if (r)
 2863                 log_err(cd, _("Mismatching parameters on device %s."), name);
 2864         }
 2865 
 2866         dm_targets_free(cd, &dmd_source);
 2867         dm_targets_free(cd, &dmd_target);
 2868         free(CONST_CAST(void*)dmd_target.uuid);
 2869         if (r)
 2870             goto err;
 2871         mapping_size = dmd_target.size;
 2872     }
 2873 
 2874     r = -EINVAL;
 2875     if (required_size && mapping_size && (required_size != mapping_size)) {
 2876         log_err(cd, _("Active device size and requested reencryption size don't match."));
 2877         goto err;
 2878     }
 2879 
 2880     if (mapping_size)
 2881         required_size = mapping_size;
 2882 
 2883     if (required_size) {
 2884         /* TODO: Add support for changing fixed minimal size in reencryption mda where possible */
 2885         if ((minimal_size && (required_size < minimal_size)) ||
 2886             (required_size > (device_size >> SECTOR_SHIFT)) ||
 2887             (!dynamic && (required_size != minimal_size)) ||
 2888             (old_ss > 0 && MISALIGNED(required_size, old_ss >> SECTOR_SHIFT)) ||
 2889             (new_ss > 0 && MISALIGNED(required_size, new_ss >> SECTOR_SHIFT))) {
 2890             log_err(cd, _("Illegal device size requested in reencryption parameters."));
 2891             goto err;
 2892         }
 2893         rparams.device_size = required_size;
 2894     }
 2895 
 2896     r = reencrypt_load(cd, hdr, device_size, &rparams, *vks, &rh);
 2897     if (r < 0 || !rh)
 2898         goto err;
 2899 
 2900     if (name && (r = reencrypt_context_set_names(rh, name)))
 2901         goto err;
 2902 
 2903     /* Reassure device is not mounted and there's no dm mapping active */
 2904     if (!name && (device_open_excl(cd, crypt_data_device(cd), O_RDONLY) < 0)) {
 2905         log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."), device_path(crypt_data_device(cd)));
 2906         r = -EBUSY;
 2907         goto err;
 2908     }
 2909     device_release_excl(cd, crypt_data_device(cd));
 2910 
 2911     /* There's a race for dm device activation not managed by cryptsetup.
 2912      *
 2913      * 1) excl close
 2914      * 2) rogue dm device activation
 2915      * 3) one or more dm-crypt based wrapper activation
 2916      * 4) next excl open gets skipped due to 3) device from 2) remains undetected.
 2917      */
 2918     r = reencrypt_init_storage_wrappers(cd, hdr, rh, *vks);
 2919     if (r)
 2920         goto err;
 2921 
 2922     /* If one of wrappers is based on dmcrypt fallback it already blocked mount */
 2923     if (!name && crypt_storage_wrapper_get_type(rh->cw1) != DMCRYPT &&
 2924         crypt_storage_wrapper_get_type(rh->cw2) != DMCRYPT) {
 2925         if (device_open_excl(cd, crypt_data_device(cd), O_RDONLY) < 0) {
 2926             log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."), device_path(crypt_data_device(cd)));
 2927             r = -EBUSY;
 2928             goto err;
 2929         }
 2930     }
 2931 
 2932     rh->flags = flags;
 2933 
 2934     MOVE_REF(rh->vks, *vks);
 2935     MOVE_REF(rh->reenc_lock, reencrypt_lock);
 2936 
 2937     crypt_set_luks2_reencrypt(cd, rh);
 2938 
 2939     return 0;
 2940 err:
 2941     LUKS2_reencrypt_unlock(cd, reencrypt_lock);
 2942     LUKS2_reencrypt_free(cd, rh);
 2943     return r;
 2944 }
 2945 
 2946 static int reencrypt_recovery_by_passphrase(struct crypt_device *cd,
 2947     struct luks2_hdr *hdr,
 2948     int keyslot_old,
 2949     int keyslot_new,
 2950     const char *passphrase,
 2951     size_t passphrase_size)
 2952 {
 2953     int r;
 2954     crypt_reencrypt_info ri;
 2955     struct crypt_lock_handle *reencrypt_lock;
 2956 
 2957     r = LUKS2_reencrypt_lock(cd, &reencrypt_lock);
 2958     if (r) {
 2959         if (r == -EBUSY)
 2960             log_err(cd, _("Reencryption in-progress. Cannot perform recovery."));
 2961         else
 2962             log_err(cd, _("Failed to get reencryption lock."));
 2963         return r;
 2964     }
 2965 
 2966     if ((r = crypt_load(cd, CRYPT_LUKS2, NULL))) {
 2967         LUKS2_reencrypt_unlock(cd, reencrypt_lock);
 2968         return r;
 2969     }
 2970 
 2971     ri = LUKS2_reencrypt_status(hdr);
 2972     if (ri == CRYPT_REENCRYPT_INVALID) {
 2973         LUKS2_reencrypt_unlock(cd, reencrypt_lock);
 2974         return -EINVAL;
 2975     }
 2976 
 2977     if (ri == CRYPT_REENCRYPT_CRASH) {
 2978         r = LUKS2_reencrypt_locked_recovery_by_passphrase(cd, keyslot_old, keyslot_new,
 2979                 passphrase, passphrase_size, 0, NULL);
 2980         if (r < 0)
 2981             log_err(cd, _("LUKS2 reencryption recovery failed."));
 2982     } else {
 2983         log_dbg(cd, "No LUKS2 reencryption recovery needed.");
 2984         r = 0;
 2985     }
 2986 
 2987     LUKS2_reencrypt_unlock(cd, reencrypt_lock);
 2988     return r;
 2989 }
 2990 
 2991 static int reencrypt_repair_by_passphrase(
 2992         struct crypt_device *cd,
 2993         struct luks2_hdr *hdr,
 2994         int keyslot_old,
 2995         int keyslot_new,
 2996         const char *passphrase,
 2997         size_t passphrase_size)
 2998 {
 2999     int r;
 3000     struct crypt_lock_handle *reencrypt_lock;
 3001     struct luks2_reencrypt *rh;
 3002     crypt_reencrypt_info ri;
 3003     struct volume_key *vks = NULL;
 3004 
 3005     log_dbg(cd, "Loading LUKS2 reencryption context for metadata repair.");
 3006 
 3007     rh = crypt_get_luks2_reencrypt(cd);
 3008     if (rh) {
 3009         LUKS2_reencrypt_free(cd, rh);
 3010         crypt_set_luks2_reencrypt(cd, NULL);
 3011         rh = NULL;
 3012     }
 3013 
 3014     ri = LUKS2_reencrypt_status(hdr);
 3015     if (ri == CRYPT_REENCRYPT_INVALID)
 3016         return -EINVAL;
 3017 
 3018     if (ri < CRYPT_REENCRYPT_CLEAN) {
 3019         log_err(cd, _("Device is not in reencryption."));
 3020         return -EINVAL;
 3021     }
 3022 
 3023     r = LUKS2_reencrypt_lock(cd, &reencrypt_lock);
 3024     if (r < 0) {
 3025         if (r == -EBUSY)
 3026             log_err(cd, _("Reencryption process is already running."));
 3027         else
 3028             log_err(cd, _("Failed to acquire reencryption lock."));
 3029         return r;
 3030     }
 3031 
 3032     /* With reencryption lock held, reload device context and verify metadata state */
 3033     r = crypt_load(cd, CRYPT_LUKS2, NULL);
 3034     if (r)
 3035         goto out;
 3036 
 3037     ri = LUKS2_reencrypt_status(hdr);
 3038     if (ri == CRYPT_REENCRYPT_INVALID) {
 3039         r = -EINVAL;
 3040         goto out;
 3041     }
 3042     if (ri == CRYPT_REENCRYPT_NONE) {
 3043         r = 0;
 3044         goto out;
 3045     }
 3046 
 3047     r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new, passphrase, passphrase_size, &vks);
 3048     if (r < 0)
 3049         goto out;
 3050 
 3051     r = LUKS2_keyslot_reencrypt_digest_create(cd, hdr, vks);
 3052     crypt_free_volume_key(vks);
 3053     vks = NULL;
 3054     if (r < 0)
 3055         goto out;
 3056 
 3057     /* removes online-reencrypt flag v1 */
 3058     if ((r = reencrypt_update_flag(cd, 0, false)))
 3059         goto out;
 3060 
 3061     /* adds online-reencrypt flag v2 and commits metadata */
 3062     r = reencrypt_update_flag(cd, 1, true);
 3063 out:
 3064     LUKS2_reencrypt_unlock(cd, reencrypt_lock);
 3065     crypt_free_volume_key(vks);
 3066     return r;
 3067 
 3068 }
 3069 #endif
/*
 * Common worker for the public reencryption init entry points.
 *
 * Depending on params->flags it either runs metadata repair or crash
 * recovery and returns immediately, or initializes (and/or resumes) a
 * reencryption operation. On success returns the reencrypt keyslot id,
 * negative errno style code otherwise.
 */
static int reencrypt_init_by_passphrase(struct crypt_device *cd,
    const char *name,
    const char *passphrase,
    size_t passphrase_size,
    int keyslot_old,
    int keyslot_new,
    const char *cipher,
    const char *cipher_mode,
    const struct crypt_params_reencrypt *params)
{
#if USE_LUKS2_REENCRYPTION
    int r;
    crypt_reencrypt_info ri;
    struct volume_key *vks = NULL;
    uint32_t flags = params ? params->flags : 0;
    struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);

    /* short-circuit in reencryption metadata update and finish immediately. */
    if (flags & CRYPT_REENCRYPT_REPAIR_NEEDED)
        return reencrypt_repair_by_passphrase(cd, hdr, keyslot_old, keyslot_new, passphrase, passphrase_size);

    /* short-circuit in recovery and finish immediately. */
    if (flags & CRYPT_REENCRYPT_RECOVERY)
        return reencrypt_recovery_by_passphrase(cd, hdr, keyslot_old, keyslot_new, passphrase, passphrase_size);

    /* validate requested cipher against the new keyslot's key size */
    if (cipher && !crypt_cipher_wrapped_key(cipher, cipher_mode)) {
        r = crypt_keyslot_get_key_size(cd, keyslot_new);
        if (r < 0)
            return r;
        r = LUKS2_check_cipher(cd, r, cipher, cipher_mode);
        if (r < 0)
            return r;
    }

    /* metadata device write lock held for the whole init phase below */
    r = LUKS2_device_write_lock(cd, hdr, crypt_metadata_device(cd));
    if (r)
        return r;

    ri = LUKS2_reencrypt_status(hdr);
    if (ri == CRYPT_REENCRYPT_INVALID) {
        device_write_unlock(cd, crypt_metadata_device(cd));
        return -EINVAL;
    }

    /* initialize-only must not run on already initialized metadata */
    if ((ri > CRYPT_REENCRYPT_NONE) && (flags & CRYPT_REENCRYPT_INITIALIZE_ONLY)) {
        device_write_unlock(cd, crypt_metadata_device(cd));
        log_err(cd, _("LUKS2 reencryption already initialized in metadata."));
        return -EBUSY;
    }

    /* fresh init unless caller asked for resume-only */
    if (ri == CRYPT_REENCRYPT_NONE && !(flags & CRYPT_REENCRYPT_RESUME_ONLY)) {
        r = reencrypt_init(cd, name, hdr, passphrase, passphrase_size, keyslot_old, keyslot_new, cipher, cipher_mode, params, &vks);
        if (r < 0)
            log_err(cd, _("Failed to initialize LUKS2 reencryption in metadata."));
    } else if (ri > CRYPT_REENCRYPT_NONE) {
        log_dbg(cd, "LUKS2 reencryption already initialized.");
        r = 0;
    }

    device_write_unlock(cd, crypt_metadata_device(cd));

    if (r < 0 || (flags & CRYPT_REENCRYPT_INITIALIZE_ONLY))
        goto out;

    /* load runtime context (takes the reencryption lock on success) */
    r = reencrypt_load_by_passphrase(cd, name, passphrase, passphrase_size, keyslot_old, keyslot_new, &vks, params);
out:
    if (r < 0)
        crypt_drop_keyring_key(cd, vks);
    crypt_free_volume_key(vks);
    /* success maps to the keyslot holding the reencrypt metadata */
    return r < 0 ? r : LUKS2_find_keyslot(hdr, "reencrypt");
#else
    log_err(cd, _("This operation is not supported for this device type."));
    return -ENOTSUP;
#endif
}
 3145 
 3146 int crypt_reencrypt_init_by_keyring(struct crypt_device *cd,
 3147     const char *name,
 3148     const char *passphrase_description,
 3149     int keyslot_old,
 3150     int keyslot_new,
 3151     const char *cipher,
 3152     const char *cipher_mode,
 3153     const struct crypt_params_reencrypt *params)
 3154 {
 3155     int r;
 3156     char *passphrase;
 3157     size_t passphrase_size;
 3158 
 3159     if (onlyLUKS2mask(cd, CRYPT_REQUIREMENT_ONLINE_REENCRYPT) || !passphrase_description)
 3160         return -EINVAL;
 3161     if (params && (params->flags & CRYPT_REENCRYPT_INITIALIZE_ONLY) && (params->flags & CRYPT_REENCRYPT_RESUME_ONLY))
 3162         return -EINVAL;
 3163 
 3164     r = keyring_get_passphrase(passphrase_description, &passphrase, &passphrase_size);
 3165     if (r < 0) {
 3166         log_err(cd, _("Failed to read passphrase from keyring (error %d)."), r);
 3167         return -EINVAL;
 3168     }
 3169 
 3170     r = reencrypt_init_by_passphrase(cd, name, passphrase, passphrase_size, keyslot_old, keyslot_new, cipher, cipher_mode, params);
 3171 
 3172     crypt_safe_memzero(passphrase, passphrase_size);
 3173     free(passphrase);
 3174 
 3175     return r;
 3176 }
 3177 
 3178 int crypt_reencrypt_init_by_passphrase(struct crypt_device *cd,
 3179     const char *name,
 3180     const char *passphrase,
 3181     size_t passphrase_size,
 3182     int keyslot_old,
 3183     int keyslot_new,
 3184     const char *cipher,
 3185     const char *cipher_mode,
 3186     const struct crypt_params_reencrypt *params)
 3187 {
 3188     if (onlyLUKS2mask(cd, CRYPT_REQUIREMENT_ONLINE_REENCRYPT) || !passphrase)
 3189         return -EINVAL;
 3190     if (params && (params->flags & CRYPT_REENCRYPT_INITIALIZE_ONLY) && (params->flags & CRYPT_REENCRYPT_RESUME_ONLY))
 3191         return -EINVAL;
 3192 
 3193     return reencrypt_init_by_passphrase(cd, name, passphrase, passphrase_size, keyslot_old, keyslot_new, cipher, cipher_mode, params);
 3194 }
 3195 
 3196 #if USE_LUKS2_REENCRYPTION
/*
 * Process one reencryption hotzone: prepare segments, write resilience
 * metadata, read the hotzone via the old-key wrapper, decrypt and rewrite
 * it via the new-key wrapper, then commit the segment change.
 *
 * Return values distinguish failure severity: REENC_ERR before any data
 * write, REENC_ROLLBACK when the hotzone can still be restored from the
 * resilience data, REENC_FATAL after data/metadata may already disagree.
 * Statement order around the "metadata commit" points is crash-safety
 * critical — do not reorder.
 */
static reenc_status_t reencrypt_step(struct crypt_device *cd,
        struct luks2_hdr *hdr,
        struct luks2_reencrypt *rh,
        uint64_t device_size,
        bool online)
{
    int r;

    /* in memory only */
    r = reencrypt_make_segments(cd, hdr, rh, device_size);
    if (r)
        return REENC_ERR;

    r = reencrypt_assign_segments(cd, hdr, rh, 1, 0);
    if (r) {
        log_err(cd, _("Failed to set device segments for next reencryption hotzone."));
        return REENC_ERR;
    }

    if (online) {
        r = reencrypt_refresh_overlay_devices(cd, hdr, rh->overlay_name, rh->hotzone_name, rh->vks, rh->device_size, rh->flags);
        /* Teardown overlay devices with dm-error. None bio shall pass! */
        if (r != REENC_OK)
            return r;
    }

    log_dbg(cd, "Reencrypting chunk starting at offset: %" PRIu64 ", size :%" PRIu64 ".", rh->offset, rh->length);
    log_dbg(cd, "data_offset: %" PRIu64, crypt_get_data_offset(cd) << SECTOR_SHIFT);

    /* At offset 0 of encryption with data shift, the moved first segment
     * must be read from its temporary (moved) location — rebuild the old
     * segment wrapper for that offset. */
    if (!rh->offset && rh->mode == CRYPT_REENCRYPT_ENCRYPT && rh->data_shift &&
        rh->jobj_segment_moved) {
        crypt_storage_wrapper_destroy(rh->cw1);
        log_dbg(cd, "Reinitializing old segment storage wrapper for moved segment.");
        r = crypt_storage_wrapper_init(cd, &rh->cw1, crypt_data_device(cd),
                LUKS2_reencrypt_get_data_offset_moved(hdr),
                crypt_get_iv_offset(cd),
                reencrypt_get_sector_size_old(hdr),
                reencrypt_segment_cipher_old(hdr),
                crypt_volume_key_by_id(rh->vks, rh->digest_old),
                rh->wflags1);
        if (r) {
            log_err(cd, _("Failed to initialize old segment storage wrapper."));
            return REENC_ROLLBACK;
        }
    }

    /* read (still ciphertext under the old key) hotzone into the buffer */
    rh->read = crypt_storage_wrapper_read(rh->cw1, rh->offset, rh->reenc_buffer, rh->length);
    if (rh->read < 0) {
        /* severity normal */
        log_err(cd, _("Failed to read hotzone area starting at %" PRIu64 "."), rh->offset);
        return REENC_ROLLBACK;
    }

    /* metadata commit point */
    r = reencrypt_hotzone_protect_final(cd, hdr, rh, rh->reenc_buffer, rh->read);
    if (r < 0) {
        /* severity normal */
        log_err(cd, _("Failed to write reencryption resilience metadata."));
        return REENC_ROLLBACK;
    }

    r = crypt_storage_wrapper_decrypt(rh->cw1, rh->offset, rh->reenc_buffer, rh->read);
    if (r) {
        /* severity normal */
        log_err(cd, _("Decryption failed."));
        return REENC_ROLLBACK;
    }
    if (rh->read != crypt_storage_wrapper_encrypt_write(rh->cw2, rh->offset, rh->reenc_buffer, rh->read)) {
        /* severity fatal */
        log_err(cd, _("Failed to write hotzone area starting at %" PRIu64 "."), rh->offset);
        return REENC_FATAL;
    }

    /* ensure rewritten data hits stable storage before metadata commit */
    if (rh->rp.type != REENC_PROTECTION_NONE && crypt_storage_wrapper_datasync(rh->cw2)) {
        log_err(cd, _("Failed to sync data."));
        return REENC_FATAL;
    }

    /* metadata commit safe point */
    r = reencrypt_assign_segments(cd, hdr, rh, 0, rh->rp.type != REENC_PROTECTION_NONE);
    if (r) {
        /* severity fatal */
        log_err(cd, _("Failed to update metadata after current reencryption hotzone completed."));
        return REENC_FATAL;
    }

    if (online) {
        /* severity normal */
        log_dbg(cd, "Resuming device %s", rh->hotzone_name);
        r = dm_resume_device(cd, rh->hotzone_name, DM_RESUME_PRIVATE);
        if (r) {
            log_err(cd, _("Failed to resume device %s."), rh->hotzone_name);
            return REENC_ERR;
        }
    }

    return REENC_OK;
}
 3295 
 3296 static int reencrypt_erase_backup_segments(struct crypt_device *cd,
 3297         struct luks2_hdr *hdr)
 3298 {
 3299     int segment = LUKS2_get_segment_id_by_flag(hdr, "backup-previous");
 3300     if (segment >= 0) {
 3301         if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0))
 3302             return -EINVAL;
 3303         json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment);
 3304     }
 3305     segment = LUKS2_get_segment_id_by_flag(hdr, "backup-final");
 3306     if (segment >= 0) {
 3307         if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0))
 3308             return -EINVAL;
 3309         json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment);
 3310     }
 3311     segment = LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment");
 3312     if (segment >= 0) {
 3313         if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0))
 3314             return -EINVAL;
 3315         json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment);
 3316     }
 3317 
 3318     return 0;
 3319 }
 3320 
 3321 static int reencrypt_wipe_moved_segment(struct crypt_device *cd, struct luks2_reencrypt *rh)
 3322 {
 3323     int r = 0;
 3324     uint64_t offset, length;
 3325 
 3326     if (rh->jobj_segment_moved) {
 3327         offset = json_segment_get_offset(rh->jobj_segment_moved, 0);
 3328         length = json_segment_get_size(rh->jobj_segment_moved, 0);
 3329         log_dbg(cd, "Wiping %" PRIu64 " bytes of backup segment data at offset %" PRIu64,
 3330             length, offset);
 3331         r = crypt_wipe_device(cd, crypt_data_device(cd), CRYPT_WIPE_RANDOM,
 3332                 offset, length, 1024 * 1024, NULL, NULL);
 3333     }
 3334 
 3335     return r;
 3336 }
 3337 
/*
 * Finalize reencryption after a clean run (or after recovery):
 * optionally write the header, tear down the online device stack and,
 * if the whole data area was processed, remove reencryption-only
 * metadata (backup segments, old-digest keyslots, reencrypt keyslot).
 *
 * Returns 0 on success, -EINVAL on header write / keyslot removal failure.
 */
static int reencrypt_teardown_ok(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reencrypt *rh)
{
    int i, r;
    uint32_t dmt_flags;
    bool finished = !(rh->device_size > rh->progress);

    /* With no resilience protection the header is written here instead of per-hotzone. */
    if (rh->rp.type == REENC_PROTECTION_NONE &&
        LUKS2_hdr_write(cd, hdr)) {
        log_err(cd, _("Failed to write LUKS2 metadata."));
        return -EINVAL;
    }

    if (rh->online) {
        /* Reload the top-level device table and resume it (skip lockfs, no flush). */
        r = LUKS2_reload(cd, rh->device_name, rh->vks, rh->device_size, rh->flags);
        if (r)
            log_err(cd, _("Failed to reload device %s."), rh->device_name);
        if (!r) {
            r = dm_resume_device(cd, rh->device_name, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
            if (r)
                log_err(cd, _("Failed to resume device %s."), rh->device_name);
        }
        /* Helper devices are removed regardless of reload/resume result. */
        dm_remove_device(cd, rh->overlay_name, 0);
        dm_remove_device(cd, rh->hotzone_name, 0);

        /* Finished decryption: schedule deferred removal of the top-level device. */
        if (!r && finished && rh->mode == CRYPT_REENCRYPT_DECRYPT &&
            !dm_flags(cd, DM_LINEAR, &dmt_flags) && (dmt_flags & DM_DEFERRED_SUPPORTED))
            dm_remove_device(cd, rh->device_name, CRYPT_DEACTIVATE_DEFERRED);
    }

    if (finished) {
        /* Wipe leftover data in the moved-segment backup area, if any. */
        if (reencrypt_wipe_moved_segment(cd, rh))
            log_err(cd, _("Failed to wipe backup segment data."));
        if (reencrypt_get_data_offset_new(hdr) && LUKS2_set_keyslots_size(cd, hdr, reencrypt_get_data_offset_new(hdr)))
            log_dbg(cd, "Failed to set new keyslots area size.");
        /* Destroy keyslots still bound to the old volume key digest. */
        if (rh->digest_old >= 0 && rh->digest_new != rh->digest_old)
            for (i = 0; i < LUKS2_KEYSLOTS_MAX; i++)
                if (LUKS2_digest_by_keyslot(hdr, i) == rh->digest_old && crypt_keyslot_destroy(cd, i))
                    log_err(cd, _("Failed to remove unused (unbound) keyslot %d."), i);

        if (reencrypt_erase_backup_segments(cd, hdr))
            log_dbg(cd, "Failed to erase backup segments");

        if (reencrypt_update_flag(cd, 0, false))
            log_dbg(cd, "Failed to disable reencryption requirement flag.");

        /* metadata commit point also removing reencryption flag on-disk */
        if (crypt_keyslot_destroy(cd, rh->reenc_keyslot)) {
            log_err(cd, _("Failed to remove reencryption keyslot."));
            return -EINVAL;
        }
    }

    return 0;
}
 3392 
 3393 static void reencrypt_teardown_fatal(struct crypt_device *cd, struct luks2_reencrypt *rh)
 3394 {
 3395     log_err(cd, _("Fatal error while reencrypting chunk starting at %" PRIu64 ", %" PRIu64 " sectors long."),
 3396         (rh->offset >> SECTOR_SHIFT) + crypt_get_data_offset(cd), rh->length >> SECTOR_SHIFT);
 3397 
 3398     if (rh->online) {
 3399         log_err(cd, _("Online reencryption failed."));
 3400         if (dm_status_suspended(cd, rh->hotzone_name) > 0) {
 3401             log_dbg(cd, "Hotzone device %s suspended, replacing with dm-error.", rh->hotzone_name);
 3402             if (dm_error_device(cd, rh->hotzone_name)) {
 3403                 log_err(cd, _("Failed to replace suspended device %s with dm-error target."), rh->hotzone_name);
 3404                 log_err(cd, _("Do not resume the device unless replaced with error target manually."));
 3405             }
 3406         }
 3407     }
 3408 }
 3409 
 3410 static int reencrypt_teardown(struct crypt_device *cd, struct luks2_hdr *hdr,
 3411         struct luks2_reencrypt *rh, reenc_status_t rs, bool interrupted,
 3412         int (*progress)(uint64_t size, uint64_t offset, void *usrptr),
 3413         void *usrptr)
 3414 {
 3415     int r;
 3416 
 3417     switch (rs) {
 3418     case REENC_OK:
 3419         if (progress && !interrupted)
 3420             progress(rh->device_size, rh->progress, usrptr);
 3421         r = reencrypt_teardown_ok(cd, hdr, rh);
 3422         break;
 3423     case REENC_FATAL:
 3424         reencrypt_teardown_fatal(cd, rh);
 3425         /* fall-through */
 3426     default:
 3427         r = -EIO;
 3428     }
 3429 
 3430     /* this frees reencryption lock */
 3431     LUKS2_reencrypt_free(cd, rh);
 3432     crypt_set_luks2_reencrypt(cd, NULL);
 3433 
 3434     return r;
 3435 }
 3436 #endif
 3437 
/*
 * Execute the LUKS2 reencryption stored in the crypt_device context:
 * repeatedly reencrypt hotzones until the whole data area is processed,
 * the progress callback requests a stop, or an error occurs.
 *
 * @progress optional callback; a non-zero return value stops the loop
 *           gracefully after the current hotzone.
 * @usrptr   opaque pointer passed to @progress.
 *
 * Returns 0 on success, negative errno otherwise (-ENOTSUP when built
 * without LUKS2 reencryption support).
 */
int crypt_reencrypt_run(
    struct crypt_device *cd,
    int (*progress)(uint64_t size, uint64_t offset, void *usrptr),
    void *usrptr)
{
#if USE_LUKS2_REENCRYPTION
    int r;
    crypt_reencrypt_info ri;
    struct luks2_hdr *hdr;
    struct luks2_reencrypt *rh;
    reenc_status_t rs;
    bool quit = false;

    /* Device must be LUKS2 with the online-reencrypt requirement allowed. */
    if (onlyLUKS2mask(cd, CRYPT_REQUIREMENT_ONLINE_REENCRYPT))
        return -EINVAL;

    hdr = crypt_get_hdr(cd, CRYPT_LUKS2);

    ri = LUKS2_reencrypt_status(hdr);
    if (ri > CRYPT_REENCRYPT_CLEAN) {
        log_err(cd, _("Cannot proceed with reencryption. Unexpected reencryption status."));
        return -EINVAL;
    }

    /* The context (and reencryption lock) must have been prepared beforehand. */
    rh = crypt_get_luks2_reencrypt(cd);
    if (!rh || (!rh->reenc_lock && crypt_metadata_locking_enabled())) {
        log_err(cd, _("Missing or invalid reencrypt context."));
        return -EINVAL;
    }

    log_dbg(cd, "Resuming LUKS2 reencryption.");

    if (rh->online && reencrypt_init_device_stack(cd, rh)) {
        log_err(cd, _("Failed to initialize reencryption device stack."));
        return -EINVAL;
    }

    log_dbg(cd, "Progress %" PRIu64 ", device_size %" PRIu64, rh->progress, rh->device_size);

    rs = REENC_OK;

    /* update reencrypt keyslot protection parameters in memory only */
    if (!quit && (rh->device_size > rh->progress)) {
        r = reencrypt_keyslot_update(cd, rh);
        if (r < 0) {
            log_dbg(cd, "Keyslot update failed.");
            return reencrypt_teardown(cd, hdr, rh, REENC_ERR, quit, progress, usrptr);
        }
    }

    /* Main loop: one hotzone per iteration until finished, interrupted or failed. */
    while (!quit && (rh->device_size > rh->progress)) {
        rs = reencrypt_step(cd, hdr, rh, rh->device_size, rh->online);
        if (rs != REENC_OK)
            break;

        log_dbg(cd, "Progress %" PRIu64 ", device_size %" PRIu64, rh->progress, rh->device_size);
        /* Non-zero callback return value requests a graceful stop. */
        if (progress && progress(rh->device_size, rh->progress, usrptr))
            quit = true;

        r = reencrypt_context_update(cd, rh);
        if (r) {
            log_err(cd, _("Failed to update reencryption context."));
            rs = REENC_ERR;
            break;
        }

        log_dbg(cd, "Next reencryption offset will be %" PRIu64 " sectors.", rh->offset);
        log_dbg(cd, "Next reencryption chunk size will be %" PRIu64 " sectors).", rh->length);
    }

    r = reencrypt_teardown(cd, hdr, rh, rs, quit, progress, usrptr);
    return r;
#else
    log_err(cd, _("This operation is not supported for this device type."));
    return -ENOTSUP;
#endif
}
 3515 
/*
 * Legacy entry point: identical to crypt_reencrypt_run() but passes no
 * user pointer to the progress callback.
 */
int crypt_reencrypt(
    struct crypt_device *cd,
    int (*progress)(uint64_t size, uint64_t offset, void *usrptr))
{
    return crypt_reencrypt_run(cd, progress, NULL);
}
 3522 #if USE_LUKS2_REENCRYPTION
/*
 * Crash recovery: reload the reencryption context from metadata,
 * reconstruct the interrupted hotzone from resilience data, reassign
 * segments and persist a consistent header.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int reencrypt_recovery(struct crypt_device *cd,
        struct luks2_hdr *hdr,
        uint64_t device_size,
        struct volume_key *vks)
{
    int r;
    struct luks2_reencrypt *rh = NULL;

    r = reencrypt_load(cd, hdr, device_size, NULL, vks, &rh);
    if (r < 0) {
        log_err(cd, _("Failed to load LUKS2 reencryption context."));
        return r;
    }

    /* Re-do the possibly half-written hotzone using resilience metadata. */
    r = reencrypt_recover_segment(cd, hdr, rh, vks);
    if (r < 0)
        goto out;

    if ((r = reencrypt_assign_segments(cd, hdr, rh, 0, 0)))
        goto out;

    r = reencrypt_context_update(cd, rh);
    if (r) {
        log_err(cd, _("Failed to update reencryption context."));
        goto out;
    }

    /* Finalize and commit the repaired metadata on disk. */
    r = reencrypt_teardown_ok(cd, hdr, rh);
    if (!r)
        r = LUKS2_hdr_write(cd, hdr);
out:
    LUKS2_reencrypt_free(cd, rh);

    return r;
}
 3558 #endif
/*
 * Use only for calculation of the minimal data device size.
 * The real data offset is taken directly from the segments!
 */
 3563 int LUKS2_reencrypt_data_offset(struct luks2_hdr *hdr, bool blockwise)
 3564 {
 3565     crypt_reencrypt_info ri = LUKS2_reencrypt_status(hdr);
 3566     uint64_t data_offset = LUKS2_get_data_offset(hdr);
 3567 
 3568     if (ri == CRYPT_REENCRYPT_CLEAN && reencrypt_direction(hdr) == CRYPT_REENCRYPT_FORWARD)
 3569         data_offset += reencrypt_data_shift(hdr) >> SECTOR_SHIFT;
 3570 
 3571     return blockwise ? data_offset : data_offset << SECTOR_SHIFT;
 3572 }
 3573 
 3574 /* internal only */
 3575 int LUKS2_reencrypt_check_device_size(struct crypt_device *cd, struct luks2_hdr *hdr,
 3576     uint64_t check_size, uint64_t *dev_size, bool activation, bool dynamic)
 3577 {
 3578     int r;
 3579     uint64_t data_offset, real_size = 0;
 3580 
 3581     if (reencrypt_direction(hdr) == CRYPT_REENCRYPT_BACKWARD &&
 3582         (LUKS2_get_segment_by_flag(hdr, "backup-moved-segment") || dynamic))
 3583         check_size += reencrypt_data_shift(hdr);
 3584 
 3585     r = device_check_access(cd, crypt_data_device(cd), activation ? DEV_EXCL : DEV_OK);
 3586     if (r)
 3587         return r;
 3588 
 3589     data_offset = LUKS2_reencrypt_data_offset(hdr, false);
 3590 
 3591     r = device_check_size(cd, crypt_data_device(cd), data_offset, 1);
 3592     if (r)
 3593         return r;
 3594 
 3595     r = device_size(crypt_data_device(cd), &real_size);
 3596     if (r)
 3597         return r;
 3598 
 3599     log_dbg(cd, "Required minimal device size: %" PRIu64 " (%" PRIu64 " sectors)"
 3600             ", real device size: %" PRIu64 " (%" PRIu64 " sectors) "
 3601             "calculated device size: %" PRIu64 " (%" PRIu64 " sectors)",
 3602             check_size, check_size >> SECTOR_SHIFT, real_size, real_size >> SECTOR_SHIFT,
 3603             real_size - data_offset, (real_size - data_offset) >> SECTOR_SHIFT);
 3604 
 3605     if (real_size < data_offset || (check_size && real_size < check_size)) {
 3606         log_err(cd, _("Device %s is too small."), device_path(crypt_data_device(cd)));
 3607         return -EINVAL;
 3608     }
 3609 
 3610     *dev_size = real_size - data_offset;
 3611 
 3612     return 0;
 3613 }
 3614 #if USE_LUKS2_REENCRYPTION
/* returns keyslot number on success (>= 0) or negative errno otherwise */
 3616 int LUKS2_reencrypt_locked_recovery_by_passphrase(struct crypt_device *cd,
 3617     int keyslot_old,
 3618     int keyslot_new,
 3619     const char *passphrase,
 3620     size_t passphrase_size,
 3621     uint32_t flags __attribute__((unused)),
 3622     struct volume_key **vks)
 3623 {
 3624     uint64_t minimal_size, device_size;
 3625     int keyslot, r = -EINVAL;
 3626     struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
 3627     struct volume_key *vk = NULL, *_vks = NULL;
 3628 
 3629     log_dbg(cd, "Entering reencryption crash recovery.");
 3630 
 3631     if (LUKS2_get_data_size(hdr, &minimal_size, NULL))
 3632         return r;
 3633 
 3634     r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new,
 3635             passphrase, passphrase_size, &_vks);
 3636     if (r < 0)
 3637         goto out;
 3638     keyslot = r;
 3639 
 3640     if (crypt_use_keyring_for_vk(cd))
 3641         vk = _vks;
 3642 
 3643     while (vk) {
 3644         r = LUKS2_volume_key_load_in_keyring_by_digest(cd, hdr, vk, crypt_volume_key_get_id(vk));
 3645         if (r < 0)
 3646             goto out;
 3647         vk = crypt_volume_key_next(vk);
 3648     }
 3649 
 3650     if (LUKS2_reencrypt_check_device_size(cd, hdr, minimal_size, &device_size, true, false))
 3651         goto out;
 3652 
 3653     r = reencrypt_recovery(cd, hdr, device_size, _vks);
 3654 
 3655     if (!r && vks)
 3656         MOVE_REF(*vks, _vks);
 3657 out:
 3658     if (r < 0)
 3659         crypt_drop_keyring_key(cd, _vks);
 3660     crypt_free_volume_key(_vks);
 3661 
 3662     return r < 0 ? r : keyslot;
 3663 }
 3664 #endif
 3665 crypt_reencrypt_info LUKS2_reencrypt_get_params(struct luks2_hdr *hdr,
 3666     struct crypt_params_reencrypt *params)
 3667 {
 3668     crypt_reencrypt_info ri;
 3669     int digest;
 3670     uint32_t version;
 3671 
 3672     ri = LUKS2_reencrypt_status(hdr);
 3673     if (ri == CRYPT_REENCRYPT_NONE || ri == CRYPT_REENCRYPT_INVALID || !params)
 3674         return ri;
 3675 
 3676     digest = LUKS2_digest_by_keyslot(hdr, LUKS2_find_keyslot(hdr, "reencrypt"));
 3677     if (digest < 0 && digest != -ENOENT)
 3678         return CRYPT_REENCRYPT_INVALID;
 3679 
 3680     /*
 3681      * In case there's an old "online-reencrypt" requirement or reencryption
 3682      * keyslot digest is missing inform caller reencryption metadata requires repair.
 3683      */
 3684     if (!LUKS2_config_get_reencrypt_version(hdr, &version) &&
 3685         (version < 2 || digest == -ENOENT)) {
 3686         params->flags |= CRYPT_REENCRYPT_REPAIR_NEEDED;
 3687         return ri;
 3688     }
 3689 
 3690     params->mode = reencrypt_mode(hdr);
 3691     params->direction = reencrypt_direction(hdr);
 3692     params->resilience = reencrypt_resilience_type(hdr);
 3693     params->hash = reencrypt_resilience_hash(hdr);
 3694     params->data_shift = reencrypt_data_shift(hdr) >> SECTOR_SHIFT;
 3695     params->max_hotzone_size = 0;
 3696     if (LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0)
 3697         params->flags |= CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT;
 3698 
 3699     return ri;
 3700 }