Add support for reencryption of initial device part.

It's useful to be able to reencrypt only the initial part of the device.
For example, with golden image reencryption it may be useful
to reencrypt only the first X bytes of the device because we know
the rest of the device is empty.
This commit is contained in:
Ondrej Kozina
2019-05-16 14:50:00 +02:00
parent af6c321395
commit 31cd41bfe4
6 changed files with 170 additions and 85 deletions

View File

@@ -175,6 +175,7 @@ char *crypt_get_base_device(const char *dev_path);
uint64_t crypt_dev_partition_offset(const char *dev_path); uint64_t crypt_dev_partition_offset(const char *dev_path);
int lookup_by_disk_id(const char *dm_uuid); int lookup_by_disk_id(const char *dm_uuid);
int lookup_by_sysfs_uuid_field(const char *dm_uuid, size_t max_len); int lookup_by_sysfs_uuid_field(const char *dm_uuid, size_t max_len);
int crypt_uuid_cmp(const char *dm_uuid, const char *hdr_uuid);
size_t crypt_getpagesize(void); size_t crypt_getpagesize(void);
unsigned crypt_cpusonline(void); unsigned crypt_cpusonline(void);

View File

@@ -2145,6 +2145,7 @@ struct crypt_params_reencrypt {
const char *hash; /**< Used hash for "checksum" resilience type, ignored otherwise. */ const char *hash; /**< Used hash for "checksum" resilience type, ignored otherwise. */
uint64_t data_shift; /**< Used in "shift" mode, must be non-zero, immutable after first init. */ uint64_t data_shift; /**< Used in "shift" mode, must be non-zero, immutable after first init. */
uint64_t max_hotzone_size; /**< Hotzone size for "none" mode; maximum hotzone size for "checksum" mode. */ uint64_t max_hotzone_size; /**< Hotzone size for "none" mode; maximum hotzone size for "checksum" mode. */
uint64_t device_size; /**< Reencrypt only initial part of the data device. */
const struct crypt_params_luks2 *luks2; /**< LUKS2 parameters for the final reencryption volume.*/ const struct crypt_params_luks2 *luks2; /**< LUKS2 parameters for the final reencryption volume.*/
uint32_t flags; /**< Reencryption flags. */ uint32_t flags; /**< Reencryption flags. */
}; };

View File

@@ -153,6 +153,7 @@ struct luks2_reenc_context {
size_t alignment; size_t alignment;
uint64_t device_size; uint64_t device_size;
bool online; bool online;
bool fixed_length;
crypt_reencrypt_direction_info direction; crypt_reencrypt_direction_info direction;
enum { REENCRYPT = 0, ENCRYPT, DECRYPT } type; enum { REENCRYPT = 0, ENCRYPT, DECRYPT } type;
@@ -427,6 +428,7 @@ const char *LUKS2_reencrypt_protection_type(struct luks2_hdr *hdr);
const char *LUKS2_reencrypt_protection_hash(struct luks2_hdr *hdr); const char *LUKS2_reencrypt_protection_hash(struct luks2_hdr *hdr);
uint64_t LUKS2_reencrypt_data_shift(struct luks2_hdr *hdr); uint64_t LUKS2_reencrypt_data_shift(struct luks2_hdr *hdr);
const char *LUKS2_reencrypt_mode(struct luks2_hdr *hdr); const char *LUKS2_reencrypt_mode(struct luks2_hdr *hdr);
int LUKS2_reencrypt_direction(struct luks2_hdr *hdr, crypt_reencrypt_direction_info *di);
/* /*
* Generic LUKS2 digest * Generic LUKS2 digest
@@ -503,6 +505,7 @@ int LUKS2_deactivate(struct crypt_device *cd,
int LUKS2_reload(struct crypt_device *cd, int LUKS2_reload(struct crypt_device *cd,
const char *name, const char *name,
struct volume_key *vks, struct volume_key *vks,
uint64_t device_size,
uint32_t flags); uint32_t flags);
int LUKS2_keyslot_luks2_format(struct crypt_device *cd, int LUKS2_keyslot_luks2_format(struct crypt_device *cd,
@@ -533,7 +536,7 @@ int LUKS2_wipe_header_areas(struct crypt_device *cd,
struct luks2_hdr *hdr); struct luks2_hdr *hdr);
uint64_t LUKS2_get_data_offset(struct luks2_hdr *hdr); uint64_t LUKS2_get_data_offset(struct luks2_hdr *hdr);
int LUKS2_get_data_size(struct luks2_hdr *hdr, uint64_t *size); int LUKS2_get_data_size(struct luks2_hdr *hdr, uint64_t *size, bool *dynamic);
int LUKS2_get_sector_size(struct luks2_hdr *hdr); int LUKS2_get_sector_size(struct luks2_hdr *hdr);
const char *LUKS2_get_cipher(struct luks2_hdr *hdr, int segment); const char *LUKS2_get_cipher(struct luks2_hdr *hdr, int segment);
const char *LUKS2_get_integrity(struct luks2_hdr *hdr, int segment); const char *LUKS2_get_integrity(struct luks2_hdr *hdr, int segment);
@@ -618,6 +621,6 @@ int reenc_erase_backup_segments(struct crypt_device *cd, struct luks2_hdr *hdr);
int crypt_reencrypt_lock(struct crypt_device *cd, const char *uuid, struct crypt_lock_handle **reencrypt_lock); int crypt_reencrypt_lock(struct crypt_device *cd, const char *uuid, struct crypt_lock_handle **reencrypt_lock);
void crypt_reencrypt_unlock(struct crypt_device *cd, struct crypt_lock_handle *reencrypt_lock); void crypt_reencrypt_unlock(struct crypt_device *cd, struct crypt_lock_handle *reencrypt_lock);
int luks2_check_device_size(struct crypt_device *cd, struct luks2_hdr *hdr, uint64_t *device_size, bool activation); int luks2_check_device_size(struct crypt_device *cd, struct luks2_hdr *hdr, uint64_t check_size, uint64_t *device_size, bool activation);
#endif #endif

View File

@@ -1702,11 +1702,17 @@ int LUKS2_hdr_dump(struct crypt_device *cd, struct luks2_hdr *hdr)
return 0; return 0;
} }
int LUKS2_get_data_size(struct luks2_hdr *hdr, uint64_t *size) int LUKS2_get_data_size(struct luks2_hdr *hdr, uint64_t *size, bool *dynamic)
{ {
crypt_reencrypt_direction_info di;
int sector_size;
json_object *jobj_segments, *jobj_size; json_object *jobj_segments, *jobj_size;
uint64_t tmp = 0; uint64_t tmp = 0;
int sector_size;
/* for reencryption with data shift and moved segment we have to add datashift to minimal required size */
if (!LUKS2_reencrypt_direction(hdr, &di) && (di == CRYPT_REENCRYPT_BACKWARD) &&
LUKS2_get_segment_by_flag(hdr, "backup-moved-segment"))
tmp += LUKS2_reencrypt_data_shift(hdr);
if (!size || !json_object_object_get_ex(hdr->jobj, "segments", &jobj_segments)) if (!size || !json_object_object_get_ex(hdr->jobj, "segments", &jobj_segments))
return -EINVAL; return -EINVAL;
@@ -1724,17 +1730,21 @@ int LUKS2_get_data_size(struct luks2_hdr *hdr, uint64_t *size)
*size = tmp + (sector_size > 0 ? sector_size : SECTOR_SIZE); *size = tmp + (sector_size > 0 ? sector_size : SECTOR_SIZE);
else else
*size = 0; *size = 0;
if (dynamic)
*dynamic = true;
return 0; return 0;
} }
tmp += json_object_get_uint64(jobj_size); tmp += json_object_get_uint64(jobj_size);
} }
/* impossible, segments with size set to 0 are illegal */ /* impossible, real device size must not be zero */
if (!tmp) if (!tmp)
return -EINVAL; return -EINVAL;
*size = tmp; *size = tmp;
if (dynamic)
*dynamic = false;
return 0; return 0;
} }
@@ -2024,12 +2034,14 @@ static int _reload_custom_multi(struct crypt_device *cd,
const char *name, const char *name,
struct volume_key *vks, struct volume_key *vks,
json_object *jobj_segments, json_object *jobj_segments,
uint64_t device_size,
uint32_t flags) uint32_t flags)
{ {
int r, count = json_segments_count(jobj_segments); int r, count = json_segments_count(jobj_segments);
struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2); struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
struct crypt_dm_active_device dmd = { struct crypt_dm_active_device dmd = {
.uuid = crypt_get_uuid(cd), .uuid = crypt_get_uuid(cd),
.size = device_size >> SECTOR_SHIFT
}; };
if (count < 0) if (count < 0)
@@ -2062,13 +2074,14 @@ static int _reload_custom_multi(struct crypt_device *cd,
int LUKS2_reload(struct crypt_device *cd, int LUKS2_reload(struct crypt_device *cd,
const char *name, const char *name,
struct volume_key *vks, struct volume_key *vks,
uint64_t device_size,
uint32_t flags) uint32_t flags)
{ {
if (crypt_get_integrity_tag_size(cd)) if (crypt_get_integrity_tag_size(cd))
return -ENOTSUP; return -ENOTSUP;
return _reload_custom_multi(cd, name, vks, return _reload_custom_multi(cd, name, vks,
LUKS2_get_segments_jobj(crypt_get_hdr(cd, CRYPT_LUKS2)), flags); LUKS2_get_segments_jobj(crypt_get_hdr(cd, CRYPT_LUKS2)), device_size, flags);
} }
int LUKS2_activate_multi(struct crypt_device *cd, int LUKS2_activate_multi(struct crypt_device *cd,

View File

@@ -312,9 +312,9 @@ static json_object *_reenc_segments_forward_after(struct crypt_device *cd,
uint64_t data_offset) uint64_t data_offset)
{ {
int reenc_seg; int reenc_seg;
json_object *jobj_new_seg_after, *jobj_old_seg, json_object *jobj_new_seg_after, *jobj_old_seg, *jobj_old_seg_copy = NULL,
*jobj_segs_after = json_object_new_object(); *jobj_segs_after = json_object_new_object();
uint64_t tmp = rh->offset + rh->length; uint64_t fixed_length = rh->offset + rh->length;
if (!rh->jobj_segs_pre || !jobj_segs_after) if (!rh->jobj_segs_pre || !jobj_segs_after)
goto err; goto err;
@@ -329,14 +329,24 @@ static json_object *_reenc_segments_forward_after(struct crypt_device *cd,
* if there's no old segment after reencryption, we're done. * if there's no old segment after reencryption, we're done.
* Set size to 'dynamic' again. * Set size to 'dynamic' again.
*/ */
jobj_new_seg_after = LUKS2_create_segment_new(cd, hdr, rh, data_offset, 0, 0, jobj_old_seg ? &tmp : NULL); jobj_new_seg_after = LUKS2_create_segment_new(cd, hdr, rh, data_offset, 0, 0, jobj_old_seg ? &fixed_length : NULL);
if (!jobj_new_seg_after) if (!jobj_new_seg_after)
goto err; goto err;
JSON_DBG(cd, jobj_new_seg_after, "jobj_new_seg_after:"); JSON_DBG(cd, jobj_new_seg_after, "jobj_new_seg_after:");
json_object_object_add_by_uint(jobj_segs_after, 0, jobj_new_seg_after); json_object_object_add_by_uint(jobj_segs_after, 0, jobj_new_seg_after);
if (jobj_old_seg) if (jobj_old_seg) {
json_object_object_add_by_uint(jobj_segs_after, 1, json_object_get(jobj_old_seg)); if (rh->fixed_length) {
if (json_object_copy(jobj_old_seg, &jobj_old_seg_copy))
goto err;
jobj_old_seg = jobj_old_seg_copy;
fixed_length = rh->device_size - fixed_length;
json_object_object_add(jobj_old_seg, "size", json_object_new_uint64(fixed_length));
JSON_DBG(cd, jobj_old_seg, "fixed size jobj_old_seg:");
} else
json_object_get(jobj_old_seg);
json_object_object_add_by_uint(jobj_segs_after, 1, jobj_old_seg);
}
return jobj_segs_after; return jobj_segs_after;
err: err:
@@ -350,6 +360,8 @@ static json_object *_reenc_segments_backward_after(struct crypt_device *cd,
uint64_t data_offset) uint64_t data_offset)
{ {
int reenc_seg; int reenc_seg;
uint64_t fixed_length;
json_object *jobj_new_seg_after, *jobj_old_seg, json_object *jobj_new_seg_after, *jobj_old_seg,
*jobj_segs_after = json_object_new_object(); *jobj_segs_after = json_object_new_object();
@@ -363,7 +375,11 @@ static json_object *_reenc_segments_backward_after(struct crypt_device *cd,
jobj_old_seg = json_segments_get_segment(rh->jobj_segs_pre, reenc_seg - 1); jobj_old_seg = json_segments_get_segment(rh->jobj_segs_pre, reenc_seg - 1);
if (jobj_old_seg) if (jobj_old_seg)
json_object_object_add_by_uint(jobj_segs_after, reenc_seg - 1, json_object_get(jobj_old_seg)); json_object_object_add_by_uint(jobj_segs_after, reenc_seg - 1, json_object_get(jobj_old_seg));
jobj_new_seg_after = LUKS2_create_segment_new(cd, hdr, rh, data_offset, rh->offset, rh->offset, NULL); if (rh->fixed_length) {
fixed_length = rh->device_size - data_offset - rh->offset + rh->data_shift;
jobj_new_seg_after = LUKS2_create_segment_new(cd, hdr, rh, data_offset, rh->offset, rh->offset, &fixed_length);
} else
jobj_new_seg_after = LUKS2_create_segment_new(cd, hdr, rh, data_offset, rh->offset, rh->offset, NULL);
if (!jobj_new_seg_after) if (!jobj_new_seg_after)
goto err; goto err;
JSON_DBG(cd, jobj_new_seg_after, "jobj_new_seg_after:"); JSON_DBG(cd, jobj_new_seg_after, "jobj_new_seg_after:");
@@ -431,8 +447,8 @@ static json_object *_reenc_segments_forward_pre(struct crypt_device *cd,
uint64_t data_offset) uint64_t data_offset)
{ {
json_object *jobj_segs_pre, *jobj_reenc_seg, *jobj_old_seg, *jobj_new_seg; json_object *jobj_segs_pre, *jobj_reenc_seg, *jobj_old_seg, *jobj_new_seg;
uint64_t fixed_length, tmp = rh->offset + rh->length;
unsigned int sg = 0; unsigned int sg = 0;
uint64_t tmp = rh->offset + rh->length;
jobj_segs_pre = json_object_new_object(); jobj_segs_pre = json_object_new_object();
if (!jobj_segs_pre) if (!jobj_segs_pre)
@@ -455,7 +471,8 @@ static json_object *_reenc_segments_forward_pre(struct crypt_device *cd,
json_object_object_add_by_uint(jobj_segs_pre, sg++, jobj_reenc_seg); json_object_object_add_by_uint(jobj_segs_pre, sg++, jobj_reenc_seg);
if (tmp < device_size) { if (tmp < device_size) {
jobj_old_seg = LUKS2_create_segment_old(cd, hdr, rh, data_offset + rh->data_shift, rh->offset + rh->length, NULL); fixed_length = device_size - tmp;
jobj_old_seg = LUKS2_create_segment_old(cd, hdr, rh, data_offset + rh->data_shift, rh->offset + rh->length, rh->fixed_length ? &fixed_length : NULL);
if (!jobj_old_seg) if (!jobj_old_seg)
goto err; goto err;
JSON_DBG(cd, jobj_old_seg, "jobj_old_seg:"); JSON_DBG(cd, jobj_old_seg, "jobj_old_seg:");
@@ -477,7 +494,7 @@ static json_object *_reenc_segments_backward_pre(struct crypt_device *cd,
json_object *jobj_reenc_seg, *jobj_new_seg, *jobj_old_seg = NULL, json_object *jobj_reenc_seg, *jobj_new_seg, *jobj_old_seg = NULL,
*jobj_segs_pre = json_object_new_object(); *jobj_segs_pre = json_object_new_object();
int sg = 0; int sg = 0;
uint64_t tmp = rh->offset + rh->length; uint64_t fixed_length, tmp = rh->offset + rh->length;
if (!jobj_segs_pre) if (!jobj_segs_pre)
return NULL; return NULL;
@@ -500,7 +517,8 @@ static json_object *_reenc_segments_backward_pre(struct crypt_device *cd,
json_object_object_add_by_uint(jobj_segs_pre, sg++, jobj_reenc_seg); json_object_object_add_by_uint(jobj_segs_pre, sg++, jobj_reenc_seg);
if (tmp < device_size) { if (tmp < device_size) {
jobj_new_seg = LUKS2_create_segment_new(cd, hdr, rh, data_offset, rh->offset + rh->length, rh->offset + rh->length, NULL); fixed_length = device_size - tmp;
jobj_new_seg = LUKS2_create_segment_new(cd, hdr, rh, data_offset, rh->offset + rh->length, rh->offset + rh->length, rh->fixed_length ? &fixed_length : NULL);
if (!jobj_new_seg) if (!jobj_new_seg)
goto err; goto err;
JSON_DBG(cd, jobj_new_seg, "jobj_new_seg:"); JSON_DBG(cd, jobj_new_seg, "jobj_new_seg:");
@@ -605,7 +623,7 @@ const char *LUKS2_reencrypt_mode(struct luks2_hdr *hdr)
return json_object_get_string(jobj_mode); return json_object_get_string(jobj_mode);
} }
static int LUKS2_reencrypt_direction(struct luks2_hdr *hdr, crypt_reencrypt_direction_info *di) int LUKS2_reencrypt_direction(struct luks2_hdr *hdr, crypt_reencrypt_direction_info *di)
{ {
const char *value; const char *value;
json_object *jobj_keyslot, *jobj_mode; json_object *jobj_keyslot, *jobj_mode;
@@ -740,8 +758,6 @@ static int _offset_backward_moved(struct luks2_hdr *hdr, json_object *jobj_segme
/* all active linear segments length */ /* all active linear segments length */
if (linear_length) { if (linear_length) {
log_dbg(NULL, "linear length: %" PRIu64, linear_length);
/* this must not happen. first linear segment is exaclty data offset long */
if (linear_length < data_shift) if (linear_length < data_shift)
return -EINVAL; return -EINVAL;
tmp = linear_length - data_shift; tmp = linear_length - data_shift;
@@ -920,6 +936,13 @@ static int _reenc_load(struct crypt_device *cd, struct luks2_hdr *hdr, struct lu
return -EINVAL; return -EINVAL;
} }
if (params->device_size) {
log_dbg(cd, "Switching reencryption to fixed size mode.");
device_size = params->device_size;
rh->fixed_length = true;
} else
rh->fixed_length = false;
rh->length = LUKS2_get_reencrypt_length(hdr, rh, area_length, params->max_hotzone_size); rh->length = LUKS2_get_reencrypt_length(hdr, rh, area_length, params->max_hotzone_size);
if (LUKS2_get_reencrypt_offset(hdr, rh->direction, device_size, &rh->length, &rh->offset)) { if (LUKS2_get_reencrypt_offset(hdr, rh->direction, device_size, &rh->length, &rh->offset)) {
log_err(cd, _("Failed to get reencryption offset.")); log_err(cd, _("Failed to get reencryption offset."));
@@ -978,17 +1001,17 @@ static int _LUKS2_reenc_load(struct crypt_device *cd,
int r; int r;
const struct crypt_params_reencrypt hdr_reenc_params = { const struct crypt_params_reencrypt hdr_reenc_params = {
.resilience = LUKS2_reencrypt_protection_type(hdr), .resilience = LUKS2_reencrypt_protection_type(hdr),
.hash = LUKS2_reencrypt_protection_hash(hdr) .hash = LUKS2_reencrypt_protection_hash(hdr),
.device_size = params ? params->device_size : 0
}; };
struct luks2_reenc_context *tmp = calloc(1, sizeof (*tmp)); struct luks2_reenc_context *tmp = calloc(1, sizeof (*tmp));
if (!tmp) if (!tmp)
return -ENOMEM; return -ENOMEM;
if (!hdr_reenc_params.resilience) { r = -EINVAL;
r = -EINVAL; if (!hdr_reenc_params.resilience)
goto err; goto err;
}
/* skip context update if data shift is detected in header */ /* skip context update if data shift is detected in header */
if (!strcmp(hdr_reenc_params.resilience, "datashift")) if (!strcmp(hdr_reenc_params.resilience, "datashift"))
@@ -996,7 +1019,7 @@ static int _LUKS2_reenc_load(struct crypt_device *cd,
log_dbg(cd, "Initializing reencryption context (%s).", params ? "update" : "load"); log_dbg(cd, "Initializing reencryption context (%s).", params ? "update" : "load");
if (!params) if (!params || !params->resilience)
params = &hdr_reenc_params; params = &hdr_reenc_params;
r = _reenc_load(cd, hdr, tmp, device_size, params); r = _reenc_load(cd, hdr, tmp, device_size, params);
@@ -1065,9 +1088,18 @@ static int _load_segments_crashed(struct crypt_device *cd,
static int LUKS2_reenc_load_crashed(struct crypt_device *cd, static int LUKS2_reenc_load_crashed(struct crypt_device *cd,
struct luks2_hdr *hdr, uint64_t device_size, struct luks2_reenc_context **rh) struct luks2_hdr *hdr, uint64_t device_size, struct luks2_reenc_context **rh)
{ {
bool dynamic;
uint64_t minimal_size;
int r, reenc_seg; int r, reenc_seg;
struct crypt_params_reencrypt params = {};
r = _LUKS2_reenc_load(cd, hdr, device_size, rh, NULL); if (LUKS2_get_data_size(hdr, &minimal_size, &dynamic))
return -EINVAL;
if (!dynamic)
params.device_size = minimal_size;
r = _LUKS2_reenc_load(cd, hdr, device_size, rh, &params);
if (!r) { if (!r) {
reenc_seg = json_segments_segment_in_reencrypt(LUKS2_get_segments_jobj(hdr)); reenc_seg = json_segments_segment_in_reencrypt(LUKS2_get_segments_jobj(hdr));
@@ -1775,7 +1807,6 @@ out:
static int reenc_replace_device(struct crypt_device *cd, const char *target, const char *source, uint32_t flags) static int reenc_replace_device(struct crypt_device *cd, const char *target, const char *source, uint32_t flags)
{ {
int r, exists = 1; int r, exists = 1;
uint64_t size = 0;
struct crypt_dm_active_device dmd_source, dmd_target = {}; struct crypt_dm_active_device dmd_source, dmd_target = {};
uint32_t dmflags = DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH; uint32_t dmflags = DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH;
@@ -1796,23 +1827,19 @@ static int reenc_replace_device(struct crypt_device *cd, const char *target, con
if (r < 0) if (r < 0)
return r; return r;
if (exists && ((r = dm_query_device(cd, target, 0, &dmd_target)) < 0))
goto err;
dmd_source.flags |= flags; dmd_source.flags |= flags;
dmd_source.uuid = crypt_get_uuid(cd); dmd_source.uuid = crypt_get_uuid(cd);
r = device_block_adjust(cd, crypt_data_device(cd), DEV_OK,
crypt_get_data_offset(cd), &size, &dmd_source.flags);
if (r)
goto err;
if (exists && size != dmd_source.size) {
log_err(cd, _("Source and target device sizes don't match. Source %" PRIu64 ", target: %" PRIu64 "."),
dmd_source.size, size);
r = -EINVAL;
goto err;
}
if (exists) { if (exists) {
if (dmd_target.size != dmd_source.size) {
log_err(cd, _("Source and target device sizes don't match. Source %" PRIu64 ", target: %" PRIu64 "."),
dmd_source.size, dmd_target.size);
r = -EINVAL;
goto err;
}
r = dm_reload_device(cd, target, &dmd_source, 0, 0); r = dm_reload_device(cd, target, &dmd_source, 0, 0);
if (!r) { if (!r) {
log_dbg(cd, "Resuming device %s", target); log_dbg(cd, "Resuming device %s", target);
@@ -1874,14 +1901,15 @@ out:
return r; return r;
} }
static int reenc_activate_hotzone_device(struct crypt_device *cd, const char *name, uint32_t flags) static int reenc_activate_hotzone_device(struct crypt_device *cd, const char *name, uint64_t device_size, uint32_t flags)
{ {
int r; int r;
uint64_t new_offset = LUKS2_reencrypt_get_data_offset_new(crypt_get_hdr(cd, CRYPT_LUKS2)) >> SECTOR_SHIFT; uint64_t new_offset = LUKS2_reencrypt_get_data_offset_new(crypt_get_hdr(cd, CRYPT_LUKS2)) >> SECTOR_SHIFT;
struct crypt_dm_active_device dmd = { struct crypt_dm_active_device dmd = {
.flags = flags, .flags = flags,
.uuid = crypt_get_uuid(cd) .uuid = crypt_get_uuid(cd),
.size = device_size >> SECTOR_SHIFT
}; };
log_dbg(cd, "Activating hotzone device %s.", name); log_dbg(cd, "Activating hotzone device %s.", name);
@@ -1904,16 +1932,14 @@ err:
/* pass reenc context instead? */ /* pass reenc context instead? */
static int reenc_init_helper_devices(struct crypt_device *cd, static int reenc_init_helper_devices(struct crypt_device *cd,
const char *name, const struct luks2_reenc_context *rh)
const char *hotzone,
const char *overlay)
{ {
int r; int r;
/* Activate hotzone device 1:1 linear mapping to data_device */ /* Activate hotzone device 1:1 linear mapping to data_device */
r = reenc_activate_hotzone_device(cd, hotzone, CRYPT_ACTIVATE_PRIVATE); r = reenc_activate_hotzone_device(cd, rh->hotzone_name, rh->device_size, CRYPT_ACTIVATE_PRIVATE);
if (r) { if (r) {
log_err(cd, _("Failed to activate hotzone device %s."), hotzone); log_err(cd, _("Failed to activate hotzone device %s."), rh->hotzone_name);
return r; return r;
} }
@@ -1928,16 +1954,16 @@ static int reenc_init_helper_devices(struct crypt_device *cd,
* *
* TODO: in crypt_activate_by* * TODO: in crypt_activate_by*
*/ */
r = reenc_replace_device(cd, overlay, name, CRYPT_ACTIVATE_PRIVATE); r = reenc_replace_device(cd, rh->overlay_name, rh->device_name, CRYPT_ACTIVATE_PRIVATE);
if (r) { if (r) {
log_err(cd, _("Failed to activate overlay device %s with actual origin table."), overlay); log_err(cd, _("Failed to activate overlay device %s with actual origin table."), rh->overlay_name);
goto err; goto err;
} }
/* swap origin mapping to overlay device */ /* swap origin mapping to overlay device */
r = reenc_swap_backing_device(cd, name, overlay, CRYPT_ACTIVATE_KEYRING_KEY); r = reenc_swap_backing_device(cd, rh->device_name, rh->overlay_name, CRYPT_ACTIVATE_KEYRING_KEY);
if (r) { if (r) {
log_err(cd, _("Failed to load new maping for device %s."), name); log_err(cd, _("Failed to load new maping for device %s."), rh->device_name);
goto err; goto err;
} }
@@ -1949,8 +1975,8 @@ static int reenc_init_helper_devices(struct crypt_device *cd,
return 0; return 0;
err: err:
/* TODO: force error helper devices on error path */ /* TODO: force error helper devices on error path */
dm_remove_device(cd, overlay, 0); dm_remove_device(cd, rh->overlay_name, 0);
dm_remove_device(cd, hotzone, 0); dm_remove_device(cd, rh->hotzone_name, 0);
return r; return r;
} }
@@ -2138,7 +2164,7 @@ static int _create_backup_segments(struct crypt_device *cd,
if (digest_old >= 0) if (digest_old >= 0)
json_object_copy(LUKS2_get_segment_jobj(hdr, CRYPT_DEFAULT_SEGMENT), &jobj_segment_old); json_object_copy(LUKS2_get_segment_jobj(hdr, CRYPT_DEFAULT_SEGMENT), &jobj_segment_old);
else if (!strcmp(params->mode, "encrypt")) { else if (!strcmp(params->mode, "encrypt")) {
r = LUKS2_get_data_size(hdr, &tmp); r = LUKS2_get_data_size(hdr, &tmp, NULL);
if (r) if (r)
goto err; goto err;
jobj_segment_old = json_segment_create_linear(0, tmp ? &tmp : NULL, 0); jobj_segment_old = json_segment_create_linear(0, tmp ? &tmp : NULL, 0);
@@ -2599,11 +2625,19 @@ static int _reencrypt_load(struct crypt_device *cd,
struct volume_key **vks, struct volume_key **vks,
const struct crypt_params_reencrypt *params) const struct crypt_params_reencrypt *params)
{ {
int r; int r, old_ss, new_ss;
struct luks2_hdr *hdr; struct luks2_hdr *hdr;
struct crypt_lock_handle *reencrypt_lock; struct crypt_lock_handle *reencrypt_lock;
struct luks2_reenc_context *rh; struct luks2_reenc_context *rh;
uint64_t device_size; struct crypt_dm_active_device dmd;
uint64_t minimal_size, device_size, mapping_size = 0, required_size = 0;
bool dynamic;
struct crypt_params_reencrypt rparams = {};
if (params) {
rparams = *params;
required_size = params->device_size;
}
log_dbg(cd, "Loading LUKS2 reencryption context."); log_dbg(cd, "Loading LUKS2 reencryption context.");
@@ -2622,13 +2656,60 @@ static int _reencrypt_load(struct crypt_device *cd,
/* From now on we hold reencryption lock */ /* From now on we hold reencryption lock */
if (LUKS2_get_data_size(hdr, &minimal_size, &dynamic))
return -EINVAL;
/* some configurations provides fixed device size */ /* some configurations provides fixed device size */
if ((r = luks2_check_device_size(cd, hdr, &device_size, false))) { if ((r = luks2_check_device_size(cd, hdr, minimal_size >> SECTOR_SHIFT, &device_size, false))) {
r = -EINVAL; r = -EINVAL;
goto err; goto err;
} }
r = LUKS2_reenc_load(cd, hdr, device_size, params, &rh); old_ss = LUKS2_reencrypt_get_sector_size_old(hdr);
new_ss = LUKS2_reencrypt_get_sector_size_new(hdr);
if (name) {
r = dm_query_device(cd, name, DM_ACTIVE_UUID, &dmd);
if (r < 0) {
log_err(cd, _("Failed to read device info from %s."), name);
r = -EINVAL;
goto err;
}
if (crypt_uuid_cmp(dmd.uuid, hdr->uuid)) {
log_dbg(cd, "LUKS device header uuid: %s mismatches DM returned uuid %s",
hdr->uuid, dmd.uuid);
r = -EINVAL;
}
dm_targets_free(cd, &dmd);
free(CONST_CAST(void*)dmd.uuid);
if (r < 0)
goto err;
mapping_size = dmd.size << SECTOR_SHIFT;
}
r = -EINVAL;
if (required_size && mapping_size && (required_size != mapping_size)) {
log_err(cd, _("Active device size and requested reencryption size don't match."));
goto err;
}
if (mapping_size)
required_size = mapping_size;
if (required_size) {
/* TODO: Add support for chaning fixed minimal size in reencryption mda where possible */
if ((minimal_size && (required_size < minimal_size)) ||
(required_size > device_size) ||
(!dynamic && (required_size != minimal_size)) ||
(old_ss > 0 && MISALIGNED(required_size, old_ss)) ||
(new_ss > 0 && MISALIGNED(required_size, new_ss))) {
log_err(cd, _("Illegal device size requested in reencryption parameters."));
goto err;
}
rparams.device_size = required_size;
}
r = LUKS2_reenc_load(cd, hdr, device_size, &rparams, &rh);
if (r < 0 || !rh) if (r < 0 || !rh)
goto err; goto err;
@@ -2901,7 +2982,7 @@ static int _reencrypt_teardown_ok(struct crypt_device *cd, struct luks2_hdr *hdr
} }
if (rh->online) { if (rh->online) {
r = LUKS2_reload(cd, rh->device_name, rh->vks, CRYPT_ACTIVATE_KEYRING_KEY | CRYPT_ACTIVATE_SHARED); r = LUKS2_reload(cd, rh->device_name, rh->vks, rh->device_size, CRYPT_ACTIVATE_KEYRING_KEY | CRYPT_ACTIVATE_SHARED);
if (r) if (r)
log_err(cd, _("Failed to reload %s device."), rh->device_name); log_err(cd, _("Failed to reload %s device."), rh->device_name);
if (!r) { if (!r) {
@@ -3004,7 +3085,7 @@ int crypt_reencrypt(struct crypt_device *cd,
log_dbg(cd, "Resuming LUKS2 reencryption."); log_dbg(cd, "Resuming LUKS2 reencryption.");
if (rh->online) { if (rh->online) {
r = reenc_init_helper_devices(cd, rh->device_name, rh->hotzone_name, rh->overlay_name); r = reenc_init_helper_devices(cd, rh);
if (r) { if (r) {
log_err(cd, _("Failed to initalize reencryption device stack.")); log_err(cd, _("Failed to initalize reencryption device stack."));
return -EINVAL; return -EINVAL;
@@ -3076,30 +3157,11 @@ int reenc_erase_backup_segments(struct crypt_device *cd,
} }
/* internal only */ /* internal only */
int luks2_check_device_size(struct crypt_device *cd, struct luks2_hdr *hdr, uint64_t *device_size, bool activation) int luks2_check_device_size(struct crypt_device *cd, struct luks2_hdr *hdr, uint64_t check_size, uint64_t *device_size, bool activation)
{ {
int r; int r;
int64_t data_shift; uint64_t real_size = 0;
uint64_t check_size, real_size = 0;
/*
* Calculate sum of all segments. The last dynamic segment adds one encryption sector to the sum
* If there's only single dynamic segment it returns 0 which later reads data device size (minus offset)
*/
if (LUKS2_get_data_size(hdr, &check_size))
return -EINVAL;
data_shift = LUKS2_reencrypt_data_shift(hdr);
/* initial data device reduction must be extended for spare space for data shift
* layout looks like:
* [LUKS2 hdr (data offset)][segment s with fixed size] [spare space for shift] [moved segment]
*/
if (LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0)
check_size += data_shift;
check_size >>= SECTOR_SHIFT;
/* Here we check minimal size */
r = device_block_adjust(cd, crypt_data_device(cd), activation ? DEV_EXCL : DEV_OK, r = device_block_adjust(cd, crypt_data_device(cd), activation ? DEV_EXCL : DEV_OK,
crypt_get_data_offset(cd), &check_size, NULL); crypt_get_data_offset(cd), &check_size, NULL);
@@ -3113,5 +3175,5 @@ int luks2_check_device_size(struct crypt_device *cd, struct luks2_hdr *hdr, uint
*device_size = real_size << SECTOR_SHIFT; *device_size = real_size << SECTOR_SHIFT;
return r; return 0;
} }

View File

@@ -435,7 +435,7 @@ static int keyslot_verify_or_find_empty(struct crypt_device *cd, int *keyslot)
/* /*
* compares UUIDs returned by device-mapper (striped by cryptsetup) and uuid in header * compares UUIDs returned by device-mapper (striped by cryptsetup) and uuid in header
*/ */
static int crypt_uuid_cmp(const char *dm_uuid, const char *hdr_uuid) int crypt_uuid_cmp(const char *dm_uuid, const char *hdr_uuid)
{ {
int i, j; int i, j;
char *str; char *str;
@@ -3704,7 +3704,7 @@ static int _open_and_activate_reencrypt_device(struct crypt_device *cd,
uint32_t flags) uint32_t flags)
{ {
crypt_reencrypt_info ri; crypt_reencrypt_info ri;
uint64_t device_size; uint64_t check_size, device_size;
bool use_keyring, keys_ready = false; bool use_keyring, keys_ready = false;
struct volume_key *vks = NULL; struct volume_key *vks = NULL;
int r = 0; int r = 0;
@@ -3727,8 +3727,13 @@ static int _open_and_activate_reencrypt_device(struct crypt_device *cd,
ri = LUKS2_reenc_status(hdr); ri = LUKS2_reenc_status(hdr);
if (name && (r = luks2_check_device_size(cd, hdr, &device_size, true))) if (name) {
goto err; r = -EINVAL;
if (LUKS2_get_data_size(hdr, &check_size, NULL))
goto err;
if (luks2_check_device_size(cd, hdr, check_size >> SECTOR_SHIFT, &device_size, true))
goto err;
}
if (name && ri == CRYPT_REENCRYPT_CRASH) { if (name && ri == CRYPT_REENCRYPT_CRASH) {
log_dbg(cd, _("Entering reencryption crash recovery.")); log_dbg(cd, _("Entering reencryption crash recovery."));