[mount] Move zfs code into a separate function to improve readability

dalto 2021-11-13 14:09:16 -06:00
parent 18ad188ef6
commit ebae698a6e


@@ -62,10 +62,9 @@ def get_btrfs_subvolumes(partitions):
 def parse_global_storage(gs_value):
-    """
-    Something in the chain is converting on and off to true and false. This converts it back.
+    """ Something in the chain is converting on and off to true and false. This converts it back.
 
-    :param gs_value: The value from global storage
+    :param gs_value: The value from global storage which needs to be fixed
     :return:
     """
     if gs_value is True:
@@ -76,6 +75,83 @@ def parse_global_storage(gs_value):
     return gs_value
 
 
+def mount_zfs(root_mount_point, partition):
+    """ Mounts a zfs partition at @p root_mount_point
+
+    :param root_mount_point: The absolute path to the root of the install
+    :param partition: The partition map from global storage for this partition
+    :return:
+    """
+    # Get the list of zpools from global storage
+    zfs_pool_list = libcalamares.globalstorage.value("zfsPoolInfo")
+    if not zfs_pool_list:
+        libcalamares.utils.warning("Failed to locate zfsPoolInfo data in global storage")
+        raise Exception("Internal error mounting zfs datasets")
+
+    # Find the zpool matching this partition
+    for zfs_pool in zfs_pool_list:
+        if zfs_pool["mountpoint"] == partition["mountPoint"]:
+            pool_name = zfs_pool["poolName"]
+            ds_name = zfs_pool["dsName"]
+
+    # import the zpool
+    import_result = subprocess.run(['zpool', 'import', '-R', root_mount_point, pool_name])
+    if import_result.returncode != 0:
+        raise Exception("Failed to import zpool")
+
+    # Get the encryption information from global storage
+    zfs_info_list = libcalamares.globalstorage.value("zfsInfo")
+    encrypt = False
+    if zfs_info_list:
+        for zfs_info in zfs_info_list:
+            if zfs_info["mountpoint"] == partition["mountPoint"] and zfs_info["encrypted"] is True:
+                encrypt = True
+                passphrase = zfs_info["passphrase"]
+
+    if encrypt is True:
+        # The zpool is encrypted, we need to unlock it
+        loadkey_result = subprocess.run(['sh', '-c', 'echo "' + passphrase + '" | zfs load-key ' + pool_name])
+        if loadkey_result.returncode != 0:
+            raise Exception("Failed to unlock zpool")
+
+    if partition["mountPoint"] == '/':
+        # Get the zfs dataset list from global storage
+        zfs = libcalamares.globalstorage.value("zfsDatasets")
+        if not zfs:
+            libcalamares.utils.warning("Failed to locate zfs dataset list")
+            raise Exception("Internal error mounting zfs datasets")
+
+        # first we handle the / dataset if there is one
+        for dataset in zfs:
+            if dataset['mountpoint'] == '/':
+                # Properly set the canmount field from global storage
+                can_mount = parse_global_storage(dataset['canMount'])
+                set_result = subprocess.run(['zfs', 'set', 'canmount=' + can_mount,
+                                             dataset['zpool'] + '/' + dataset['dsName']])
+                if set_result.returncode != 0:
+                    raise Exception("Failed to set zfs mountpoint")
+                if dataset['canMount'] == 'noauto':
+                    mount_result = subprocess.run(['zfs', 'mount', dataset['zpool'] + '/' + dataset['dsName']])
+                    if mount_result.returncode != 0:
+                        raise Exception("Failed to mount root dataset")
+
+        # Set the canmount property for each dataset. This will effectively mount the dataset
+        for dataset in zfs:
+            # We already handled the / mountpoint above
+            if dataset['mountpoint'] != '/':
+                can_mount = parse_global_storage(dataset['canMount'])
+                set_result = subprocess.run(['zfs', 'set', 'canmount=' + can_mount,
+                                             dataset['zpool'] + '/' + dataset['dsName']])
+                if set_result.returncode != 0:
+                    raise Exception("Failed to set zfs mountpoint")
+    else:
+        set_result = subprocess.run(['zfs', 'set', 'canmount=on', pool_name + '/' + ds_name])
+        if set_result.returncode != 0:
+            raise Exception("Failed to set zfs mountpoint")
+
+
 def mount_partition(root_mount_point, partition, partitions):
     """
     Do a single mount of @p partition inside @p root_mount_point.
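As a rough sketch of the data the new function consumes: the keys below are the ones mount_zfs() looks up in global storage, while the pool, dataset and passphrase values are invented for illustration and do not come from a real installation.

# Hypothetical shapes of the global storage values read by mount_zfs().
# Keys match the lookups in the code above; all concrete values are invented.
zfs_pool_info = [          # global storage key "zfsPoolInfo"
    {"mountpoint": "/", "poolName": "zpcala", "dsName": "distro"},
]
zfs_info = [               # global storage key "zfsInfo"
    {"mountpoint": "/", "encrypted": True, "passphrase": "example-passphrase"},
]
zfs_datasets = [           # global storage key "zfsDatasets"
    {"zpool": "zpcala", "dsName": "distro/root", "mountpoint": "/", "canMount": "noauto"},
    {"zpool": "zpcala", "dsName": "distro/home", "mountpoint": "/home", "canMount": "on"},
]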
@@ -112,71 +188,7 @@ def mount_partition(root_mount_point, partition, partitions):
         device = os.path.join("/dev/mapper", partition["luksMapperName"])
 
     if fstype == "zfs":
-        zfs_pool_list = libcalamares.globalstorage.value("zfsPoolInfo")
-        if not zfs_pool_list:
-            libcalamares.utils.warning("Failed to locate zfsPoolInfo data in global storage")
-            raise Exception("Internal error mounting zfs datasets")
-
-        for zfs_pool in zfs_pool_list:
-            if zfs_pool["mountpoint"] == partition["mountPoint"]:
-                pool_name = zfs_pool["poolName"]
-                ds_name = zfs_pool["dsName"];
-
-        # import the zpool
-        import_result = subprocess.run(['zpool', 'import', '-R', root_mount_point, pool_name])
-        if import_result.returncode != 0:
-            raise Exception("Failed to import zpool")
-
-        zfs_info_list = libcalamares.globalstorage.value("zfsInfo")
-        encrypt = False
-        if zfs_info_list:
-            for zfs_info in zfs_info_list:
-                if zfs_info["mountpoint"] == partition["mountPoint"] and zfs_info["encrypted"] is True:
-                    encrypt = True
-                    passphrase = zfs_info["passphrase"]
-
-        if encrypt is True:
-            # The zpool is encrypted, we need to unlock it
-            loadkey_result = subprocess.run(['sh', '-c', 'echo "' + passphrase + '" | zfs load-key ' + pool_name])
-            if loadkey_result.returncode != 0:
-                raise Exception("Failed to unlock zpool")
-
-        if partition["mountPoint"] == '/':
-            # Get the zfs dataset list from global storage
-            zfs = libcalamares.globalstorage.value("zfsDatasets")
-            if not zfs:
-                libcalamares.utils.warning("Failed to locate zfs dataset list")
-                raise Exception("Internal error mounting zfs datasets")
-
-            # first we handle the / dataset if there is one
-            for dataset in zfs:
-                if dataset['mountpoint'] == '/':
-                    # Properly set the canmount field from global storage
-                    can_mount = parse_global_storage(dataset['canMount'])
-                    set_result = subprocess.run(['zfs', 'set', 'canmount=' + can_mount,
-                                                 dataset['zpool'] + '/' + dataset['dsName']])
-                    if set_result.returncode != 0:
-                        raise Exception("Failed to set zfs mountpoint")
-                    if dataset['canMount'] == 'noauto':
-                        mount_result = subprocess.run(['zfs', 'mount', dataset['zpool'] + '/' + dataset['dsName']])
-                        if mount_result.returncode != 0:
-                            raise Exception("Failed to mount root dataset")
-
-            # Set the canmount property for each dataset. This will effectively mount the dataset
-            for dataset in zfs:
-                # We already handled the / mountpoint above
-                if dataset['mountpoint'] != '/':
-                    can_mount = parse_global_storage(dataset['canMount'])
-                    set_result = subprocess.run(['zfs', 'set', 'canmount=' + can_mount,
-                                                 dataset['zpool'] + '/' + dataset['dsName']])
-                    if set_result.returncode != 0:
-                        raise Exception("Failed to set zfs mountpoint")
-        else:
-            set_result = subprocess.run(['zfs', 'set', 'canmount=on', pool_name + '/' + ds_name])
-            if set_result.returncode != 0:
-                raise Exception("Failed to set zfs mountpoint")
+        mount_zfs(root_mount_point, partition)
     else: # fstype == "zfs"
         if libcalamares.utils.mount(device,
                                     mount_point,
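An aside on the unlock step: the commit keeps the existing sh -c 'echo ... | zfs load-key ...' pipeline, which interpolates the passphrase into a shell string. A minimal alternative sketch, assuming zfs load-key reading the passphrase from stdin (the same behaviour the pipeline already relies on), would feed it through subprocess directly; load_zpool_key is a hypothetical helper name, not part of the module.

import subprocess

def load_zpool_key(pool_name, passphrase):
    # Sketch only: pass the passphrase to `zfs load-key` on stdin instead of
    # embedding it in a shell command line.
    result = subprocess.run(['zfs', 'load-key', pool_name], input=passphrase, text=True)
    if result.returncode != 0:
        raise Exception("Failed to unlock zpool")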