From ac44aab74a4f646515f5ed7ad89e0655fc827b8f Mon Sep 17 00:00:00 2001
From: dalto
Date: Sat, 6 Nov 2021 09:16:09 -0500
Subject: [PATCH 01/23] [partition] Add zfs to the filesystem list if the zfs
 module is enabled

---
 src/modules/partition/gui/CreatePartitionDialog.cpp       | 5 ++++-
 src/modules/partition/gui/EditExistingPartitionDialog.cpp | 5 ++++-
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/src/modules/partition/gui/CreatePartitionDialog.cpp b/src/modules/partition/gui/CreatePartitionDialog.cpp
index cdc9992b9..0727ee935 100644
--- a/src/modules/partition/gui/CreatePartitionDialog.cpp
+++ b/src/modules/partition/gui/CreatePartitionDialog.cpp
@@ -23,6 +23,7 @@
 #include "GlobalStorage.h"
 #include "JobQueue.h"
+#include "Settings.h"
 #include "partition/FileSystem.h"
 #include "partition/PartitionQuery.h"
 #include "utils/Logger.h"
@@ -104,7 +105,9 @@ CreatePartitionDialog::CreatePartitionDialog( Device* device,
     QStringList fsNames;
     for ( auto fs : FileSystemFactory::map() )
     {
-        if ( fs->supportCreate() != FileSystem::cmdSupportNone && fs->type() != FileSystem::Extended )
+        // We need to ensure zfs is added to the list if the zfs module is enabled
+        if ( ( fs->type() == FileSystem::Type::Zfs && Calamares::Settings::instance()->isModuleEnabled( "zfs" ) )
+             || ( fs->supportCreate() != FileSystem::cmdSupportNone && fs->type() != FileSystem::Extended ) )
         {
             fsNames << userVisibleFS( fs );  // This is put into the combobox
             if ( fs->type() == defaultFSType )
diff --git a/src/modules/partition/gui/EditExistingPartitionDialog.cpp b/src/modules/partition/gui/EditExistingPartitionDialog.cpp
index 411d6d0dc..04b9527f1 100644
--- a/src/modules/partition/gui/EditExistingPartitionDialog.cpp
+++ b/src/modules/partition/gui/EditExistingPartitionDialog.cpp
@@ -25,6 +25,7 @@
 #include "GlobalStorage.h"
 #include "JobQueue.h"
+#include "Settings.h"
 #include "partition/FileSystem.h"
 #include "utils/Logger.h"
@@ -89,7 +90,9 @@ EditExistingPartitionDialog::EditExistingPartitionDialog( Device* device,
     QStringList fsNames;
     for ( auto fs : FileSystemFactory::map() )
     {
-        if ( fs->supportCreate() != FileSystem::cmdSupportNone && fs->type() != FileSystem::Extended )
+        // We need to ensure zfs is added to the list if the zfs module is enabled
+        if ( ( fs->type() == FileSystem::Type::Zfs && Calamares::Settings::instance()->isModuleEnabled( "zfs" ) )
+             || ( fs->supportCreate() != FileSystem::cmdSupportNone && fs->type() != FileSystem::Extended ) )
         {
             fsNames << userVisibleFS( fs );  // For the combo box
         }

From 7faf4f30dfd71bd1939c0ff1bd2afb1f502e7338 Mon Sep 17 00:00:00 2001
From: dalto
Date: Sat, 6 Nov 2021 09:42:07 -0500
Subject: [PATCH 02/23] [partition] Add support for manually creating a
 partition for zfs

---
 .../partition/jobs/CreatePartitionJob.cpp | 80 +++++++++++++++++++
 1 file changed, 80 insertions(+)

diff --git a/src/modules/partition/jobs/CreatePartitionJob.cpp b/src/modules/partition/jobs/CreatePartitionJob.cpp
index 241e0a451..07b816b7e 100644
--- a/src/modules/partition/jobs/CreatePartitionJob.cpp
+++ b/src/modules/partition/jobs/CreatePartitionJob.cpp
@@ -11,8 +11,10 @@
 #include "CreatePartitionJob.h"

+#include "core/PartitionInfo.h"
 #include "partition/FileSystem.h"
 #include "partition/PartitionQuery.h"
+#include "utils/CalamaresUtilsSystem.h"
 #include "utils/Logger.h"
 #include "utils/Units.h"
@@ -24,9 +26,80 @@
 #include
 #include

+#include
+#include
+
 using CalamaresUtils::Partition::untranslatedFS;
 using CalamaresUtils::Partition::userVisibleFS;
+/** @brief Create a zfs partition
+ *
+ * Uses sfdisk to create @p partition. This should only be used in cases
+ * where using kpmcore to create the partition would not be appropriate
+ *
+ */
+static Calamares::JobResult
+createZfs( Partition* partition, Device* device )
+{
+    auto r = CalamaresUtils::System::instance()->runCommand(
+        { "sh",
+          "-c",
+          "echo start=" + QString::number( partition->firstSector() ) + " size="
+              + QString::number( partition->length() ) + " | sfdisk --append --force " + partition->devicePath() },
+        std::chrono::seconds( 5 ) );
+    if ( r.getExitCode() != 0 )
+    {
+        return Calamares::JobResult::error(
+            QCoreApplication::translate( CreatePartitionJob::staticMetaObject.className(),
+                                         "Failed to create partition" ),
+            QCoreApplication::translate( CreatePartitionJob::staticMetaObject.className(),
+                                         "Failed to create zfs partition with output: "
+                                             + r.getOutput().toLocal8Bit() ) );
+    }
+
+    // Now we need to do some things that would normally be done by kpmcore
+
+    // First we get the device node from the output and set it as the partition path
+    QRegularExpression re( QStringLiteral( "Created a new partition (\\d+)" ) );
+    QRegularExpressionMatch rem = re.match( r.getOutput() );
+
+    QString deviceNode;
+    if ( rem.hasMatch() )
+    {
+        if ( partition->devicePath().back().isDigit() )
+        {
+            deviceNode = partition->devicePath() + QLatin1Char( 'p' ) + rem.captured( 1 );
+        }
+        else
+        {
+            deviceNode = partition->devicePath() + rem.captured( 1 );
+        }
+    }
+
+    partition->setPartitionPath( deviceNode );
+    partition->setState( Partition::State::None );
+
+    // If it is a gpt device, set the partition UUID
+    if ( device->partitionTable()->type() == PartitionTable::gpt && partition->uuid().isEmpty() )
+    {
+        r = CalamaresUtils::System::instance()->runCommand(
+            { "sfdisk", "--list", "--output", "Device,UUID", partition->devicePath() }, std::chrono::seconds( 5 ) );
+        if ( r.getExitCode() == 0 )
+        {
+            QRegularExpression re( deviceNode + QStringLiteral( " +(.+)" ) );
+            QRegularExpressionMatch rem = re.match( r.getOutput() );
+
+            if ( rem.hasMatch() )
+            {
+                partition->setUUID( rem.captured( 1 ) );
+            }
+        }
+    }
+
+    return Calamares::JobResult::ok();
+}
+
+
 CreatePartitionJob::CreatePartitionJob( Device* device, Partition* partition )
     : PartitionJob( partition )
     , m_device( device )
@@ -194,6 +267,13 @@ CreatePartitionJob::prettyStatusMessage() const
 Calamares::JobResult
 CreatePartitionJob::exec()
 {
+    // kpmcore doesn't currently handle this case properly so for now, we manually create the partition
+    // The zfs module can later deal with creating a zpool in the partition
+    if ( m_partition->fileSystem().type() == FileSystem::Type::Zfs )
+    {
+        return createZfs( m_partition, m_device );
+    }
+
     Report report( nullptr );
     NewOperation op( *m_device, m_partition );
     op.setStatus( Operation::StatusRunning );
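The sfdisk call above appends a partition spec read from stdin, then the job back-fills what kpmcore would normally track: the partition path, state, and (on GPT) the UUID. A minimal Python sketch of the same flow — illustrative only, with hypothetical names and assuming sfdisk's "Created a new partition N" output format:

    import re
    import subprocess

    def create_zfs_partition(disk, first_sector, length):
        # sfdisk reads the partition spec from stdin, mirroring the
        # `echo start=... size=... | sfdisk --append --force` pipeline above
        spec = "start={} size={}".format(first_sector, length)
        result = subprocess.run(["sfdisk", "--append", "--force", disk],
                                input=spec, capture_output=True, text=True)
        if result.returncode != 0:
            raise RuntimeError("sfdisk failed: " + result.stdout + result.stderr)
        # sfdisk reports the new partition number on success
        match = re.search(r"Created a new partition (\d+)", result.stdout)
        if not match:
            raise RuntimeError("could not determine the new partition number")
        # /dev/sda -> /dev/sda1, but /dev/nvme0n1 -> /dev/nvme0n1p1
        sep = "p" if disk[-1].isdigit() else ""
        return disk + sep + match.group(1)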
From e24d14c5126f50c0776c8a9da6000763bfb67453 Mon Sep 17 00:00:00 2001
From: dalto
Date: Sat, 6 Nov 2021 09:44:27 -0500
Subject: [PATCH 03/23] [zfs] Initial commit for zfs module

---
 src/modules/zfs/CMakeLists.txt  |  13 ++++
 src/modules/zfs/ZfsJob.cpp      | 118 ++++++++++++++++++++++++++++++++
 src/modules/zfs/ZfsJob.h        |  50 ++++++++++++++
 src/modules/zfs/zfs.conf        |  38 ++++++++++
 src/modules/zfs/zfs.schema.yaml |  22 ++++++
 5 files changed, 241 insertions(+)
 create mode 100644 src/modules/zfs/CMakeLists.txt
 create mode 100644 src/modules/zfs/ZfsJob.cpp
 create mode 100644 src/modules/zfs/ZfsJob.h
 create mode 100644 src/modules/zfs/zfs.conf
 create mode 100644 src/modules/zfs/zfs.schema.yaml

diff --git a/src/modules/zfs/CMakeLists.txt b/src/modules/zfs/CMakeLists.txt
new file mode 100644
index 000000000..2feb911d0
--- /dev/null
+++ b/src/modules/zfs/CMakeLists.txt
@@ -0,0 +1,13 @@
+# === This file is part of Calamares - ===
+#
+#   SPDX-FileCopyrightText: 2020 Adriaan de Groot
+#   SPDX-License-Identifier: BSD-2-Clause
+#
+calamares_add_plugin( zfs
+    TYPE job
+    EXPORT_MACRO PLUGINDLLEXPORT_PRO
+    SOURCES
+        ZfsJob.cpp
+    SHARED_LIB
+)
+
diff --git a/src/modules/zfs/ZfsJob.cpp b/src/modules/zfs/ZfsJob.cpp
new file mode 100644
index 000000000..2602c5417
--- /dev/null
+++ b/src/modules/zfs/ZfsJob.cpp
@@ -0,0 +1,118 @@
+/* === This file is part of Calamares - ===
+ *
+ *   SPDX-FileCopyrightText: 2021 Evan James
+ *   SPDX-License-Identifier: GPL-3.0-or-later
+ *
+ *   Calamares is Free Software: see the License-Identifier above.
+ *
+ */
+
+#include "ZfsJob.h"
+
+#include "utils/CalamaresUtilsSystem.h"
+#include "utils/Logger.h"
+#include "utils/Variant.h"
+
+#include "GlobalStorage.h"
+#include "JobQueue.h"
+#include "Settings.h"
+
+ZfsJob::ZfsJob( QObject* parent )
+    : Calamares::CppJob( parent )
+{
+}
+
+ZfsJob::~ZfsJob() {}
+
+QString
+ZfsJob::prettyName() const
+{
+    return tr( "Create ZFS pools and datasets" );
+}
+
+Calamares::JobResult
+ZfsJob::exec()
+{
+    QList< QVariant > partitions;
+    Calamares::GlobalStorage* gs = Calamares::JobQueue::instance()->globalStorage();
+    if ( gs && gs->contains( "partitions" ) && gs->value( "partitions" ).canConvert( QVariant::List ) )
+    {
+        partitions = gs->value( "partitions" ).toList();
+    }
+    else
+    {
+        cWarning() << "No *partitions* defined.";
+        return Calamares::JobResult::internalError( tr( "Configuration Error" ),
+                                                    tr( "No partitions are available for Zfs." ),
+                                                    Calamares::JobResult::InvalidConfiguration );
+    }
+
+    const CalamaresUtils::System* system = CalamaresUtils::System::instance();
+
+    for ( auto& partition : qAsConst( partitions ) )
+    {
+        QVariantMap pMap;
+        if ( partition.canConvert( QVariant::Map ) )
+            pMap = partition.toMap();
+
+        // If it isn't a zfs partition, ignore it
+        if ( pMap[ "fsName" ] != "zfs" )
+            continue;
+
+        // Find the best device identifier, if one isn't available, skip this partition
+        QString deviceName;
+        if ( pMap[ "partuuid" ].toString() != "" )
+            deviceName = "/dev/disk/by-partuuid/" + pMap[ "partuuid" ].toString().toLower();
+        else if ( pMap[ "device" ].toString() != "" )
+            deviceName = pMap[ "device" ].toString().toLower();
+        else
+            continue;
+
+        // Create the zpool
+        auto r
+            = system->runCommand( { "sh", "-c", "zpool create " + m_poolOptions + " " + m_poolName + " " + deviceName },
+                                  std::chrono::seconds( 10 ) );
+        if ( r.getExitCode() != 0 )
+            return Calamares::JobResult::error( "message", "Failed to create zpool on " + deviceName );
+
+        // Create the datasets
+        for ( const auto& dataset : qAsConst( m_datasets ) )
+        {
+            QVariantMap dsMap = dataset.toMap();
+
+            // Make sure all values are valid
+            if ( dsMap[ "dsName" ].toString().isEmpty() || dsMap[ "mountpoint" ].toString().isEmpty()
+                 || dsMap[ "canMount" ].toString().isEmpty() )
+            {
+                cWarning() << "Bad dataset entry";
+                continue;
+            }
+
+            // Create the dataset. We set canmount=no regardless of the setting for now.
+            // It is modified to the correct value in the mount module to ensure mount order is maintained
+            r = system->runCommand( { "sh",
+                                      "-c",
+                                      "zfs create " + m_datasetOptions
+                                          + " -o canmount=off -o mountpoint=" + dsMap[ "mountpoint" ].toString() + " "
+                                          + m_poolName + "/" + dsMap[ "dsName" ].toString() },
+                                    std::chrono::seconds( 10 ) );
+            if ( r.getExitCode() != 0 )
+                cWarning() << "Failed to create dataset" << dsMap[ "dsName" ].toString();
+        }
+    }
+
+    return Calamares::JobResult::ok();
+}
+
+
+void
+ZfsJob::setConfigurationMap( const QVariantMap& map )
+{
+    m_poolName = CalamaresUtils::getString( map, "poolName" );
+    m_poolOptions = CalamaresUtils::getString( map, "poolOptions" );
+    m_datasetOptions = CalamaresUtils::getString( map, "datasetOptions" );
+
+    m_datasets = CalamaresUtils::getList( map, "datasets" );
+}
+
+CALAMARES_PLUGIN_FACTORY_DEFINITION( ZfsJobFactory, registerPlugin< ZfsJob >(); )
diff --git a/src/modules/zfs/ZfsJob.h b/src/modules/zfs/ZfsJob.h
new file mode 100644
index 000000000..87646a227
--- /dev/null
+++ b/src/modules/zfs/ZfsJob.h
@@ -0,0 +1,50 @@
+/* === This file is part of Calamares - ===
+ *
+ *   SPDX-FileCopyrightText: 2021 Evan James
+ *   SPDX-License-Identifier: GPL-3.0-or-later
+ *
+ *   Calamares is Free Software: see the License-Identifier above.
+ *
+ */
+
+#ifndef ZFSJOB_H
+#define ZFSJOB_H
+
+#include
+#include
+#include
+
+#include "CppJob.h"
+
+#include "utils/PluginFactory.h"
+
+#include "DllMacro.h"
+
+/** @brief Create zpools and zfs datasets
+ *
+ */
+class PLUGINDLLEXPORT ZfsJob : public Calamares::CppJob
+{
+    Q_OBJECT
+
+public:
+    explicit ZfsJob( QObject* parent = nullptr );
+    ~ZfsJob() override;
+
+    QString prettyName() const override;
+
+    Calamares::JobResult exec() override;
+
+    void setConfigurationMap( const QVariantMap& configurationMap ) override;
+
+private:
+    QString m_poolName;
+    QString m_poolOptions;
+    QString m_datasetOptions;
+
+    QList< QVariant > m_datasets;
+};
+
+CALAMARES_PLUGIN_FACTORY_DECLARATION( ZfsJobFactory )
+
+#endif // ZFSJOB_H
diff --git a/src/modules/zfs/zfs.conf b/src/modules/zfs/zfs.conf
new file mode 100644
index 000000000..f2f8f52b0
--- /dev/null
+++ b/src/modules/zfs/zfs.conf
@@ -0,0 +1,38 @@
+# SPDX-FileCopyrightText: no
+# SPDX-License-Identifier: CC0-1.0
+#
+# The zfs module creates the zfs pools and datasets
+#
+#
+#
+---
+# The name to be used for the zpool
+poolName: zpcala
+
+# A list of options that will be passed to zpool create
+poolOptions: "-f -o ashift=12 -O mountpoint=none -O acltype=posixacl -O relatime=on"
+
+# A list of options that will be passed to zfs create when creating each dataset
+# Do not include "canmount" or "mountpoint" as those are set below in the datasets array
+datasetOptions: "-o compression=lz4 -o atime=off -o xattr=sa"
+
+# An array of datasets that will be created on the zpool mounted at /
+datasets:
+    - dsName: ROOT
+      mountpoint: none
+      canMount: off
+    - dsName: ROOT/distro
+      mountpoint: none
+      canMount: off
+    - dsName: ROOT/distro/root
+      mountpoint: /
+      canMount: noauto
+    - dsName: ROOT/distro/home
+      mountpoint: /home
+      canMount: on
+    - dsName: ROOT/distro/varcache
+      mountpoint: /var/cache
+      canMount: on
+    - dsName: ROOT/distro/varlog
+      mountpoint: /var/log
+      canMount: on
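Given the sample configuration above, the dataset creation loop boils down to one `zfs create` per entry, always with `canmount=off` at creation time. A sketch of the commands implied by the first few sample datasets (illustrative only; the job actually runs them through `sh -c`):

    # Illustrative expansion of the sample zfs.conf into zfs create calls
    pool = "zpcala"
    dataset_options = "-o compression=lz4 -o atime=off -o xattr=sa"
    datasets = [
        ("ROOT", "none"),
        ("ROOT/distro", "none"),
        ("ROOT/distro/root", "/"),
    ]
    for ds_name, mountpoint in datasets:
        # canmount is forced to off here; the mount module raises it later
        # so that parent datasets are mounted before their children
        print("zfs create {} -o canmount=off -o mountpoint={} {}/{}".format(
            dataset_options, mountpoint, pool, ds_name))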
diff --git a/src/modules/zfs/zfs.schema.yaml b/src/modules/zfs/zfs.schema.yaml
new file mode 100644
index 000000000..fb83778ad
--- /dev/null
+++ b/src/modules/zfs/zfs.schema.yaml
@@ -0,0 +1,22 @@
+# SPDX-FileCopyrightText: 2020 Adriaan de Groot
+# SPDX-License-Identifier: GPL-3.0-or-later
+---
+$schema: https://json-schema.org/schema#
+$id: https://calamares.io/schemas/zfs
+additionalProperties: false
+type: object
+properties:
+    poolName: { type: string }
+    poolOptions: { type: string }
+    datasetOptions: { type: string }
+    datasets:
+        type: array
+        items:
+            type: object
+            additionalProperties: false
+            properties:
+                dsName: { type: string }
+                mountpoint: { type: string }
+                canMount: { type: string }
+            required: [ dsName, mountpoint, canMount ]
+required: [ poolName, datasets ]

From 69ef13ef0c4c84b87d7fd0cbc91860f771041679 Mon Sep 17 00:00:00 2001
From: dalto
Date: Sat, 6 Nov 2021 09:48:38 -0500
Subject: [PATCH 04/23] [initcpiocfg] Add support for zfs

---
 src/modules/initcpiocfg/main.py | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/src/modules/initcpiocfg/main.py b/src/modules/initcpiocfg/main.py
index 99168dcde..755039c0e 100644
--- a/src/modules/initcpiocfg/main.py
+++ b/src/modules/initcpiocfg/main.py
@@ -150,6 +150,7 @@ def find_initcpio_features(partitions, root_mount_point):
     swap_uuid = ""
     uses_btrfs = False
+    uses_zfs = False
     uses_lvm2 = False
     encrypt_hook = False
     openswap_hook = False
@@ -172,6 +173,9 @@
         if partition["fs"] == "btrfs":
             uses_btrfs = True

+        if partition["fs"] == "zfs":
+            uses_zfs = True
+
         if "lvm2" in partition["fs"]:
             uses_lvm2 = True

@@ -198,6 +202,9 @@
     if uses_lvm2:
         hooks.append("lvm2")

+    if uses_zfs:
+        hooks.append("zfs")
+
     if swap_uuid != "":
         if encrypt_hook and openswap_hook:
             hooks.extend(["openswap"])

From 7108d4a509782cb5be3e730dc7b2324ee52c0d5d Mon Sep 17 00:00:00 2001
From: dalto
Date: Sat, 6 Nov 2021 10:30:49 -0500
Subject: [PATCH 05/23] [zfs] Update to Calamares coding standards

---
 src/modules/zfs/ZfsJob.cpp | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/src/modules/zfs/ZfsJob.cpp b/src/modules/zfs/ZfsJob.cpp
index 2602c5417..0227ead07 100644
--- a/src/modules/zfs/ZfsJob.cpp
+++ b/src/modules/zfs/ZfsJob.cpp
@@ -53,27 +53,40 @@ ZfsJob::exec()
     {
         QVariantMap pMap;
         if ( partition.canConvert( QVariant::Map ) )
+        {
             pMap = partition.toMap();
+        }

         // If it isn't a zfs partition, ignore it
         if ( pMap[ "fsName" ] != "zfs" )
+        {
             continue;
+        }

         // Find the best device identifier, if one isn't available, skip this partition
         QString deviceName;
         if ( pMap[ "partuuid" ].toString() != "" )
+        {
             deviceName = "/dev/disk/by-partuuid/" + pMap[ "partuuid" ].toString().toLower();
+        }
         else if ( pMap[ "device" ].toString() != "" )
+        {
             deviceName = pMap[ "device" ].toString().toLower();
+        }
         else
+        {
             continue;
+        }

         // Create the zpool
         auto r
             = system->runCommand( { "sh", "-c", "zpool create " + m_poolOptions + " " + m_poolName + " " + deviceName },
                                   std::chrono::seconds( 10 ) );
         if ( r.getExitCode() != 0 )
-            return Calamares::JobResult::error( "message", "Failed to create zpool on " + deviceName );
+        {
+            return Calamares::JobResult::error( tr( "zpool failure" ),
+                                                tr( "Failed to create zpool on " + deviceName.toLocal8Bit() ) );
+        }

         // Create the datasets
         for ( const auto& dataset : qAsConst( m_datasets ) )
@@ -97,7 +110,9 @@
                                           + m_poolName + "/" + dsMap[ "dsName" ].toString() },
                                     std::chrono::seconds( 10 ) );
             if ( r.getExitCode() != 0 )
+            {
                 cWarning() << "Failed to create dataset" << dsMap[ "dsName" ].toString();
+            }
         }
     }

From 51a5c4de0fe639b4fbf9682356ae3288c03386d2 Mon Sep 17 00:00:00 2001
From: dalto
Date: Sat, 6 Nov 2021 13:27:03 -0500
Subject: [PATCH 06/23] [zfs] Add datasets to global storage for other modules
--- src/modules/zfs/ZfsJob.cpp | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/src/modules/zfs/ZfsJob.cpp b/src/modules/zfs/ZfsJob.cpp index 0227ead07..48521a2b9 100644 --- a/src/modules/zfs/ZfsJob.cpp +++ b/src/modules/zfs/ZfsJob.cpp @@ -89,13 +89,14 @@ ZfsJob::exec() } // Create the datasets + QVariantList datasetList; for ( const auto& dataset : qAsConst( m_datasets ) ) { - QVariantMap dsMap = dataset.toMap(); + QVariantMap datasetMap = dataset.toMap(); // Make sure all values are valid - if ( dsMap[ "dsName" ].toString().isEmpty() || dsMap[ "mountpoint" ].toString().isEmpty() - || dsMap[ "canMount" ].toString().isEmpty() ) + if ( datasetMap[ "dsName" ].toString().isEmpty() || datasetMap[ "mountpoint" ].toString().isEmpty() + || datasetMap[ "canMount" ].toString().isEmpty() ) { cWarning() << "Bad dataset entry"; continue; @@ -106,13 +107,23 @@ ZfsJob::exec() r = system->runCommand( { "sh", "-c", "zfs create " + m_datasetOptions - + " -o canmount=off -o mountpoint=" + dsMap[ "mountpoint" ].toString() + " " - + m_poolName + "/" + dsMap[ "dsName" ].toString() }, + + " -o canmount=off -o mountpoint=" + datasetMap[ "mountpoint" ].toString() + + " " + m_poolName + "/" + datasetMap[ "dsName" ].toString() }, std::chrono::seconds( 10 ) ); if ( r.getExitCode() != 0 ) { - cWarning() << "Failed to create dataset" << dsMap[ "dsName" ].toString(); + cWarning() << "Failed to create dataset" << datasetMap[ "dsName" ].toString(); } + + // Add the dataset to the list for global storage + datasetMap[ "zpool" ] = m_poolName; + datasetList.append( datasetMap ); + } + + // If the list isn't empty, add it to global storage + if ( !datasetList.isEmpty() ) + { + Calamares::JobQueue::instance()->globalStorage()->insert( "zfs", datasetList ); } } From e3af4f3e2673f47122fcf09ed00b2900e120a648 Mon Sep 17 00:00:00 2001 From: dalto Date: Sat, 6 Nov 2021 14:12:40 -0500 Subject: [PATCH 07/23] [zfs] Add delay before creating the zpool --- src/modules/zfs/ZfsJob.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/modules/zfs/ZfsJob.cpp b/src/modules/zfs/ZfsJob.cpp index 48521a2b9..113f9afd8 100644 --- a/src/modules/zfs/ZfsJob.cpp +++ b/src/modules/zfs/ZfsJob.cpp @@ -79,8 +79,9 @@ ZfsJob::exec() } // Create the zpool + // zfs doesn't wait for the devices so pause for 2 seconds to ensure we give time for the device files to be created auto r - = system->runCommand( { "sh", "-c", "zpool create " + m_poolOptions + " " + m_poolName + " " + deviceName }, + = system->runCommand( { "sh", "-c", "sleep 2 ; zpool create " + m_poolOptions + " " + m_poolName + " " + deviceName }, std::chrono::seconds( 10 ) ); if ( r.getExitCode() != 0 ) { From 858e271c8ac4169f3a19f3d3d3f99bcad5bcf207 Mon Sep 17 00:00:00 2001 From: dalto Date: Sat, 6 Nov 2021 14:33:43 -0500 Subject: [PATCH 08/23] [mount] Add support for zfs datasets --- src/modules/mount/main.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/src/modules/mount/main.py b/src/modules/mount/main.py index 2e96b6036..1b849a433 100644 --- a/src/modules/mount/main.py +++ b/src/modules/mount/main.py @@ -131,6 +131,24 @@ def mount_partition(root_mount_point, partition, partitions): ",".join([mount_option, partition.get("options", "")])) != 0: libcalamares.utils.warning("Cannot mount {}".format(device)) + if fstype == "zfs" and partition["mountPoint"] == '/': + # Get the zfs dataset list from global storage + zfs = libcalamares.globalstorage.value("zfs") + + if not zfs: + libcalamares.utils.warning("Failed 
to locate zfs dataset list")
+
+        # Set the canmount property for each dataset. This will effectively mount the dataset
+        for dataset in zfs:
+            if dataset['canmount'] == 'noauto' or dataset['canmount'] == 'on':
+                subprocess.check_call(['zfs', 'set', 'canmount=' + dataset['canmount'],
+                                       dataset['zpool'] + '/' + dataset['dsName']])
+
+                # It is common for the / mountpoint to be set to noauto since it is mounted by the initrd
+                # If this is the case we need to manually mount it here
+                if dataset['mountpoint'] == '/' and dataset['canmount'] == 'noauto':
+                    subprocess.check_call(['zfs', 'mount', dataset['zpool'] + '/' + dataset['dsName']])
+

 def run():
     """

From 85a216009891b2930a5ac4f02aa20a495b80d314 Mon Sep 17 00:00:00 2001
From: dalto
Date: Sun, 7 Nov 2021 08:01:32 -0600
Subject: [PATCH 09/23] [mount] Improve error handling for zfs

---
 src/modules/mount/main.py | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/src/modules/mount/main.py b/src/modules/mount/main.py
index 1b849a433..492525f0e 100644
--- a/src/modules/mount/main.py
+++ b/src/modules/mount/main.py
@@ -136,18 +136,22 @@ def mount_partition(root_mount_point, partition, partitions):
         zfs = libcalamares.globalstorage.value("zfs")

         if not zfs:
-            libcalamares.utils.warning("Failed to locate zfs dataset list")
+            libcalamares.utils.error("Failed to locate zfs dataset list")

         # Set the canmount property for each dataset. This will effectively mount the dataset
         for dataset in zfs:
-            if dataset['canmount'] == 'noauto' or dataset['canmount'] == 'on':
-                subprocess.check_call(['zfs', 'set', 'canmount=' + dataset['canmount'],
-                                       dataset['zpool'] + '/' + dataset['dsName']])
+            try:
+                if dataset['canmount'] == 'noauto' or dataset['canmount'] == 'on':
+                    subprocess.check_call(['zfs', 'set', 'canmount=' + dataset['canmount'],
+                                           dataset['zpool'] + '/' + dataset['dsName']])

-                # It is common for the / mountpoint to be set to noauto since it is mounted by the initrd
-                # If this is the case we need to manually mount it here
-                if dataset['mountpoint'] == '/' and dataset['canmount'] == 'noauto':
-                    subprocess.check_call(['zfs', 'mount', dataset['zpool'] + '/' + dataset['dsName']])
+                # It is common for the / mountpoint to be set to noauto since it is mounted by the initrd
+                # If this is the case we need to manually mount it here
+                if dataset['mountpoint'] == '/' and dataset['canmount'] == 'noauto':
+                    subprocess.check_call(['zfs', 'mount', dataset['zpool'] + '/' + dataset['dsName']])
+            except KeyError:
+                # This should be impossible
+                libcalamares.utils.error("Internal error handling zfs dataset")

 def run():
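The canmount property does the heavy lifting in the two patches above: setting it back to `on` or `noauto` is what activates a dataset, and a `noauto` root is skipped by `zfs mount -a`, so it has to be mounted explicitly. A condensed sketch of that activation step (a hypothetical helper, assuming the `canMount`/`dsName` keys the datasets carry in global storage):

    import subprocess

    def activate_dataset(dataset):
        name = dataset["zpool"] + "/" + dataset["dsName"]
        # Restoring canmount is what makes the dataset mountable again
        subprocess.check_call(["zfs", "set", "canmount=" + dataset["canMount"], name])
        # A noauto root is not picked up by `zfs mount -a`, so mount it by hand
        if dataset["mountpoint"] == "/" and dataset["canMount"] == "noauto":
            subprocess.check_call(["zfs", "mount", name])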
From 0720d568033bf0d5545d756d9826cccceacc51fe Mon Sep 17 00:00:00 2001
From: dalto
Date: Sun, 7 Nov 2021 09:32:52 -0600
Subject: [PATCH 10/23] [bootloader] Add initial support for zfs

---
 src/modules/bootloader/main.py | 70 +++++++++++++++++++++++++++++++++-
 1 file changed, 68 insertions(+), 2 deletions(-)

diff --git a/src/modules/bootloader/main.py b/src/modules/bootloader/main.py
index 68cbddd0e..10e7d0f47 100644
--- a/src/modules/bootloader/main.py
+++ b/src/modules/bootloader/main.py
@@ -92,6 +92,32 @@ def get_kernel_line(kernel_type):
         return ""

+def get_zfs_root():
+    """
+    Looks in global storage to find the zfs root
+
+    :return: A string containing the path to the zfs root or None if it is not found
+    """
+
+    zfs = libcalamares.globalstorage.value("zfs")
+
+    if not zfs:
+        libcalamares.utils.warning("Failed to locate zfs dataset list")
+        return None
+
+    # Find the root dataset
+    for dataset in zfs:
+        try:
+            if dataset['mountpoint'] == '/':
+                return dataset["zpool"] + "/" + dataset["dsName"]
+        except KeyError:
+            # This should be impossible
+            libcalamares.utils.error("Internal error handling zfs dataset")
+            raise
+
+    return None
+
+
 def create_systemd_boot_conf(install_path, efi_dir, uuid, entry, entry_name, kernel_type):
     """
     Creates systemd-boot configuration files based on given parameters.
@@ -133,12 +159,21 @@ def create_systemd_boot_conf(install_path, efi_dir, uuid, entry, entry_name, ker
                                  "root=/dev/mapper/" +
                                  partition["luksMapperName"]]

-    # systemd-boot with a BTRFS root filesystem needs to be told
-    # about the root subvolume.
     for partition in partitions:
+        # systemd-boot with a BTRFS root filesystem needs to be told
+        # about the root subvolume.
         if partition["mountPoint"] == "/" and partition["fs"] == "btrfs":
             kernel_params.append("rootflags=subvol=@")

+        # zfs needs to be told the location of the root dataset
+        if partition["mountPoint"] == "/" and partition["fs"] == "zfs":
+            zfs_root = get_zfs_root()
+            if zfs_root is not None:
+                kernel_params.append("root=ZFS=" + zfs_root)
+            else:
+                # Something is really broken if we get to this point
+                libcalamares.utils.error("Internal error handling zfs dataset")
+
     if cryptdevice_params:
         kernel_params.extend(cryptdevice_params)
     else:
@@ -314,6 +349,37 @@ def get_grub_efi_parameters():
     return None

+def run_grub_mkconfig(output_file):
+    """
+    Runs grub-mkconfig in the target environment
+
+    :param output_file: A string containing the path to the generated grub config file
+    :return:
+    """
+
+    # get the partitions from global storage
+    partitions = libcalamares.globalstorage.value("partitions")
+    if not partitions:
+        libcalamares.utils.error("Failed to run grub-mkconfig, no partitions defined in global storage")
+        return
+
+    # check for zfs
+    is_zfs = False
+    for partition in partitions:
+        if partition["mountPoint"] == "/" and partition["fs"] == "zfs":
+            is_zfs = True
+
+    # zfs needs an environment variable set for grub-mkconfig
+    if is_zfs:
+        check_target_env_call(["sh", "-c", "echo ZPOOL_VDEV_NAME_PATH=1 >> /etc/environment"])
+        check_target_env_call(["sh", "-c", "ZPOOL_VDEV_NAME_PATH=1 " +
+                               libcalamares.job.configuration["grubMkconfig"] + " -o " + output_file])
+    else:
+        # The input file /etc/default/grub should already be filled out by the
+        # grubcfg job module.
+        check_target_env_call([libcalamares.job.configuration["grubMkconfig"], "-o", output_file])
+
+
 def install_grub(efi_directory, fw_type):
     """
     Installs grub as bootloader, either in pc or efi mode.
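For a root dataset like the sample configuration's ROOT/distro/root on pool zpcala, the code above ends up appending a single kernel parameter. A small sketch of the derivation, with illustrative data:

    def zfs_root_param(datasets):
        # Mirrors get_zfs_root(): find the dataset mounted at / and build
        # the root=ZFS= parameter the initrd uses to locate the root pool
        for dataset in datasets:
            if dataset["mountpoint"] == "/":
                return "root=ZFS=" + dataset["zpool"] + "/" + dataset["dsName"]
        return None

    datasets = [{"zpool": "zpcala", "dsName": "ROOT/distro/root", "mountpoint": "/"}]
    print(zfs_root_param(datasets))  # root=ZFS=zpcala/ROOT/distro/root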
From 2f145fcf44b2657e21f89fcff2cc784b307718aa Mon Sep 17 00:00:00 2001
From: dalto
Date: Mon, 8 Nov 2021 17:26:08 -0600
Subject: [PATCH 11/23] [partition][zfs] Add support for zfs encryption

---
 .../partition/gui/CreatePartitionDialog.cpp | 10 ++-
 src/modules/zfs/ZfsJob.cpp                  | 87 +++++++++++++++----
 src/modules/zfs/ZfsJob.h                    | 19 ++++
 3 files changed, 100 insertions(+), 16 deletions(-)

diff --git a/src/modules/partition/gui/CreatePartitionDialog.cpp b/src/modules/partition/gui/CreatePartitionDialog.cpp
index 0727ee935..e7d7751a1 100644
--- a/src/modules/partition/gui/CreatePartitionDialog.cpp
+++ b/src/modules/partition/gui/CreatePartitionDialog.cpp
@@ -243,7 +243,8 @@ CreatePartitionDialog::getNewlyCreatedPartition()
     // does so, to set up the partition for create-and-then-set-flags.
     Partition* partition = nullptr;
     QString luksPassphrase = m_ui->encryptWidget->passphrase();
-    if ( m_ui->encryptWidget->state() == EncryptWidget::Encryption::Confirmed && !luksPassphrase.isEmpty() )
+    if ( m_ui->encryptWidget->state() == EncryptWidget::Encryption::Confirmed && !luksPassphrase.isEmpty()
+         && fsType != FileSystem::Zfs )
     {
         partition = KPMHelpers::createNewEncryptedPartition(
             m_parent, *m_device, m_role, fsType, fsLabel, first, last, luksPassphrase, PartitionTable::Flags() );
@@ -254,6 +255,13 @@ CreatePartitionDialog::getNewlyCreatedPartition()
             m_parent, *m_device, m_role, fsType, fsLabel, first, last, PartitionTable::Flags() );
     }

+    // For zfs, we let the zfs module handle the encryption but we need to make the passphrase available to that module
+    if ( m_ui->encryptWidget->state() == EncryptWidget::Encryption::Confirmed && !luksPassphrase.isEmpty()
+         && fsType == FileSystem::Zfs )
+    {
+        Calamares::JobQueue::instance()->globalStorage()->insert( "encryptphrase", luksPassphrase );
+    }
+
     if ( m_device->type() == Device::Type::LVM_Device )
     {
         partition->setPartitionPath( m_device->deviceNode() + QStringLiteral( "/" )
diff --git a/src/modules/zfs/ZfsJob.cpp b/src/modules/zfs/ZfsJob.cpp
index 113f9afd8..af57fa5cc 100644
--- a/src/modules/zfs/ZfsJob.cpp
+++ b/src/modules/zfs/ZfsJob.cpp
@@ -17,6 +17,8 @@
 #include "JobQueue.h"
 #include "Settings.h"

+#include
+
 ZfsJob::ZfsJob( QObject* parent )
     : Calamares::CppJob( parent )
 {
@@ -30,6 +32,56 @@ ZfsJob::prettyName() const
     return tr( "Create ZFS pools and datasets" );
 }

+ZfsResult
+ZfsJob::CreateZpool( QString deviceName, QString poolName, QString poolOptions, bool encrypt, QString passphrase ) const
+{
+    // zfs doesn't wait for the devices so pause for 2 seconds to ensure we give time for the device files to be created
+    QString command;
+    if ( encrypt )
+    {
+        command = "sleep 2 ; echo \"" + passphrase + "\" | zpool create " + poolOptions
+            + " -O encryption=aes-256-gcm -O keyformat=passphrase " + poolName + " " + deviceName;
+    }
+    else
+    {
+        command = "sleep 2 ; zpool create " + poolOptions + " " + poolName + " " + deviceName;
+    }
+
+    // We use a QProcess instead of runCommand so the password will not end up in the logs
+    QProcess process;
+
+    process.setProcessChannelMode( QProcess::MergedChannels );
+    cDebug() << Logger::SubEntry << "Running zpool create";
+
+    process.start( "sh", QStringList() << "-c" << command );
+
+    if ( !process.waitForStarted() )
+    {
+        return { false, tr( "zpool create process failed to start" ) };
+    }
+
+    if ( !process.waitForFinished( 5000 ) )
+    {
+        return { false, tr( "Process for zpool create timed out" ) };
+    }
+
+    QString output = QString::fromLocal8Bit( process.readAllStandardOutput() ).trimmed();
+
+    if ( process.exitStatus() == QProcess::CrashExit )
+    {
+        return { false, tr( "The output from the crash was: " ) + output };
+    }
+
+    auto exitcode = process.exitCode();
+    if ( exitcode != 0 )
+    {
+        cWarning() << "Failed to run zpool create.  The output was: " + output;
+        return { false, tr( "Failed to create zpool on " ) + deviceName };
+    }
+
+    return { true, QString() };
+}
+
 Calamares::JobResult
 ZfsJob::exec()
 {
@@ -78,15 +130,20 @@ ZfsJob::exec()
             continue;
         }

-        // Create the zpool
-        // zfs doesn't wait for the devices so pause for 2 seconds to ensure we give time for the device files to be created
-        auto r
-            = system->runCommand( { "sh", "-c", "sleep 2 ; zpool create " + m_poolOptions + " " + m_poolName + " " + deviceName },
-                                  std::chrono::seconds( 10 ) );
-        if ( r.getExitCode() != 0 )
+        ZfsResult zfsResult;
+        if ( gs->contains( "encryptphrase" ) )
         {
-            return Calamares::JobResult::error( tr( "zpool failure" ),
-                                                tr( "Failed to create zpool on " + deviceName.toLocal8Bit() ) );
+            zfsResult
+                = CreateZpool( deviceName, m_poolName, m_poolOptions, true, gs->value( "encryptphrase" ).toString() );
+        }
+        else
+        {
+            zfsResult = CreateZpool( deviceName, m_poolName, m_poolOptions, false );
+        }
+
+        if ( !zfsResult.success )
+        {
+            return Calamares::JobResult::error( tr( "Failed to create zpool" ), zfsResult.failureMessage );
         }

         // Create the datasets
@@ -105,12 +162,12 @@ ZfsJob::exec()

             // Create the dataset. We set canmount=no regardless of the setting for now.
             // It is modified to the correct value in the mount module to ensure mount order is maintained
-            r = system->runCommand( { "sh",
-                                      "-c",
-                                      "zfs create " + m_datasetOptions
-                                          + " -o canmount=off -o mountpoint=" + datasetMap[ "mountpoint" ].toString()
-                                          + " " + m_poolName + "/" + datasetMap[ "dsName" ].toString() },
-                                    std::chrono::seconds( 10 ) );
+            auto r = system->runCommand( { "sh",
+                                           "-c",
+                                           "zfs create " + m_datasetOptions + " -o canmount=off -o mountpoint="
+                                               + datasetMap[ "mountpoint" ].toString() + " " + m_poolName + "/"
+                                               + datasetMap[ "dsName" ].toString() },
+                                         std::chrono::seconds( 10 ) );
             if ( r.getExitCode() != 0 )
             {
                 cWarning() << "Failed to create dataset" << datasetMap[ "dsName" ].toString();
diff --git a/src/modules/zfs/ZfsJob.h b/src/modules/zfs/ZfsJob.h
index 87646a227..b2feb9e87 100644
--- a/src/modules/zfs/ZfsJob.h
+++ b/src/modules/zfs/ZfsJob.h
@@ -20,6 +20,11 @@

 #include "DllMacro.h"

+struct ZfsResult
+{
+    bool success;
+    QString failureMessage;
+};
+
 /** @brief Create zpools and zfs datasets
  *
  */
@@ -43,6 +48,20 @@ private:
     QString m_datasetOptions;

     QList< QVariant > m_datasets;
+
+    /** @brief Creates a zpool based on the provided arguments
+     *
+     * Creates a zpool
+     * @p deviceName is a full path to the device the zpool should be created on
+     * @p poolName is a string containing the name of the pool to create
+     * @p poolOptions are the options to pass to zpool create
+     * @p encrypt is a boolean which determines if the pool should be encrypted
+     * @p passphrase is a string containing the passphrase
+     *
+     */
+    ZfsResult CreateZpool( QString deviceName, QString poolName, QString poolOptions, bool encrypt, QString passphrase = QString() ) const;
 };

 CALAMARES_PLUGIN_FACTORY_DECLARATION( ZfsJobFactory )
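The QProcess detour exists because runCommand logs the full command line, which would leak the passphrase. A rough Python equivalent of the encrypted pool creation — a sketch mirroring the patch's echo pipeline, not the module's actual code:

    import subprocess

    def create_encrypted_zpool(pool, device, pool_options, passphrase):
        # keyformat=passphrase makes zpool create read the secret from stdin,
        # so it never appears in the logged command line
        cmd = ["zpool", "create"] + pool_options.split() + [
            "-O", "encryption=aes-256-gcm", "-O", "keyformat=passphrase",
            pool, device]
        subprocess.run(cmd, input=passphrase + "\n", text=True, check=True)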
From 06b6263c24894e9c12d7296a3bf580e100ef7822 Mon Sep 17 00:00:00 2001
From: dalto
Date: Tue, 9 Nov 2021 07:42:39 -0600
Subject: [PATCH 12/23] [zfs] Export zpool so it can later be mounted at the
 correct location

---
 src/modules/zfs/ZfsJob.cpp | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/src/modules/zfs/ZfsJob.cpp b/src/modules/zfs/ZfsJob.cpp
index af57fa5cc..b04dfd473 100644
--- a/src/modules/zfs/ZfsJob.cpp
+++ b/src/modules/zfs/ZfsJob.cpp
@@ -183,6 +183,13 @@ ZfsJob::exec()
         {
             gs->insert( "zfs", datasetList );
         }
+
+        // Export the zpool so it can be reimported at the correct local later
+        auto r = system->runCommand( { "zpool", "export", m_poolName }, std::chrono::seconds( 10 ) );
+        if ( r.getExitCode() != 0 )
+        {
+            cWarning() << "Failed to export pool" << m_poolName;
+        }
     }

     return Calamares::JobResult::ok();
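One YAML quirk shapes the next patch: YAML 1.1 treats bare `on`/`off` (the values zfs.conf uses for canMount) as booleans, so by the time those settings have round-tripped through global storage they arrive as true/false rather than strings. A quick demonstration of the rule using PyYAML (illustrative; Calamares' own config parsing is in C++, but it follows the same YAML 1.1 behavior):

    import yaml

    print(yaml.safe_load("canMount: off"))    # {'canMount': False}
    print(yaml.safe_load("canMount: 'off'"))  # {'canMount': 'off'}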
From 90452147a343979c9fe7d03672e5bd5db1a08482 Mon Sep 17 00:00:00 2001
From: dalto
Date: Tue, 9 Nov 2021 14:53:44 -0600
Subject: [PATCH 13/23] [mount] Fix zfs code and add support for encryption

---
 src/modules/mount/main.py | 62 +++++++++++++++++++++++++++++++--------
 1 file changed, 50 insertions(+), 12 deletions(-)

diff --git a/src/modules/mount/main.py b/src/modules/mount/main.py
index 492525f0e..813114f0a 100644
--- a/src/modules/mount/main.py
+++ b/src/modules/mount/main.py
@@ -61,6 +61,21 @@ def get_btrfs_subvolumes(partitions):
     return btrfs_subvolumes

+def parse_global_storage(gs_value):
+    """
+    Something in the chain is converting on and off to true and false. This converts it back.
+
+    :param gs_value: The value from global storage
+    :return:
+    """
+    if gs_value is True:
+        return "on"
+    elif gs_value is False:
+        return "off"
+    else:
+        return gs_value
+
+
 def mount_partition(root_mount_point, partition, partitions):
     """
     Do a single mount of @p partition inside @p root_mount_point.
@@ -136,18 +151,45 @@ def mount_partition(root_mount_point, partition, partitions):
         zfs = libcalamares.globalstorage.value("zfs")

         if not zfs:
-            libcalamares.utils.error("Failed to locate zfs dataset list")
+            libcalamares.utils.warning("Failed to locate zfs dataset list")
+            raise Exception("Internal error mounting zfs datasets")
+
+        # import the zpool
+        import_result = subprocess.run(['zpool', 'import', '-R', root_mount_point, zfs[0]['zpool']])
+        if import_result.returncode != 0:
+            raise Exception("Failed to import zpool")
+
+        passphrase = libcalamares.globalstorage.value("encryptphrase")
+        if passphrase:
+            # The zpool is encrypted, we need to unlock it
+            loadkey_result = subprocess.run(['sh', '-c', 'echo "' + passphrase + '" | zfs load-key ' + zfs[0]['zpool']])
+            if loadkey_result.returncode != 0:
+                raise Exception("Failed to unlock zpool")
+
+        # first we handle the / dataset if there is one
+        for dataset in zfs:
+            if dataset['mountpoint'] == '/':
+                # Properly set the canmount field from global storage
+                can_mount = parse_global_storage(dataset['canMount'])
+                set_result = subprocess.run(['zfs', 'set', 'canmount=' + can_mount,
+                                             dataset['zpool'] + '/' + dataset['dsName']])
+                if set_result.returncode != 0:
+                    raise Exception("Failed to set zfs mountpoint")
+                if dataset['canMount'] == 'noauto':
+                    mount_result = subprocess.run(['zfs', 'mount', dataset['zpool'] + '/' + dataset['dsName']])
+                    if mount_result.returncode != 0:
+                        raise Exception("Failed to mount root dataset")

         # Set the canmount property for each dataset. This will effectively mount the dataset
         for dataset in zfs:
-            try:
-                if dataset['canmount'] == 'noauto' or dataset['canmount'] == 'on':
-                    subprocess.check_call(['zfs', 'set', 'canmount=' + dataset['canmount'],
-                                           dataset['zpool'] + '/' + dataset['dsName']])
+            # We already handled the / mountpoint above
+            if dataset['mountpoint'] != '/':
+                can_mount = parse_global_storage(dataset['canMount'])

-                # It is common for the / mountpoint to be set to noauto since it is mounted by the initrd
-                # If this is the case we need to manually mount it here
-                if dataset['mountpoint'] == '/' and dataset['canmount'] == 'noauto':
-                    subprocess.check_call(['zfs', 'mount', dataset['zpool'] + '/' + dataset['dsName']])
-            except KeyError:
-                # This should be impossible
-                libcalamares.utils.error("Internal error handling zfs dataset")
+                set_result = subprocess.run(['zfs', 'set', 'canmount=' + can_mount,
+                                             dataset['zpool'] + '/' + dataset['dsName']])
+                if set_result.returncode != 0:
+                    raise Exception("Failed to set zfs mountpoint")

 def run():

From 91762e3df4f44b4bca47b9a8ae280a96109d9e0c Mon Sep 17 00:00:00 2001
From: dalto
Date: Tue, 9 Nov 2021 14:54:46 -0600
Subject: [PATCH 14/23] [zfs] Fix typo and add missing continue

---
 src/modules/zfs/ZfsJob.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/modules/zfs/ZfsJob.cpp b/src/modules/zfs/ZfsJob.cpp
index b04dfd473..8790b1fff 100644
--- a/src/modules/zfs/ZfsJob.cpp
+++ b/src/modules/zfs/ZfsJob.cpp
@@ -171,6 +171,7 @@ ZfsJob::exec()
             if ( r.getExitCode() != 0 )
             {
                 cWarning() << "Failed to create dataset" << datasetMap[ "dsName" ].toString();
+                continue;
             }

             // Add the dataset to the list for global storage
@@ -184,7 +185,7 @@ ZfsJob::exec()
             gs->insert( "zfs", datasetList );
         }

-        // Export the zpool so it can be reimported at the correct local later
+        // Export the zpool so it can be reimported at the correct location later
         auto r = system->runCommand( { "zpool", "export", m_poolName }, std::chrono::seconds( 10 ) );
         if ( r.getExitCode() != 0 )
         {
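Patch 15 below moves from a single pool to one zpool per ZFS mountpoint, deriving each extra pool's name from the configured base name plus the mountpoint stripped of non-alphanumerics. A sketch of that naming scheme (illustrative only):

    import re

    def pool_name_for(base, mountpoint):
        # "/" keeps the configured pool name; "/var/log" yields e.g. "zpcalavarlog"
        if mountpoint == "/":
            return base
        return base + re.sub(r"[^a-zA-Z\d\s]", "", mountpoint)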
From 4bed079ebf57ba1c17585cade52298bd2910f931 Mon Sep 17 00:00:00 2001
From: dalto
Date: Fri, 12 Nov 2021 16:06:06 -0600
Subject: [PATCH 15/23] Add support for multiple zpools

---
 src/modules/mount/main.py                   | 123 ++++++++++-------
 .../partition/gui/CreatePartitionDialog.cpp |  26 +++-
 src/modules/zfs/ZfsJob.cpp                  | 124 ++++++++++++++----
 3 files changed, 192 insertions(+), 81 deletions(-)

diff --git a/src/modules/mount/main.py b/src/modules/mount/main.py
index 813114f0a..20815969a 100644
--- a/src/modules/mount/main.py
+++ b/src/modules/mount/main.py
@@ -111,11 +111,79 @@ def mount_partition(root_mount_point, partition, partitions):
     if "luksMapperName" in partition:
         device = os.path.join("/dev/mapper", partition["luksMapperName"])

-    if libcalamares.utils.mount(device,
-                                mount_point,
-                                fstype,
-                                partition.get("options", "")) != 0:
-        libcalamares.utils.warning("Cannot mount {}".format(device))
+    if fstype == "zfs":
+        zfs_pool_list = libcalamares.globalstorage.value("zfsPoolInfo")
+        if not zfs_pool_list:
+            libcalamares.utils.warning("Failed to locate zfsPoolInfo data in global storage")
+            raise Exception("Internal error mounting zfs datasets")
+
+        for zfs_pool in zfs_pool_list:
+            libcalamares.utils.warning("Poolname: " + zfs_pool["poolName"] + " mountpoint: " + zfs_pool["mountpoint"])
+            if zfs_pool["mountpoint"] == partition["mountPoint"]:
+                pool_name = zfs_pool["poolName"]
+                ds_name = zfs_pool["dsName"];
+
+        # import the zpool
+        import_result = subprocess.run(['zpool', 'import', '-R', root_mount_point, pool_name])
+        if import_result.returncode != 0:
+            raise Exception("Failed to import zpool")
+
+        zfs_info_list = libcalamares.globalstorage.value("zfsInfo")
+        encrypt = False
+        if zfs_info_list:
+            for zfs_info in zfs_info_list:
+                if zfs_info["mountpoint"] == partition["mountPoint"] and zfs_info["encrypted"] is True:
+                    encrypt = True
+                    passphrase = zfs_info["passphrase"]
+
+        if encrypt is True:
+            # The zpool is encrypted, we need to unlock it
+            loadkey_result = subprocess.run(['sh', '-c', 'echo "' + passphrase + '" | zfs load-key ' + pool_name])
+            if loadkey_result.returncode != 0:
+                raise Exception("Failed to unlock zpool")
+
+        if partition["mountPoint"] == '/':
+            # Get the zfs dataset list from global storage
+            zfs = libcalamares.globalstorage.value("zfsDatasets")
+
+            if not zfs:
+                libcalamares.utils.warning("Failed to locate zfs dataset list")
+                raise Exception("Internal error mounting zfs datasets")
+
+            # first we handle the / dataset if there is one
+            for dataset in zfs:
+                if dataset['mountpoint'] == '/':
+                    # Properly set the canmount field from global storage
+                    can_mount = parse_global_storage(dataset['canMount'])
+                    set_result = subprocess.run(['zfs', 'set', 'canmount=' + can_mount,
+                                                 dataset['zpool'] + '/' + dataset['dsName']])
+                    if set_result.returncode != 0:
+                        raise Exception("Failed to set zfs mountpoint")
+                    if dataset['canMount'] == 'noauto':
+                        mount_result = subprocess.run(['zfs', 'mount', dataset['zpool'] + '/' + dataset['dsName']])
+                        if mount_result.returncode != 0:
+                            raise Exception("Failed to mount root dataset")
+
+            # Set the canmount property for each dataset. This will effectively mount the dataset
+            for dataset in zfs:
+                # We already handled the / mountpoint above
+                if dataset['mountpoint'] != '/':
+                    can_mount = parse_global_storage(dataset['canMount'])
+
+                    set_result = subprocess.run(['zfs', 'set', 'canmount=' + can_mount,
+                                                 dataset['zpool'] + '/' + dataset['dsName']])
+                    if set_result.returncode != 0:
+                        raise Exception("Failed to set zfs mountpoint")
+        else:
+            set_result = subprocess.run(['zfs', 'set', 'canmount=on', pool_name + '/' + ds_name])
+            if set_result.returncode != 0:
+                raise Exception("Failed to set zfs mountpoint")
+    else:  # fstype == "zfs"
+        if libcalamares.utils.mount(device,
+                                    mount_point,
+                                    fstype,
+                                    partition.get("options", "")) != 0:
+            libcalamares.utils.warning("Cannot mount {}".format(device))

     # Special handling for btrfs subvolumes. 
Create the subvolumes listed in mount.conf if fstype == "btrfs" and partition["mountPoint"] == '/': @@ -146,51 +214,6 @@ def mount_partition(root_mount_point, partition, partitions): ",".join([mount_option, partition.get("options", "")])) != 0: libcalamares.utils.warning("Cannot mount {}".format(device)) - if fstype == "zfs" and partition["mountPoint"] == '/': - # Get the zfs dataset list from global storage - zfs = libcalamares.globalstorage.value("zfs") - - if not zfs: - libcalamares.utils.warning("Failed to locate zfs dataset list") - raise Exception("Internal error mounting zfs datasets") - - # import the zpool - import_result = subprocess.run(['zpool', 'import', '-R', root_mount_point, zfs[0]['zpool']]) - if import_result.returncode != 0: - raise Exception("Failed to import zpool") - - passphrase = libcalamares.globalstorage.value("encryptphrase") - if passphrase: - # The zpool is encrypted, we need to unlock it - loadkey_result = subprocess.run(['sh', '-c', 'echo "' + passphrase + '" | zfs load-key ' + zfs[0]['zpool']]) - if loadkey_result.returncode != 0: - raise Exception("Failed to unlock zpool") - - # first we handle the / dataset if there is one - for dataset in zfs: - if dataset['mountpoint'] == '/': - # Properly set the canmount field from global storage - can_mount = parse_global_storage(dataset['canMount']) - set_result = subprocess.run(['zfs', 'set', 'canmount=' + can_mount, - dataset['zpool'] + '/' + dataset['dsName']]) - if set_result.returncode != 0: - raise Exception("Failed to set zfs mountpoint") - if dataset['canMount'] == 'noauto': - mount_result = subprocess.run(['zfs', 'mount', dataset['zpool'] + '/' + dataset['dsName']]) - if mount_result.returncode != 0: - raise Exception("Failed to mount root dataset") - - # Set the canmount property for each dataset. 
This will effectively mount the dataset - for dataset in zfs: - # We already handled the / mountpoint above - if dataset['mountpoint'] != '/': - can_mount = parse_global_storage(dataset['canMount']) - - set_result = subprocess.run(['zfs', 'set', 'canmount=' + can_mount, - dataset['zpool'] + '/' + dataset['dsName']]) - if set_result.returncode != 0: - raise Exception("Failed to set zfs mountpoint") - def run(): """ diff --git a/src/modules/partition/gui/CreatePartitionDialog.cpp b/src/modules/partition/gui/CreatePartitionDialog.cpp index e7d7751a1..6bde9a148 100644 --- a/src/modules/partition/gui/CreatePartitionDialog.cpp +++ b/src/modules/partition/gui/CreatePartitionDialog.cpp @@ -255,11 +255,29 @@ CreatePartitionDialog::getNewlyCreatedPartition() m_parent, *m_device, m_role, fsType, fsLabel, first, last, PartitionTable::Flags() ); } - // For zfs, we let the zfs module handle the encryption but we need to make the passphrase available to that module - if( m_ui->encryptWidget->state() == EncryptWidget::Encryption::Confirmed && !luksPassphrase.isEmpty() - && fsType == FileSystem::Zfs ) + // For zfs, we let the zfs module handle the encryption but we need to make the passphrase available to later modules + if ( fsType == FileSystem::Zfs ) { - Calamares::JobQueue::instance()->globalStorage()->insert( "encryptphrase", luksPassphrase ); + Calamares::GlobalStorage* storage = Calamares::JobQueue::instance()->globalStorage(); + QList< QVariant > zfsInfoList; + QVariantMap zfsInfo; + + // If this is not the first encrypted zfs partition, get the old list first + if ( storage->contains( "zfsInfo" ) ) + { + zfsInfoList = storage->value( "zfsInfo" ).toList(); + storage->remove( "zfsInfo" ); + } + + // Save the information subsequent modules will need + zfsInfo[ "encrypted" ] + = m_ui->encryptWidget->state() == EncryptWidget::Encryption::Confirmed && !luksPassphrase.isEmpty(); + zfsInfo[ "passphrase" ] = luksPassphrase; + zfsInfo[ "mountpoint" ] = selectedMountPoint( m_ui->mountPointComboBox ); + + // Add it to the list and insert it into global storage + zfsInfoList.append( zfsInfo ); + storage->insert( "zfsInfo", zfsInfoList ); } if ( m_device->type() == Device::Type::LVM_Device ) diff --git a/src/modules/zfs/ZfsJob.cpp b/src/modules/zfs/ZfsJob.cpp index 8790b1fff..26d96183c 100644 --- a/src/modules/zfs/ZfsJob.cpp +++ b/src/modules/zfs/ZfsJob.cpp @@ -101,6 +101,8 @@ ZfsJob::exec() const CalamaresUtils::System* system = CalamaresUtils::System::instance(); + QVariantList poolNames; + for ( auto& partition : qAsConst( partitions ) ) { QVariantMap pMap; @@ -130,15 +132,48 @@ ZfsJob::exec() continue; } + QString mountpoint = pMap[ "mountPoint" ].toString(); + if ( mountpoint.isEmpty() ) + { + continue; + } + + // Build a poolname off the mountpoint, this is not ideal but should work until there is UI built for zfs + QString poolName = m_poolName; + if ( mountpoint != '/' ) + { + QString suffix = mountpoint; + poolName += suffix.remove( QRegExp( "[^a-zA-Z\\d\\s]" ) ); + } + + if ( !gs->contains( "zfsInfo" ) && gs->value( "zfsInfo" ).canConvert( QVariant::List ) ) + { + return Calamares::JobResult::error( tr( "Internal data missing" ), tr( "Failed to create zpool" ) ); + } + + QVariantList zfsInfoList = gs->value( "zfsInfo" ).toList(); + + bool encrypt = false; + QString passphrase; + for ( const QVariant& zfsInfo : qAsConst( zfsInfoList ) ) + { + if ( zfsInfo.canConvert( QVariant::Map ) && zfsInfo.toMap().value( "encrypted" ).toBool() + && mountpoint == zfsInfo.toMap().value( "mountpoint" ) ) + { + encrypt 
= true; + passphrase = zfsInfo.toMap().value( "passphrase" ).toString(); + } + } + ZfsResult zfsResult; - if ( gs->contains( "encryptphrase" ) ) + if ( encrypt ) { zfsResult - = CreateZpool( deviceName, m_poolName, m_poolOptions, true, gs->value( "encryptphrase" ).toString() ); + = CreateZpool( deviceName, poolName, m_poolOptions, true, passphrase ); } else { - zfsResult = CreateZpool( deviceName, m_poolName, m_poolOptions, false ); + zfsResult = CreateZpool( deviceName, poolName, m_poolOptions, false ); } if ( !zfsResult.success ) @@ -146,53 +181,88 @@ ZfsJob::exec() return Calamares::JobResult::error( tr( "Failed to create zpool" ), zfsResult.failureMessage ); } - // Create the datasets - QVariantList datasetList; - for ( const auto& dataset : qAsConst( m_datasets ) ) - { - QVariantMap datasetMap = dataset.toMap(); + // Add the poolname, dataset name and mountpoint to the list + QVariantMap poolNameEntry; + poolNameEntry["poolName"] = poolName; + poolNameEntry["mountpoint"] = mountpoint; + poolNameEntry["dsName"] = "none"; - // Make sure all values are valid - if ( datasetMap[ "dsName" ].toString().isEmpty() || datasetMap[ "mountpoint" ].toString().isEmpty() - || datasetMap[ "canMount" ].toString().isEmpty() ) + + if ( mountpoint == '/' ) + { + // Create the datasets + QVariantList datasetList; + for ( const auto& dataset : qAsConst( m_datasets ) ) { - cWarning() << "Bad dataset entry"; - continue; + QVariantMap datasetMap = dataset.toMap(); + + // Make sure all values are valid + if ( datasetMap[ "dsName" ].toString().isEmpty() || datasetMap[ "mountpoint" ].toString().isEmpty() + || datasetMap[ "canMount" ].toString().isEmpty() ) + { + cWarning() << "Bad dataset entry"; + continue; + } + + // Create the dataset. We set canmount=no regardless of the setting for now. + // It is modified to the correct value in the mount module to ensure mount order is maintained + auto r = system->runCommand( { "sh", + "-c", + "zfs create " + m_datasetOptions + " -o canmount=off -o mountpoint=" + + datasetMap[ "mountpoint" ].toString() + " " + poolName + "/" + + datasetMap[ "dsName" ].toString() }, + std::chrono::seconds( 10 ) ); + if ( r.getExitCode() != 0 ) + { + cWarning() << "Failed to create dataset" << datasetMap[ "dsName" ].toString(); + continue; + } + + // Add the dataset to the list for global storage + datasetMap[ "zpool" ] = m_poolName; + datasetList.append( datasetMap ); } + // If the list isn't empty, add it to global storage + if ( !datasetList.isEmpty() ) + { + gs->insert( "zfsDatasets", datasetList ); + } + } + else + { // Create the dataset. We set canmount=no regardless of the setting for now. 
// It is modified to the correct value in the mount module to ensure mount order is maintained + QString dsName = mountpoint; + dsName.remove( QRegExp( "[^a-zA-Z\\d\\s]" ) ); auto r = system->runCommand( { "sh", "-c", "zfs create " + m_datasetOptions + " -o canmount=off -o mountpoint=" - + datasetMap[ "mountpoint" ].toString() + " " + m_poolName + "/" - + datasetMap[ "dsName" ].toString() }, + + mountpoint + " " + poolName + "/" + + dsName }, std::chrono::seconds( 10 ) ); if ( r.getExitCode() != 0 ) { - cWarning() << "Failed to create dataset" << datasetMap[ "dsName" ].toString(); - continue; + cWarning() << "Failed to create dataset" << dsName; } - - // Add the dataset to the list for global storage - datasetMap[ "zpool" ] = m_poolName; - datasetList.append( datasetMap ); + poolNameEntry["dsName"] = dsName; } - // If the list isn't empty, add it to global storage - if ( !datasetList.isEmpty() ) - { - gs->insert( "zfs", datasetList ); - } + poolNames.append(poolNameEntry); // Export the zpool so it can be reimported at the correct location later - auto r = system->runCommand( { "zpool", "export", m_poolName }, std::chrono::seconds( 10 ) ); + auto r = system->runCommand( { "zpool", "export", poolName }, std::chrono::seconds( 10 ) ); if ( r.getExitCode() != 0 ) { cWarning() << "Failed to export pool" << m_poolName; } } + if (!poolNames.isEmpty()) + { + gs->insert("zfsPoolInfo", poolNames); + } + return Calamares::JobResult::ok(); } From 8bdfcac0fbae62623c2cedc92439d0472ea72865 Mon Sep 17 00:00:00 2001 From: dalto Date: Sat, 13 Nov 2021 09:31:23 -0600 Subject: [PATCH 16/23] [partition] Add support for zfs encryption when erase disk is selected --- src/modules/mount/main.py | 1 - .../partition/core/PartitionLayout.cpp | 22 ++++++++++++++++++- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/src/modules/mount/main.py b/src/modules/mount/main.py index 20815969a..f58b90e9f 100644 --- a/src/modules/mount/main.py +++ b/src/modules/mount/main.py @@ -118,7 +118,6 @@ def mount_partition(root_mount_point, partition, partitions): raise Exception("Internal error mounting zfs datasets") for zfs_pool in zfs_pool_list: - libcalamares.utils.warning("Poolname: " + zfs_pool["poolName"] + " mountpoint: " + zfs_pool["mountpoint"]) if zfs_pool["mountpoint"] == partition["mountPoint"]: pool_name = zfs_pool["poolName"] ds_name = zfs_pool["dsName"]; diff --git a/src/modules/partition/core/PartitionLayout.cpp b/src/modules/partition/core/PartitionLayout.cpp index 8ae904e92..f60952643 100644 --- a/src/modules/partition/core/PartitionLayout.cpp +++ b/src/modules/partition/core/PartitionLayout.cpp @@ -296,7 +296,9 @@ PartitionLayout::createPartitions( Device* dev, } Partition* part = nullptr; - if ( luksPassphrase.isEmpty() ) + + // Encryption for zfs is handled in the zfs module + if ( luksPassphrase.isEmpty() || correctFS( entry.partFileSystem ) == FileSystem::Zfs ) { part = KPMHelpers::createNewPartition( parent, *dev, @@ -319,6 +321,24 @@ PartitionLayout::createPartitions( Device* dev, luksPassphrase, KPM_PARTITION_FLAG( None ) ); } + + // For zfs, we need to make the passphrase available to later modules + if ( correctFS( entry.partFileSystem ) == FileSystem::Zfs ) + { + Calamares::GlobalStorage* storage = Calamares::JobQueue::instance()->globalStorage(); + QList< QVariant > zfsInfoList; + QVariantMap zfsInfo; + + // Save the information subsequent modules will need + zfsInfo[ "encrypted" ] = !luksPassphrase.isEmpty(); + zfsInfo[ "passphrase" ] = luksPassphrase; + zfsInfo[ "mountpoint" ] = 
entry.partMountPoint;
+
+            // Add it to the list and insert it into global storage
+            zfsInfoList.append( zfsInfo );
+            storage->insert( "zfsInfo", zfsInfoList );
+        }
+
         PartitionInfo::setFormat( part, true );
         PartitionInfo::setMountPoint( part, entry.partMountPoint );
         if ( !entry.partLabel.isEmpty() )

From cf20d6495bc72578fb2ba89509fd85b867c85aa7 Mon Sep 17 00:00:00 2001
From: dalto
Date: Sat, 13 Nov 2021 10:43:07 -0600
Subject: [PATCH 17/23] [partition] Ensure format is selected for existing zfs
 partitions

---
 src/modules/partition/gui/EditExistingPartitionDialog.cpp | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/src/modules/partition/gui/EditExistingPartitionDialog.cpp b/src/modules/partition/gui/EditExistingPartitionDialog.cpp
index 04b9527f1..a3052b3b7 100644
--- a/src/modules/partition/gui/EditExistingPartitionDialog.cpp
+++ b/src/modules/partition/gui/EditExistingPartitionDialog.cpp
@@ -120,6 +120,12 @@ EditExistingPartitionDialog::EditExistingPartitionDialog( Device* device,
     m_ui->fileSystemLabel->setEnabled( m_ui->formatRadioButton->isChecked() );
     m_ui->fileSystemComboBox->setEnabled( m_ui->formatRadioButton->isChecked() );

+    // Force a format if the existing device is a zfs device since reusing a zpool isn't currently supported
+    m_ui->formatRadioButton->setChecked( m_partition->fileSystem().type() == FileSystem::Type::Zfs );
+    m_ui->formatRadioButton->setEnabled( !( m_partition->fileSystem().type() == FileSystem::Type::Zfs ) );
+    m_ui->keepRadioButton->setChecked( !( m_partition->fileSystem().type() == FileSystem::Type::Zfs ) );
+    m_ui->keepRadioButton->setEnabled( !( m_partition->fileSystem().type() == FileSystem::Type::Zfs ) );
+
     setFlagList( *( m_ui->m_listFlags ), m_partition->availableFlags(), PartitionInfo::flags( m_partition ) );
 }

From cca38695ed8ac877537cd995c03289498abbb9cc Mon Sep 17 00:00:00 2001
From: dalto
Date: Sat, 13 Nov 2021 11:13:39 -0600
Subject: [PATCH 18/23] [umount] Export zpools after unmounting

---
 src/modules/umount/main.py | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/src/modules/umount/main.py b/src/modules/umount/main.py
index 0035a6b0f..795eb8d2b 100644
--- a/src/modules/umount/main.py
+++ b/src/modules/umount/main.py
@@ -49,6 +49,26 @@ def list_mounts(root_mount_point):
     return lst

+def export_zpools(root_mount_point):
+    """ Exports the zpools if defined in global storage
+
+    :param root_mount_point: The absolute path to the root of the install
+    :return:
+    """
+    try:
+        zfs_pool_list = libcalamares.globalstorage.value("zfsPoolInfo")
+        zfs_pool_list.sort(reverse=True, key=lambda x: x["poolName"])
+        if zfs_pool_list:
+            for zfs_pool in zfs_pool_list:
+                export_result = subprocess.run(['zpool', 'export', zfs_pool["poolName"]])
+                if export_result.returncode != 0:
+                    libcalamares.utils.warning("Failed to export zpool")
+    except Exception as e:
+        # If this fails it shouldn't cause the installation to fail
+        libcalamares.utils.warning("Received exception while exporting zpools: " + format(e))
+        pass
+
+
 def run():
     """
     Unmounts given mountpoints in decreasing order.
@@ -94,6 +114,8 @@ def run():
         # in the exception object.
        subprocess.check_output(["umount", "-lv", mount_point], stderr=subprocess.STDOUT)
 
+    export_zpools(root_mount_point)
+
     os.rmdir(root_mount_point)
 
     return None

From c3524c07adf9574677fdffc062dd08425dcac819 Mon Sep 17 00:00:00 2001
From: dalto
Date: Sat, 13 Nov 2021 13:43:26 -0600
Subject: [PATCH 19/23] [zfs] Ensure overlapping datasets don't get created and code cleanup

---
 src/modules/zfs/ZfsJob.cpp | 90 ++++++++++++++++++++++++++++----------
 src/modules/zfs/ZfsJob.h   | 35 +++++++++++++--
 2 files changed, 99 insertions(+), 26 deletions(-)

diff --git a/src/modules/zfs/ZfsJob.cpp b/src/modules/zfs/ZfsJob.cpp
index 26d96183c..d0367f5f6 100644
--- a/src/modules/zfs/ZfsJob.cpp
+++ b/src/modules/zfs/ZfsJob.cpp
@@ -32,6 +32,43 @@ ZfsJob::prettyName() const
     return tr( "Create ZFS pools and datasets" );
 }
 
+QString
+ZfsJob::AlphaNumeric( QString input ) const
+{
+    return input.remove( QRegExp( "[^a-zA-Z\\d\\s]" ) );
+}
+
+void
+ZfsJob::CollectMountpoints( const QVariantList& partitions )
+{
+    m_mountpoints.clear();
+    for ( const QVariant& partition : partitions )
+    {
+        if ( partition.canConvert( QVariant::Map ) )
+        {
+            QString mountpoint = partition.toMap().value( "mountPoint" ).toString();
+            if ( !mountpoint.isEmpty() )
+            {
+                m_mountpoints.append( mountpoint );
+            }
+        }
+    }
+}
+
+bool
+ZfsJob::IsMountpointOverlapping( const QString& targetMountpoint ) const
+{
+    for ( const QString& mountpoint : m_mountpoints )
+    {
+        if ( mountpoint != '/' && targetMountpoint.startsWith( mountpoint ) )
+        {
+            return true;
+        }
+    }
+    return false;
+}
+
+
 ZfsResult
 ZfsJob::CreateZpool( QString deviceName, QString poolName, QString poolOptions, bool encrypt, QString passphrase ) const
 {
@@ -85,7 +122,7 @@ ZfsJob::CreateZpool( QString deviceName, QString poolName, QString poolOptions,
 Calamares::JobResult
 ZfsJob::exec()
 {
-    QList< QVariant > partitions;
+    QVariantList partitions;
     Calamares::GlobalStorage* gs = Calamares::JobQueue::instance()->globalStorage();
     if ( gs && gs->contains( "partitions" ) && gs->value( "partitions" ).canConvert( QVariant::List ) )
     {
@@ -132,27 +169,28 @@ ZfsJob::exec()
             continue;
         }
 
+        // If the partition doesn't have a mountpoint, skip it
         QString mountpoint = pMap[ "mountPoint" ].toString();
        if ( mountpoint.isEmpty() )
        {
            continue;
        }
 
-        // Build a poolname off the mountpoint, this is not ideal but should work until there is UI built for zfs
+        // Build a poolname from the configured pool name and the mountpoint; this is not ideal but should work until there is UI built for zfs
         QString poolName = m_poolName;
         if ( mountpoint != '/' )
         {
-            QString suffix = mountpoint;
-            poolName += suffix.remove( QRegExp( "[^a-zA-Z\\d\\s]" ) );
+            poolName += AlphaNumeric( mountpoint );
         }
 
+        // Check to ensure the list of zfs info from the partition module is available and convert it to a list
         if ( !( gs->contains( "zfsInfo" ) && gs->value( "zfsInfo" ).canConvert( QVariant::List ) ) )
         {
             return Calamares::JobResult::error( tr( "Internal data missing" ), tr( "Failed to create zpool" ) );
         }
-
         QVariantList zfsInfoList = gs->value( "zfsInfo" ).toList();
 
+        // Look in the zfs info list to see if this partition should be encrypted
         bool encrypt = false;
         QString passphrase;
         for ( const QVariant& zfsInfo : qAsConst( zfsInfoList ) )
@@ -165,11 +203,11 @@ ZfsJob::exec()
             }
         }
 
+        // Create the zpool
         ZfsResult zfsResult;
         if ( encrypt )
        {
-            zfsResult
-                = CreateZpool( deviceName, poolName, m_poolOptions, true, passphrase );
+            zfsResult = CreateZpool( deviceName, poolName, m_poolOptions, true, passphrase );
         }
         else
         {
@@ -181,16 +219,17 @@ ZfsJob::exec()
            return Calamares::JobResult::error( tr( "Failed to create zpool" ), zfsResult.failureMessage );
        }
 
-        // Add the poolname, dataset name and mountpoint to the list
+        // Save the poolname, dataset name and mountpoint. It will later be added to a list and placed in global storage.
+        // This will be used by later modules including mount and umount
         QVariantMap poolNameEntry;
-        poolNameEntry["poolName"] = poolName;
-        poolNameEntry["mountpoint"] = mountpoint;
-        poolNameEntry["dsName"] = "none";
-
+        poolNameEntry[ "poolName" ] = poolName;
+        poolNameEntry[ "mountpoint" ] = mountpoint;
+        poolNameEntry[ "dsName" ] = "none";
 
+        // If the mountpoint is /, create datasets per the config file. If not, create a single dataset mounted at the partition's mountpoint
         if ( mountpoint == '/' )
         {
-            // Create the datasets
+            CollectMountpoints( partitions );
             QVariantList datasetList;
             for ( const auto& dataset : qAsConst( m_datasets ) )
             {
@@ -204,6 +243,12 @@ ZfsJob::exec()
                     continue;
                 }
 
+                // We should skip this dataset if it conflicts with a permanent mountpoint
+                if ( IsMountpointOverlapping( datasetMap[ "mountpoint" ].toString() ) )
+                {
+                    continue;
+                }
+
                 // Create the dataset. We set canmount=no regardless of the setting for now.
                 // It is modified to the correct value in the mount module to ensure mount order is maintained
                 auto r = system->runCommand( { "sh",
@@ -218,7 +263,8 @@ ZfsJob::exec()
                     continue;
                 }
 
-                // Add the dataset to the list for global storage
+                // Add the dataset to the list for global storage; this information is used later to properly set
+                // the mount options on each dataset
                 datasetMap[ "zpool" ] = m_poolName;
                 datasetList.append( datasetMap );
             }
@@ -231,24 +277,23 @@ ZfsJob::exec()
         }
         else
         {
-            // Create the dataset. We set canmount=no regardless of the setting for now.
+            // This is a zpool with a single dataset. We again set canmount=off regardless of the desired setting.
// It is modified to the correct value in the mount module to ensure mount order is maintained
             QString dsName = mountpoint;
-            dsName.remove( QRegExp( "[^a-zA-Z\\d\\s]" ) );
+            dsName = AlphaNumeric( mountpoint );
             auto r = system->runCommand( { "sh",
                                            "-c",
                                            "zfs create " + m_datasetOptions + " -o canmount=off -o mountpoint="
-                                               + mountpoint + " " + poolName + "/"
-                                               + dsName },
+                                               + mountpoint + " " + poolName + "/" + dsName },
                                          std::chrono::seconds( 10 ) );
             if ( r.getExitCode() != 0 )
             {
                 cWarning() << "Failed to create dataset" << dsName;
             }
-            poolNameEntry["dsName"] = dsName;
+            poolNameEntry[ "dsName" ] = dsName;
         }
 
-        poolNames.append(poolNameEntry);
+        poolNames.append( poolNameEntry );
 
         // Export the zpool so it can be reimported at the correct location later
         auto r = system->runCommand( { "zpool", "export", poolName }, std::chrono::seconds( 10 ) );
@@ -258,9 +303,10 @@ ZfsJob::exec()
         }
     }
 
-    if (!poolNames.isEmpty())
+    // Put the list of zpools into global storage
+    if ( !poolNames.isEmpty() )
     {
-        gs->insert("zfsPoolInfo", poolNames);
+        gs->insert( "zfsPoolInfo", poolNames );
     }
 
     return Calamares::JobResult::ok();
diff --git a/src/modules/zfs/ZfsJob.h b/src/modules/zfs/ZfsJob.h
index b2feb9e87..4744954c2 100644
--- a/src/modules/zfs/ZfsJob.h
+++ b/src/modules/zfs/ZfsJob.h
@@ -20,7 +20,8 @@
 
 #include "DllMacro.h"
 
-struct ZfsResult {
+struct ZfsResult
+{
     bool success;
     QString failureMessage;
 };
@@ -46,12 +47,12 @@ private:
     QString m_poolName;
     QString m_poolOptions;
     QString m_datasetOptions;
+    QStringList m_mountpoints;
 
-    QList<QVariant> m_datasets;
+    QList< QVariant > m_datasets;
 
     /** @brief Creates a zpool based on the provided arguments
      *
-     * Creates a zpool
      * @p deviceName is a full path to the device the zpool should be created on
      * @p poolName is a string containing the name of the pool to create
      * @p poolOptions are the options to pass to zpool create
      * @p passphrase is a string containing the passphrase
      *
      */
-    ZfsResult CreateZpool(QString deviceName, QString poolName, QString poolOptions, bool encrypt, QString passphrase = QString() ) const;
+    ZfsResult CreateZpool( QString deviceName,
+                           QString poolName,
+                           QString poolOptions,
+                           bool encrypt,
+                           QString passphrase = QString() ) const;
 
+    /** @brief Returns the alphanumeric portion of a string
+     *
+     * @p input is the input string
+     *
+     */
+    QString AlphaNumeric( QString input ) const;
 
+    /** @brief Collects all the mountpoints from the partitions
+     *
+     * Iterates over @p partitions to gather each mountpoint present
+     * in the list of maps and populates m_mountpoints
+     *
+     */
+    void CollectMountpoints( const QVariantList& partitions );
+
+    /** @brief Check to see if a given mountpoint overlaps with one of the defined mountpoints
+     *
+     * Iterates over m_mountpoints and checks if @p targetMountpoint overlaps with them by comparing
+     * the beginning of targetMountpoint with all the values in m_mountpoints. Of course, / is excluded
+     * since all the mountpoints would begin with /
+     *
+     */
+    bool IsMountpointOverlapping( const QString& targetMountpoint ) const;
 };
 
 CALAMARES_PLUGIN_FACTORY_DECLARATION( ZfsJobFactory )

From af4b87a4cccd14548a0cfac47aa70ce8dd56f412 Mon Sep 17 00:00:00 2001
From: dalto
Date: Sat, 13 Nov 2021 14:09:16 -0600
Subject: [PATCH 20/23] [mount] Move zfs code into a separate function to improve readability

---
 src/modules/mount/main.py | 148 ++++++++++++++++++++------------------
 1 file changed, 80 insertions(+), 68 deletions(-)

diff --git a/src/modules/mount/main.py b/src/modules/mount/main.py
index f58b90e9f..72c2015c6 100644
--- a/src/modules/mount/main.py
+++ b/src/modules/mount/main.py
@@ -62,10 +62,9 @@ def get_btrfs_subvolumes(partitions):
 
 
 def parse_global_storage(gs_value):
-    """
-    Something in the chain is converting on and off to true and false. This converts it back.
+    """ Something in the chain is converting on and off to true and false. This converts it back.
 
-    :param gs_value: The value from global storage
+    :param gs_value: The value from global storage which needs to be fixed
     :return:
     """
     if gs_value is True:
@@ -76,6 +75,83 @@ def parse_global_storage(gs_value):
     return gs_value
 
 
+def mount_zfs(root_mount_point, partition):
+    """ Mounts a zfs partition at @p root_mount_point
+
+    :param root_mount_point: The absolute path to the root of the install
+    :param partition: The partition map from global storage for this partition
+    :return:
+    """
+    # Get the list of zpools from global storage
+    zfs_pool_list = libcalamares.globalstorage.value("zfsPoolInfo")
+    if not zfs_pool_list:
+        libcalamares.utils.warning("Failed to locate zfsPoolInfo data in global storage")
+        raise Exception("Internal error mounting zfs datasets")
+
+    # Find the zpool matching this partition
+    for zfs_pool in zfs_pool_list:
+        if zfs_pool["mountpoint"] == partition["mountPoint"]:
+            pool_name = zfs_pool["poolName"]
+            ds_name = zfs_pool["dsName"]
+
+    # import the zpool
+    import_result = subprocess.run(['zpool', 'import', '-R', root_mount_point, pool_name])
+    if import_result.returncode != 0:
+        raise Exception("Failed to import zpool")
+
+    # Get the encryption information from global storage
+    zfs_info_list = libcalamares.globalstorage.value("zfsInfo")
+    encrypt = False
+    if zfs_info_list:
+        for zfs_info in zfs_info_list:
+            if zfs_info["mountpoint"] == partition["mountPoint"] and zfs_info["encrypted"] is True:
+                encrypt = True
+                passphrase = zfs_info["passphrase"]
+
+    if encrypt is True:
+        # The zpool is encrypted, we need to unlock it
+        loadkey_result = subprocess.run(['sh', '-c', 'echo "' + passphrase + '" | zfs load-key ' + pool_name])
+        if loadkey_result.returncode != 0:
+            raise Exception("Failed to unlock zpool")
+
+    if partition["mountPoint"] == '/':
+        # Get the zfs dataset list from global storage
+        zfs = libcalamares.globalstorage.value("zfsDatasets")
+
+        if not zfs:
+            libcalamares.utils.warning("Failed to locate zfs dataset list")
+            raise Exception("Internal error mounting zfs datasets")
+
+        # first we handle the / dataset if there is one
+        for dataset in zfs:
+            if dataset['mountpoint'] == '/':
+                # Properly set the canmount field from global storage
+                can_mount = parse_global_storage(dataset['canMount'])
+                set_result = subprocess.run(['zfs', 'set', 'canmount=' + can_mount,
+                                             dataset['zpool'] + '/' + dataset['dsName']])
+                if set_result.returncode != 0:
+                    raise Exception("Failed to set zfs mountpoint")
+                if dataset['canMount'] == 'noauto':
+                    mount_result = subprocess.run(['zfs', 'mount', 
dataset['zpool'] + '/' + dataset['dsName']]) + if mount_result.returncode != 0: + raise Exception("Failed to mount root dataset") + + # Set the canmount property for each dataset. This will effectively mount the dataset + for dataset in zfs: + # We already handled the / mountpoint above + if dataset['mountpoint'] != '/': + can_mount = parse_global_storage(dataset['canMount']) + + set_result = subprocess.run(['zfs', 'set', 'canmount=' + can_mount, + dataset['zpool'] + '/' + dataset['dsName']]) + if set_result.returncode != 0: + raise Exception("Failed to set zfs mountpoint") + else: + set_result = subprocess.run(['zfs', 'set', 'canmount=on', pool_name + '/' + ds_name]) + if set_result.returncode != 0: + raise Exception("Failed to set zfs mountpoint") + + def mount_partition(root_mount_point, partition, partitions): """ Do a single mount of @p partition inside @p root_mount_point. @@ -112,71 +188,7 @@ def mount_partition(root_mount_point, partition, partitions): device = os.path.join("/dev/mapper", partition["luksMapperName"]) if fstype == "zfs": - zfs_pool_list = libcalamares.globalstorage.value("zfsPoolInfo") - if not zfs_pool_list: - libcalamares.utils.warning("Failed to locate zfsPoolInfo data in global storage") - raise Exception("Internal error mounting zfs datasets") - - for zfs_pool in zfs_pool_list: - if zfs_pool["mountpoint"] == partition["mountPoint"]: - pool_name = zfs_pool["poolName"] - ds_name = zfs_pool["dsName"]; - - # import the zpool - import_result = subprocess.run(['zpool', 'import', '-R', root_mount_point, pool_name]) - if import_result.returncode != 0: - raise Exception("Failed to import zpool") - - zfs_info_list = libcalamares.globalstorage.value("zfsInfo") - encrypt = False - if zfs_info_list: - for zfs_info in zfs_info_list: - if zfs_info["mountpoint"] == partition["mountPoint"] and zfs_info["encrypted"] is True: - encrypt = True - passphrase = zfs_info["passphrase"] - - if encrypt is True: - # The zpool is encrypted, we need to unlock it - loadkey_result = subprocess.run(['sh', '-c', 'echo "' + passphrase + '" | zfs load-key ' + pool_name]) - if loadkey_result.returncode != 0: - raise Exception("Failed to unlock zpool") - - if partition["mountPoint"] == '/': - # Get the zfs dataset list from global storage - zfs = libcalamares.globalstorage.value("zfsDatasets") - - if not zfs: - libcalamares.utils.warning("Failed to locate zfs dataset list") - raise Exception("Internal error mounting zfs datasets") - - # first we handle the / dataset if there is one - for dataset in zfs: - if dataset['mountpoint'] == '/': - # Properly set the canmount field from global storage - can_mount = parse_global_storage(dataset['canMount']) - set_result = subprocess.run(['zfs', 'set', 'canmount=' + can_mount, - dataset['zpool'] + '/' + dataset['dsName']]) - if set_result.returncode != 0: - raise Exception("Failed to set zfs mountpoint") - if dataset['canMount'] == 'noauto': - mount_result = subprocess.run(['zfs', 'mount', dataset['zpool'] + '/' + dataset['dsName']]) - if mount_result.returncode != 0: - raise Exception("Failed to mount root dataset") - - # Set the canmount property for each dataset. 
This will effectively mount the dataset
-            for dataset in zfs:
-                # We already handled the / mountpoint above
-                if dataset['mountpoint'] != '/':
-                    can_mount = parse_global_storage(dataset['canMount'])
-
-                    set_result = subprocess.run(['zfs', 'set', 'canmount=' + can_mount,
-                                                 dataset['zpool'] + '/' + dataset['dsName']])
-                    if set_result.returncode != 0:
-                        raise Exception("Failed to set zfs mountpoint")
-        else:
-            set_result = subprocess.run(['zfs', 'set', 'canmount=on', pool_name + '/' + ds_name])
-            if set_result.returncode != 0:
-                raise Exception("Failed to set zfs mountpoint")
+        mount_zfs(root_mount_point, partition)
     else:  # fstype == "zfs"
         if libcalamares.utils.mount(device,
                                     mount_point,

From b6692341e7bc2d7444620cde0f52dffd69eba011 Mon Sep 17 00:00:00 2001
From: dalto
Date: Sun, 14 Nov 2021 09:07:58 -0600
Subject: [PATCH 21/23] [fstab] Exclude zfs partitions from fstab

---
 src/modules/fstab/main.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/modules/fstab/main.py b/src/modules/fstab/main.py
index 5312e7f5b..8ca4cf557 100644
--- a/src/modules/fstab/main.py
+++ b/src/modules/fstab/main.py
@@ -196,7 +196,7 @@ class FstabGenerator(object):
                 dct = self.generate_fstab_line_info(mount_entry)
                 if dct:
                     self.print_fstab_line(dct, file=fstab_file)
-        else:
+        elif partition["fs"] != "zfs":  # zfs partitions don't need an entry in fstab
             dct = self.generate_fstab_line_info(partition)
             if dct:
                 self.print_fstab_line(dct, file=fstab_file)

From abb6f73725073ef7f44c25de6b9400d84a48151b Mon Sep 17 00:00:00 2001
From: dalto
Date: Mon, 15 Nov 2021 17:59:33 -0600
Subject: [PATCH 22/23] [partition] zfs changes from review feedback

---
 src/modules/bootloader/main.py | 43 ++++++++++++++++++++++------------
 1 file changed, 28 insertions(+), 15 deletions(-)

diff --git a/src/modules/bootloader/main.py b/src/modules/bootloader/main.py
index 10e7d0f47..54914beb7 100644
--- a/src/modules/bootloader/main.py
+++ b/src/modules/bootloader/main.py
@@ -108,16 +108,34 @@ def get_zfs_root():
     # Find the root dataset
     for dataset in zfs:
         try:
-            if dataset['mountpoint'] == '/':
+            if dataset["mountpoint"] == "/":
                 return dataset["zpool"] + "/" + dataset["dsName"]
         except KeyError:
             # This should be impossible
-            libcalamares.utils.error("Internal error handling zfs dataset")
+            libcalamares.utils.warning("Internal error handling zfs dataset")
             raise
 
     return None
 
 
+def is_btrfs_root(partition):
+    """ Returns True if the partition object refers to a btrfs root filesystem
+
+    :param partition: A partition map from global storage
+    :return: True if btrfs and root, False otherwise
+    """
+    return partition["mountPoint"] == "/" and partition["fs"] == "btrfs"
+
+
+def is_zfs_root(partition):
+    """ Returns True if the partition object refers to a zfs root filesystem
+
+    :param partition: A partition map from global storage
+    :return: True if zfs and root, False otherwise
+    """
+    return partition["mountPoint"] == "/" and partition["fs"] == "zfs"
+
+
 def create_systemd_boot_conf(install_path, efi_dir, uuid, entry, entry_name, kernel_type):
     """
     Creates systemd-boot configuration files based on given parameters.
@@ -162,17 +180,18 @@ def create_systemd_boot_conf(install_path, efi_dir, uuid, entry, entry_name, ker
     for partition in partitions:
         # systemd-boot with a BTRFS root filesystem needs to be told
         # about the root subvolume.
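        # Annotation (illustration, not part of the patch): with a hypothetical
        # pool "zpcalamares" and root dataset "rootds", get_zfs_root() above
        # returns "zpcalamares/rootds", so the zfs branch below appends
        # "root=ZFS=zpcalamares/rootds" to kernel_params, mirroring the
        # "rootflags=subvol=@" hint used for the btrfs case.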
- if partition["mountPoint"] == "/" and partition["fs"] == "btrfs": + if is_btrfs_root(partition): kernel_params.append("rootflags=subvol=@") # zfs needs to be told the location of the root dataset - if partition["mountPoint"] == "/" and partition["fs"] == "zfs": - zfs_root = get_zfs_root - if zfs_root is not None: - kernel_params.append("root=ZFS=" + zfs_root) + if is_zfs_root(partition): + zfs_root_path = get_zfs_root() + if zfs_root_path is not None: + kernel_params.append("root=ZFS=" + zfs_root_path) else: # Something is really broken if we get to this point - libcalamares.utils.error("Internal error handling zfs dataset") + libcalamares.utils.warning("Internal error handling zfs dataset") + raise Exception("Internal zfs data missing, please contact your distribution") if cryptdevice_params: kernel_params.extend(cryptdevice_params) @@ -363,14 +382,8 @@ def run_grub_mkconfig(output_file): libcalamares.utils.error("Failed to run grub-mkconfig, no partitions defined in global storage") return - # check for zfs - is_zfs = False - for partition in partitions: - if partition["mountPoint"] == "/" and partition["fs"] == "zfs": - is_zfs = True - # zfs needs an environment variable set for grub-mkconfig - if is_zfs: + if any([is_zfs_root(partition) for partition in partitions]): check_target_env_call(["sh", "-c", "echo ZPOOL_VDEV_NAME_PATH=1 >> /etc/environment"]) check_target_env_call(["sh", "-c", "ZPOOL_VDEV_NAME_PATH=1 " + libcalamares.job.configuration["grubMkconfig"] + " -o " + output_file]) From e861d8b319d1507e114c76159e32509522975f49 Mon Sep 17 00:00:00 2001 From: dalto Date: Mon, 15 Nov 2021 18:00:04 -0600 Subject: [PATCH 23/23] [mount] zfs changes from review feedback --- src/modules/mount/main.py | 76 ++++++++++++++++++++++++--------------- 1 file changed, 48 insertions(+), 28 deletions(-) diff --git a/src/modules/mount/main.py b/src/modules/mount/main.py index 72c2015c6..056b0b415 100644 --- a/src/modules/mount/main.py +++ b/src/modules/mount/main.py @@ -26,6 +26,17 @@ _ = gettext.translation("calamares-python", fallback=True).gettext +class ZfsException(Exception): + """Exception raised when there is a problem with zfs + + Attributes: + message -- explanation of the error + """ + + def __init__(self, message): + self.message = message + + def pretty_name(): return _("Mounting partitions.") @@ -61,7 +72,7 @@ def get_btrfs_subvolumes(partitions): return btrfs_subvolumes -def parse_global_storage(gs_value): +def bool_to_zfs_command(gs_value): """ Something in the chain is converting on and off to true and false. This converts it back. 
    :param gs_value: The value from global storage which needs to be fixed
     :return:
     """
     if gs_value is True:
@@ -86,7 +97,7 @@ def mount_zfs(root_mount_point, partition):
     zfs_pool_list = libcalamares.globalstorage.value("zfsPoolInfo")
     if not zfs_pool_list:
         libcalamares.utils.warning("Failed to locate zfsPoolInfo data in global storage")
-        raise Exception("Internal error mounting zfs datasets")
+        raise ZfsException(_("Internal error mounting zfs datasets"))
 
     # Find the zpool matching this partition
     for zfs_pool in zfs_pool_list:
@@ -95,9 +106,10 @@ def mount_zfs(root_mount_point, partition):
             ds_name = zfs_pool["dsName"]
 
     # import the zpool
-    import_result = subprocess.run(['zpool', 'import', '-R', root_mount_point, pool_name])
-    if import_result.returncode != 0:
-        raise Exception("Failed to import zpool")
+    try:
+        libcalamares.utils.host_env_process_output(["zpool", "import", "-R", root_mount_point, pool_name], None)
+    except subprocess.CalledProcessError:
+        raise ZfsException(_("Failed to import zpool"))
 
     # Get the encryption information from global storage
     zfs_info_list = libcalamares.globalstorage.value("zfsInfo")
     encrypt = False
@@ -110,9 +122,11 @@ def mount_zfs(root_mount_point, partition):
 
     if encrypt is True:
         # The zpool is encrypted, we need to unlock it
-        loadkey_result = subprocess.run(['sh', '-c', 'echo "' + passphrase + '" | zfs load-key ' + pool_name])
-        if loadkey_result.returncode != 0:
-            raise Exception("Failed to unlock zpool")
+        try:
+            libcalamares.utils.host_env_process_output(["sh", "-c",
+                                                        "echo \"" + passphrase + "\" | zfs load-key " + pool_name], None)
+        except subprocess.CalledProcessError:
+            raise ZfsException(_("Failed to unlock zpool"))
 
     if partition["mountPoint"] == '/':
         # Get the zfs dataset list from global storage
@@ -120,36 +134,39 @@ def mount_zfs(root_mount_point, partition):
 
         if not zfs:
             libcalamares.utils.warning("Failed to locate zfs dataset list")
-            raise Exception("Internal error mounting zfs datasets")
+            raise ZfsException(_("Internal error mounting zfs datasets"))
 
         # first we handle the / dataset if there is one
         for dataset in zfs:
             if dataset['mountpoint'] == '/':
                 # Properly set the canmount field from global storage
-                can_mount = parse_global_storage(dataset['canMount'])
-                set_result = subprocess.run(['zfs', 'set', 'canmount=' + can_mount,
-                                             dataset['zpool'] + '/' + dataset['dsName']])
-                if set_result.returncode != 0:
-                    raise Exception("Failed to set zfs mountpoint")
-                if dataset['canMount'] == 'noauto':
-                    mount_result = subprocess.run(['zfs', 'mount', dataset['zpool'] + '/' + dataset['dsName']])
+                can_mount = bool_to_zfs_command(dataset['canMount'])
+                try:
+                    libcalamares.utils.host_env_process_output(["zfs", "set", "canmount=" + can_mount,
+                                                                dataset["zpool"] + "/" + dataset["dsName"]], None)
+                except subprocess.CalledProcessError:
+                    raise ZfsException(_("Failed to set zfs mountpoint"))
+                if dataset["canMount"] == "noauto":
+                    mount_result = subprocess.run(["zfs", "mount", dataset["zpool"] + '/' + dataset["dsName"]])
                 if mount_result.returncode != 0:
-                    raise Exception("Failed to mount root dataset")
+                    raise ZfsException(_("Failed to mount root dataset"))
 
        # Set the canmount property for each dataset. 
This will effectively mount the dataset for dataset in zfs: # We already handled the / mountpoint above - if dataset['mountpoint'] != '/': - can_mount = parse_global_storage(dataset['canMount']) + if dataset["mountpoint"] != '/': + can_mount = bool_to_zfs_command(dataset["canMount"]) - set_result = subprocess.run(['zfs', 'set', 'canmount=' + can_mount, - dataset['zpool'] + '/' + dataset['dsName']]) - if set_result.returncode != 0: - raise Exception("Failed to set zfs mountpoint") + try: + libcalamares.utils.host_env_process_output(["zfs", "set", "canmount=" + can_mount, + dataset["zpool"] + "/" + dataset["dsName"]], None) + except subprocess.CalledProcessError: + raise ZfsException(_("Failed to set zfs mountpoint")) else: - set_result = subprocess.run(['zfs', 'set', 'canmount=on', pool_name + '/' + ds_name]) - if set_result.returncode != 0: - raise Exception("Failed to set zfs mountpoint") + try: + libcalamares.utils.host_env_process_output(["zfs", "set", "canmount=on", pool_name + "/" + ds_name], None) + except subprocess.CalledProcessError: + raise ZfsException(_("Failed to set zfs mountpoint")) def mount_partition(root_mount_point, partition, partitions): @@ -255,8 +272,11 @@ def run(): # under /tmp, we make sure /tmp is mounted before the partition) mountable_partitions = [ p for p in partitions + extra_mounts if "mountPoint" in p and p["mountPoint"] ] mountable_partitions.sort(key=lambda x: x["mountPoint"]) - for partition in mountable_partitions: - mount_partition(root_mount_point, partition, partitions) + try: + for partition in mountable_partitions: + mount_partition(root_mount_point, partition, partitions) + except ZfsException as ze: + return _("zfs mounting error"), ze.message libcalamares.globalstorage.insert("rootMountPoint", root_mount_point)
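A note on the data flow across these patches: the C++ modules (partition, zfs) and the Python modules (mount, umount, fstab, bootloader) coordinate only through Calamares global storage. As a rough sketch of the three entries involved -- the key names are the ones used in the patches above, while every concrete value is an invented example for a hypothetical install with an encrypted root pool and a separate /home pool -- the data looks like this:

    # zfsPoolInfo: written by ZfsJob::exec(), read by mount_zfs() and
    # export_zpools(). For the root pool, dsName stays "none" because the
    # root datasets come from the module config and are tracked separately.
    zfs_pool_info = [
        {"poolName": "zpcalamares", "mountpoint": "/", "dsName": "none"},
        {"poolName": "zpcalamareshome", "mountpoint": "/home", "dsName": "home"},
    ]

    # zfsInfo: written by PartitionLayout::createPartitions(), read by
    # ZfsJob::exec() and mount_zfs() to decide whether to create and
    # unlock an encrypted pool.
    zfs_info = [
        {"mountpoint": "/", "encrypted": True, "passphrase": "example only"},
    ]

    # zfsDatasets: one map per dataset created for the root pool (dsName,
    # mountpoint and canMount from the config, zpool added by ZfsJob),
    # read back by mount_zfs() and by the bootloader's get_zfs_root().
    zfs_datasets = [
        {"dsName": "ROOT", "mountpoint": "/", "canMount": "noauto",
         "zpool": "zpcalamares"},
    ]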