Add support for multiple zpools

Author: dalto
Date:   2021-11-12 16:06:06 -06:00
Parent: 91762e3df4
Commit: 4bed079ebf

3 changed files with 192 additions and 81 deletions
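The commit threads data between the partition UI, the zfs job and the mount module through global storage. Purely as an illustration (hypothetical values, not installer output), the entries introduced here have roughly this shape:

# Illustrative sketch of the global storage entries used in the diffs below.
zfs_pool_info = [   # "zfsPoolInfo": written by ZfsJob, read by the mount module
    {"poolName": "zpcala", "mountpoint": "/", "dsName": "none"},
    {"poolName": "zpcalahome", "mountpoint": "/home", "dsName": "home"},
]
zfs_datasets = [    # "zfsDatasets": datasets created on the root pool
    {"zpool": "zpcala", "dsName": "rootfs", "mountpoint": "/", "canMount": "noauto"},
]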


@@ -111,11 +111,79 @@ def mount_partition(root_mount_point, partition, partitions):
    if "luksMapperName" in partition:
        device = os.path.join("/dev/mapper", partition["luksMapperName"])

    if libcalamares.utils.mount(device,
                                mount_point,
                                fstype,
                                partition.get("options", "")) != 0:
        libcalamares.utils.warning("Cannot mount {}".format(device))

    if fstype == "zfs":
        zfs_pool_list = libcalamares.globalstorage.value("zfsPoolInfo")
        if not zfs_pool_list:
            libcalamares.utils.warning("Failed to locate zfsPoolInfo data in global storage")
            raise Exception("Internal error mounting zfs datasets")

        for zfs_pool in zfs_pool_list:
            libcalamares.utils.warning("Poolname: " + zfs_pool["poolName"] + " mountpoint: " + zfs_pool["mountpoint"])
            if zfs_pool["mountpoint"] == partition["mountPoint"]:
                pool_name = zfs_pool["poolName"]
                ds_name = zfs_pool["dsName"]

        # import the zpool
        import_result = subprocess.run(['zpool', 'import', '-R', root_mount_point, pool_name])
        if import_result.returncode != 0:
            raise Exception("Failed to import zpool")

        zfs_info_list = libcalamares.globalstorage.value("zfsInfo")
        encrypt = False
        if zfs_info_list:
            for zfs_info in zfs_info_list:
                if zfs_info["mountpoint"] == partition["mountPoint"] and zfs_info["encrypted"] is True:
                    encrypt = True
                    passphrase = zfs_info["passphrase"]

        if encrypt is True:
            # The zpool is encrypted, we need to unlock it
            loadkey_result = subprocess.run(['sh', '-c', 'echo "' + passphrase + '" | zfs load-key ' + pool_name])
            if loadkey_result.returncode != 0:
                raise Exception("Failed to unlock zpool")

        if partition["mountPoint"] == '/':
            # Get the zfs dataset list from global storage
            zfs = libcalamares.globalstorage.value("zfsDatasets")
            if not zfs:
                libcalamares.utils.warning("Failed to locate zfs dataset list")
                raise Exception("Internal error mounting zfs datasets")

            # first we handle the / dataset if there is one
            for dataset in zfs:
                if dataset['mountpoint'] == '/':
                    # Properly set the canmount field from global storage
                    can_mount = parse_global_storage(dataset['canMount'])
                    set_result = subprocess.run(['zfs', 'set', 'canmount=' + can_mount,
                                                 dataset['zpool'] + '/' + dataset['dsName']])
                    if set_result.returncode != 0:
                        raise Exception("Failed to set zfs mountpoint")
                    if dataset['canMount'] == 'noauto':
                        mount_result = subprocess.run(['zfs', 'mount', dataset['zpool'] + '/' + dataset['dsName']])
                        if mount_result.returncode != 0:
                            raise Exception("Failed to mount root dataset")

            # Set the canmount property for each dataset. This will effectively mount the dataset
            for dataset in zfs:
                # We already handled the / mountpoint above
                if dataset['mountpoint'] != '/':
                    can_mount = parse_global_storage(dataset['canMount'])
                    set_result = subprocess.run(['zfs', 'set', 'canmount=' + can_mount,
                                                 dataset['zpool'] + '/' + dataset['dsName']])
                    if set_result.returncode != 0:
                        raise Exception("Failed to set zfs mountpoint")
        else:
            set_result = subprocess.run(['zfs', 'set', 'canmount=on', pool_name + '/' + ds_name])
            if set_result.returncode != 0:
                raise Exception("Failed to set zfs mountpoint")
    else:  # fstype == "zfs"
        if libcalamares.utils.mount(device,
                                    mount_point,
                                    fstype,
                                    partition.get("options", "")) != 0:
            libcalamares.utils.warning("Cannot mount {}".format(device))

    # Special handling for btrfs subvolumes. Create the subvolumes listed in mount.conf
    if fstype == "btrfs" and partition["mountPoint"] == '/':
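The unlock step in the hunk above pipes the passphrase through a shell string. As an illustration only (not part of the commit), the same unlock can be sketched by feeding the passphrase to zfs load-key on stdin, which is the same channel the shell pipe uses:

import subprocess

def unlock_pool(pool_name, passphrase):
    # Sketch only: load the encryption key for pool_name, passing the
    # passphrase on stdin instead of embedding it in a shell command.
    result = subprocess.run(['zfs', 'load-key', pool_name],
                            input=passphrase.encode())
    if result.returncode != 0:
        raise Exception("Failed to unlock zpool")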
@@ -146,51 +214,6 @@ def mount_partition(root_mount_point, partition, partitions):
                                        ",".join([mount_option, partition.get("options", "")])) != 0:
                libcalamares.utils.warning("Cannot mount {}".format(device))

    if fstype == "zfs" and partition["mountPoint"] == '/':
        # Get the zfs dataset list from global storage
        zfs = libcalamares.globalstorage.value("zfs")
        if not zfs:
            libcalamares.utils.warning("Failed to locate zfs dataset list")
            raise Exception("Internal error mounting zfs datasets")

        # import the zpool
        import_result = subprocess.run(['zpool', 'import', '-R', root_mount_point, zfs[0]['zpool']])
        if import_result.returncode != 0:
            raise Exception("Failed to import zpool")

        passphrase = libcalamares.globalstorage.value("encryptphrase")
        if passphrase:
            # The zpool is encrypted, we need to unlock it
            loadkey_result = subprocess.run(['sh', '-c', 'echo "' + passphrase + '" | zfs load-key ' + zfs[0]['zpool']])
            if loadkey_result.returncode != 0:
                raise Exception("Failed to unlock zpool")

        # first we handle the / dataset if there is one
        for dataset in zfs:
            if dataset['mountpoint'] == '/':
                # Properly set the canmount field from global storage
                can_mount = parse_global_storage(dataset['canMount'])
                set_result = subprocess.run(['zfs', 'set', 'canmount=' + can_mount,
                                             dataset['zpool'] + '/' + dataset['dsName']])
                if set_result.returncode != 0:
                    raise Exception("Failed to set zfs mountpoint")
                if dataset['canMount'] == 'noauto':
                    mount_result = subprocess.run(['zfs', 'mount', dataset['zpool'] + '/' + dataset['dsName']])
                    if mount_result.returncode != 0:
                        raise Exception("Failed to mount root dataset")

        # Set the canmount property for each dataset. This will effectively mount the dataset
        for dataset in zfs:
            # We already handled the / mountpoint above
            if dataset['mountpoint'] != '/':
                can_mount = parse_global_storage(dataset['canMount'])
                set_result = subprocess.run(['zfs', 'set', 'canmount=' + can_mount,
                                             dataset['zpool'] + '/' + dataset['dsName']])
                if set_result.returncode != 0:
                    raise Exception("Failed to set zfs mountpoint")


def run():
    """


@@ -255,11 +255,29 @@ CreatePartitionDialog::getNewlyCreatedPartition()
            m_parent, *m_device, m_role, fsType, fsLabel, first, last, PartitionTable::Flags() );
    }

    // For zfs, we let the zfs module handle the encryption but we need to make the passphrase available to that module
    if ( m_ui->encryptWidget->state() == EncryptWidget::Encryption::Confirmed && !luksPassphrase.isEmpty()
         && fsType == FileSystem::Zfs )
    // For zfs, we let the zfs module handle the encryption but we need to make the passphrase available to later modules
    if ( fsType == FileSystem::Zfs )
    {
        Calamares::JobQueue::instance()->globalStorage()->insert( "encryptphrase", luksPassphrase );
        Calamares::GlobalStorage* storage = Calamares::JobQueue::instance()->globalStorage();
        QList< QVariant > zfsInfoList;
        QVariantMap zfsInfo;

        // If this is not the first encrypted zfs partition, get the old list first
        if ( storage->contains( "zfsInfo" ) )
        {
            zfsInfoList = storage->value( "zfsInfo" ).toList();
            storage->remove( "zfsInfo" );
        }

        // Save the information subsequent modules will need
        zfsInfo[ "encrypted" ]
            = m_ui->encryptWidget->state() == EncryptWidget::Encryption::Confirmed && !luksPassphrase.isEmpty();
        zfsInfo[ "passphrase" ] = luksPassphrase;
        zfsInfo[ "mountpoint" ] = selectedMountPoint( m_ui->mountPointComboBox );

        // Add it to the list and insert it into global storage
        zfsInfoList.append( zfsInfo );
        storage->insert( "zfsInfo", zfsInfoList );
    }

    if ( m_device->type() == Device::Type::LVM_Device )
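Since each run of the dialog appends one map to the same global-storage key, configuring several ZFS partitions leaves a list of entries behind. An illustrative picture (hypothetical values) of what zfsInfo might hold after an encrypted / and an unencrypted /home have been set up:

# Hypothetical contents of global storage "zfsInfo" after two partitions:
zfs_info = [
    {"encrypted": True,  "passphrase": "hunter2", "mountpoint": "/"},
    {"encrypted": False, "passphrase": "",        "mountpoint": "/home"},
]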


@@ -101,6 +101,8 @@ ZfsJob::exec()
    const CalamaresUtils::System* system = CalamaresUtils::System::instance();

    QVariantList poolNames;

    for ( auto& partition : qAsConst( partitions ) )
    {
        QVariantMap pMap;
@@ -130,15 +132,48 @@ ZfsJob::exec()
            continue;
        }

        QString mountpoint = pMap[ "mountPoint" ].toString();
        if ( mountpoint.isEmpty() )
        {
            continue;
        }

        // Build a poolname off the mountpoint, this is not ideal but should work until there is UI built for zfs
        QString poolName = m_poolName;
        if ( mountpoint != '/' )
        {
            QString suffix = mountpoint;
            poolName += suffix.remove( QRegExp( "[^a-zA-Z\\d\\s]" ) );
        }

        if ( !gs->contains( "zfsInfo" ) && gs->value( "zfsInfo" ).canConvert( QVariant::List ) )
        {
            return Calamares::JobResult::error( tr( "Internal data missing" ), tr( "Failed to create zpool" ) );
        }
        QVariantList zfsInfoList = gs->value( "zfsInfo" ).toList();

        bool encrypt = false;
        QString passphrase;
        for ( const QVariant& zfsInfo : qAsConst( zfsInfoList ) )
        {
            if ( zfsInfo.canConvert( QVariant::Map ) && zfsInfo.toMap().value( "encrypted" ).toBool()
                 && mountpoint == zfsInfo.toMap().value( "mountpoint" ) )
            {
                encrypt = true;
                passphrase = zfsInfo.toMap().value( "passphrase" ).toString();
            }
        }

        ZfsResult zfsResult;
        if ( gs->contains( "encryptphrase" ) )
        if ( encrypt )
        {
            zfsResult
                = CreateZpool( deviceName, m_poolName, m_poolOptions, true, gs->value( "encryptphrase" ).toString() );
                = CreateZpool( deviceName, poolName, m_poolOptions, true, passphrase );
        }
        else
        {
            zfsResult = CreateZpool( deviceName, m_poolName, m_poolOptions, false );
            zfsResult = CreateZpool( deviceName, poolName, m_poolOptions, false );
        }

        if ( !zfsResult.success )
@@ -146,53 +181,88 @@ ZfsJob::exec()
            return Calamares::JobResult::error( tr( "Failed to create zpool" ), zfsResult.failureMessage );
        }

        // Create the datasets
        QVariantList datasetList;
        for ( const auto& dataset : qAsConst( m_datasets ) )
        {
            QVariantMap datasetMap = dataset.toMap();
        // Add the poolname, dataset name and mountpoint to the list
        QVariantMap poolNameEntry;
        poolNameEntry["poolName"] = poolName;
        poolNameEntry["mountpoint"] = mountpoint;
        poolNameEntry["dsName"] = "none";

            // Make sure all values are valid
            if ( datasetMap[ "dsName" ].toString().isEmpty() || datasetMap[ "mountpoint" ].toString().isEmpty()
                 || datasetMap[ "canMount" ].toString().isEmpty() )
        if ( mountpoint == '/' )
        {
            // Create the datasets
            QVariantList datasetList;
            for ( const auto& dataset : qAsConst( m_datasets ) )
            {
                cWarning() << "Bad dataset entry";
                continue;
                QVariantMap datasetMap = dataset.toMap();

                // Make sure all values are valid
                if ( datasetMap[ "dsName" ].toString().isEmpty() || datasetMap[ "mountpoint" ].toString().isEmpty()
                     || datasetMap[ "canMount" ].toString().isEmpty() )
                {
                    cWarning() << "Bad dataset entry";
                    continue;
                }

                // Create the dataset. We set canmount=no regardless of the setting for now.
                // It is modified to the correct value in the mount module to ensure mount order is maintained
                auto r = system->runCommand( { "sh",
                                               "-c",
                                               "zfs create " + m_datasetOptions + " -o canmount=off -o mountpoint="
                                                   + datasetMap[ "mountpoint" ].toString() + " " + poolName + "/"
                                                   + datasetMap[ "dsName" ].toString() },
                                             std::chrono::seconds( 10 ) );
                if ( r.getExitCode() != 0 )
                {
                    cWarning() << "Failed to create dataset" << datasetMap[ "dsName" ].toString();
                    continue;
                }

                // Add the dataset to the list for global storage
                datasetMap[ "zpool" ] = m_poolName;
                datasetList.append( datasetMap );
            }

            // If the list isn't empty, add it to global storage
            if ( !datasetList.isEmpty() )
            {
                gs->insert( "zfsDatasets", datasetList );
            }
        }
        else
        {
            // Create the dataset. We set canmount=no regardless of the setting for now.
            // It is modified to the correct value in the mount module to ensure mount order is maintained
            QString dsName = mountpoint;
            dsName.remove( QRegExp( "[^a-zA-Z\\d\\s]" ) );
            auto r = system->runCommand( { "sh",
                                           "-c",
                                           "zfs create " + m_datasetOptions + " -o canmount=off -o mountpoint="
                                               + datasetMap[ "mountpoint" ].toString() + " " + m_poolName + "/"
                                               + datasetMap[ "dsName" ].toString() },
                                               + mountpoint + " " + poolName + "/"
                                               + dsName },
                                         std::chrono::seconds( 10 ) );
            if ( r.getExitCode() != 0 )
            {
                cWarning() << "Failed to create dataset" << datasetMap[ "dsName" ].toString();
                continue;
                cWarning() << "Failed to create dataset" << dsName;
            }

            // Add the dataset to the list for global storage
            datasetMap[ "zpool" ] = m_poolName;
            datasetList.append( datasetMap );
            poolNameEntry["dsName"] = dsName;
        }

        // If the list isn't empty, add it to global storage
        if ( !datasetList.isEmpty() )
        {
            gs->insert( "zfs", datasetList );
        }
        poolNames.append(poolNameEntry);

        // Export the zpool so it can be reimported at the correct location later
        auto r = system->runCommand( { "zpool", "export", m_poolName }, std::chrono::seconds( 10 ) );
        auto r = system->runCommand( { "zpool", "export", poolName }, std::chrono::seconds( 10 ) );
        if ( r.getExitCode() != 0 )
        {
            cWarning() << "Failed to export pool" << m_poolName;
        }
    }

    if (!poolNames.isEmpty())
    {
        gs->insert("zfsPoolInfo", poolNames);
    }

    return Calamares::JobResult::ok();
}
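For illustration only (not part of the commit), the pool and dataset naming that ZfsJob derives for each mountpoint can be sketched in a few lines. The regular expression mirrors the QRegExp above, and the "zpcala" base name is just an assumed stand-in for the configured m_poolName:

import re

def derive_names(base_pool_name, mountpoint):
    # Mirror of QRegExp("[^a-zA-Z\\d\\s]"): strip everything except
    # letters, digits and whitespace from the mountpoint.
    stripped = re.sub(r'[^a-zA-Z\d\s]', '', mountpoint)
    pool_name = base_pool_name if mountpoint == '/' else base_pool_name + stripped
    ds_name = "none" if mountpoint == '/' else stripped
    return pool_name, ds_name

# e.g. with an assumed base pool name "zpcala":
#   derive_names("zpcala", "/")        -> ("zpcala", "none")
#   derive_names("zpcala", "/home")    -> ("zpcalahome", "home")
#   derive_names("zpcala", "/var/log") -> ("zpcalavarlog", "varlog")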