author     aqua <aqua@iserlohn-fortress.net>    2023-11-25 10:46:08 +0200
committer  aqua <aqua@iserlohn-fortress.net>    2023-11-25 10:46:08 +0200
commit     10afe26c76d9a32ed654308ae92c40023b1638fc (patch)
tree       d1f36488d09d3b094ef354b1c08e2dde272402bb
parent     Update to 2.1.6 (diff)
Updated to 2.1.13 (HEAD, master)
-rw-r--r--   PKGBUILD               |   8
-rw-r--r--   zfs.initcpio.hook      | 170
-rw-r--r--   zfs.initcpio.install   |   6
3 files changed, 152 insertions, 32 deletions
diff --git a/PKGBUILD b/PKGBUILD
index 3302107..66db814 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -4,7 +4,7 @@
# All my PKGBUILDs are managed at https://github.com/eli-schwartz/pkgbuilds
pkgname=zfs-utils
-pkgver=2.1.6
+pkgver=2.1.13
pkgrel=1
pkgdesc="Userspace utilities for the Zettabyte File System."
arch=("i686" "x86_64" "aarch64")
@@ -15,10 +15,10 @@ optdepends=('python: for arcstat/arc_summary/dbufstat')
source=("https://github.com/zfsonlinux/zfs/releases/download/zfs-${pkgver}/zfs-${pkgver}.tar.gz"{,.asc}
"zfs.initcpio.install"
"zfs.initcpio.hook")
-b2sums=('615fe7a2128af77c6c855ea52b6503a78f0c992ea845b02875ac19aa9dd155c5d4110b668da91c463f96a54767ab92e67e5303572337352484c055c0a0ff9e46'
+b2sums=('88f4b27a872071590294cf2a93d19633bacfaf1214fc4f06ab7ae7778376978754e0e2566d7bfe659d84426f525ecd05679b98310ddfc2d5938804c36fd03904'
'SKIP'
- '570e995bba07ea0fb424dff191180b8017b6469501964dc0b70fd51e338a4dad260f87cc313489866cbfd1583e4aac2522cf7309c067cc5314eb83c37fe14ff3'
- 'e14366cbf680e3337d3d478fe759a09be224c963cc5207bee991805312afc49a49e6691f11e5b8bbe8dde60e8d855bd96e7f4f48f24a4c6d4a8c1bab7fc2bba0')
+ 'cb774227f157573f960bdb345e5b014c043a573c987d37a1db027b852d77a5eda1ee699612e1d8f4a2770897624889f1a3808116a171cc4c796a95e3caa43012'
+ '779c864611249c3f21d1864508d60cfe5e0f5541d74fb3093c6bdfa56be2c76f386ac1690d363beaee491c5132f5f6dbc01553aa408cda579ebca74b0e0fd1d0')
validpgpkeys=('4F3BA9AB6D1F8D683DC2DFB56AD860EED4598027' # Tony Hutter (GPG key for signing ZFS releases) <hutter2@llnl.gov>
'C33DF142657ED1F7C328A2960AB9E991C6AF658B') # Brian Behlendorf <behlendorf1@llnl.gov>
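
For context, a version bump like the one in this hunk is normally paired with regenerating the b2sums for the new tarball. A minimal sketch of the usual workflow, assuming the updpkgsums helper from pacman-contrib (the exact commands the maintainer ran are not recorded in this commit):

    # bump the version in place, then refresh the checksum arrays
    sed -i 's/^pkgver=.*/pkgver=2.1.13/' PKGBUILD
    updpkgsums              # downloads the sources and rewrites b2sums=()
    # or generate the sums manually and paste them in:
    makepkg -g
    # the .asc signature is verified against validpgpkeys, so the release
    # signing key must be present in the builder's keyring:
    gpg --recv-keys 4F3BA9AB6D1F8D683DC2DFB56AD860EED4598027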
diff --git a/zfs.initcpio.hook b/zfs.initcpio.hook
index e0f4cfb..4770c08 100644
--- a/zfs.initcpio.hook
+++ b/zfs.initcpio.hook
@@ -6,6 +6,7 @@
#
ZPOOL_FORCE=""
ZPOOL_IMPORT_FLAGS=""
+ZFS_BOOT_ONLY=""
zfs_get_bootfs () {
for zfs_dataset in $(zpool list -H -o bootfs); do
@@ -25,13 +26,86 @@ zfs_get_bootfs () {
return 1
}
+zfs_decrypt_fs() {
+ dataset=$1
+
+ # Make sure dataset is encrypted; get fails if ZFS does not support encryption
+ encryption="$(zfs get -H -o value encryption "${dataset}" 2>/dev/null)" || return 0
+ [ "${encryption}" != "off" ] || return 0
+
+ # Make sure the dataset is locked
+ keystatus="$(zfs get -H -o value keystatus "${dataset}")" || return 0
+ [ "${keystatus}" != "available" ] || return 0
+
+ # Make sure the encryptionroot is sensible
+ encryptionroot="$(zfs get -H -o value encryptionroot "${dataset}")" || return 0
+ [ "${encryptionroot}" != "-" ] || return 0
+
+ # Export encryption root to be used by other hooks (SSH)
+ echo "${encryptionroot}" > /.encryptionroot
+
+ prompt_override=""
+ if keylocation="$(zfs get -H -o value keylocation "${encryptionroot}")"; then
+ # If key location is a file, determine if it can be overridden by prompt
+ if [ "${keylocation}" != "prompt" ]; then
+ if keyformat="$(zfs get -H -o value keyformat "${encryptionroot}")"; then
+ [ "${keyformat}" = "passphrase" ] && prompt_override="yes"
+ fi
+ fi
+
+ # If key location is a local file, check if file exists
+ if [ "${keylocation%%://*}" = "file" ]; then
+ keyfile="${keylocation#file://}"
+
+ # If the file does not exist yet, wait for udev to create device nodes
+ if [ ! -r "${keyfile}" ]; then
+ udevadm settle
+
+ # Wait for udev up to 10 seconds
+ if [ ! -r "${keyfile}" ]; then
+ echo "Waiting for key ${keyfile} for ${encryptionroot}..."
+ for _ in $(seq 1 20); do
+ sleep 0.5s
+ [ -r "${keyfile}" ] && break
+ done
+ fi
+
+ if [ ! -r "${keyfile}" ]; then
+ echo "Key ${keyfile} for ${encryptionroot} hasn't appeared. Trying anyway."
+ fi
+ fi
+ fi
+ fi
+
+ # Loop until key is loaded here or by another vector (SSH, for instance)
+ while [ "$(zfs get -H -o value keystatus "${encryptionroot}")" != "available" ]; do
+ # Try the default loading mechanism
+ zfs load-key "${encryptionroot}" && break
+
+ # Load failed, try a prompt if the failure was not a prompt
+ if [ -n "${prompt_override}" ]; then
+ echo "Unable to load key ${keylocation}; please type the passphrase"
+ echo "To retry the file, interrupt now or repeatedly input a wrong passphrase"
+ zfs load-key -L prompt "${encryptionroot}" && break
+ fi
+
+ # Throttle retry attempts
+ sleep 2
+ done
+
+ if [ -f /.encryptionroot ]; then
+ rm /.encryptionroot
+ fi
+}
+
zfs_mount_handler () {
if [ "${ZFS_DATASET}" = "bootfs" ] ; then
if ! zfs_get_bootfs ; then
# Let's import everything and try again
zpool import ${ZPOOL_IMPORT_FLAGS} -N -a ${ZPOOL_FORCE}
if ! zfs_get_bootfs ; then
- die "ZFS: Cannot find bootfs."
+ err "ZFS: Cannot find bootfs."
+ exit 1
fi
fi
fi
@@ -39,7 +113,7 @@ zfs_mount_handler () {
local pool="${ZFS_DATASET%%/*}"
local rwopt_exp="${rwopt:-ro}"
- if ! zpool list -H "${pool}" >/dev/null 2>&1; then
+ if ! zpool list -H "${pool}" > /dev/null 2>&1; then
if [ ! "${rwopt_exp}" = "rw" ]; then
msg "ZFS: Importing pool ${pool} readonly."
ZPOOL_IMPORT_FLAGS="${ZPOOL_IMPORT_FLAGS} -o readonly=on"
@@ -48,48 +122,68 @@ zfs_mount_handler () {
fi
if ! zpool import ${ZPOOL_IMPORT_FLAGS} -N "${pool}" ${ZPOOL_FORCE} ; then
- die "ZFS: Unable to import pool ${pool}."
+ err "ZFS: Unable to import pool ${pool}."
+ exit 1
fi
fi
local node="$1"
- local tab_file="${node}/etc/fstab"
+ local rootmnt=$(zfs get -H -o value mountpoint "${ZFS_DATASET}")
+ local tab_file="/etc/fstab"
local zfs_datasets="$(zfs list -H -o name -t filesystem -r ${ZFS_DATASET})"
# Mount the root, and any child datasets
for dataset in ${zfs_datasets}; do
mountpoint=$(zfs get -H -o value mountpoint "${dataset}")
- case ${mountpoint} in
- "none")
- # skip this line/dataset.
- ;;
- "legacy")
- if [ -f "${tab_file}" ]; then
- if findmnt -snero source -F "${tab_file}" -S "${dataset}" > /dev/null 2>&1; then
- opt=$(findmnt -snero options -F "${tab_file}" -S "${dataset}")
- mnt=$(findmnt -snero target -F "${tab_file}" -S "${dataset}")
- mount -t zfs -o "${opt}" "${dataset}" "${node}${mnt}"
- fi
+ canmount=$(zfs get -H -o value canmount "${dataset}")
+ # skip datasets that should not be mounted here
+ [ ${dataset} != "${ZFS_DATASET}" -a \( ${canmount} = "off" -o ${canmount} = "noauto" -o ${mountpoint} = "none" \) ] && continue
+ if [ ${mountpoint} = "legacy" ]; then
+ if [ -f "${tab_file}" ]; then
+ if findmnt -snero source -F "${tab_file}" -S "${dataset}" > /dev/null 2>&1; then
+ opt=$(findmnt -snero options -F "${tab_file}" -S "${dataset}")
+ mnt=$(findmnt -snero target -F "${tab_file}" -S "${dataset}")
+ zfs_decrypt_fs "${dataset}"
+ mount -t zfs -o "${opt}" "${dataset}" "${node}${mnt}"
fi
- ;;
- *)
- mount -t zfs -o "zfsutil,${rwopt_exp}" "${dataset}" "${node}${mountpoint}"
- ;;
- esac
+ fi
+ else
+ zfs_decrypt_fs "${dataset}"
+ mount -t zfs -o "zfsutil,${rwopt_exp}" "${dataset}" "${node}/${mountpoint##${rootmnt}}"
+ fi
done
}
-run_hook() {
+set_flags() {
# Force import the pools, useful if the pool has not properly been exported using 'zpool export <pool>'
[ ! "${zfs_force}" = "" ] && ZPOOL_FORCE="-f"
+ # Disable late hook, useful if we want to use zfs-import-cache.service instead
+ [ ! "${zfs_boot_only}" = "" ] && ZFS_BOOT_ONLY="1"
+
# Add import directory to import command flags
[ ! "${zfs_import_dir}" = "" ] && ZPOOL_IMPORT_FLAGS="${ZPOOL_IMPORT_FLAGS} -d ${zfs_import_dir}"
+ [ "${zfs_import_dir}" = "" ] && [ -f /etc/zfs/zpool.cache.org ] && ZPOOL_IMPORT_FLAGS="${ZPOOL_IMPORT_FLAGS} -c /etc/zfs/zpool.cache.org"
+}
+
+run_hook() {
+ set_flags
# Wait 15 seconds for ZFS devices to show up
[ "${zfs_wait}" = "" ] && ZFS_WAIT="15" || ZFS_WAIT="${zfs_wait}"
- [ "${root}" = "zfs" ] && mount_handler="zfs_mount_handler"
+ case ${root} in
+ # root=zfs
+ "zfs")
+ ZFS_DATASET="bootfs"
+ mount_handler="zfs_mount_handler"
+ ;;
+ # root=ZFS=... syntax (grub)
+ "ZFS="*)
+ mount_handler="zfs_mount_handler"
+ ZFS_DATASET="${root#*[=]}"
+ ;;
+ esac
case ${zfs} in
"")
@@ -98,22 +192,46 @@ run_hook() {
auto|bootfs)
ZFS_DATASET="bootfs"
mount_handler="zfs_mount_handler"
+ local pool="[a-zA-Z][^ ]*"
;;
*)
ZFS_DATASET="${zfs}"
mount_handler="zfs_mount_handler"
+ local pool="${ZFS_DATASET%%/*}"
;;
esac
- # Allow up to n seconds for zfs device to show up
- for i in $(seq 1 ${ZFS_WAIT}); do
- [ -c "/dev/zfs" ] && break
+ # Allow at least n seconds for zfs device to show up. Especially
+ # when using zfs_import_dir instead of zpool.cache, the listing of
+ # available pools can be slow, so this loop must be top-tested to
+ # ensure we do one 'zpool import' pass after the timer has expired.
+ sleep ${ZFS_WAIT} & pid=$!
+ local break_after=0
+ while :; do
+ kill -0 $pid > /dev/null 2>&1 || break_after=1
+ if [ -c "/dev/zfs" ]; then
+ zpool import ${ZPOOL_IMPORT_FLAGS} | awk "
+ BEGIN { pool_found=0; online=0; unavail=0 }
+ /^ ${pool} .*/ { pool_found=1 }
+ /^\$/ { pool_found=0 }
+ /UNAVAIL/ { if (pool_found == 1) { unavail=1 } }
+ /ONLINE/ { if (pool_found == 1) { online=1 } }
+ END { if (online == 1 && unavail != 1)
+ { exit 0 }
+ else
+ { exit 1 }
+ }" && break
+ fi
+ [ $break_after == 1 ] && break
sleep 1
done
+ kill $pid > /dev/null 2>&1
}
run_latehook () {
- zpool import -N -a ${ZPOOL_FORCE}
+ set_flags
+ # only run zpool import if flags were set (cache file found / zfs_import_dir specified) and zfs_boot_only is not set
+ [ ! "${ZPOOL_IMPORT_FLAGS}" = "" ] && [ "${ZFS_BOOT_ONLY}" = "" ] && zpool import ${ZPOOL_IMPORT_FLAGS} -N -a ${ZPOOL_FORCE}
}
# vim:set ts=4 sw=4 ft=sh et:
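
Taken together, run_hook/set_flags read their options from the kernel command line: root=zfs or root=ZFS=<dataset>, zfs=bootfs|<dataset>, plus zfs_force, zfs_wait, zfs_import_dir and the new zfs_boot_only. A hypothetical boot entry exercising the new behaviour (the pool rpool and dataset rpool/ROOT/default are placeholders, not taken from this commit):

    # root given explicitly, using the GRUB-style syntax handled by the new "ZFS=" case
    linux  /vmlinuz-linux root=ZFS=rpool/ROOT/default rw zfs_wait=30 zfs_boot_only=1
    initrd /initramfs-linux.img

    # or let the hook resolve the pool's bootfs property, forcing the import
    linux  /vmlinuz-linux root=zfs rw zfs=bootfs zfs_force=1 zfs_import_dir=/dev/disk/by-id
    initrd /initramfs-linux.img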
diff --git a/zfs.initcpio.install b/zfs.initcpio.install
index 589b46b..27e11f5 100644
--- a/zfs.initcpio.install
+++ b/zfs.initcpio.install
@@ -22,7 +22,8 @@ build() {
zstreamdump \
/lib/udev/vdev_id \
/lib/udev/zvol_id \
- findmnt
+ findmnt \
+ udevadm
map add_file \
/lib/udev/rules.d/60-zvol.rules \
@@ -38,9 +39,10 @@ build() {
# allow mount(8) to "autodetect" ZFS
echo 'zfs' >>"${BUILDROOT}/etc/filesystems"
- [[ -f /etc/zfs/zpool.cache ]] && add_file "/etc/zfs/zpool.cache"
+ [[ -f /etc/zfs/zpool.cache ]] && cp "/etc/zfs/zpool.cache" "${BUILDROOT}/etc/zfs/zpool.cache.org"
[[ -f /etc/modprobe.d/zfs.conf ]] && add_file "/etc/modprobe.d/zfs.conf"
[[ -f /etc/hostid ]] && add_file "/etc/hostid"
+ [[ -f /etc/fstab ]] && add_file "/etc/fstab"
}
help() {
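
The install script only packages the hook and its helper binaries; the hook still has to be enabled when the initramfs is generated. A typical setup, not part of this commit and dependent on the local configuration, assuming the stock Arch mkinitcpio tooling:

    # /etc/mkinitcpio.conf -- the zfs hook must run before filesystems
    HOOKS=(base udev autodetect modconf block keyboard zfs filesystems)

    # rebuild the images for all installed presets
    mkinitcpio -P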