1 # ZFS boot stub for initramfs-tools.
3 # In the initramfs environment, the /init script sources this stub to
4 # override the default functions in the /scripts/local script.
6 # Enable this by passing boot=zfs on the kernel command line.
9 # Source the common functions
10 . /etc/zfs/zfs-functions
12 # Start interactive shell.
# Use Debian's panic() if defined, because it makes it possible to prevent
# shell access by setting panic in cmdline (e.g. panic=0 or panic=15).
15 # See "4.5 Disable root prompt on the initramfs" of Securing Debian Manual:
16 # https://www.debian.org/doc/manuals/securing-debian-howto/ch4.en.html
18 if type panic > /dev/null 2>&1; then
25 # This runs any scripts that should run before we start importing
26 # pools and mounting any filesystems.
29 if type run_scripts > /dev/null 2>&1 && \
30 [ -f "/scripts/local-top" -o -d "/scripts/local-top" ]
32 [ "$quiet" != "y" ] && \
33 zfs_log_begin_msg "Running /scripts/local-top"
34 run_scripts /scripts/local-top
35 [ "$quiet" != "y" ] && zfs_log_end_msg
38 if type run_scripts > /dev/null 2>&1 && \
39 [ -f "/scripts/local-premount" -o -d "/scripts/local-premount" ]
41 [ "$quiet" != "y" ] && \
42 zfs_log_begin_msg "Running /scripts/local-premount"
43 run_scripts /scripts/local-premount
44 [ "$quiet" != "y" ] && zfs_log_end_msg
48 # If plymouth is available, hide the splash image.
51 if [ -x /bin/plymouth ] && /bin/plymouth --ping
53 /bin/plymouth hide-splash >/dev/null 2>&1
57 # Get a ZFS filesystem property value.
63 "${ZFS}" get -H -ovalue $value "$fs" 2> /dev/null
66 # Find the 'bootfs' property on pool $1.
67 # If the property does not contain '/', then ignore this
68 # pool by exporting it again.
# If 'POOL_IMPORTED' isn't set, no pool has been imported and therefore
74 # we won't be able to find a root fs.
75 [ -z "${POOL_IMPORTED}" ] && return 1
77 # If it's already specified, just keep it mounted and exit
78 # User (kernel command line) must be correct.
79 [ -n "${ZFS_BOOTFS}" ] && return 0
81 # Not set, try to find it in the 'bootfs' property of the pool.
82 # NOTE: zpool does not support 'get -H -ovalue bootfs'...
83 ZFS_BOOTFS=$("${ZPOOL}" list -H -obootfs "$pool")
85 # Make sure it's not '-' and that it starts with /.
86 if [ "${ZFS_BOOTFS}" != "-" ] && \
87 $(get_fs_value "${ZFS_BOOTFS}" mountpoint | grep -q '^/$')
94 # Not boot fs here, export it and later try again..
95 "${ZPOOL}" export "$pool"
101 # Support function to get a list of all pools, separated with ';'
107 pools=$($CMD 2> /dev/null | \
108 grep -E "pool:|^[a-zA-Z0-9]" | \
110 while read pool; do \
114 echo "${pools%%;}" # Return without the last ';'.
117 # Get a list of all available pools
120 local available_pools npools
122 if [ -n "${ZFS_POOL_IMPORT}" ]; then
123 echo "$ZFS_POOL_IMPORT"
127 # Get the base list of available pools.
128 available_pools=$(find_pools "$ZPOOL" import)
130 # Just in case - seen it happen (that a pool isn't visible/found
131 # with a simple "zpool import" but only when using the "-d"
132 # option or setting ZPOOL_IMPORT_PATH).
133 if [ -d "/dev/disk/by-id" ]
135 npools=$(find_pools "$ZPOOL" import -d /dev/disk/by-id)
# Because we have found extra pool(s) here, which weren't
# found 'normally', we need to force USE_DISK_BY_ID to
# make sure we're able to actually import it/them later.
143 if [ -n "$available_pools" ]
145 # Filter out duplicates (pools found with the simple
146 # "zpool import" but which is also found with the
147 # "zpool import -d ...").
148 npools=$(echo "$npools" | sed "s,$available_pools,,")
150 # Add the list to the existing list of
152 available_pools="$available_pools;$npools"
154 available_pools="$npools"
159 # Filter out any exceptions...
160 if [ -n "$ZFS_POOL_EXCEPTIONS" ]
165 OLD_IFS="$IFS" ; IFS=";"
167 for pool in $available_pools
169 for exception in $ZFS_POOL_EXCEPTIONS
171 [ "$pool" = "$exception" ] && continue 2
179 apools="$apools;$pool"
187 available_pools="$apools"
190 # Return list of available pools.
191 echo "$available_pools"
194 # Import given pool $1
200 # Verify that the pool isn't already imported
201 # Make as sure as we can to not require '-f' to import.
202 "${ZPOOL}" get name,guid -o value -H 2>/dev/null | grep -Fxq "$pool" && return 0
204 # For backwards compatibility, make sure that ZPOOL_IMPORT_PATH is set
205 # to something we can use later with the real import(s). We want to
206 # make sure we find all by* dirs, BUT by-vdev should be first (if it
208 if [ -n "$USE_DISK_BY_ID" -a -z "$ZPOOL_IMPORT_PATH" ]
210 dirs="$(for dir in $(echo /dev/disk/by-*)
212 # Ignore by-vdev here - we want it first!
213 echo "$dir" | grep -q /by-vdev && continue
214 [ ! -d "$dir" ] && continue
217 done | sed 's,:$,,g')"
219 if [ -d "/dev/disk/by-vdev" ]
221 # Add by-vdev at the beginning.
222 ZPOOL_IMPORT_PATH="/dev/disk/by-vdev:"
225 # ... and /dev at the very end, just for good measure.
226 ZPOOL_IMPORT_PATH="$ZPOOL_IMPORT_PATH$dirs:/dev"
229 # Needs to be exported for "zpool" to catch it.
230 [ -n "$ZPOOL_IMPORT_PATH" ] && export ZPOOL_IMPORT_PATH
233 [ "$quiet" != "y" ] && zfs_log_begin_msg \
234 "Importing pool '${pool}' using defaults"
236 ZFS_CMD="${ZPOOL} import -N ${ZPOOL_FORCE} ${ZPOOL_IMPORT_OPTS}"
237 ZFS_STDERR="$($ZFS_CMD "$pool" 2>&1)"
239 if [ "${ZFS_ERROR}" != 0 ]
241 [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
243 if [ -f "${ZPOOL_CACHE}" ]
245 [ "$quiet" != "y" ] && zfs_log_begin_msg \
246 "Importing pool '${pool}' using cachefile."
248 ZFS_CMD="${ZPOOL} import -c ${ZPOOL_CACHE} -N ${ZPOOL_FORCE} ${ZPOOL_IMPORT_OPTS}"
249 ZFS_STDERR="$($ZFS_CMD "$pool" 2>&1)"
253 if [ "${ZFS_ERROR}" != 0 ]
255 [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
259 echo "Command: ${ZFS_CMD} '$pool'"
260 echo "Message: $ZFS_STDERR"
261 echo "Error: $ZFS_ERROR"
263 echo "Failed to import pool '$pool'."
264 echo "Manually import the pool and exit."
269 [ "$quiet" != "y" ] && zfs_log_end_msg
# Loading a module in an initrd requires a slightly different approach,
277 # with more logging etc.
280 if [ "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP" > 0 ]
282 if [ "$quiet" != "y" ]; then
283 zfs_log_begin_msg "Sleeping for" \
284 "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP seconds..."
286 sleep "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP"
287 [ "$quiet" != "y" ] && zfs_log_end_msg
290 # Wait for all of the /dev/{hd,sd}[a-z] device nodes to appear.
291 if type wait_for_udev > /dev/null 2>&1 ; then
293 elif type wait_for_dev > /dev/null 2>&1 ; then
# zpool import refuses to import without a valid /proc/self/mounts
298 [ ! -f /proc/self/mounts ] && mount proc /proc
301 load_module "zfs" || return 1
303 if [ "$ZFS_INITRD_POST_MODPROBE_SLEEP" > 0 ]
305 if [ "$quiet" != "y" ]; then
306 zfs_log_begin_msg "Sleeping for" \
307 "$ZFS_INITRD_POST_MODPROBE_SLEEP seconds..."
309 sleep "$ZFS_INITRD_POST_MODPROBE_SLEEP"
310 [ "$quiet" != "y" ] && zfs_log_end_msg
316 # Mount a given filesystem
322 # Check that the filesystem exists
323 "${ZFS}" list -oname -tfilesystem -H "${fs}" > /dev/null 2>&1
324 [ "$?" -ne 0 ] && return 1
326 # Skip filesystems with canmount=off. The root fs should not have
327 # canmount=off, but ignore it for backwards compatibility just in case.
328 if [ "$fs" != "${ZFS_BOOTFS}" ]
330 canmount=$(get_fs_value "$fs" canmount)
331 [ "$canmount" = "off" ] && return 0
334 # Need the _original_ datasets mountpoint!
335 mountpoint=$(get_fs_value "$fs" mountpoint)
336 if [ "$mountpoint" = "legacy" -o "$mountpoint" = "none" ]; then
337 # Can't use the mountpoint property. Might be one of our
338 # clones. Check the 'org.zol:mountpoint' property set in
339 # clone_snap() if that's usable.
340 mountpoint=$(get_fs_value "$fs" org.zol:mountpoint)
341 if [ "$mountpoint" = "legacy" -o \
342 "$mountpoint" = "none" -o \
343 "$mountpoint" = "-" ]
345 if [ "$fs" != "${ZFS_BOOTFS}" ]; then
346 # We don't have a proper mountpoint and this
350 # Last hail-mary: Hope 'rootmnt' is set!
355 if [ "$mountpoint" = "legacy" ]; then
356 ZFS_CMD="mount -t zfs"
358 # If it's not a legacy filesystem, it can only be a
360 ZFS_CMD="mount -o zfsutil -t zfs"
363 ZFS_CMD="mount -o zfsutil -t zfs"
366 # Possibly decrypt a filesystem using native encryption.
369 [ "$quiet" != "y" ] && \
370 zfs_log_begin_msg "Mounting '${fs}' on '${rootmnt}/${mountpoint}'"
371 [ -n "${ZFS_DEBUG}" ] && \
372 zfs_log_begin_msg "CMD: '$ZFS_CMD ${fs} ${rootmnt}/${mountpoint}'"
374 ZFS_STDERR=$(${ZFS_CMD} "${fs}" "${rootmnt}/${mountpoint}" 2>&1)
376 if [ "${ZFS_ERROR}" != 0 ]
378 [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
382 echo "Command: ${ZFS_CMD} ${fs} ${rootmnt}/${mountpoint}"
383 echo "Message: $ZFS_STDERR"
384 echo "Error: $ZFS_ERROR"
386 echo "Failed to mount ${fs} on ${rootmnt}/${mountpoint}."
387 echo "Manually mount the filesystem and exit."
390 [ "$quiet" != "y" ] && zfs_log_end_msg
396 # Unlock a ZFS native encrypted filesystem.
401 # If pool encryption is active and the zfs command understands '-o encryption'
402 if [ "$(zpool list -H -o feature@encryption $(echo "${fs}" | awk -F\/ '{print $1}'))" = 'active' ]; then
404 # Determine dataset that holds key for root dataset
405 ENCRYPTIONROOT="$(get_fs_value "${fs}" encryptionroot)"
406 KEYLOCATION="$(get_fs_value "${ENCRYPTIONROOT}" keylocation)"
408 echo "${ENCRYPTIONROOT}" > /run/zfs_fs_name
410 # If root dataset is encrypted...
411 if ! [ "${ENCRYPTIONROOT}" = "-" ]; then
412 KEYSTATUS="$(get_fs_value "${ENCRYPTIONROOT}" keystatus)"
413 # Continue only if the key needs to be loaded
414 [ "$KEYSTATUS" = "unavailable" ] || return 0
417 # If key is stored in a file, do not prompt
418 if ! [ "${KEYLOCATION}" = "prompt" ]; then
419 $ZFS load-key "${ENCRYPTIONROOT}"
421 # Prompt with plymouth, if active
422 elif [ -e /bin/plymouth ] && /bin/plymouth --ping 2>/dev/null; then
423 echo "plymouth" > /run/zfs_console_askpwd_cmd
424 while [ $TRY_COUNT -gt 0 ]; do
425 plymouth ask-for-password --prompt "Encrypted ZFS password for ${ENCRYPTIONROOT}" | \
426 $ZFS load-key "${ENCRYPTIONROOT}" && break
427 TRY_COUNT=$((TRY_COUNT - 1))
430 # Prompt with systemd, if active
431 elif [ -e /run/systemd/system ]; then
432 echo "systemd-ask-password" > /run/zfs_console_askpwd_cmd
433 while [ $TRY_COUNT -gt 0 ]; do
434 systemd-ask-password "Encrypted ZFS password for ${ENCRYPTIONROOT}" --no-tty | \
435 $ZFS load-key "${ENCRYPTIONROOT}" && break
436 TRY_COUNT=$((TRY_COUNT - 1))
439 # Prompt with ZFS tty, otherwise
441 # Temporarily setting "printk" to "7" allows the prompt to appear even when the "quiet" kernel option has been used
442 echo "load-key" > /run/zfs_console_askpwd_cmd
443 storeprintk="$(awk '{print $1}' /proc/sys/kernel/printk)"
444 echo 7 > /proc/sys/kernel/printk
445 $ZFS load-key "${ENCRYPTIONROOT}"
446 echo "$storeprintk" > /proc/sys/kernel/printk
454 # Destroy a given filesystem.
459 [ "$quiet" != "y" ] && \
460 zfs_log_begin_msg "Destroying '$fs'"
462 ZFS_CMD="${ZFS} destroy $fs"
463 ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
465 if [ "${ZFS_ERROR}" != 0 ]
467 [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
471 echo "Command: $ZFS_CMD"
472 echo "Message: $ZFS_STDERR"
473 echo "Error: $ZFS_ERROR"
475 echo "Failed to destroy '$fs'. Please make sure that '$fs' is not available."
476 echo "Hint: Try: zfs destroy -Rfn $fs"
477 echo "If this dryrun looks good, then remove the 'n' from '-Rfn' and try again."
480 [ "$quiet" != "y" ] && zfs_log_end_msg
486 # Clone snapshot $1 to destination filesystem $2
487 # Set 'canmount=noauto' and 'mountpoint=none' so that we get to keep
# manual control over its mounting (i.e., make sure it's not automatically
489 # mounted with a 'zfs mount -a' in the init/systemd scripts).
494 local mountpoint="$3"
496 [ "$quiet" != "y" ] && zfs_log_begin_msg "Cloning '$snap' to '$destfs'"
498 # Clone the snapshot into a dataset we can boot from
499 # + We don't want this filesystem to be automatically mounted, we
500 # want control over this here and nowhere else.
501 # + We don't need any mountpoint set for the same reason.
502 # We use the 'org.zol:mountpoint' property to remember the mountpoint.
503 ZFS_CMD="${ZFS} clone -o canmount=noauto -o mountpoint=none"
504 ZFS_CMD="${ZFS_CMD} -o org.zol:mountpoint=${mountpoint}"
505 ZFS_CMD="${ZFS_CMD} $snap $destfs"
506 ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
508 if [ "${ZFS_ERROR}" != 0 ]
510 [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
514 echo "Command: $ZFS_CMD"
515 echo "Message: $ZFS_STDERR"
516 echo "Error: $ZFS_ERROR"
518 echo "Failed to clone snapshot."
519 echo "Make sure that the any problems are corrected and then make sure"
520 echo "that the dataset '$destfs' exists and is bootable."
523 [ "$quiet" != "y" ] && zfs_log_end_msg
529 # Rollback a given snapshot.
534 [ "$quiet" != "y" ] && zfs_log_begin_msg "Rollback $snap"
536 ZFS_CMD="${ZFS} rollback -Rf $snap"
537 ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
539 if [ "${ZFS_ERROR}" != 0 ]
541 [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
545 echo "Command: $ZFS_CMD"
546 echo "Message: $ZFS_STDERR"
547 echo "Error: $ZFS_ERROR"
549 echo "Failed to rollback snapshot."
552 [ "$quiet" != "y" ] && zfs_log_end_msg
558 # Get a list of snapshots, give them as a numbered list
559 # to the user to choose from.
564 local SNAP snapnr snap debug
566 # We need to temporarily disable debugging. Set 'debug' so we
# remember to enable it again.
568 if [ -n "${ZFS_DEBUG}" ]; then
574 # Because we need the resulting snapshot, which is sent on
575 # stdout to the caller, we use stderr for our questions.
576 echo "What snapshot do you want to boot from?" > /dev/stderr
578 echo " $i: ${snap}" > /dev/stderr
579 eval `echo SNAP_$i=$snap`
582 $("${ZFS}" list -H -oname -tsnapshot -r "${fs}")
585 echo -n " Snap nr [1-$((i-1))]? " > /dev/stderr
588 # Re-enable debugging.
589 if [ -n "${debug}" ]; then
594 echo "$(eval echo "$"SNAP_$snapnr)"
597 setup_snapshot_booting()
600 local s destfs subfs mountpoint retval=0 filesystems fs
602 # Make sure that the snapshot specified actually exist.
603 if [ ! $(get_fs_value "${snap}" type) ]
605 # Snapshot does not exist (...@<null> ?)
606 # ask the user for a snapshot to use.
607 snap="$(ask_user_snap "${snap%%@*}")"
# Separate the full snapshot ('$snap') into its filesystem and
611 # snapshot names. Would have been nice with a split() function..
613 snapname="${snap##*@}"
614 ZFS_BOOTFS="${rootfs}_${snapname}"
616 if ! grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline
618 # If the destination dataset for the clone
619 # already exists, destroy it. Recursively
620 if [ $(get_fs_value "${rootfs}_${snapname}" type) ]; then
621 filesystems=$("${ZFS}" list -oname -tfilesystem -H \
622 -r -Sname "${ZFS_BOOTFS}")
623 for fs in $filesystems; do
629 # Get all snapshots, recursively (might need to clone /usr, /var etc
631 for s in $("${ZFS}" list -H -oname -tsnapshot -r "${rootfs}" | \
634 if grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline
637 rollback_snap "$s" || retval=$((retval + 1))
639 # Setup a destination filesystem name.
640 # Ex: Called with 'rpool/ROOT/debian@snap2'
641 # rpool/ROOT/debian@snap2 => rpool/ROOT/debian_snap2
642 # rpool/ROOT/debian/boot@snap2 => rpool/ROOT/debian_snap2/boot
643 # rpool/ROOT/debian/usr@snap2 => rpool/ROOT/debian_snap2/usr
644 # rpool/ROOT/debian/var@snap2 => rpool/ROOT/debian_snap2/var
645 subfs="${s##$rootfs}"
646 subfs="${subfs%%@$snapname}"
648 destfs="${rootfs}_${snapname}" # base fs.
649 [ -n "$subfs" ] && destfs="${destfs}$subfs" # + sub fs.
651 # Get the mountpoint of the filesystem, to be used
652 # with clone_snap(). If legacy or none, then use
654 mountpoint=$(get_fs_value "${s%%@*}" mountpoint)
655 if [ "$mountpoint" = "legacy" -o \
656 "$mountpoint" = "none" ]
658 if [ -n "${subfs}" ]; then
659 mountpoint="${subfs}"
665 # Clone the snapshot into its own
667 clone_snap "$s" "${destfs}" "${mountpoint}" || \
668 retval=$((retval + 1))
672 # If we haven't return yet, we have a problem...
676 # ================================================================
678 # This is the main function.
681 local snaporig snapsub destfs pool POOLS
683 # ----------------------------------------------------------------
684 # I N I T I A L S E T U P
687 # Run the pre-mount scripts from /scripts/local-top.
691 # Source the default setup variables.
692 [ -r '/etc/default/zfs' ] && . /etc/default/zfs
695 # Support debug option
696 if grep -qiE '(^|[^\\](\\\\)* )(zfs_debug|zfs\.debug|zfsdebug)=(on|yes|1)( |$)' /proc/cmdline
700 #exec 2> /var/log/boot.debug
705 # Load ZFS module etc.
706 if ! load_module_initrd; then
709 echo "Failed to load ZFS modules."
710 echo "Manually load the modules and exit."
715 # Look for the cache file (if any).
716 [ ! -f ${ZPOOL_CACHE} ] && unset ZPOOL_CACHE
719 # Compatibility: 'ROOT' is for Debian GNU/Linux (etc),
720 # 'root' is for Redhat/Fedora (etc),
721 # 'REAL_ROOT' is for Gentoo
724 [ -n "$root" ] && ROOT=${root}
726 [ -n "$REAL_ROOT" ] && ROOT=${REAL_ROOT}
730 # Where to mount the root fs in the initrd - set outside this script
731 # Compatibility: 'rootmnt' is for Debian GNU/Linux (etc),
732 # 'NEWROOT' is for RedHat/Fedora (etc),
733 # 'NEW_ROOT' is for Gentoo
736 [ -n "$NEWROOT" ] && rootmnt=${NEWROOT}
738 [ -n "$NEW_ROOT" ] && rootmnt=${NEW_ROOT}
742 # No longer set in the defaults file, but it could have been set in
743 # get_pools() in some circumstances. If it's something, but not 'yes',
744 # it's no good to us.
745 [ -n "$USE_DISK_BY_ID" -a "$USE_DISK_BY_ID" != 'yes' ] && \
748 # ----------------------------------------------------------------
749 # P A R S E C O M M A N D L I N E O P T I O N S
751 # This part is the really ugly part - there's so many options and permutations
752 # 'out there', and if we should make this the 'primary' source for ZFS initrd
753 # scripting, we need/should support them all.
755 # Supports the following kernel command line argument combinations
# (in this order - first match wins):
#   rpool=<pool>			(tries to find bootfs automatically)
759 # bootfs=<pool>/<dataset> (uses this for rpool - first part)
760 # rpool=<pool> bootfs=<pool>/<dataset>
761 # -B zfs-bootfs=<pool>/<fs> (uses this for rpool - first part)
762 # rpool=rpool (default if none of the above is used)
763 # root=<pool>/<dataset> (uses this for rpool - first part)
764 # root=ZFS=<pool>/<dataset> (uses this for rpool - first part, without 'ZFS=')
#   root=zfs:AUTO			(tries to detect both pool and rootfs)
766 # root=zfs:<pool>/<dataset> (uses this for rpool - first part, without 'zfs:')
768 # Option <dataset> could also be <snapshot>
769 # Option <pool> could also be <guid>
772 # Support force option
773 # In addition, setting one of zfs_force, zfs.force or zfsforce to
774 # 'yes', 'on' or '1' will make sure we force import the pool.
775 # This should (almost) never be needed, but it's here for
778 if grep -qiE '(^|[^\\](\\\\)* )(zfs_force|zfs\.force|zfsforce)=(on|yes|1)( |$)' /proc/cmdline
784 # Look for 'rpool' and 'bootfs' parameter
785 [ -n "$rpool" ] && ZFS_RPOOL="${rpool#rpool=}"
786 [ -n "$bootfs" ] && ZFS_BOOTFS="${bootfs#bootfs=}"
789 # If we have 'ROOT' (see above), but not 'ZFS_BOOTFS', then use
791 [ -n "$ROOT" -a -z "${ZFS_BOOTFS}" ] && ZFS_BOOTFS="$ROOT"
794 # Check for the `-B zfs-bootfs=%s/%u,...` kind of parameter.
795 # NOTE: Only use the pool name and dataset. The rest is not
796 # supported by ZoL (whatever it's for).
797 if [ -z "$ZFS_RPOOL" ]
799 # The ${zfs-bootfs} variable is set at the kernel command
800 # line, usually by GRUB, but it cannot be referenced here
801 # directly because bourne variable names cannot contain a
804 # Reassign the variable by dumping the environment and
805 # stripping the zfs-bootfs= prefix. Let the shell handle
806 # quoting through the eval command.
807 eval ZFS_RPOOL=$(set | sed -n -e 's,^zfs-bootfs=,,p')
811 # No root fs or pool specified - do auto detect.
812 if [ -z "$ZFS_RPOOL" -a -z "${ZFS_BOOTFS}" ]
814 # Do auto detect. Do this by 'cheating' - set 'root=zfs:AUTO'
815 # which will be caught later
819 # ----------------------------------------------------------------
820 # F I N D A N D I M P O R T C O R R E C T P O O L
823 if [ "$ROOT" = "zfs:AUTO" ]
825 # Try to detect both pool and root fs.
827 [ "$quiet" != "y" ] && \
828 zfs_log_begin_msg "Attempting to import additional pools."
830 # Get a list of pools available for import
831 if [ -n "$ZFS_RPOOL" ]
833 # We've specified a pool - check only that
839 OLD_IFS="$IFS" ; IFS=";"
842 [ -z "$pool" ] && continue
849 [ "$quiet" != "y" ] && zfs_log_end_msg $ZFS_ERROR
851 # No auto - use value from the command line option.
853 # Strip 'zfs:' and 'ZFS='.
854 ZFS_BOOTFS="${ROOT#*[:=]}"
856 # Strip everything after the first slash.
857 ZFS_RPOOL="${ZFS_BOOTFS%%/*}"
860 # Import the pool (if not already done so in the AUTO check above).
861 if [ -n "$ZFS_RPOOL" -a -z "${POOL_IMPORTED}" ]
863 [ "$quiet" != "y" ] && \
864 zfs_log_begin_msg "Importing ZFS root pool '$ZFS_RPOOL'"
866 import_pool "${ZFS_RPOOL}"
867 find_rootfs "${ZFS_RPOOL}"
869 [ "$quiet" != "y" ] && zfs_log_end_msg
872 if [ -z "${POOL_IMPORTED}" ]
874 # No pool imported, this is serious!
877 echo "Command: $ZFS_CMD"
878 echo "Message: $ZFS_STDERR"
879 echo "Error: $ZFS_ERROR"
881 echo "No pool imported. Manually import the root pool"
882 echo "at the command prompt and then exit."
883 echo "Hint: Try: zpool import -R ${rootmnt} -N ${ZFS_RPOOL}"
887 # In case the pool was specified as guid, resolve guid to name
888 pool="$("${ZPOOL}" get name,guid -o name,value -H | \
889 awk -v pool="${ZFS_RPOOL}" '$2 == pool { print $1 }')"
890 if [ -n "$pool" ]; then
891 # If $ZFS_BOOTFS contains guid, replace the guid portion with $pool
892 ZFS_BOOTFS=$(echo "$ZFS_BOOTFS" | \
893 sed -e "s/$("${ZPOOL}" get guid -o value "$pool" -H)/$pool/g")
897 # Set the no-op scheduler on the disks containing the vdevs of
898 # the root pool. For single-queue devices, this scheduler is
899 # "noop", for multi-queue devices, it is "none".
900 # ZFS already does this for wholedisk vdevs (for all pools), so this
901 # is only important for partitions.
902 "${ZPOOL}" status -L "${ZFS_RPOOL}" 2> /dev/null |
903 awk '/^\t / && !/(mirror|raidz)/ {
905 sub(/[0-9]+$/, "", dev);
910 SCHEDULER=/sys/block/$i/queue/scheduler
911 if [ -e "${SCHEDULER}" ]
913 # Query to see what schedulers are available
914 case "$(cat "${SCHEDULER}")" in
915 *noop*) echo noop > "${SCHEDULER}" ;;
916 *none*) echo none > "${SCHEDULER}" ;;
922 # ----------------------------------------------------------------
923 # P R E P A R E R O O T F I L E S Y S T E M
925 if [ -n "${ZFS_BOOTFS}" ]
927 # Booting from a snapshot?
928 # Will overwrite the ZFS_BOOTFS variable like so:
929 # rpool/ROOT/debian@snap2 => rpool/ROOT/debian_snap2
930 echo "${ZFS_BOOTFS}" | grep -q '@' && \
931 setup_snapshot_booting "${ZFS_BOOTFS}"
934 if [ -z "${ZFS_BOOTFS}" ]
936 # Still nothing! Let the user sort this out.
939 echo "Error: Unknown root filesystem - no 'bootfs' pool property and"
940 echo " not specified on the kernel command line."
942 echo "Manually mount the root filesystem on $rootmnt and then exit."
943 echo "Hint: Try: mount -o zfsutil -t zfs ${ZFS_RPOOL-rpool}/ROOT/system $rootmnt"
947 # ----------------------------------------------------------------
948 # M O U N T F I L E S Y S T E M S
950 # * Ideally, the root filesystem would be mounted like this:
952 # zpool import -R "$rootmnt" -N "$ZFS_RPOOL"
953 # zfs mount -o mountpoint=/ "${ZFS_BOOTFS}"
955 # but the MOUNTPOINT prefix is preserved on descendent filesystem
956 # after the pivot into the regular root, which later breaks things
957 # like `zfs mount -a` and the /proc/self/mounts refresh.
959 # * Mount additional filesystems required
960 # Such as /usr, /var, /usr/local etc.
961 # NOTE: Mounted in the order specified in the
962 # ZFS_INITRD_ADDITIONAL_DATASETS variable so take care!
964 # Go through the complete list (recursively) of all filesystems below
965 # the real root dataset
966 filesystems=$("${ZFS}" list -oname -tfilesystem -H -r "${ZFS_BOOTFS}")
967 for fs in $filesystems $ZFS_INITRD_ADDITIONAL_DATASETS
972 touch /run/zfs_unlock_complete
973 if [ -e /run/zfs_unlock_complete_notify ]; then
974 read zfs_unlock_complete_notify < /run/zfs_unlock_complete_notify
978 # Debugging information
979 if [ -n "${ZFS_DEBUG}" ]
983 echo "DEBUG: imported pools:"
987 echo "DEBUG: mounted ZFS filesystems:"
991 echo "=> waiting for ENTER before continuing because of 'zfsdebug=1'. "
992 echo -n " 'c' for shell, 'r' for reboot, 'ENTER' to continue. "
995 [ "$b" = "c" ] && /bin/sh
996 [ "$b" = "r" ] && reboot -f
1002 # Run local bottom script
1003 if type run_scripts > /dev/null 2>&1 && \
1004 [ -f "/scripts/local-bottom" -o -d "/scripts/local-bottom" ]
1006 [ "$quiet" != "y" ] && \
1007 zfs_log_begin_msg "Running /scripts/local-bottom"
1008 run_scripts /scripts/local-bottom
1009 [ "$quiet" != "y" ] && zfs_log_end_msg