Initramfs scripts for ZoL.
author Turbo Fredriksson <turbo@bayour.com>
Thu, 30 Jan 2014 16:26:48 +0000 (16:26 +0000)
committer Brian Behlendorf <behlendorf1@llnl.gov>
Thu, 9 Jul 2015 01:14:34 +0000 (18:14 -0700)
* Supports booting from a ZFS snapshot.
  This is done by cloning the snapshot into a dataset. If the resulting
  dataset already exists, it is destroyed first. The clone is then mounted
  as the root filesystem.
  * If the snapshot does not exist, the base dataset (the part before '@')
    is used as the boot filesystem instead.
  * If no snapshot is specified on the 'root=' kernel command line, but there
    is an '@', then get a list of snapshots below that filesystem and ask the
    user which one to use.
  * Clone with 'mountpoint=none' and 'canmount=noauto' - we mount manually
    and explicitly.
    * For sub-filesystems that do not have a mountpoint property set, we use
      'org.zol:mountpoint' to keep track of the mountpoint.
  * Allow rolling back to the snapshot instead of cloning it and booting from
    the clone.
* Allow mounting the root fs and sub-filesystems with mountpoint=legacy set
* Allow mounting a filesystem which uses native encryption.
* Support all currently used kernel command line arguments
  The various distributions each have their own convention for what to
  specify on the kernel command line to boot off a ZFS filesystem (see the
  examples below this list).
  * Extra options:
    * zfsdebug=(on,yes,1) Show extra debugging information
    * zfsforce=(on,yes,1) Force import the pool
    * rollback=(on,yes,1) Rollback (instead of clone) the snapshot
* Only try to import a pool if it hasn't already been imported
  * This negates the need to force-import a pool that has not been exported cleanly.
  * Support exclusion of pools from import by setting ZFS_POOL_EXCEPTIONS in /etc/default/zfs.
* Support an additional configuration variable, ZFS_INITRD_ADDITIONAL_DATASETS,
  to mount additional filesystems not located under your root dataset.
* Include /etc/modprobe.d/{zfs,spl}.conf in the initrd if they exist.
* Include the udev rule to use by-vdev for pool imports.
* Include the /etc/default/zfs file in the initrd.
* Only try /dev/disk/by-* in the initrd if USE_DISK_BY_ID is set.
  * Use /dev/disk/by-vdev before anything else.
  * Add /dev as a last-ditch attempt.
  * Fall back to the cache file, if one exists, when nothing else works.
* Use /sbin/modprobe instead of the built-in (BusyBox) modprobe.
  This gets rid of the message "modprobe: can't load module zcommon".
  Thanks to pcoultha for finding this.
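
As an illustration (hypothetical pool/dataset names), kernel command lines
using these options might look like:

    boot=zfs root=ZFS=rpool/ROOT/debian zfsdebug=on
    boot=zfs root=rpool/ROOT/debian@snap1 rollback=on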

Signed-off-by: Turbo Fredriksson <turbo@bayour.com>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #2116
Closes #2114

config/zfs-build.m4
configure.ac
contrib/Makefile.am
contrib/initramfs/Makefile.am [new file with mode: 0644]
contrib/initramfs/README.initramfs.markdown [new file with mode: 0644]
contrib/initramfs/conf-hooks.d/zfs [new file with mode: 0644]
contrib/initramfs/hooks/zfs [new file with mode: 0755]
contrib/initramfs/scripts/zfs [new file with mode: 0644]
rpm/generic/zfs.spec.in

index b8d81d4fad71253797e76bb140465a02d31657f2..ccc31d59d5979a14e166a070d4a136a345e8edea 100644 (file)
@@ -139,7 +139,7 @@ AC_DEFUN([ZFS_AC_RPM], [
        ])
 
        RPM_DEFINE_COMMON='--define "$(DEBUG_ZFS) 1" --define "$(DEBUG_DMU_TX) 1"'
-       RPM_DEFINE_UTIL='--define "_dracutdir $(dracutdir)" --define "_udevdir $(udevdir)" --define "_udevruledir $(udevruledir)" --define "_initconfdir $(DEFAULT_INITCONF_DIR)"'
+       RPM_DEFINE_UTIL='--define "_dracutdir $(dracutdir)" --define "_udevdir $(udevdir)" --define "_udevruledir $(udevruledir)" --define "_initconfdir $(DEFAULT_INITCONF_DIR)" $(DEFINE_INITRAMFS)'
        RPM_DEFINE_KMOD='--define "kernels $(LINUX_VERSION)" --define "require_spldir $(SPL)" --define "require_splobj $(SPL_OBJ)" --define "ksrc $(LINUX)" --define "kobj $(LINUX_OBJ)"'
        RPM_DEFINE_DKMS=
 
@@ -285,7 +285,6 @@ AC_DEFUN([ZFS_AC_DEFAULT_PACKAGE], [
                debian)     DEFAULT_PACKAGE=deb  ;;
                *)          DEFAULT_PACKAGE=rpm  ;;
        esac
-
        AC_MSG_RESULT([$DEFAULT_PACKAGE])
        AC_SUBST(DEFAULT_PACKAGE)
 
@@ -308,7 +307,6 @@ AC_DEFUN([ZFS_AC_DEFAULT_PACKAGE], [
                debian)     DEFAULT_INIT_SCRIPT=lsb    ;;
                *)          DEFAULT_INIT_SCRIPT=lsb    ;;
        esac
-
        AC_MSG_RESULT([$DEFAULT_INIT_SCRIPT])
        AC_SUBST(DEFAULT_INIT_SCRIPT)
 
@@ -323,9 +321,18 @@ AC_DEFUN([ZFS_AC_DEFAULT_PACKAGE], [
                debian)     DEFAULT_INITCONF_DIR=/etc/default   ;;
                *)          DEFAULT_INITCONF_DIR=/etc/default   ;;
        esac
-
        AC_MSG_RESULT([$DEFAULT_INITCONF_DIR])
        AC_SUBST(DEFAULT_INITCONF_DIR)
+
+       AC_MSG_CHECKING([whether initramfs-tools is available])
+       if test -d /usr/share/initramfs-tools ; then
+               DEFINE_INITRAMFS='--define "_initramfs 1"'
+               AC_MSG_RESULT([yes])
+       else
+               DEFINE_INITRAMFS=''
+               AC_MSG_RESULT([no])
+       fi
+       AC_SUBST(DEFINE_INITRAMFS)
 ])
 
 dnl #
index 63d0073e9a137c1cf60c80c8f5a4603062b80606..0e79a491cdc5b24f43e2fdda07616153147c3b39 100644 (file)
@@ -115,6 +115,7 @@ AC_CONFIG_FILES([
        cmd/zed/Makefile
        contrib/Makefile
        contrib/bash_completion.d/Makefile
+       contrib/initramfs/Makefile
        module/Makefile
        module/avl/Makefile
        module/nvpair/Makefile
index 5fe60973ce9fe00a7a03041d4b53fc3b06449244..4a13cfa98019205874a1bf80662ed9cba2523732 100644 (file)
@@ -1,2 +1,2 @@
-SUBDIRS = bash_completion.d
-DIST_SUBDIRS = bash_completion.d
+SUBDIRS = bash_completion.d initramfs
+DIST_SUBDIRS = bash_completion.d initramfs
diff --git a/contrib/initramfs/Makefile.am b/contrib/initramfs/Makefile.am
new file mode 100644 (file)
index 0000000..fa2b5a2
--- /dev/null
@@ -0,0 +1,21 @@
+initrddir = $(datarootdir)/initramfs-tools
+
+initrd_SCRIPTS = conf-hooks.d/zfs hooks/zfs scripts/zfs
+
+EXTRA_DIST = \
+       $(top_srcdir)/contrib/initramfs/conf-hooks.d/zfs \
+       $(top_srcdir)/contrib/initramfs/hooks/zfs \
+       $(top_srcdir)/contrib/initramfs/scripts/zfs \
+       $(top_srcdir)/contrib/initramfs/README.initramfs.markdown
+
+install-initrdSCRIPTS: $(EXTRA_DIST)
+       for d in conf-hooks.d hooks scripts; do \
+         $(MKDIR_P) $(DESTDIR)$(initrddir)/$$d; \
+         cp $(top_srcdir)/contrib/initramfs/$$d/zfs \
+           $(DESTDIR)$(initrddir)/$$d/; \
+       done
+       if [ -f etc/init.d/zfs ]; then \
+         $(MKDIR_P) $(DESTDIR)$(DEFAULT_INITCONF_DIR); \
+         cp $(top_srcdir)/etc/init.d/zfs \
+           $(DESTDIR)$(DEFAULT_INITCONF_DIR)/; \
+       fi
diff --git a/contrib/initramfs/README.initramfs.markdown b/contrib/initramfs/README.initramfs.markdown
new file mode 100644 (file)
index 0000000..f6f3a79
--- /dev/null
@@ -0,0 +1,94 @@
+DESCRIPTION
+  These scripts are intended to be used with initramfs-tools, a software
+  product similar to "dracut" (which is more common on RedHat-based
+  distributions). initramfs-tools is mainly used by Debian GNU/Linux and
+  derivatives to create an initramfs so that the system can be booted off a
+  ZFS filesystem. If you have no need of or interest in this, it can safely
+  be ignored.
+
+  These scripts were written with the primary intention of being portable and
+  usable on as many systems as possible.
+
+  In practice this is usually not entirely possible, but the intention is
+  there, and it is a good one.
+
+  They have been tested successfully on:
+
+    * Debian GNU/Linux Wheezy
+    * Debian GNU/Linux Jessie
+
+  They share some functionality with the SYSV init scripts, primarily
+  the "/etc/zfs/zfs-functions" script.
+
+FUNCTIONALITY
+  * Supports booting from a ZFS snapshot.
+    This is done by cloning the snapshot into a dataset. If the resulting
+    dataset already exists, it is destroyed first. The clone is then mounted
+    as the root filesystem.
+    * If the snapshot does not exist, the base dataset (the part before '@')
+      is used as the boot filesystem instead.
+    * Clone with 'mountpoint=none' and 'canmount=noauto' - we mount manually
+      and explicitly.
+    * Allow rolling back to the snapshot instead of cloning it and booting
+      from the clone.
+    * If no snapshot is specified on the 'root=' kernel command line, but
+      there is an '@', then get a list of snapshots below that filesystem
+      and ask the user which one to use.
+
+  * Support all currently used kernel command line arguments
+    * Core options:
+      The various distributions each have their own convention for what to
+      specify on the kernel command line to boot off a ZFS filesystem.
+
+      Supports the following kernel command line argument combinations
+      (in this order - first match wins):
+      * rpool=<pool>                    (tries to find bootfs automatically)
+      * bootfs=<pool>/<dataset>         (uses this for rpool - first part)
+      * rpool=<pool> bootfs=<pool>/<dataset>
+      * -B zfs-bootfs=<pool>/<fs>       (uses this for rpool - first part)
+      * rpool=rpool                     (default if none of the above is used)
+      * root=<pool>/<dataset>           (uses this for rpool - first part)
+      * root=ZFS=<pool>/<dataset>       (uses this for rpool - first part, without 'ZFS=')
+      * root=zfs:AUTO                   (tries to detect both pool and rootfs)
+      * root=zfs:<pool>/<dataset>       (uses this for rpool - first part, without 'zfs:')
+
+      The <dataset> option may also be a <snapshot> (see the example below).
+    * Extra (control) options:
+      * zfsdebug=(on,yes,1)   Show extra debugging information
+      * zfsforce=(on,yes,1)   Force import the pool
+      * rollback=(on,yes,1)   Rollback (instead of clone) the snapshot
+
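+      As an illustration (hypothetical pool/dataset names), a kernel command
+      line using these scripts might read:
+
+        boot=zfs root=ZFS=rpool/ROOT/debian
+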
+  * 'Smarter' way to import pools. Don't just try the cache file or /dev.
+    * Try /dev/disk/by-vdev (if /etc/zfs/vdev_id.conf exists).
+    * Try /dev/mapper (to be able to use LUKS-backed pools as well as
+      multi-path devices).
+    * Try /dev/disk/by-id and any other /dev/disk/by-* directory that may
+      exist.
+    * Use /dev as a last-ditch attempt.
+    * Fall back to the cache file, if one exists, when nothing else works.
+    * Only try to import a pool if it hasn't already been imported.
+      * This negates the need to force-import a pool that has not been
+        exported cleanly.
+      * Support exclusion of pools from import by setting ZFS_POOL_EXCEPTIONS
+        in /etc/default/zfs.
+
+    The order in which device directories are searched is controlled by the
+    ZPOOL_IMPORT_PATH variable, set in /etc/default/zfs (see the example
+    below).
+
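+    For example, to search by-vdev first and then by-id (a sketch; entries
+    are colon-separated):
+
+      ZPOOL_IMPORT_PATH="/dev/disk/by-vdev:/dev/disk/by-id"
+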
+  * Support an additional configuration variable, ZFS_INITRD_ADDITIONAL_DATASETS,
+    to mount additional filesystems not located under your root dataset.
+
+    For example, if the root fs is specified as 'rpool/ROOT/rootfs', the
+    script will automatically, without any specific configuration, mount any
+    filesystems below it on the mount point specified in their 'mountpoint'
+    property (such as 'rpool/ROOT/rootfs/var', 'rpool/ROOT/rootfs/usr' etc).
+
+    However, if one prefers to have separate filesystems not located below
+    the root fs (such as 'rpool/var', 'rpool/ROOT/opt' etc), special
+    configuration is needed. This is what the ZFS_INITRD_ADDITIONAL_DATASETS
+    variable, set in the /etc/default/zfs file, is for (see the example
+    below). The 'mountpoint' property still needs to be correct for this to
+    work.
+
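+    For example (hypothetical dataset names), in /etc/default/zfs:
+
+      ZFS_INITRD_ADDITIONAL_DATASETS="rpool/var rpool/ROOT/opt"
+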
+  * Allows mounting a rootfs with mountpoint=legacy set.
+
+  * Include /etc/modprobe.d/{zfs,spl}.conf in the initrd if they exist.
+
+  * Include the udev rule to use by-vdev for pool imports.
+
+  * Include the /etc/default/zfs file in the initrd.
diff --git a/contrib/initramfs/conf-hooks.d/zfs b/contrib/initramfs/conf-hooks.d/zfs
new file mode 100644 (file)
index 0000000..29950ca
--- /dev/null
@@ -0,0 +1,2 @@
+# Force the inclusion of Busybox in the initramfs.
+BUSYBOX=y
diff --git a/contrib/initramfs/hooks/zfs b/contrib/initramfs/hooks/zfs
new file mode 100755 (executable)
index 0000000..c4f4332
--- /dev/null
@@ -0,0 +1,103 @@
+#!/bin/sh
+#
+# Add ZoL filesystem capabilities to an initrd, usually for a native ZFS root.
+#
+
+# This hook installs udev rules for ZoL.
+PREREQ="zdev"
+
+# These prerequisites are provided by the zfsutils package. The zdb utility is
+# not strictly required, but it can be useful at the initramfs recovery prompt.
+COPY_EXEC_LIST="/sbin/zdb /sbin/zpool /sbin/zfs /sbin/mount.zfs"
+COPY_EXEC_LIST="$COPY_EXEC_LIST /usr/bin/dirname"
+COPY_FILE_LIST="/etc/hostid /etc/zfs/zpool.cache /etc/default/zfs"
+COPY_FILE_LIST="$COPY_FILE_LIST /etc/zfs/zfs-functions /etc/zfs/vdev_id.conf"
+COPY_FILE_LIST="$COPY_FILE_LIST /lib/udev/rules.d/69-vdev.rules"
+
+# These prerequisites are provided by the base system.
+COPY_EXEC_LIST="$COPY_EXEC_LIST /bin/hostname /sbin/blkid"
+
+# Explicitly specify all kernel modules because automatic dependency resolution
+# is unreliable on many systems.
+BASE_MODULES="zlib_deflate spl zavl zcommon znvpair zunicode zfs"
+CRPT_MODULES="sun-ccm sun-gcm sun-ctr"
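+# NOTE: CRPT_MODULES lists the crypto modules that the boot script probes in
+# decrypt_fs() for natively encrypted datasets; it is not currently added to
+# MANUAL_ADD_MODULES_LIST below.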
+MANUAL_ADD_MODULES_LIST="$BASE_MODULES"
+
+# Generic result code.
+RC=0
+
+case $1 in
+prereqs)
+       echo "$PREREQ"
+       exit 0
+       ;;
+esac
+
+for ii in $COPY_EXEC_LIST
+do
+       if [ ! -x "$ii" ]
+       then
+               echo "Error: $ii is not executable."
+               RC=2
+       fi
+done
+
+if [ "$RC" -ne 0 ]
+then
+       exit "$RC"
+fi
+
+. /usr/share/initramfs-tools/hook-functions
+
+mkdir -p "$DESTDIR/etc/"
+
+# ZDB uses pthreads for some functions, but the library dependency is not
+# automatically detected. The `find` utility and extended `cp` options are
+# used here because libgcc_s.so could be in a subdirectory of /lib for
+# multi-arch installations.
+cp --target-directory="$DESTDIR" --parents $(find /lib -type f -name libgcc_s.so.1)
+
+for ii in $COPY_EXEC_LIST
+do
+       copy_exec "$ii"
+done
+
+for ii in $COPY_FILE_LIST
+do
+       dir=$(dirname "$ii")
+       # Create the directory inside the initrd, not on the host.
+       [ -d "$DESTDIR/$dir" ] || mkdir -p "$DESTDIR/$dir"
+       [ -f "$ii" ] && cp -p "$ii" "$DESTDIR/$ii"
+done
+
+for ii in $MANUAL_ADD_MODULES_LIST
+do
+       manual_add_modules "$ii"
+done
+
+if [ -f "/etc/hostname" ]
+then
+       cp -p "/etc/hostname" "$DESTDIR/etc/"
+else
+       hostname >"$DESTDIR/etc/hostname"
+fi
+
+for ii in zfs zfs.conf spl spl.conf
+do
+       if [ -f "/etc/modprobe.d/$ii" ]; then
+               if [ ! -d "$DESTDIR/etc/modprobe.d" ]; then
+                       mkdir -p "$DESTDIR/etc/modprobe.d"
+               fi
+               cp -p "/etc/modprobe.d/$ii" "$DESTDIR/etc/modprobe.d/"
+       fi
+done
+
+# With pull request #1476 (not yet merged) comes a verbose warning
+# if /usr/bin/net doesn't exist or isn't executable. Just create
+# a dummy...
+[ ! -d "$DESTDIR/usr/bin" ] && mkdir -p "$DESTDIR/usr/bin"
+if [ ! -x "$DESTDIR/usr/bin/net" ]; then
+    touch "$DESTDIR/usr/bin/net"
+    chmod +x "$DESTDIR/usr/bin/net"
+fi
+
+exit 0
diff --git a/contrib/initramfs/scripts/zfs b/contrib/initramfs/scripts/zfs
new file mode 100644 (file)
index 0000000..43107c4
--- /dev/null
@@ -0,0 +1,962 @@
+# ZFS boot stub for initramfs-tools.
+#
+# In the initramfs environment, the /init script sources this stub to
+# override the default functions in the /scripts/local script.
+#
+# Enable this by passing boot=zfs on the kernel command line.
+#
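+# Example kernel command line (hypothetical pool/dataset; see the full list
+# of supported argument combinations in mountroot() below):
+#
+#   boot=zfs root=ZFS=rpool/ROOT/debian
+#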
+
+# Source the common init script
+. /etc/zfs/zfs-functions
+
+# Paths to what we need - in the initrd, these paths are hardcoded,
+# so override the defines in zfs-functions.
+ZFS="/sbin/zfs"
+ZPOOL="/sbin/zpool"
+ZPOOL_CACHE="/etc/zfs/zpool.cache"
+export ZFS ZPOOL ZPOOL_CACHE
+
+# This runs any scripts that should run before we start importing
+# pools and mounting any filesystems.
+pre_mountroot()
+{
+       if type run_scripts > /dev/null 2>&1 && \
+           [ -f "/scripts/local-top" -o -d "/scripts/local-top" ]
+       then
+               [ "$quiet" != "y" ] && \
+                   zfs_log_begin_msg "Running /scripts/local-top"
+               run_scripts /scripts/local-top
+               [ "$quiet" != "y" ] && zfs_log_end_msg
+       fi
+
+       if type run_scripts > /dev/null 2>&1 && \
+           [ -f "/scripts/local-premount" -o -d "/scripts/local-premount" ]
+       then
+               [ "$quiet" != "y" ] && \
+                   zfs_log_begin_msg "Running /scripts/local-premount"
+               run_scripts /scripts/local-premount
+               [ "$quiet" != "y" ] && zfs_log_end_msg
+       fi
+}
+
+# If plymouth is available, hide the splash image.
+disable_plymouth()
+{
+       if [ -x /bin/plymouth ] && /bin/plymouth --ping
+       then
+               /bin/plymouth hide-splash >/dev/null 2>&1
+       fi
+}
+
+# Get a ZFS filesystem property value.
+get_fs_value()
+{
+       local fs="$1"
+       local value=$2
+
+       "${ZFS}" get -H -ovalue $value "$fs" 2> /dev/null
+}
+
+# Find the 'bootfs' property on pool $1.
+# If the property is unset, or the dataset's mountpoint is not '/',
+# ignore this pool by exporting it again.
+find_rootfs()
+{
+       local pool="$1"
+
+       # If it's already specified, just keep it mounted and exit
+       # User (kernel command line) must be correct.
+       if [ -n "${ZFS_BOOTFS}" ]
+       then
+               POOL_IMPORTED=1
+               return 0
+       fi
+
+       # Not set, try to find it in the 'bootfs' property of the pool.
+       # NOTE: zpool does not support 'get -H -ovalue bootfs'...
+       ZFS_BOOTFS=$("${ZPOOL}" list -H -obootfs "$pool")
+
+       # Make sure it's not '-' and that its mountpoint is exactly '/'.
+       if [ "${ZFS_BOOTFS}" != "-" ] && \
+               get_fs_value "${ZFS_BOOTFS}" mountpoint | grep -q '^/$'
+       then
+               # Keep it mounted
+               POOL_IMPORTED=1
+               return 0
+       fi
+
+       # Not the boot fs; export the pool and try again later.
+       "${ZPOOL}" export "$pool"
+
+       return 1
+}
+
+# Support function to get a list of all pools, separated with ';'
+find_pools()
+{
+       local CMD="$*"
+       local pools
+
+       pools=$($CMD 2> /dev/null | \
+               grep -E "pool:|^[a-zA-Z0-9]" | \
+               sed 's@.*: @@' | \
+               sort | \
+               while read pool; do \
+                   echo -n "$pool;"
+               done)
+
+       echo "${pools%%;}" # Return without the last ';'.
+}
+
+# Get a list of all available pools
+get_pools()
+{
+       local available_pools npools
+
+       # Get the base list of available pools.
+       available_pools=$(find_pools "$ZPOOL" import)
+
+       # Just in case - it has been seen that a pool isn't visible/found
+       # with a simple "zpool import", but only when using the "-d"
+       # option or setting ZPOOL_IMPORT_PATH.
+       if [ -d "/dev/disk/by-id" ]
+       then
+               npools=$(find_pools "$ZPOOL" import -d /dev/disk/by-id)
+               if [ -n "$npools" ]
+               then
+                       # Because we have found extra pool(s) here that
+                       # weren't found 'normally', force USE_DISK_BY_ID to
+                       # make sure we're able to actually import them later.
+                       USE_DISK_BY_ID='yes'
+
+                       if [ -n "$available_pools" ]
+                       then
+                               # Filter out duplicates (pools found with the
+                               # simple "zpool import" but which are also
+                               # found with "zpool import -d ...").
+                               npools=$(echo "$npools" | sed "s,$available_pools,,")
+
+                               # Add the list to the existing list of
+                               # available pools
+                               available_pools="$available_pools;$npools"
+                       else
+                               available_pools="$npools"
+                       fi
+               fi
+       fi
+
+        # Filter out any exceptions...
+       if [ -n "$ZFS_POOL_EXCEPTIONS" ]
+       then
+               local found=""
+               local apools=""
+               local pool exception
+               OLD_IFS="$IFS" ; IFS=";"
+
+               for pool in $available_pools
+               do
+                       for exception in $ZFS_POOL_EXCEPTIONS
+                       do
+                               [ "$pool" = "$exception" ] && continue 2
+                               found="$pool"
+                       done
+
+                       if [ -n "$found" ]
+                       then
+                               if [ -n "$apools" ]
+                               then
+                                       apools="$apools;$pool"
+                               else
+                                       apools="$pool"
+                               fi
+                       fi
+               done
+
+               IFS="$OLD_IFS"
+               available_pools="$apools"
+       fi
+
+       # Return the list of available pools.
+       echo "$available_pools"
+}
+
+# Import given pool $1
+import_pool()
+{
+       local pool="$1"
+       local dirs dir ZFS_CMD ZFS_STDERR ZFS_ERROR
+
+       # Verify that the pool isn't already imported.
+       # Be as sure as we can that importing won't require '-f'.
+       "${ZPOOL}" status "$pool" > /dev/null 2>&1 && return 0
+
+       # For backwards compatibility, make sure that ZPOOL_IMPORT_PATH is set
+       # to something we can use later with the real import(s). We want to
+       # make sure we find all by* dirs, BUT by-vdev should be first (if it
+       # exists).
+       if [ -n "$USE_DISK_BY_ID" -a -z "$ZPOOL_IMPORT_PATH" ]
+       then
+               dirs="$(for dir in $(echo /dev/disk/by-*)
+               do
+                       # Ignore by-vdev here - we want it first!
+                       echo "$dir" | grep -q /by-vdev && continue
+                       [ ! -d "$dir" ] && continue
+
+                       echo -n "$dir:"
+               done | sed 's,:$,,g')"
+
+               if [ -d "/dev/disk/by-vdev" ]
+               then
+                       # Add by-vdev at the beginning.
+                       ZPOOL_IMPORT_PATH="/dev/disk/by-vdev:"
+               fi
+
+               # ... and /dev at the very end, just for good measure.
+               ZPOOL_IMPORT_PATH="$ZPOOL_IMPORT_PATH$dirs:/dev"
+       fi
+
+       # Needs to be exported for "zpool" to catch it.
+       [ -n "$ZPOOL_IMPORT_PATH" ] && export ZPOOL_IMPORT_PATH
+
+
+       [ "$quiet" != "y" ] && zfs_log_begin_msg \
+               "Importing pool '${pool}' using defaults"
+
+       ZFS_CMD="${ZPOOL} import -N ${ZPOOL_FORCE}"
+       ZFS_STDERR="$($ZFS_CMD "$pool" 2>&1)"
+       ZFS_ERROR="$?"
+       if [ "${ZFS_ERROR}" != 0 ]
+       then
+               [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
+
+               [ "$quiet" != "y" ] && zfs_log_begin_msg \
+                       "Importing pool '${pool}' using cachefile."
+
+               ZFS_CMD="${ZPOOL} import -c ${ZPOOL_CACHE} -N ${ZPOOL_FORCE}"
+               ZFS_STDERR="$($ZFS_CMD "$pool" 2>&1)"
+               ZFS_ERROR="$?"
+               if [ "${ZFS_ERROR}" != 0 ]
+               then
+                       [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
+
+                       disable_plymouth
+                       echo ""
+                       echo "Command: ${ZFS_CMD} '$pool'"
+                       echo "Message: $ZFS_STDERR"
+                       echo "Error: $ZFS_ERROR"
+                       echo ""
+                       echo "Failed to import pool '$pool'."
+                       echo "Manually import the pool and exit."
+                       /bin/sh
+               fi
+       fi
+
+       [ "$quiet" != "y" ] && zfs_log_end_msg
+       return 0
+}
+
+# Load the ZFS modules.
+# Loading a module in an initrd requires a slightly different approach,
+# with more logging etc.
+load_module_initrd()
+{
+       if [ "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP" > 0 ]
+       then
+               if [ "$quiet" != "y" ]; then
+                       zfs_log_begin_msg "Sleeping for" \
+                               "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP seconds..."
+               fi
+               sleep "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP"
+               [ "$quiet" != "y" ] && zfs_log_end_msg
+       fi
+
+       # Wait for all of the /dev/{hd,sd}[a-z] device nodes to appear.
+       if type wait_for_udev > /dev/null 2>&1 ; then
+               wait_for_udev 10
+       elif type wait_for_dev > /dev/null 2>&1 ; then
+               wait_for_dev
+       fi
+
+       # zpool import refuses to import without a valid mtab
+       [ ! -f /proc/mounts ] && mount proc /proc
+       [ ! -f /etc/mtab ] && cat /proc/mounts > /etc/mtab
+
+       # Load the module
+       load_module || return 1
+
+       if [ "$ZFS_INITRD_POST_MODPROBE_SLEEP" > 0 ]
+       then
+               if [ "$quiet" != "y" ]; then
+                       zfs_log_begin_msg "Sleeping for" \
+                               "$ZFS_INITRD_POST_MODPROBE_SLEEP seconds..."
+               fi
+               sleep "$ZFS_INITRD_POST_MODPROBE_SLEEP"
+               [ "$quiet" != "y" ] && zfs_log_end_msg
+       fi
+
+       return 0
+}
+
+# Mount a given filesystem
+mount_fs()
+{
+       local fs="$1"
+       local mountpoint ZFS_CMD ZFS_STDERR ZFS_ERROR
+
+       # Need the _original_ dataset's mountpoint!
+       mountpoint=$(get_fs_value "$fs" mountpoint)
+       if [ "$mountpoint" = "legacy" -o "$mountpoint" = "none" ]; then
+               # Can't use the mountpoint property. Might be one of our
+               # clones. Check the 'org.zol:mountpoint' property set in
+               # clone_snap() if that's usable.
+               mountpoint=$(get_fs_value "$fs" org.zol:mountpoint)
+               if [ "$mountpoint" = "legacy" -o \
+                   "$mountpoint" = "none" -o \
+                   "$mountpoint" = "-" ]
+               then
+                       if [ "$fs" != "${ZFS_BOOTFS}" ]; then
+                               # We don't have a proper mountpoint and this
+                               # isn't the root fs. Strip the root fs prefix
+                               # from the filesystem name and (hopefully!)
+                               # we get a mountpoint we can use.
+                               mountpoint="${fs##$ZFS_BOOTFS}"
+                       else
+                               # Last hail-mary: Hope 'rootmnt' is set!
+                               mountpoint=""
+                       fi
+               fi
+
+               if [ "$mountpoint" = "legacy" ]; then
+                       ZFS_CMD="mount -t zfs"
+               else
+                       # If it's not a legacy filesystem, it can only be a
+                       # native one...
+                       ZFS_CMD="mount -o zfsutil -t zfs"
+               fi
+       else
+               ZFS_CMD="mount -o zfsutil -t zfs"
+       fi
+
+       # Possibly decrypt a filesystem using native encryption.
+       decrypt_fs "$fs"
+
+       [ "$quiet" != "y" ] && \
+           zfs_log_begin_msg "Mounting '${fs}' on '${rootmnt}/${mountpoint}'"
+       [ -n "${ZFS_DEBUG}" ] && \
+           zfs_log_begin_msg "CMD: '$ZFS_CMD ${fs} ${rootmnt}/${mountpoint}'"
+
+       ZFS_STDERR=$(${ZFS_CMD} "${fs}" "${rootmnt}/${mountpoint}" 2>&1)
+       ZFS_ERROR=$?
+       if [ "${ZFS_ERROR}" != 0 ]
+       then
+               [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
+
+               disable_plymouth
+               echo ""
+               echo "Command: ${ZFS_CMD} ${fs} ${rootmnt}/${mountpoint}"
+               echo "Message: $ZFS_STDERR"
+               echo "Error: $ZFS_ERROR"
+               echo ""
+               echo "Failed to mount ${fs} on ${rootmnt}/${mountpoint}."
+               echo "Manually mount the filesystem and exit."
+               /bin/sh
+       else
+               [ "$quiet" != "y" ] && zfs_log_end_msg
+       fi
+
+       return 0
+}
+
+# Unlock a ZFS filesystem using native encryption.
+decrypt_fs()
+{
+       local fs="$1"
+       local ZFS_CMD ZFS_STDERR ZFS_ERROR
+
+       # If the 'zfs key' command isn't available, exit right here.
+       "${ZFS}" 2>&1 | grep -q 'key -l ' || return 0
+
+       # Check if filesystem is encrypted. If not, exit right here.
+       [ "$(get_fs_value "$fs" encryption)" != "off" ] || return 0
+
+       [ "$quiet" != "y" ] && \
+           zfs_log_begin_msg "Loading crypto wrapper key for $fs"
+
+       # Just make sure that ALL the crypto modules are loaded.
+       # Simplest to just load them all...
+       for mod in sun-ccm sun-gcm sun-ctr
+       do
+               [ "$quiet" != "y" ] && zfs_log_progress_msg "${mod} "
+
+               ZFS_CMD="/sbin/modprobe $mod"
+               ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
+               ZFS_ERROR="$?"
+
+               if [ "${ZFS_ERROR}" != 0 ]
+               then
+                       [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
+
+                       disable_plymouth
+                       echo ""
+                       echo "Command: $ZFS_CMD"
+                       echo "Message: $ZFS_STDERR"
+                       echo "Error: $ZFS_ERROR"
+                       echo ""
+                       echo "Failed to load $mod module."
+                       echo "Please verify that it is availible on the initrd image"
+                       echo "(without it it won't be possible to unlock the filesystem)"
+                       echo "and rerun:  $ZFS_CMD"
+                       /bin/sh
+               else
+                       [ "$quiet" != "y" ] && zfs_log_end_msg
+               fi
+       done
+
+       # If the key isn't available, then this will fail!
+       ZFS_CMD="${ZFS} key -l -r $fs"
+       ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
+       ZFS_ERROR="$?"
+
+       if [ "${ZFS_ERROR}" != 0 ]
+       then
+               [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
+
+               disable_plymouth
+               echo ""
+               echo "Command: $ZFS_CMD"
+               echo "Message: $ZFS_STDERR"
+               echo "Error: $ZFS_ERROR"
+               echo ""
+               echo "Failed to load zfs encryption wrapper key (s)."
+               echo "Please verify dataset property 'keysource' for datasets"
+               echo "and rerun:  $ZFS_CMD"
+               /bin/sh
+       else
+               [ "$quiet" != "y" ] && zfs_log_end_msg
+       fi
+
+       return 0
+}
+
+# Destroy a given filesystem.
+destroy_fs()
+{
+       local fs="$1"
+       local ZFS_CMD ZFS_STDERR ZFS_ERROR
+
+       [ "$quiet" != "y" ] && \
+           zfs_log_begin_msg "Destroying '$fs'"
+
+       ZFS_CMD="${ZFS} destroy $fs"
+       ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
+       ZFS_ERROR="$?"
+       if [ "${ZFS_ERROR}" != 0 ]
+       then
+               [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
+
+               disable_plymouth
+               echo ""
+               echo "Command: $ZFS_CMD"
+               echo "Message: $ZFS_STDERR"
+               echo "Error: $ZFS_ERROR"
+               echo ""
+               echo "Failed to destroy '$fs'. Please make sure that '$fs' is not availible."
+               echo "Hint: Try:  zfs destroy -Rfn $fs"
+               echo "If this dryrun looks good, then remove the 'n' from '-Rfn' and try again."
+               /bin/sh
+       else
+               [ "$quiet" != "y" ] && zfs_log_end_msg
+       fi
+
+       return 0
+}
+
+# Clone snapshot $1 to destination filesystem $2.
+# Set 'canmount=noauto' and 'mountpoint=none' so that we keep manual
+# control over its mounting (i.e., make sure it's not automatically
+# mounted with a 'zfs mount -a' by the init/systemd scripts).
+clone_snap()
+{
+       local snap="$1"
+       local destfs="$2"
+       local mountpoint="$3"
+       local ZFS_CMD ZFS_STDERR ZFS_ERROR
+
+       [ "$quiet" != "y" ] && zfs_log_begin_msg "Cloning '$snap' to '$destfs'"
+
+       # Clone the snapshot into a dataset we can boot from:
+       # + We don't want this filesystem to be automatically mounted; we
+       #   want control over this here and nowhere else.
+       # + We don't need any mountpoint set for the same reason.
+       # We use the 'org.zol:mountpoint' property to remember the mountpoint.
+       ZFS_CMD="${ZFS} clone -o canmount=noauto -o mountpoint=none"
+       ZFS_CMD="${ZFS_CMD} -o org.zol:mountpoint=${mountpoint}"
+       ZFS_CMD="${ZFS_CMD} $snap $destfs"
+       ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
+       ZFS_ERROR="$?"
+       if [ "${ZFS_ERROR}" != 0 ]
+       then
+               [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
+
+               disable_plymouth
+               echo ""
+               echo "Command: $ZFS_CMD"
+               echo "Message: $ZFS_STDERR"
+               echo "Error: $ZFS_ERROR"
+               echo ""
+               echo "Failed to clone snapshot."
+               echo "Make sure that the any problems are corrected and then make sure"
+               echo "that the dataset '$destfs' exists and is bootable."
+               /bin/sh
+       else
+               [ "$quiet" != "y" ] && zfs_log_end_msg
+       fi
+
+       return 0
+}
+
+# Rollback a given snapshot.
+rollback_snap()
+{
+       local snap="$1"
+       local ZFS_CMD ZFS_STDERR ZFS_ERROR
+
+       [ "$quiet" != "y" ] && zfs_log_begin_msg "Rollback $snap"
+
+       ZFS_CMD="${ZFS} rollback -Rf $snap"
+       ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
+       ZFS_ERROR="$?"
+       if [ "${ZFS_ERROR}" != 0 ]
+       then
+               [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
+
+               disable_plymouth
+               echo ""
+               echo "Command: $ZFS_CMD"
+               echo "Message: $ZFS_STDERR"
+               echo "Error: $ZFS_ERROR"
+               echo ""
+               echo "Failed to rollback snapshot."
+               /bin/sh
+       else
+               [ "$quiet" != "y" ] && zfs_log_end_msg
+       fi
+
+       return 0
+}
+
+# Get a list of snapshots, give them as a numbered list
+# to the user to choose from.
+ask_user_snap()
+{
+       local fs="$1"
+       local i=1
+       local SNAP snapnr snap debug
+
+       # We need to temporarily disable debugging. Set 'debug' so we
+       # remember to enable it again.
+       if [ -n "${ZFS_DEBUG}" ]; then
+               unset ZFS_DEBUG
+               set +x
+               debug=1
+       fi
+
+       # Because we need the resulting snapshot, which is sent on
+       # stdout to the caller, we use stderr for our questions.
+       echo "What snapshot do you want to boot from?" > /dev/stderr
+       while read snap; do
+           echo "  $i: ${snap}" > /dev/stderr
+           eval `echo SNAP_$i=$snap`
+           i=$((i + 1))
+       done <<EOT
+$("${ZFS}" list -H -oname -tsnapshot "${fs}")
+EOT
+
+       echo -n "  Snap nr [0-$((i-1))]? " > /dev/stderr
+       read snapnr
+
+       # Reenable debugging.
+       if [ -n "${debug}" ]; then
+               ZFS_DEBUG=1
+               set -x
+       fi
+
+       echo "$(eval echo "$"SNAP_$snapnr)"
+}
+
+setup_snapshot_booting()
+{
+       local snap="$1"
+       local s destfs subfs mountpoint retval=0 filesystems fs
+
+       # Make sure that the specified snapshot actually exists.
+       if [ ! $(get_fs_value "${snap}" type) ]
+       then
+               # The snapshot does not exist (...@<null> ?), so
+               # ask the user for a snapshot to use.
+               snap="$(ask_user_snap "${snap%%@*}")"
+       fi
+
+       # Separate the full snapshot ('$snap') into its filesystem and
+       # snapshot names. A split() function would have been nice...
+       rootfs="${snap%%@*}"
+       snapname="${snap##*@}"  
+       ZFS_BOOTFS="${rootfs}_${snapname}"
+
+       if ! grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline
+       then
+               # If the destination dataset for the clone
+               # already exists, destroy it - recursively.
+               if [ $(get_fs_value "${rootfs}_${snapname}" type) ]; then
+                       filesystems=$("${ZFS}" list -oname -tfilesystem -H \
+                           -r "${ZFS_BOOTFS}" | sort -r)
+                       for fs in $filesystems; do
+                               destroy_fs "${fs}"
+                       done
+               fi
+       fi
+
+       # Get all snapshots recursively (we might need to clone /usr, /var
+       # etc. as well).
+       for s in $("${ZFS}" list -H -oname -tsnapshot -r "${rootfs}" | \
+           grep "${snapname}")
+       do
+               if grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline
+               then
+                       # Rollback snapshot
+                       rollback_snap "$s" || retval=$((retval + 1))
+               else
+                       # Setup a destination filesystem name.
+                       # Ex: Called with 'rpool/ROOT/debian@snap2'
+                       #       rpool/ROOT/debian@snap2         => rpool/ROOT/debian_snap2
+                       #       rpool/ROOT/debian/boot@snap2    => rpool/ROOT/debian_snap2/boot
+                       #       rpool/ROOT/debian/usr@snap2     => rpool/ROOT/debian_snap2/usr
+                       #       rpool/ROOT/debian/var@snap2     => rpool/ROOT/debian_snap2/var
+                       subfs="${s##$rootfs}"
+                       subfs="${subfs%%@$snapname}"
+
+                       destfs="${rootfs}_${snapname}" # base fs.
+                       [ -n "$subfs" ] && destfs="${destfs}$subfs" # + sub fs.
+
+                       # Get the mountpoint of the filesystem, to be used
+                       # with clone_snap(). If legacy or none, then use
+                       # the sub fs value.
+                       mountpoint=$(get_fs_value "${s%%@*}" mountpoint)
+                       if [ "$mountpoint" = "legacy" -o \
+                           "$mountpoint" = "none" ]
+                       then
+                               if [ -n "${subfs}" ]; then
+                                       mountpoint="${subfs}"
+                               else
+                                       mountpoint="/"
+                               fi
+                       fi
+
+                       # Clone the snapshot into its own
+                       # filesystem
+                       clone_snap "$s" "${destfs}" "${mountpoint}" || \
+                           retval=$((retval + 1))
+               fi
+       done
+
+       # Return the number of failed clones/rollbacks (zero on success).
+       return "${retval}"
+}
+
+# ================================================================
+
+# This is the main function.
+mountroot()
+{
+       local snaporig
+       local snapsub
+       local destfs
+
+       # ----------------------------------------------------------------
+       # I N I T I A L   S E T U P
+
+       # ------------
+       # Run the pre-mount scripts from /scripts/local-top.
+       pre_mountroot
+
+       # ------------
+       # Source the default setup variables.
+       [ -r '/etc/default/zfs' ] && . /etc/default/zfs
+
+       # ------------
+       # Support debug option
+       if grep -qiE '(^|[^\\](\\\\)* )(zfs_debug|zfs\.debug|zfsdebug)=(on|yes|1)( |$)' /proc/cmdline
+       then
+               ZFS_DEBUG=1
+               mkdir /var/log
+               #exec 2> /var/log/boot.debug
+               set -x
+       fi
+
+       # ------------
+       # Load ZFS module etc.
+       if ! load_module_initrd; then
+               disable_plymouth
+               echo ""
+               echo "Failed to load ZFS modules."
+               echo "Manually load the modules and exit."
+               /bin/sh
+       fi
+
+       # ------------
+       # Look for the cache file (if any).
+       [ ! -f ${ZPOOL_CACHE} ] && unset ZPOOL_CACHE
+
+       # ------------
+       # Compatibility: 'ROOT' is for Debian GNU/Linux (etc),
+       #                'root' is for Redhat/Fedora (etc),
+       #                'REAL_ROOT' is for Gentoo
+       if [ -z "$ROOT" ]
+       then
+               [ -n "$root" ] && ROOT=${root}
+
+               [ -n "$REAL_ROOT" ] && ROOT=${REAL_ROOT}
+       fi
+
+       # ------------
+       # Where to mount the root fs in the initrd - set outside this script
+       # Compatibility: 'rootmnt' is for Debian GNU/Linux (etc),
+       #                'NEWROOT' is for RedHat/Fedora (etc),
+       #                'NEW_ROOT' is for Gentoo
+       if [ -z "$rootmnt" ]
+       then
+               [ -n "$NEWROOT" ] && rootmnt=${NEWROOT}
+
+               [ -n "$NEW_ROOT" ] && rootmnt=${NEW_ROOT}
+       fi
+
+       # ------------
+       # No longer set in the defaults file, but it could have been set in
+       # get_pools() in some circumstances. If it's something, but not 'yes',
+       # it's no good to us.
+       [ -n "$USE_DISK_BY_ID" -a "$USE_DISK_BY_ID" != 'yes' ] && \
+           unset USE_DISK_BY_ID
+
+       # ----------------------------------------------------------------
+       # P A R S E   C O M M A N D   L I N E   O P T I O N S
+
+       # This is the really ugly part - there are so many options and
+       # permutations 'out there', and if this is to be the 'primary' source
+       # for ZFS initrd scripting, we need to support them all.
+       #
+       # Supports the following kernel command line argument combinations
+       # (in this order - first match wins):
+       #
+       #       rpool=<pool>                    (tries to find bootfs automatically)
+       #       bootfs=<pool>/<dataset>         (uses this for rpool - first part)
+       #       rpool=<pool> bootfs=<pool>/<dataset>
+       #       -B zfs-bootfs=<pool>/<fs>       (uses this for rpool - first part)
+       #       rpool=rpool                     (default if none of the above is used)
+       #       root=<pool>/<dataset>           (uses this for rpool - first part)
+       #       root=ZFS=<pool>/<dataset>       (uses this for rpool - first part, without 'ZFS=')
+       #       root=zfs:AUTO                   (tries to detect both pool and rootfs)
+       #       root=zfs:<pool>/<dataset>       (uses this for rpool - first part, without 'zfs:')
+       #
+       # The <dataset> option may also be a <snapshot>.
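+       #
+       # Example (hypothetical dataset): root=rpool/ROOT/debian@snap2 boots
+       # from a clone of that snapshot - see setup_snapshot_booting() above.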
+
+       # ------------
+       # Support force option
+       # In addition, setting one of zfs_force, zfs.force or zfsforce to
+       # 'yes', 'on' or '1' will make sure we force import the pool.
+       # This should (almost) never be needed, but it's here for
+       # completeness.
+       ZPOOL_FORCE=""
+       if grep -qiE '(^|[^\\](\\\\)* )(zfs_force|zfs\.force|zfsforce)=(on|yes|1)( |$)' /proc/cmdline
+       then
+               ZPOOL_FORCE="-f"
+       fi
+
+       # ------------
+       # Look for 'rpool' and 'bootfs' parameter
+       [ -n "$rpool" ] && ZFS_RPOOL="${rpool#rpool=}"
+       [ -n "$bootfs" ] && ZFS_BOOTFS="${bootfs#bootfs=}"
+
+       # ------------
+       # If we have 'ROOT' (see above), but not 'ZFS_BOOTFS', then use
+       # 'ROOT'
+       [ -n "$ROOT" -a -z "${ZFS_BOOTFS}" ] && ZFS_BOOTFS="$ROOT"
+
+       # ------------
+       # Check for the `-B zfs-bootfs=%s/%u,...` kind of parameter.
+       # NOTE: Only use the pool name and dataset. The rest is not
+       #       supported by ZoL (whatever it's for).
+       if [ -z "$ZFS_RPOOL" ]
+       then
+               # The ${zfs-bootfs} variable is set at the kernel command
+               # line, usually by GRUB, but it cannot be referenced here
+               # directly because bourne variable names cannot contain a
+               # hyphen.
+               #
+               # Reassign the variable by dumping the environment and
+               # stripping the zfs-bootfs= prefix.  Let the shell handle
+               # quoting through the eval command.
+               eval ZFS_RPOOL=$(set | sed -n -e 's,^zfs-bootfs=,,p')
+       fi
+
+       # ------------
+       # No root fs or pool specified - do auto detect.
+       if [ -z "$ZFS_RPOOL" -a -z "${ZFS_BOOTFS}" ]
+       then
+               # Do auto detect. Do this by 'cheating' - set 'root=zfs:AUTO'
+               # which will be caught later
+               ROOT=zfs:AUTO
+       fi
+
+       # ----------------------------------------------------------------
+       # F I N D   A N D   I M P O R T   C O R R E C T   P O O L
+
+       # ------------
+       if [ "$ROOT" = "zfs:AUTO" ]
+       then
+               # Try to detect both pool and root fs.
+
+               [ "$quiet" != "y" ] && \
+                   zfs_log_begin_msg "Attempting to import additional pools."
+
+               # Get a list of pools available for import
+               if [ -n "$ZFS_RPOOL" ]
+               then
+                       # We've specified a pool - check only that
+                       POOLS=$ZFS_RPOOL
+               else
+                       POOLS=$(get_pools)
+               fi
+
+               OLD_IFS="$IFS" ; IFS=";"
+               for pool in $POOLS
+               do
+                       [ -z "$pool" ] && continue
+
+                       import_pool "$pool"
+                       find_rootfs "$pool"
+               done
+               IFS="$OLD_IFS"
+
+               [ "$quiet" != "y" ] && zfs_log_end_msg $ZFS_ERROR
+       else
+               # No auto - use value from the command line option.
+
+               # Strip 'zfs:' and 'ZFS='.
+               ZFS_BOOTFS="${ROOT#*[:=]}"
+
+               # Strip everything after the first slash.
+               ZFS_RPOOL="${ZFS_BOOTFS%%/*}"
+       fi
+
+       # Import the pool (if not already done so in the AUTO check above).
+       if [ -n "$ZFS_RPOOL" -a -z "${POOL_IMPORTED}" ]
+       then
+               [ "$quiet" != "y" ] && \
+                   zfs_log_begin_msg "Importing ZFS root pool '$ZFS_RPOOL'"
+
+               import_pool "${ZFS_RPOOL}"
+               find_rootfs "${ZFS_RPOOL}"
+
+               [ "$quiet" != "y" ] && zfs_log_end_msg
+       fi
+
+       if [ -z "${POOL_IMPORTED}" ]
+       then
+               # No pool imported, this is serious!
+               disable_plymouth
+               echo ""
+               echo "Command: $ZFS_CMD"
+               echo "Message: $ZFS_STDERR"
+               echo "Error: $ZFS_ERROR"
+               echo ""
+               echo "No pool imported. Manually import the root pool"
+               echo "at the command prompt and then exit."
+               echo "Hint: Try:  zpool import -R ${rootmnt} -N ${ZFS_RPOOL}"
+               /bin/sh
+       fi
+
+       # ----------------------------------------------------------------
+       # P R E P A R E   R O O T   F I L E S Y S T E M
+
+       if [ -n "${ZFS_BOOTFS}" ]
+       then
+               # Booting from a snapshot?
+               # Will overwrite the ZFS_BOOTFS variable like so:
+               #   rpool/ROOT/debian@snap2 => rpool/ROOT/debian_snap2
+               echo "${ZFS_BOOTFS}" | grep -q '@' && \
+                   setup_snapshot_booting "${ZFS_BOOTFS}"
+       fi
+
+       if [ -z "${ZFS_BOOTFS}" ]
+       then
+               # Still nothing! Let the user sort this out.
+               disable_plymouth
+               echo ""
+               echo "Error: Unknown root filesystem - no 'bootfs' pool property and"
+               echo "       not specified on the kernel command line."
+               echo ""
+               echo "Manually mount the root filesystem on $rootmnt and then exit."
+               echo "Hint: Try:  mount -o zfsutil -t zfs ${ZFS_RPOOL-rpool}/ROOT/system $rootmnt"
+               /bin/sh
+       fi
+
+       # ----------------------------------------------------------------
+       # M O U N T   F I L E S Y S T E M S
+
+       # * Ideally, the root filesystem would be mounted like this:
+       #
+       #     zpool import -R "$rootmnt" -N "$ZFS_RPOOL"
+       #     zfs mount -o mountpoint=/ "${ZFS_BOOTFS}"
+       #
+       #   but the MOUNTPOINT prefix is preserved on descendant filesystems
+       #   after the pivot into the regular root, which later breaks things
+       #   like `zfs mount -a` and the /etc/mtab refresh.
+       #
+       # * Mount additional filesystems required
+       #   Such as /usr, /var, /usr/local etc.
+       #   NOTE: Mounted in the order specified in the
+       #         ZFS_INITRD_ADDITIONAL_DATASETS variable so take care!
+
+       # Go through the complete (recursive) list of all filesystems below
+       # the real root dataset.
+       filesystems=$("${ZFS}" list -oname -tfilesystem -H -r "${ZFS_BOOTFS}")
+       for fs in $filesystems $ZFS_INITRD_ADDITIONAL_DATASETS
+       do
+               mount_fs "$fs"
+       done
+
+       # ------------
+       # Debugging information
+       if [ -n "${ZFS_DEBUG}" ]
+       then
+               #exec 2>&1-
+
+               echo "DEBUG: imported pools:"
+               "${ZPOOL}" list -H
+               echo
+
+               echo "DEBUG: mounted ZFS filesystems:"
+               mount | grep zfs
+               echo
+
+               echo "=> waiting for ENTER before continuing because of 'zfsdebug=1'. "
+               echo -n "   'c' for shell, 'r' for reboot, 'ENTER' to continue. "
+               read b
+
+               [ "$b" = "c" ] && /bin/sh
+               [ "$b" = "r" ] && reboot -f
+
+               set +x
+       fi
+
+       # ------------
+       # Run local bottom script
+       if type run_scripts > /dev/null 2>&1 && \
+           [ -f "/scripts/local-bottom" -o -d "/scripts/local-bottom" ]
+       then
+               [ "$quiet" != "y" ] && \
+                   zfs_log_begin_msg "Running /scripts/local-bottom"
+               run_scripts /scripts/local-bottom
+               [ "$quiet" != "y" ] && zfs_log_end_msg
+       fi
+}
index 04b0033f6d30350d5f8ff54fded0248a4a554a8d..50b61f01074be5657bd7f620270422426d723800 100644 (file)
@@ -189,6 +189,19 @@ Requires:       dracut
 This package contains a dracut module used to construct an initramfs
 image which is ZFS aware.
 
+%if 0%{?_initramfs}
+%package initramfs
+Summary:        Initramfs module
+Group:          System Environment/Kernel
+Requires:       %{name}%{?_isa} = %{version}-%{release}
+Requires:       %{name} = %{version}-%{release}
+Requires:       initramfs-tools
+
+%description initramfs
+This package contains an initramfs module used to construct an initramfs
+image which is ZFS aware.
+%endif
+
 %prep
 %if %{with debug}
     %define debug --enable-debug
@@ -303,6 +316,16 @@ exit 0
 %doc dracut/README.dracut.markdown
 %{_dracutdir}/modules.d/*
 
+%if 0%{?_initramfs}
+%files initramfs
+%doc contrib/initramfs/README.initramfs.markdown
+/usr/share/initramfs-tools/*
+%else
+# Since we're not building the initramfs package,
+# ignore those files.
+%exclude /usr/share/initramfs-tools
+%endif
+
 %changelog
 * Wed Apr  8 2015 Brian Behlendorf <behlendorf1@llnl.gov> - 0.6.4-1
 - Released 0.6.4-1