diff --git a/.SRCINFO b/.SRCINFO
index b4ca226..287923e 100644
--- a/.SRCINFO
+++ b/.SRCINFO
@@ -18,11 +18,11 @@ pkgbase = zfs-utils
 	validpgpkeys = C33DF142657ED1F7C328A2960AB9E991C6AF658B
 	sha256sums = 6462e63e185de6ff10c64ffa6ed773201a082f9dd13e603d7e8136fcb4aca71b
 	sha256sums = SKIP
-	sha256sums = da1cdc045d144d2109ec7b5d97c53a69823759d8ecff410e47c3a66b69e6518d
-	sha256sums = 9c20256093997f7cfa9e7eb5d85d4a712d528a6ff19ef35b83ad03fb1ceae3bc
+	sha256sums = d19476c6a599ebe3415680b908412c8f19315246637b3a61e811e2e0961aea78
+	sha256sums = 15b5acea44225b4364ec6472a08d3d48666d241fe84c142e1171cd3b78a5584f
 	b2sums = 9c85c3eb72f3bb39bc4fd44aaa80338ca197a4e8183436fee73cd56705abfdaecfaf1b6fbe8dd508ccce707c8259c7ab6e1733b60b17757f0a7ff92d4e52bbad
 	b2sums = SKIP
-	b2sums = 570e995bba07ea0fb424dff191180b8017b6469501964dc0b70fd51e338a4dad260f87cc313489866cbfd1583e4aac2522cf7309c067cc5314eb83c37fe14ff3
-	b2sums = e14366cbf680e3337d3d478fe759a09be224c963cc5207bee991805312afc49a49e6691f11e5b8bbe8dde60e8d855bd96e7f4f48f24a4c6d4a8c1bab7fc2bba0
+	b2sums = f7c78e5a0ce887e89e5cdc52515381d647a51586cb05c52a900e1307520f6f0fa828f8f5fd5a30823b233dcd79f0496375b21d044103e1d765e20f728c2d0fee
+	b2sums = 779c864611249c3f21d1864508d60cfe5e0f5541d74fb3093c6bdfa56be2c76f386ac1690d363beaee491c5132f5f6dbc01553aa408cda579ebca74b0e0fd1d0
 
 pkgname = zfs-utils
diff --git a/PKGBUILD b/PKGBUILD
index 4426fcb..a30746b 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -15,12 +15,12 @@ source=("https://github.com/zfsonlinux/zfs/releases/download/zfs-${pkgver}/zfs-$
         "zfs.initcpio.hook")
 sha256sums=('6462e63e185de6ff10c64ffa6ed773201a082f9dd13e603d7e8136fcb4aca71b'
             'SKIP'
-            'da1cdc045d144d2109ec7b5d97c53a69823759d8ecff410e47c3a66b69e6518d'
-            '9c20256093997f7cfa9e7eb5d85d4a712d528a6ff19ef35b83ad03fb1ceae3bc')
+            'd19476c6a599ebe3415680b908412c8f19315246637b3a61e811e2e0961aea78'
+            '15b5acea44225b4364ec6472a08d3d48666d241fe84c142e1171cd3b78a5584f')
 b2sums=('9c85c3eb72f3bb39bc4fd44aaa80338ca197a4e8183436fee73cd56705abfdaecfaf1b6fbe8dd508ccce707c8259c7ab6e1733b60b17757f0a7ff92d4e52bbad'
         'SKIP'
-        '570e995bba07ea0fb424dff191180b8017b6469501964dc0b70fd51e338a4dad260f87cc313489866cbfd1583e4aac2522cf7309c067cc5314eb83c37fe14ff3'
-        'e14366cbf680e3337d3d478fe759a09be224c963cc5207bee991805312afc49a49e6691f11e5b8bbe8dde60e8d855bd96e7f4f48f24a4c6d4a8c1bab7fc2bba0')
+        'f7c78e5a0ce887e89e5cdc52515381d647a51586cb05c52a900e1307520f6f0fa828f8f5fd5a30823b233dcd79f0496375b21d044103e1d765e20f728c2d0fee'
+        '779c864611249c3f21d1864508d60cfe5e0f5541d74fb3093c6bdfa56be2c76f386ac1690d363beaee491c5132f5f6dbc01553aa408cda579ebca74b0e0fd1d0')
 validpgpkeys=('4F3BA9AB6D1F8D683DC2DFB56AD860EED4598027'  # Tony Hutter (GPG key for signing ZFS releases) <hutter2@llnl.gov>
               'C33DF142657ED1F7C328A2960AB9E991C6AF658B') # Brian Behlendorf <behlendorf1@llnl.gov>
 backup=('etc/default/zfs'
diff --git a/zfs.initcpio.hook b/zfs.initcpio.hook
index e0f4cfb..4770c08 100644
--- a/zfs.initcpio.hook
+++ b/zfs.initcpio.hook
@@ -6,6 +6,7 @@
 #
 ZPOOL_FORCE=""
 ZPOOL_IMPORT_FLAGS=""
+ZFS_BOOT_ONLY=""
 
 zfs_get_bootfs () {
     for zfs_dataset in $(zpool list -H -o bootfs); do
@@ -25,13 +26,86 @@ zfs_get_bootfs () {
     return 1
 }
 
+zfs_decrypt_fs() {
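+    # Unlock an encrypted dataset: find its encryption root, load the key from its keylocation, and fall back to a passphrase prompt if needed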
+    dataset=$1
+
+    # Make sure the dataset is encrypted; 'zfs get' fails if ZFS does not support encryption
+    encryption="$(zfs get -H -o value encryption "${dataset}" 2>/dev/null)" || return 0
+    [ "${encryption}" != "off" ] || return 0
+
+    # Make sure the dataset is locked
+    keystatus="$(zfs get -H -o value keystatus "${dataset}")" || return 0
+    [ "${keystatus}" != "available" ] || return 0
+
+    # Make sure the encryptionroot is sensible
+    encryptionroot="$(zfs get -H -o value encryptionroot "${dataset}")" || return 0
+    [ "${encryptionroot}" != "-" ] || return 0
+
+    # Export encryption root to be used by other hooks (SSH)
+    echo "${encryptionroot}" > /.encryptionroot
+
+    prompt_override=""
+    if keylocation="$(zfs get -H -o value keylocation "${encryptionroot}")"; then
+        # If the key location is a file, determine whether it can be overridden by a prompt
+        if [ "${keylocation}" != "prompt" ]; then
+            if keyformat="$(zfs get -H -o value keyformat "${encryptionroot}")"; then
+                [ "${keyformat}" = "passphrase" ] && prompt_override="yes"
+            fi
+        fi
+
+        # If key location is a local file, check if file exists
+        if [ "${keylocation%%://*}" = "file" ]; then
+            keyfile="${keylocation#file://}"
+
+            # If the file does not exist yet, wait for udev to create device nodes
+            if [ ! -r "${keyfile}" ]; then
+                udevadm settle
+
+                # Wait up to 10 seconds for the key file to appear
+                if [ ! -r "${keyfile}" ]; then
+                    echo "Waiting for key ${keyfile} for ${encryptionroot}..."
+                    for _ in $(seq 1 20); do
+                        sleep 0.5s
+                        [ -r "${keyfile}" ] && break
+                    done
+                fi
+
+                if [ ! -r "${keyfile}" ]; then
+                    echo "Key ${keyfile} for ${encryptionroot} hasn't appeared. Trying anyway."
+                fi
+            fi
+        fi
+    fi
+
+    # Loop until key is loaded here or by another vector (SSH, for instance)
+    while [ "$(zfs get -H -o value keystatus "${encryptionroot}")" != "available" ]; do
+        # Try the default loading mechanism
+        zfs load-key "${encryptionroot}" && break
+
+        # Load failed; if the key is a passphrase read from a file, fall back to an interactive prompt
+        if [ -n "${prompt_override}" ]; then
+            echo "Unable to load key ${keylocation}; please type the passphrase"
+            echo "To retry the key file, interrupt now or keep entering an incorrect passphrase"
+            zfs load-key -L prompt "${encryptionroot}" && break
+        fi
+
+        # Throttle retry attempts
+        sleep 2
+    done
+
+    if [ -f /.encryptionroot ]; then
+        rm /.encryptionroot
+    fi
+}
+
 zfs_mount_handler () {
     if [ "${ZFS_DATASET}" = "bootfs" ] ; then
         if ! zfs_get_bootfs ; then
             # Lets import everything and try again
             zpool import ${ZPOOL_IMPORT_FLAGS} -N -a ${ZPOOL_FORCE}
             if ! zfs_get_bootfs ; then
-                die "ZFS: Cannot find bootfs."
+                err "ZFS: Cannot find bootfs."
+                exit 1
             fi
         fi
     fi
@@ -39,7 +113,7 @@ zfs_mount_handler () {
     local pool="${ZFS_DATASET%%/*}"
     local rwopt_exp="${rwopt:-ro}"
 
-    if ! zpool list -H "${pool}" >/dev/null 2>&1; then
+    if ! zpool list -H "${pool}" > /dev/null 2>&1; then
         if [ ! "${rwopt_exp}" = "rw" ]; then
             msg "ZFS: Importing pool ${pool} readonly."
             ZPOOL_IMPORT_FLAGS="${ZPOOL_IMPORT_FLAGS} -o readonly=on"
@@ -48,48 +122,68 @@ zfs_mount_handler () {
         fi
 
         if ! zpool import ${ZPOOL_IMPORT_FLAGS} -N "${pool}" ${ZPOOL_FORCE} ; then
-            die "ZFS: Unable to import pool ${pool}."
+            err "ZFS: Unable to import pool ${pool}."
+            exit 1
         fi
     fi
 
     local node="$1"
-    local tab_file="${node}/etc/fstab"
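+    # Mountpoint of the root dataset; its prefix is stripped from child mountpoints below so they are mounted under ${node}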
+    local rootmnt="$(zfs get -H -o value mountpoint "${ZFS_DATASET}")"
+    local tab_file="/etc/fstab"
     local zfs_datasets="$(zfs list -H -o name -t filesystem -r ${ZFS_DATASET})"
 
     # Mount the root, and any child datasets
     for dataset in ${zfs_datasets}; do
         mountpoint=$(zfs get -H -o value mountpoint "${dataset}")
-        case ${mountpoint} in
-            "none")
-                # skip this line/dataset.
-                ;;
-            "legacy")
-                if [ -f "${tab_file}" ]; then
-                    if findmnt -snero source -F "${tab_file}" -S "${dataset}" > /dev/null 2>&1; then
-                        opt=$(findmnt -snero options -F "${tab_file}" -S "${dataset}")
-                        mnt=$(findmnt -snero target -F "${tab_file}" -S "${dataset}")
-                        mount -t zfs -o "${opt}" "${dataset}" "${node}${mnt}"
-                    fi
+        canmount=$(zfs get -H -o value canmount "${dataset}")
+        # Skip datasets that should not be mounted here; the root dataset itself is never skipped
+        [ "${dataset}" != "${ZFS_DATASET}" -a \( "${canmount}" = "off" -o "${canmount}" = "noauto" -o "${mountpoint}" = "none" \) ] && continue
+        if [ "${mountpoint}" = "legacy" ]; then
+            if [ -f "${tab_file}" ]; then
+                if findmnt -snero source -F "${tab_file}" -S "${dataset}" > /dev/null 2>&1; then
+                    opt=$(findmnt -snero options -F "${tab_file}" -S "${dataset}")
+                    mnt=$(findmnt -snero target -F "${tab_file}" -S "${dataset}")
+                    zfs_decrypt_fs "${dataset}"
+                    mount -t zfs -o "${opt}" "${dataset}" "${node}${mnt}"
                 fi
-                ;;
-            *)
-                mount -t zfs -o "zfsutil,${rwopt_exp}" "${dataset}" "${node}${mountpoint}"
-                ;;
-        esac
+            fi
+        else
+            zfs_decrypt_fs "${dataset}"
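+            # Strip the root dataset's mountpoint prefix so child datasets are mounted relative to ${node}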
+            mount -t zfs -o "zfsutil,${rwopt_exp}" "${dataset}" "${node}/${mountpoint##${rootmnt}}"
+        fi
     done
 }
 
-run_hook() {
+set_flags() {
     # Force import the pools, useful if the pool has not properly been exported using 'zpool export <pool>'
     [ ! "${zfs_force}" = "" ] && ZPOOL_FORCE="-f"
 
+    # Disable the late hook's 'zpool import -a'; useful when zfs-import-cache.service should import the remaining pools instead
+    [ ! "${zfs_boot_only}" = "" ] && ZFS_BOOT_ONLY="1"
+
     # Add import directory to import command flags
     [ ! "${zfs_import_dir}" = "" ] && ZPOOL_IMPORT_FLAGS="${ZPOOL_IMPORT_FLAGS} -d ${zfs_import_dir}"
+    [ "${zfs_import_dir}" = "" ] && [ -f /etc/zfs/zpool.cache.org ] && ZPOOL_IMPORT_FLAGS="${ZPOOL_IMPORT_FLAGS} -c /etc/zfs/zpool.cache.org"
+}
+
+run_hook() {
+    set_flags
 
     # Wait 15 seconds for ZFS devices to show up
     [ "${zfs_wait}" = "" ] && ZFS_WAIT="15" || ZFS_WAIT="${zfs_wait}"
 
-    [ "${root}" = "zfs" ] && mount_handler="zfs_mount_handler"
+    case ${root} in
+        # root=zfs
+        "zfs")
+            ZFS_DATASET="bootfs"
+            mount_handler="zfs_mount_handler"
+            ;;
+        # root=ZFS=... syntax (grub)
+        "ZFS="*)
+            mount_handler="zfs_mount_handler"
+            ZFS_DATASET="${root#*[=]}"
+            ;;
+    esac
 
     case ${zfs} in
         "")
@@ -98,22 +192,46 @@ run_hook() {
         auto|bootfs)
             ZFS_DATASET="bootfs"
             mount_handler="zfs_mount_handler"
+            local pool="[a-zA-Z][^ ]*"
             ;;
         *)
             ZFS_DATASET="${zfs}"
             mount_handler="zfs_mount_handler"
+            local pool="${ZFS_DATASET%%/*}"
             ;;
     esac
 
-    # Allow up to n seconds for zfs device to show up
-    for i in $(seq 1 ${ZFS_WAIT}); do
-        [ -c "/dev/zfs" ] && break
+    # Allow at least ZFS_WAIT seconds for the zfs device to show up.  When
+    # zfs_import_dir is used instead of zpool.cache, listing the available
+    # pools can be slow; checking the timer at the top of the loop and
+    # breaking at the bottom guarantees one final 'zpool import' pass.
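+    # A background sleep acts as the timer; 'kill -0' below reports whether it is still running.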
+    sleep ${ZFS_WAIT} & pid=$!
+    local break_after=0
+    while :; do
+        kill -0 $pid > /dev/null 2>&1 || break_after=1
+        if [ -c "/dev/zfs" ]; then
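+            # Parse 'zpool import' output and leave the wait loop once the pool is reported ONLINE with no UNAVAIL devices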
+            zpool import ${ZPOOL_IMPORT_FLAGS} | awk "
+                BEGIN     { pool_found=0; online=0; unavail=0 }
+                /^	${pool} .*/ { pool_found=1 }
+                /^\$/      { pool_found=0 }
+                /UNAVAIL/ { if (pool_found == 1) { unavail=1 } }
+                /ONLINE/  { if (pool_found == 1) { online=1 } }
+                END       { if (online == 1 && unavail != 1)
+                              { exit 0 }
+                            else
+                              { exit 1 }
+                          }" && break
+        fi
+        [ "${break_after}" = 1 ] && break
         sleep 1
     done
+    kill $pid > /dev/null 2>&1
 }
 
 run_latehook () {
-    zpool import -N -a ${ZPOOL_FORCE}
+    set_flags
+    # Only run 'zpool import' if flags were set (cache file found or zfs_import_dir specified) and zfs_boot_only is not set
+    [ ! "${ZPOOL_IMPORT_FLAGS}" = "" ] && [ "${ZFS_BOOT_ONLY}" = "" ] && zpool import ${ZPOOL_IMPORT_FLAGS} -N -a ${ZPOOL_FORCE}
 }
 
 # vim:set ts=4 sw=4 ft=sh et:
diff --git a/zfs.initcpio.install b/zfs.initcpio.install
index 589b46b..fd3387f 100644
--- a/zfs.initcpio.install
+++ b/zfs.initcpio.install
@@ -22,7 +22,8 @@ build() {
         zstreamdump \
         /lib/udev/vdev_id \
         /lib/udev/zvol_id \
-        findmnt
+        findmnt \
+        udevadm
 
     map add_file \
         /lib/udev/rules.d/60-zvol.rules \
@@ -38,9 +39,9 @@ build() {
     # allow mount(8) to "autodetect" ZFS
     echo 'zfs' >>"${BUILDROOT}/etc/filesystems"
 
-    [[ -f /etc/zfs/zpool.cache ]] && add_file "/etc/zfs/zpool.cache"
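+    # Copy the host's pool cache under a different name; the hook passes it explicitly with 'zpool import -c' in set_flags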
+    [[ -f /etc/zfs/zpool.cache ]] && cp "/etc/zfs/zpool.cache" "${BUILDROOT}/etc/zfs/zpool.cache.org"
     [[ -f /etc/modprobe.d/zfs.conf ]] && add_file "/etc/modprobe.d/zfs.conf"
-    [[ -f /etc/hostid ]] && add_file "/etc/hostid"
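+    # /etc/fstab is needed inside the initramfs so the hook can mount datasets with legacy mountpoints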
+    [[ -f /etc/fstab ]] && add_file "/etc/fstab"
 }
 
 help() {