;;
set|inherit)
- # Only act if the mountpoint or canmount setting is altered.
- case "${ZEVENT_HISTORY_INTERNAL_STR}" in
- canmount=*|mountpoint=*) ;;
+ # Only act if one of the tracked properties is altered.
+ case "${ZEVENT_HISTORY_INTERNAL_STR%%=*}" in
+ canmount|mountpoint|atime|relatime|devices|exec| \
+ readonly|setuid|nbmand) ;;
*) exit 0 ;;
esac
;;
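Note: the "${ZEVENT_HISTORY_INTERNAL_STR%%=*}" expansion removes the longest
suffix matching "=*", leaving only the property name, so a single pattern
list covers every "prop=value" history string. A minimal sketch, using
hypothetical sample strings:

    str='canmount=noauto'
    echo "${str%%=*}"    # prints: canmount
    str='mountpoint=/var/log'
    echo "${str%%=*}"    # prints: mountpoint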
zed_lock zfs-list
trap abort_alter EXIT
-"${ZFS}" list -H -tfilesystem -oname,mountpoint,canmount -r "${ZEVENT_POOL}" \
- >"${FSLIST_TMP}"
+PROPS="name,mountpoint,canmount,atime,relatime,devices,exec,readonly"
+PROPS="${PROPS},setuid,nbmand"
+
+"${ZFS}" list -H -t filesystem -o $PROPS -r "${ZEVENT_POOL}" > "${FSLIST_TMP}"
# Sort the output so that it is stable
sort "${FSLIST_TMP}" -o "${FSLIST_TMP}"
mkdir -p "${req_dir}"
# All needed information about each ZFS is available from
-# zfs list -H -t filesystem -oname,mountpoint,canmount
+# zfs list -H -t filesystem -o <properties>
# cached in $FSLIST, and each line is processed by the following function:
+# See the positional assignments below for the properties and their order.
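Note: each cached line is the tab-separated output of "zfs list -H" in the
property order above. A hypothetical line for a dataset rpool/home, with
illustrative values only:

    rpool/home  /home  on  on  off  on  on  off  on  off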
process_line() {
+ # Positional fields, in the order of the cached zfs list output:
+ dataset="${1}"
+ p_mountpoint="${2}"
+ p_canmount="${3}"
+ p_atime="${4}"
+ p_relatime="${5}"
+ p_devices="${6}"
+ p_exec="${7}"
+ p_readonly="${8}"
+ p_setuid="${9}"
+ p_nbmand="${10}"
+
# Check for canmount=off.
- if [ "${3}" = "off" ] ; then
+ if [ "${p_canmount}" = "off" ] ; then
return
- elif [ "${3}" = "noauto" ] ; then
+ elif [ "${p_canmount}" = "noauto" ] ; then
# Don't let a noauto-marked mountpoint block an "auto"-marked mountpoint.
return
- elif [ "${3}" = "on" ] ; then
+ elif [ "${p_canmount}" = "on" ] ; then
: # This is OK
else
do_fail "invalid canmount"
fi
# Check for legacy and blank mountpoints.
- if [ "${2}" = "legacy" ] ; then
+ if [ "${p_mountpoint}" = "legacy" ] ; then
return
- elif [ "${2}" = "none" ] ; then
+ elif [ "${p_mountpoint}" = "none" ] ; then
return
- elif [ "${2%"${2#?}"}" != "/" ] ; then
+ elif [ "${p_mountpoint%"${p_mountpoint#?}"}" != "/" ] ; then
do_fail "invalid mountpoint $*"
fi
# Escape the mountpoint per systemd policy.
- mountfile="$(systemd-escape "${2#?}").mount"
+ mountfile="$(systemd-escape "${p_mountpoint#?}").mount"
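Note: systemd-escape translates the mount path into a valid unit name; the
leading "/" was already dropped by ${p_mountpoint#?}. The expected mapping,
sketched with an example path:

    systemd-escape "home/user"    # prints: home-user
    # so the generated unit is named home-user.mount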
+
+ # Parse the properties into mount options;
+ # see lib/libzfs/libzfs_mount.c:zfs_add_options for the canonical mapping.
+ opts=""
+
+ # atime
+ if [ "${p_atime}" = on ] ; then
+ # relatime
+ if [ "${p_relatime}" = on ] ; then
+ opts="${opts},atime,relatime"
+ elif [ "${p_relatime}" = off ] ; then
+ opts="${opts},atime,strictatime"
+ else
+ printf 'zfs-mount-generator.sh: (%s) invalid relatime\n' \
+ "${dataset}" >/dev/kmsg
+ fi
+ elif [ "${p_atime}" = off ] ; then
+ opts="${opts},noatime"
+ else
+ printf 'zfs-mount-generator.sh: (%s) invalid atime\n' \
+ "${dataset}" >/dev/kmsg
+ fi
+
+ # devices
+ if [ "${p_devices}" = on ] ; then
+ opts="${opts},dev"
+ elif [ "${p_devices}" = off ] ; then
+ opts="${opts},nodev"
+ else
+ printf 'zfs-mount-generator.sh: (%s) invalid devices\n' \
+ "${dataset}" >/dev/kmsg
+ fi
+
+ # exec
+ if [ "${p_exec}" = on ] ; then
+ opts="${opts},exec"
+ elif [ "${p_exec}" = off ] ; then
+ opts="${opts},noexec"
+ else
+ printf 'zfs-mount-generator.sh: (%s) invalid exec\n' \
+ "${dataset}" >/dev/kmsg
+ fi
+
+ # readonly
+ if [ "${p_readonly}" = on ] ; then
+ opts="${opts},ro"
+ elif [ "${p_readonly}" = off ] ; then
+ opts="${opts},rw"
+ else
+ printf 'zfs-mount-generator.sh: (%s) invalid readonly\n' \
+ "${dataset}" >/dev/kmsg
+ fi
+
+ # setuid
+ if [ "${p_setuid}" = on ] ; then
+ opts="${opts},suid"
+ elif [ "${p_setuid}" = off ] ; then
+ opts="${opts},nosuid"
+ else
+ printf 'zfs-mount-generator.sh: (%s) invalid setuid\n' \
+ "${dataset}" >/dev/kmsg
+ fi
+
+ # nbmand
+ if [ "${p_nbmand}" = on ] ; then
+ opts="${opts},mand"
+ elif [ "${p_nbmand}" = off ] ; then
+ opts="${opts},nomand"
+ else
+ printf 'zfs-mount-generator.sh: (%s) invalid nbmand\n' \
+ "${dataset}" >/dev/kmsg
+ fi
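Illustratively, for a dataset with atime=on, relatime=off, devices=on,
exec=on, readonly=off, setuid=on and nbmand=off, the checks above build:

    opts=",atime,strictatime,dev,exec,rw,suid,nomand"
    # so the unit written below carries:
    # Options=defaults,atime,strictatime,dev,exec,rw,suid,nomand,zfsutil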
# If the mountpoint has already been created, give it precedence.
if [ -e "${dest_norm}/${mountfile}" ] ; then
- printf 'zfs-mount-generator.sh: %s.mount already exists\n' "${2}" \
+ printf 'zfs-mount-generator.sh: %s already exists\n' "${mountfile}" \
>/dev/kmsg
return
fi
Wants=zfs-import.target
[Mount]
-Where=${2}
-What=${1}
+Where=${p_mountpoint}
+What=${dataset}
Type=zfs
-Options=zfsutil,auto
+Options=defaults${opts},zfsutil
EOF
- # Finally, create the appropriate dependencies based on the ZFS properties.
- [ "$3" = "on" ] & ln -s "../${mountfile}" "${req_dir}"
+ # Finally, create the dependency symlink. No canmount test is needed here:
+ # process_line already returned early for canmount=off and canmount=noauto.
+ ln -s "../${mountfile}" "${req_dir}"
}
# Feed each line into process_line
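A minimal sketch of that feed loop, assuming the cache files live under
"${FSLIST}" and relying on the default IFS to split the tab-separated
fields (values containing whitespace would need extra care):

    for cachefile in "${FSLIST}/"* ; do
        while read -r fs ; do
            # Intentionally unquoted so the line splits into fields.
            process_line ${fs}
        done < "${cachefile}"
    done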
vdev_id.8 \
zdb.8 \
zfs.8 \
- zfs-mount-generator.8 \
zfs-program.8 \
zgenhostid.8 \
zinject.8 \
zstreamdump.8
nodist_man_MANS = \
- zed.8
+ zed.8 \
+ zfs-mount-generator.8
EXTRA_DIST = \
- zed.8.in
+ zed.8.in \
+ zfs-mount-generator.8.in
-zed.8: $(srcdir)/zed.8.in
-
-do_subst = $(SED) \
- -e 's|@libexecdir[@]|$(libexecdir)|g' \
- -e 's|@runstatedir[@]|$(runstatedir)|g' \
- -e 's|@sysconfdir[@]|$(sysconfdir)|g'
-
-$(nodist_man_MANS): Makefile
- $(RM) $@ $@.tmp
- srcdir=''; \
- test -f ./$@.in || srcdir=$(srcdir)/; \
- $(do_subst) $${srcdir}$@.in >$@.tmp
- mv $@.tmp $@
+$(nodist_man_MANS): %: %.in
+ -$(SED) -e 's,@libexecdir\@,$(libexecdir),g' \
+ -e 's,@runstatedir\@,$(runstatedir),g' \
+ -e 's,@sysconfdir\@,$(sysconfdir),g' \
+ $< >'$@'
install-data-local:
$(INSTALL) -d -m 0755 "$(DESTDIR)$(mandir)/man8"
.TH "ZFS\-MOUNT\-GENERATOR" "8" "ZFS" "zfs-mount-generator" "\""
.SH "NAME"
-zfs\-mount\-generator \- generates systemd mount units for zfs
+zfs\-mount\-generator \- generates systemd mount units for ZFS
.SH SYNOPSIS
.B /lib/systemd/system-generators/zfs\-mount\-generator
.sp
.SH DESCRIPTION
-The zfs\-mount\-generator implements the \fBGenerators Specification\fP
+zfs\-mount\-generator implements the \fBGenerators Specification\fP
of
.BR systemd (1),
and is called during early boot to generate
of the command
.PP
.RS 4
-zfs list -H -oname,mountpoint,canmount
+zfs list -H -o name,mountpoint,canmount,atime,relatime,devices,exec,readonly,setuid,nbmand
.RE
.PP
for datasets that should be mounted by systemd, should be kept
.RE
.PP
.sp
+.SH EXAMPLE
+To begin, enable tracking for the pool:
+.PP
+.RS 4
+touch
+.RI @sysconfdir@/zfs/zfs-list.cache/ POOLNAME
+.RE
+.PP
+Then, enable the tracking ZEDLET:
+.PP
+.RS 4
+ln -s "@libexecdir@/zfs/zed.d/history_event-zfs-list-cacher.sh" "@sysconfdir@/zfs/zed.d/"
+
+systemctl enable zed.service
+
+systemctl restart zed.service
+.RE
+.PP
+Force the ZEDLET to run by setting canmount=on for at least one dataset in the pool:
+.PP
+.RS 4
+zfs set canmount=on
+.I DATASET
+.RE
+.PP
+This forces an update to the stale cache file.
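To verify that the ZEDLET has run, the cache file can be inspected; it
should now hold one line of property values per filesystem in the pool
(path shown assumes the default sysconfdir of /etc):

    cat /etc/zfs/zfs-list.cache/POOLNAME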
+.sp
.SH SEE ALSO
.BR zfs (5)
.BR zfs-events (5)