4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
25 #include <sys/zfs_context.h>
29 #include <sys/refcount.h>
30 #include <sys/nvpair.h>
32 #include <sys/kidmap.h>
34 #include <sys/zfs_vfsops.h>
35 #include <sys/zfs_znode.h>
37 #include <sys/zfs_fuid.h>
40 * FUID Domain table(s).
42 * The FUID table is stored as a packed nvlist of an array
43 * of nvlists which contain an index, domain string and offset
45 * During file system initialization the nvlist(s) are read and
46 * two AVL trees are created. One tree is keyed by the index number
47 * and the other by the domain string. Nodes are never removed from
48 * trees, but new entries may be added. If a new entry is added then
49 * the zfsvfs->z_fuid_dirty flag is set to true and the caller will then
50 * be responsible for calling zfs_fuid_sync() to sync the changes to disk.
54 #define FUID_IDX "fuid_idx"
55 #define FUID_DOMAIN "fuid_domain"
56 #define FUID_OFFSET "fuid_offset"
57 #define FUID_NVP_ARRAY "fuid_nvlist"
/*
 * In-memory node of the FUID domain table.  Full member list (AVL link
 * fields f_idxnode/f_domnode, index f_idx, ksid handle f_ksid -- all
 * referenced below) is elided from this view.
 */
59 typedef struct fuid_domain {
/* Domain string returned for index 0 / unknown indexes ("nobody"). */
66 static char *nulldomain = "";
69 * Compare two indexes.
/*
 * AVL comparator keyed on the numeric FUID index (f_idx).
 * NOTE(review): the return statements for the <, >, and == cases are
 * elided here -- presumably -1 / +1 / 0 per the AVL contract; confirm.
 */
72 idx_compare(const void *arg1, const void *arg2)
74 const fuid_domain_t *node1 = arg1;
75 const fuid_domain_t *node2 = arg2;
/* Three-way compare on the index key. */
77 if (node1->f_idx < node2->f_idx)
79 else if (node1->f_idx > node2->f_idx)
85 * Compare two domain strings.
/*
 * AVL comparator keyed on the domain string (ksid kd_name), using
 * strcmp() ordering.  The val == 0 branch is elided from this view;
 * the visible tail normalizes nonzero strcmp() results to +/-1.
 */
88 domain_compare(const void *arg1, const void *arg2)
90 const fuid_domain_t *node1 = arg1;
91 const fuid_domain_t *node2 = arg2;
94 val = strcmp(node1->f_ksid->kd_name, node2->f_ksid->kd_name);
/* Collapse strcmp()'s arbitrary magnitude to the -1/+1 AVL convention. */
97 return (val > 0 ? 1 : -1);
/*
 * Create the two empty FUID lookup trees: one ordered by index
 * (idx_compare / f_idxnode) and one ordered by domain string
 * (domain_compare / f_domnode).  Each fuid_domain_t node is linked
 * into both trees via its two embedded AVL link fields.
 */
101 zfs_fuid_avl_tree_create(avl_tree_t *idx_tree, avl_tree_t *domain_tree)
103 avl_create(idx_tree, idx_compare,
104 sizeof (fuid_domain_t), offsetof(fuid_domain_t, f_idxnode));
105 avl_create(domain_tree, domain_compare,
106 sizeof (fuid_domain_t), offsetof(fuid_domain_t, f_domnode));
110 * load initial fuid domain and idx trees. This function is used by
111 * both the kernel and zdb.
/*
 * Load the on-disk FUID table (object fuid_obj in objset os) into the
 * two AVL trees.  The table size is kept in the object's bonus buffer;
 * the table itself is a packed nvlist containing an array of
 * (index, domain) nvlists.  Used by both the kernel and zdb.
 * Presumably returns fuid_size (callers assign the result to
 * z_fuid_size) -- the return statement is elided from this view.
 */
114 zfs_fuid_table_load(objset_t *os, uint64_t fuid_obj, avl_tree_t *idx_tree,
115 avl_tree_t *domain_tree)
120 ASSERT(fuid_obj != 0);
/* Bonus buffer of the FUID object holds the packed nvlist's size. */
121 VERIFY(0 == dmu_bonus_hold(os, fuid_obj,
123 fuid_size = *(uint64_t *)db->db_data;
124 dmu_buf_rele(db, FTAG);
128 nvlist_t *nvp = NULL;
/* Read and unpack the whole table in one go. */
133 packed = kmem_alloc(fuid_size, KM_SLEEP);
134 VERIFY(dmu_read(os, fuid_obj, 0,
135 fuid_size, packed, DMU_READ_PREFETCH) == 0);
136 VERIFY(nvlist_unpack(packed, fuid_size,
138 VERIFY(nvlist_lookup_nvlist_array(nvp, FUID_NVP_ARRAY,
139 &fuidnvp, &count) == 0);
/* One AVL node per stored (index, domain) entry, linked into both trees. */
141 for (i = 0; i != count; i++) {
142 fuid_domain_t *domnode;
146 VERIFY(nvlist_lookup_string(fuidnvp[i], FUID_DOMAIN,
148 VERIFY(nvlist_lookup_uint64(fuidnvp[i], FUID_IDX,
151 domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP);
153 domnode->f_idx = idx;
/* ksid_lookupdomain() takes a hold released in zfs_fuid_table_destroy(). */
154 domnode->f_ksid = ksid_lookupdomain(domain);
155 avl_add(idx_tree, domnode);
156 avl_add(domain_tree, domnode);
159 kmem_free(packed, fuid_size);
/*
 * Tear down both FUID trees.  Each node lives in both trees, so the
 * first pass only drops the ksid domain holds, and the second pass
 * (over the idx tree) frees the nodes themselves.
 */
165 zfs_fuid_table_destroy(avl_tree_t *idx_tree, avl_tree_t *domain_tree)
167 fuid_domain_t *domnode;
171 while ((domnode = avl_destroy_nodes(domain_tree, &cookie)))
172 ksiddomain_rele(domnode->f_ksid);
174 avl_destroy(domain_tree);
/* NOTE(review): cookie reset between the two walks is elided -- confirm. */
176 while ((domnode = avl_destroy_nodes(idx_tree, &cookie)))
177 kmem_free(domnode, sizeof (fuid_domain_t));
178 avl_destroy(idx_tree);
/*
 * Look up the domain string for a given FUID index in the idx tree.
 * Returns the node's kd_name on a hit, or the empty "nobody" domain
 * (nulldomain) when the index is unknown.  Caller must hold whatever
 * lock protects the tree (callers use z_fuid_lock).
 */
182 zfs_fuid_idx_domain(avl_tree_t *idx_tree, uint32_t idx)
184 fuid_domain_t searchnode, *findnode;
187 searchnode.f_idx = idx;
189 findnode = avl_find(idx_tree, &searchnode, &loc);
191 return (findnode ? findnode->f_ksid->kd_name : nulldomain);
197 * Load the fuid table(s) into memory.
/*
 * Lazily load the filesystem's FUID table into zfsvfs.  Takes
 * z_fuid_lock as writer; bails out early if another thread already
 * loaded the table.  The zap_lookup() of ZFS_FUID_TABLES may fail
 * (return value deliberately ignored) -- z_fuid_obj stays 0 when no
 * table exists yet, and the trees remain empty.
 */
200 zfs_fuid_init(zfsvfs_t *zfsvfs)
202 rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);
/* Double-check under the lock: someone may have beaten us here. */
204 if (zfsvfs->z_fuid_loaded) {
205 rw_exit(&zfsvfs->z_fuid_lock);
209 zfs_fuid_avl_tree_create(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain);
211 (void) zap_lookup(zfsvfs->z_os, MASTER_NODE_OBJ,
212 ZFS_FUID_TABLES, 8, 1, &zfsvfs->z_fuid_obj);
213 if (zfsvfs->z_fuid_obj != 0) {
214 zfsvfs->z_fuid_size = zfs_fuid_table_load(zfsvfs->z_os,
215 zfsvfs->z_fuid_obj, &zfsvfs->z_fuid_idx,
216 &zfsvfs->z_fuid_domain);
219 zfsvfs->z_fuid_loaded = B_TRUE;
220 rw_exit(&zfsvfs->z_fuid_lock);
224 * sync out AVL trees to persistent storage.
/*
 * Write the in-memory FUID trees back to disk inside tx, creating the
 * on-disk table object (and its MASTER_NODE_OBJ zap entry) on first
 * use.  The trees are serialized to an nvlist array, XDR-packed,
 * dmu_write()n, and the packed size is recorded in the object's bonus
 * buffer.  Clears z_fuid_dirty on completion.
 */
227 zfs_fuid_sync(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
234 fuid_domain_t *domnode;
/* Nothing to do if no entries were added since the last sync. */
238 if (!zfsvfs->z_fuid_dirty) {
242 rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);
245 * First see if table needs to be created?
247 if (zfsvfs->z_fuid_obj == 0) {
248 zfsvfs->z_fuid_obj = dmu_object_alloc(zfsvfs->z_os,
249 DMU_OT_FUID, 1 << 14, DMU_OT_FUID_SIZE,
250 sizeof (uint64_t), tx);
251 VERIFY(zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
252 ZFS_FUID_TABLES, sizeof (uint64_t), 1,
253 &zfsvfs->z_fuid_obj, tx) == 0);
256 VERIFY(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
/* Both trees hold the same nodes, so either count works here. */
258 numnodes = avl_numnodes(&zfsvfs->z_fuid_idx);
259 fuids = kmem_alloc(numnodes * sizeof (void *), KM_SLEEP);
/* One nvlist per domain entry: index, (legacy) offset, domain string. */
260 for (i = 0, domnode = avl_first(&zfsvfs->z_fuid_domain); domnode; i++,
261 domnode = AVL_NEXT(&zfsvfs->z_fuid_domain, domnode)) {
262 VERIFY(nvlist_alloc(&fuids[i], NV_UNIQUE_NAME, KM_SLEEP) == 0);
263 VERIFY(nvlist_add_uint64(fuids[i], FUID_IDX,
264 domnode->f_idx) == 0);
265 VERIFY(nvlist_add_uint64(fuids[i], FUID_OFFSET, 0) == 0);
266 VERIFY(nvlist_add_string(fuids[i], FUID_DOMAIN,
267 domnode->f_ksid->kd_name) == 0);
269 VERIFY(nvlist_add_nvlist_array(nvp, FUID_NVP_ARRAY,
270 fuids, numnodes) == 0);
/* The array was deep-copied into nvp; free our temporary copies. */
271 for (i = 0; i != numnodes; i++)
272 nvlist_free(fuids[i]);
273 kmem_free(fuids, numnodes * sizeof (void *));
274 VERIFY(nvlist_size(nvp, &nvsize, NV_ENCODE_XDR) == 0);
275 packed = kmem_alloc(nvsize, KM_SLEEP);
276 VERIFY(nvlist_pack(nvp, &packed, &nvsize,
277 NV_ENCODE_XDR, KM_SLEEP) == 0);
279 zfsvfs->z_fuid_size = nvsize;
280 dmu_write(zfsvfs->z_os, zfsvfs->z_fuid_obj, 0,
281 zfsvfs->z_fuid_size, packed, tx);
282 kmem_free(packed, zfsvfs->z_fuid_size);
/* Record the packed size in the bonus buffer for zfs_fuid_table_load(). */
283 VERIFY(0 == dmu_bonus_hold(zfsvfs->z_os, zfsvfs->z_fuid_obj,
285 dmu_buf_will_dirty(db, tx);
286 *(uint64_t *)db->db_data = zfsvfs->z_fuid_size;
287 dmu_buf_rele(db, FTAG);
289 zfsvfs->z_fuid_dirty = B_FALSE;
290 rw_exit(&zfsvfs->z_fuid_lock);
294 * Query domain table for a given domain.
296 * If domain isn't found and addok is set, it is added to AVL trees and
297 * the zfsvfs->z_fuid_dirty flag will be set to TRUE. It will then be
298 * necessary for the caller or another thread to detect the dirty table
299 * and sync out the changes.
/*
 * Return the table index for `domain`, optionally (addok) adding a new
 * entry when the domain is unknown.  *retdomain is set to the stable
 * ksid-owned copy of the domain string.  A new entry marks the table
 * dirty; the caller (or another thread) must later call
 * zfs_fuid_sync().  Starts with the lock as reader and upgrades (or
 * retries -- the retry path is elided) before modifying the trees.
 */
302 zfs_fuid_find_by_domain(zfsvfs_t *zfsvfs, const char *domain,
303 char **retdomain, boolean_t addok)
305 fuid_domain_t searchnode, *findnode;
307 krw_t rw = RW_READER;
310 * If the dummy "nobody" domain then return an index of 0
311 * to cause the created FUID to be a standard POSIX id
312 * for the user nobody.
314 if (domain[0] == '\0') {
316 *retdomain = nulldomain;
/* Hold on the ksid doubles as the search key for the domain tree. */
320 searchnode.f_ksid = ksid_lookupdomain(domain);
322 *retdomain = searchnode.f_ksid->kd_name;
323 if (!zfsvfs->z_fuid_loaded)
324 zfs_fuid_init(zfsvfs);
327 rw_enter(&zfsvfs->z_fuid_lock, rw);
328 findnode = avl_find(&zfsvfs->z_fuid_domain, &searchnode, &loc);
/* Hit: drop our temporary ksid hold and return the existing index. */
331 rw_exit(&zfsvfs->z_fuid_lock);
332 ksiddomain_rele(searchnode.f_ksid);
333 return (findnode->f_idx);
335 fuid_domain_t *domnode;
/* Miss with addok: need the lock as writer; retry if upgrade fails. */
338 if (rw == RW_READER && !rw_tryupgrade(&zfsvfs->z_fuid_lock)) {
339 rw_exit(&zfsvfs->z_fuid_lock);
344 domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP);
/* Transfer the ksid hold taken above into the new node. */
345 domnode->f_ksid = searchnode.f_ksid;
/* Indexes are 1-based; 0 is reserved for POSIX ids. */
347 retidx = domnode->f_idx = avl_numnodes(&zfsvfs->z_fuid_idx) + 1;
349 avl_add(&zfsvfs->z_fuid_domain, domnode);
350 avl_add(&zfsvfs->z_fuid_idx, domnode);
351 zfsvfs->z_fuid_dirty = B_TRUE;
352 rw_exit(&zfsvfs->z_fuid_lock);
355 rw_exit(&zfsvfs->z_fuid_lock);
361 * Query domain table by index, returning domain string
363 * Returns a pointer from an avl node of the domain string.
/*
 * Return the domain string for a table index.  The pointer comes
 * straight from an AVL node's ksid, so it stays valid for the life of
 * the loaded table.  Index 0 (POSIX id) or a non-FUID filesystem
 * short-circuits; the early-return value is elided from this view
 * (presumably NULL or nulldomain -- confirm against callers' ASSERTs).
 */
367 zfs_fuid_find_by_idx(zfsvfs_t *zfsvfs, uint32_t idx)
371 if (idx == 0 || !zfsvfs->z_use_fuids)
374 if (!zfsvfs->z_fuid_loaded)
375 zfs_fuid_init(zfsvfs);
377 rw_enter(&zfsvfs->z_fuid_lock, RW_READER);
/* Only search if a table exists on disk or has unsynced entries. */
379 if (zfsvfs->z_fuid_obj || zfsvfs->z_fuid_dirty)
380 domain = zfs_fuid_idx_domain(&zfsvfs->z_fuid_idx, idx);
383 rw_exit(&zfsvfs->z_fuid_lock);
/*
 * Map a znode's stored owner/group FUIDs to POSIX uid/gid for the
 * caller's credential, via zfs_fuid_map_id().
 */
390 zfs_fuid_map_ids(znode_t *zp, cred_t *cr, uid_t *uidp, uid_t *gidp)
392 *uidp = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_uid, cr, ZFS_OWNER);
393 *gidp = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_gid, cr, ZFS_GROUP);
/*
 * Map a single FUID to a POSIX id.  For ephemeral FUIDs (nonzero
 * index) the domain is looked up and the idmap service queried for a
 * uid or gid, depending on `type`.  kidmap errors are deliberately
 * ignored -- presumably `id` then holds an idmap-provided fallback;
 * the POSIX-id fast path and final return are elided from this view.
 */
397 zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid,
398 cred_t *cr, zfs_fuid_type_t type)
400 uint32_t index = FUID_INDEX(fuid);
407 domain = zfs_fuid_find_by_idx(zfsvfs, index);
408 ASSERT(domain != NULL);
/* User-type lookups (owner or user ACE) map SID->uid, else SID->gid. */
410 if (type == ZFS_OWNER || type == ZFS_ACE_USER) {
411 (void) kidmap_getuidbysid(crgetzone(cr), domain,
412 FUID_RID(fuid), &id);
414 (void) kidmap_getgidbysid(crgetzone(cr), domain,
415 FUID_RID(fuid), &id);
421 * Add a FUID node to the list of fuid's being created for this
424 * If ACL has multiple domains, then keep only one copy of each unique
/*
 * Record a FUID created during this operation in *fuidpp (allocating
 * the zfs_fuid_info_t on first use) so it can be logged.  Domains are
 * kept unique in the z_domains list; `fuididx` is the 1-based position
 * of the domain in that list and is what gets encoded into the logged
 * FUID.  `domain` is expected to be the stable table-owned string
 * (callers pass kdomain from zfs_fuid_find_by_domain()).
 */
428 zfs_fuid_node_add(zfs_fuid_info_t **fuidpp, const char *domain, uint32_t rid,
429 uint64_t idx, uint64_t id, zfs_fuid_type_t type)
432 zfs_fuid_domain_t *fuid_domain;
433 zfs_fuid_info_t *fuidp;
435 boolean_t found = B_FALSE;
438 *fuidpp = zfs_fuid_info_alloc();
442 * First find fuid domain index in linked list
444 * If one isn't found then create an entry.
447 for (fuididx = 1, fuid_domain = list_head(&fuidp->z_domains);
448 fuid_domain; fuid_domain = list_next(&fuidp->z_domains,
449 fuid_domain), fuididx++) {
450 if (idx == fuid_domain->z_domidx) {
/* Domain not seen yet in this operation: append a new list entry. */
457 fuid_domain = kmem_alloc(sizeof (zfs_fuid_domain_t), KM_SLEEP);
458 fuid_domain->z_domain = domain;
459 fuid_domain->z_domidx = idx;
460 list_insert_tail(&fuidp->z_domains, fuid_domain);
/* Track total string size for later log-record sizing. */
461 fuidp->z_domain_str_sz += strlen(domain) + 1;
462 fuidp->z_domain_cnt++;
/* ACE user/group FUIDs go on the z_fuids list for replay. */
465 if (type == ZFS_ACE_USER || type == ZFS_ACE_GROUP) {
468 * Now allocate fuid entry and add it on the end of the list
471 fuid = kmem_alloc(sizeof (zfs_fuid_t), KM_SLEEP);
473 fuid->z_domidx = idx;
/* Logged FUIDs encode the per-operation domain list position. */
474 fuid->z_logfuid = FUID_ENCODE(fuididx, rid);
476 list_insert_tail(&fuidp->z_fuids, fuid);
/* Owner/group FUIDs are singletons, stored directly. */
479 if (type == ZFS_OWNER)
480 fuidp->z_fuid_owner = FUID_ENCODE(fuididx, rid);
482 fuidp->z_fuid_group = FUID_ENCODE(fuididx, rid);
487 * Create a file system FUID, based on information in the users cred
489 * If cred contains KSID_OWNER then it should be used to determine
490 * the uid otherwise cred's uid will be used. By default cred's gid
491 * is used unless it's an ephemeral ID in which case KSID_GROUP will
492 * be used if it exists.
/*
 * Create an owner or group FUID from the caller's credential.  If the
 * cred carries no ksid, or FUIDs are disabled, fall back to the cred's
 * POSIX uid/gid (NOBODY if that id is ephemeral).  Non-ephemeral ids
 * are returned as plain POSIX ids; only ephemeral ids get a real
 * (index, rid) FUID, which is also recorded via zfs_fuid_node_add()
 * for logging.
 */
495 zfs_fuid_create_cred(zfsvfs_t *zfsvfs, zfs_fuid_type_t type,
496 cred_t *cr, zfs_fuid_info_t **fuidp)
505 VERIFY(type == ZFS_OWNER || type == ZFS_GROUP);
507 ksid = crgetsid(cr, (type == ZFS_OWNER) ? KSID_OWNER : KSID_GROUP);
509 if (!zfsvfs->z_use_fuids || (ksid == NULL)) {
510 id = (type == ZFS_OWNER) ? crgetuid(cr) : crgetgid(cr);
/* Ephemeral id but no FUID support: degrade to nobody. */
512 if (IS_EPHEMERAL(id))
513 return ((type == ZFS_OWNER) ? UID_NOBODY : GID_NOBODY);
515 return ((uint64_t)id);
519 * ksid is present and FUID is supported
/* For groups, prefer the cred's gid; fall back to the ksid only below. */
521 id = (type == ZFS_OWNER) ? ksid_getid(ksid) : crgetgid(cr);
523 if (!IS_EPHEMERAL(id))
524 return ((uint64_t)id);
526 if (type == ZFS_GROUP)
527 id = ksid_getid(ksid);
529 rid = ksid_getrid(ksid);
530 domain = ksid_getdomain(ksid);
/* Ensure the domain is in the table (adds + dirties it if new). */
532 idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE);
534 zfs_fuid_node_add(fuidp, kdomain, rid, idx, id, type);
536 return (FUID_ENCODE(idx, rid));
540 * Create a file system FUID for an ACL ace
541 * or a chown/chgrp of the file.
542 * This is similar to zfs_fuid_create_cred, except that
543 * we can't find the domain + rid information in the
544 * cred. Instead we have to query Winchester for the
547 * During replay operations the domain+rid information is
548 * found in the zfs_fuid_info_t that the replay code has
549 * attached to the zfsvfs of the file system.
/*
 * Create a FUID for an ACL ace or chown/chgrp, where the cred has no
 * domain+rid.  Non-ephemeral or already-FUID'ized ids pass through
 * unchanged.  During replay the domain+rid come from the
 * zfs_fuid_info_t attached to the zfsvfs by the replay code; otherwise
 * the idmap service (kidmap) is queried for the id's SID.  The
 * replay-list consumption order relies on log records being replayed
 * in the same order the FUIDs were created.
 */
552 zfs_fuid_create(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr,
553 zfs_fuid_type_t type, zfs_fuid_info_t **fuidpp)
557 uint32_t fuid_idx = FUID_INDEX(id);
561 zfs_fuid_t *zfuid = NULL;
562 zfs_fuid_info_t *fuidp;
565 * If POSIX ID, or entry is already a FUID then
568 * We may also be handed an already FUID'ized id via
/* Pass-through fast path (return itself is elided from this view). */
572 if (!zfsvfs->z_use_fuids || !IS_EPHEMERAL(id) || fuid_idx != 0)
575 if (zfsvfs->z_replay) {
576 fuidp = zfsvfs->z_fuid_replay;
579 * If we are passed an ephemeral id, but no
580 * fuid_info was logged then return NOBODY.
581 * This is most likely a result of idmap service
582 * not being available.
/* ACE types consume the head of the logged z_fuids list... */
590 zfuid = list_head(&fuidp->z_fuids);
591 rid = FUID_RID(zfuid->z_logfuid);
592 idx = FUID_INDEX(zfuid->z_logfuid);
/* ...owner/group types use the logged singleton encodings. */
595 rid = FUID_RID(fuidp->z_fuid_owner);
596 idx = FUID_INDEX(fuidp->z_fuid_owner);
599 rid = FUID_RID(fuidp->z_fuid_group);
600 idx = FUID_INDEX(fuidp->z_fuid_group);
/* Logged indexes are 1-based positions into the domain string table. */
603 domain = fuidp->z_domain_table[idx -1];
/* Non-replay: ask idmap for the SID behind this ephemeral id. */
605 if (type == ZFS_OWNER || type == ZFS_ACE_USER)
606 status = kidmap_getsidbyuid(crgetzone(cr), id,
609 status = kidmap_getsidbygid(crgetzone(cr), id,
614 * When returning nobody we will need to
615 * make a dummy fuid table entry for logging
/* Map the (possibly dummy) domain into the on-disk table. */
623 idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE);
625 if (!zfsvfs->z_replay)
626 zfs_fuid_node_add(fuidpp, kdomain,
/* Replay: this logged entry is consumed; free it. */
628 else if (zfuid != NULL) {
629 list_remove(&fuidp->z_fuids, zfuid);
630 kmem_free(zfuid, sizeof (zfs_fuid_t));
632 return (FUID_ENCODE(idx, rid));
/*
 * Free the in-memory FUID trees for this filesystem (e.g. at unmount).
 * No-op if the table was never loaded.
 */
636 zfs_fuid_destroy(zfsvfs_t *zfsvfs)
638 rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);
639 if (!zfsvfs->z_fuid_loaded) {
640 rw_exit(&zfsvfs->z_fuid_lock);
643 zfs_fuid_table_destroy(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain);
644 rw_exit(&zfsvfs->z_fuid_lock);
648 * Allocate zfs_fuid_info for tracking FUIDs created during
649 * zfs_mknode, VOP_SETATTR() or VOP_SETSECATTR()
/*
 * Allocate a zeroed zfs_fuid_info_t with empty z_domains and z_fuids
 * lists, for tracking FUIDs created during zfs_mknode, VOP_SETATTR()
 * or VOP_SETSECATTR().  Freed with zfs_fuid_info_free().
 */
652 zfs_fuid_info_alloc(void)
654 zfs_fuid_info_t *fuidp;
656 fuidp = kmem_zalloc(sizeof (zfs_fuid_info_t), KM_SLEEP);
657 list_create(&fuidp->z_domains, sizeof (zfs_fuid_domain_t),
658 offsetof(zfs_fuid_domain_t, z_next));
659 list_create(&fuidp->z_fuids, sizeof (zfs_fuid_t),
660 offsetof(zfs_fuid_t, z_next));
665 * Release all memory associated with zfs_fuid_info_t
/*
 * Release a zfs_fuid_info_t and everything it owns: queued zfs_fuid_t
 * entries, the replay domain pointer table (if present), and the
 * domain list nodes.  The domain strings themselves are not freed
 * here -- they are owned by the FUID table's ksids.
 */
668 zfs_fuid_info_free(zfs_fuid_info_t *fuidp)
671 zfs_fuid_domain_t *zdomain;
673 while ((zfuid = list_head(&fuidp->z_fuids)) != NULL) {
674 list_remove(&fuidp->z_fuids, zfuid);
675 kmem_free(zfuid, sizeof (zfs_fuid_t));
678 if (fuidp->z_domain_table != NULL)
679 kmem_free(fuidp->z_domain_table,
680 (sizeof (char **)) * fuidp->z_domain_cnt);
682 while ((zdomain = list_head(&fuidp->z_domains)) != NULL) {
683 list_remove(&fuidp->z_domains, zdomain);
684 kmem_free(zdomain, sizeof (zfs_fuid_domain_t));
687 kmem_free(fuidp, sizeof (zfs_fuid_info_t));
691 * Check to see if id is a groupmember. If cred
692 * has ksid info then sidlist is checked first
693 * and if still not found then POSIX groups are checked
695 * Will use a straight FUID compare when possible.
/*
 * Check whether `id` (POSIX gid or group FUID) is one of the caller's
 * groups.  When the cred carries ksid info, the cred's SID list is
 * scanned first -- by raw id for POSIX entries, by domain+rid for
 * FUIDs; only if not found there do we map the id to a gid and fall
 * back to the POSIX groupmember() check.
 */
698 zfs_groupmember(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr)
700 ksid_t *ksid = crgetsid(cr, KSID_GROUP);
701 ksidlist_t *ksidlist = crgetsidlist(cr);
704 if (ksid && ksidlist) {
707 uint32_t idx = FUID_INDEX(id);
708 uint32_t rid = FUID_RID(id);
710 ksid_groups = ksidlist->ksl_sids;
712 for (i = 0; i != ksidlist->ksl_nsid; i++) {
/* POSIX-id entries compare directly (creator-group is special-cased). */
714 if (id != IDMAP_WK_CREATOR_GROUP_GID &&
715 id == ksid_groups[i].ks_id) {
/* FUID entries compare by domain string + rid. */
721 domain = zfs_fuid_find_by_idx(zfsvfs, idx);
722 ASSERT(domain != NULL);
725 IDMAP_WK_CREATOR_SID_AUTHORITY) == 0)
729 ksid_groups[i].ks_domain->kd_name) == 0) &&
730 rid == ksid_groups[i].ks_rid)
737 * Not found in ksidlist, check posix groups
739 gid = zfs_fuid_map_id(zfsvfs, id, cr, ZFS_GROUP);
740 return (groupmember(gid, cr));
/*
 * Reserve transaction space for a future zfs_fuid_sync(): bonus +
 * write holds on the FUID object (DMU_NEW_OBJECT if it doesn't exist
 * yet, plus the MASTER_NODE_OBJ zap hold needed to register it).
 * FUID_SIZE_ESTIMATE() bounds the packed-nvlist write size.
 */
744 zfs_fuid_txhold(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
746 if (zfsvfs->z_fuid_obj == 0) {
747 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
748 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
749 FUID_SIZE_ESTIMATE(zfsvfs));
750 dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL);
/* Existing table: hold the known object for overwrite. */
752 dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj);
753 dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0,
754 FUID_SIZE_ESTIMATE(zfsvfs));
757 #endif /* HAVE_ZPL */