4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
25 #include <sys/zfs_context.h>
29 #include <sys/refcount.h>
30 #include <sys/nvpair.h>
32 #include <sys/kidmap.h>
34 #include <sys/zfs_vfsops.h>
35 #include <sys/zfs_znode.h>
37 #include <sys/zfs_fuid.h>
40 * FUID Domain table(s).
42 * The FUID table is stored as a packed nvlist of an array
43 * of nvlists which contain an index, domain string and offset
45 * During file system initialization the nvlist(s) are read and
46 * two AVL trees are created. One tree is keyed by the index number
47 * and the other by the domain string. Nodes are never removed from
48 * trees, but new entries may be added. If a new entry is added then
49 * the zfsvfs->z_fuid_dirty flag is set to true and the caller will then
50 * be responsible for calling zfs_fuid_sync() to sync the changes to disk.
/*
 * nvpair names used inside the packed FUID-table nvlist stored on disk.
 */
54 #define FUID_IDX "fuid_idx"
55 #define FUID_DOMAIN "fuid_domain"
56 #define FUID_OFFSET "fuid_offset"
57 #define FUID_NVP_ARRAY "fuid_nvlist"
/*
 * One in-memory domain-table entry; each node is linked into BOTH AVL
 * trees (by-index and by-domain-string).
 * NOTE(review): the struct member declarations are elided in this view —
 * confirm fields (f_idx, f_ksid, f_idxnode, f_domnode) against the full file.
 */
59 typedef struct fuid_domain {
/* Empty-string domain returned for index 0 / plain POSIX ids. */
66 static char *nulldomain = "";
69 * Compare two indexes.
/*
 * AVL comparator for the by-index tree: orders fuid_domain_t nodes by
 * their f_idx field.
 * NOTE(review): the -1/0/+1 return statements are elided in this view.
 */
72 idx_compare(const void *arg1, const void *arg2)
74 const fuid_domain_t *node1 = arg1;
75 const fuid_domain_t *node2 = arg2;
77 if (node1->f_idx < node2->f_idx)
79 else if (node1->f_idx > node2->f_idx)
85 * Compare two domain strings.
/*
 * AVL comparator for the by-domain tree: orders nodes by strcmp() of the
 * ksid domain name; the strcmp result is collapsed to -1/0/+1 for AVL.
 */
88 domain_compare(const void *arg1, const void *arg2)
90 const fuid_domain_t *node1 = arg1;
91 const fuid_domain_t *node2 = arg2;
94 val = strcmp(node1->f_ksid->kd_name, node2->f_ksid->kd_name);
/* NOTE(review): the val == 0 early return is elided in this view. */
97 return (val > 0 ? 1 : -1);
/*
 * Initialize the two empty FUID AVL trees: one keyed by numeric index,
 * one keyed by domain string.  A single fuid_domain_t node is linked
 * into both trees via its two embedded avl_node_t members.
 */
101 zfs_fuid_avl_tree_create(avl_tree_t *idx_tree, avl_tree_t *domain_tree)
103 avl_create(idx_tree, idx_compare,
104 sizeof (fuid_domain_t), offsetof(fuid_domain_t, f_idxnode));
105 avl_create(domain_tree, domain_compare,
106 sizeof (fuid_domain_t), offsetof(fuid_domain_t, f_domnode));
110 * load initial fuid domain and idx trees. This function is used by
111 * both the kernel and zdb.
/*
 * Read the on-disk FUID table object and populate both AVL trees.
 * The object's bonus buffer holds the packed-nvlist size; the object
 * data itself is the XDR-packed nvlist of (index, domain) entries.
 * NOTE(review): several lines (local declarations, a size check, the
 * return statement) are elided in this view.
 */
114 zfs_fuid_table_load(objset_t *os, uint64_t fuid_obj, avl_tree_t *idx_tree,
115 avl_tree_t *domain_tree)
120 ASSERT(fuid_obj != 0);
/* Bonus buffer of the FUID object stores the packed nvlist's byte size. */
121 VERIFY(0 == dmu_bonus_hold(os, fuid_obj,
123 fuid_size = *(uint64_t *)db->db_data;
124 dmu_buf_rele(db, FTAG);
128 nvlist_t *nvp = NULL;
133 packed = kmem_alloc(fuid_size, KM_SLEEP);
134 VERIFY(dmu_read(os, fuid_obj, 0,
135 fuid_size, packed, DMU_READ_PREFETCH) == 0);
136 VERIFY(nvlist_unpack(packed, fuid_size,
138 VERIFY(nvlist_lookup_nvlist_array(nvp, FUID_NVP_ARRAY,
139 &fuidnvp, &count) == 0);
/* One fuid_domain_t per nvlist entry, inserted into both trees. */
141 for (i = 0; i != count; i++) {
142 fuid_domain_t *domnode;
146 VERIFY(nvlist_lookup_string(fuidnvp[i], FUID_DOMAIN,
148 VERIFY(nvlist_lookup_uint64(fuidnvp[i], FUID_IDX,
151 domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP);
153 domnode->f_idx = idx;
/* ksid_lookupdomain() takes a hold on the shared domain string. */
154 domnode->f_ksid = ksid_lookupdomain(domain);
155 avl_add(idx_tree, domnode);
156 avl_add(domain_tree, domnode);
159 kmem_free(packed, fuid_size);
/*
 * Tear down both FUID AVL trees.  Each node lives in both trees, so the
 * first pass (domain tree) only releases the ksid domain hold and the
 * second pass (idx tree) frees the node memory exactly once.
 */
165 zfs_fuid_table_destroy(avl_tree_t *idx_tree, avl_tree_t *domain_tree)
167 fuid_domain_t *domnode;
171 while (domnode = avl_destroy_nodes(domain_tree, &cookie))
172 ksiddomain_rele(domnode->f_ksid);
174 avl_destroy(domain_tree);
/* NOTE(review): cookie appears to be re-zeroed between passes (line elided). */
176 while (domnode = avl_destroy_nodes(idx_tree, &cookie))
177 kmem_free(domnode, sizeof (fuid_domain_t));
178 avl_destroy(idx_tree);
/*
 * Look up a domain string by numeric index in the by-index AVL tree.
 * Returns the shared ksid domain name, or the empty nulldomain string
 * when the index is not present.
 */
182 zfs_fuid_idx_domain(avl_tree_t *idx_tree, uint32_t idx)
184 fuid_domain_t searchnode, *findnode;
187 searchnode.f_idx = idx;
189 findnode = avl_find(idx_tree, &searchnode, &loc);
191 return (findnode ? findnode->f_ksid->kd_name : nulldomain);
196 * Load the fuid table(s) into memory.
/*
 * One-time (idempotent) load of the FUID tables for a mounted file
 * system, serialized by z_fuid_lock as writer.  If the master-node ZAP
 * has no ZFS_FUID_TABLES entry yet, z_fuid_obj stays 0 and the trees
 * remain empty until the first zfs_fuid_sync().
 */
199 zfs_fuid_init(zfsvfs_t *zfsvfs)
201 rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);
/* Another thread may have completed the load while we waited. */
203 if (zfsvfs->z_fuid_loaded) {
204 rw_exit(&zfsvfs->z_fuid_lock);
208 zfs_fuid_avl_tree_create(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain);
/* Lookup failure is fine: it just means no FUID table exists yet. */
210 (void) zap_lookup(zfsvfs->z_os, MASTER_NODE_OBJ,
211 ZFS_FUID_TABLES, 8, 1, &zfsvfs->z_fuid_obj);
212 if (zfsvfs->z_fuid_obj != 0) {
213 zfsvfs->z_fuid_size = zfs_fuid_table_load(zfsvfs->z_os,
214 zfsvfs->z_fuid_obj, &zfsvfs->z_fuid_idx,
215 &zfsvfs->z_fuid_domain);
218 zfsvfs->z_fuid_loaded = B_TRUE;
219 rw_exit(&zfsvfs->z_fuid_lock);
223 * sync out AVL trees to persistent storage.
/*
 * Repack the in-memory domain tree as an XDR nvlist and write it to the
 * FUID object (creating the object and its master-node ZAP entry on
 * first use).  The new packed size is recorded in the object's bonus
 * buffer, and z_fuid_dirty is cleared.  Caller supplies an assigned tx.
 * NOTE(review): local declarations and some cleanup lines are elided in
 * this view.
 */
226 zfs_fuid_sync(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
233 fuid_domain_t *domnode;
/* Nothing to do if no new domains were added since the last sync. */
237 if (!zfsvfs->z_fuid_dirty) {
241 rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);
244 * First see if table needs to be created?
246 if (zfsvfs->z_fuid_obj == 0) {
247 zfsvfs->z_fuid_obj = dmu_object_alloc(zfsvfs->z_os,
248 DMU_OT_FUID, 1 << 14, DMU_OT_FUID_SIZE,
249 sizeof (uint64_t), tx);
250 VERIFY(zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
251 ZFS_FUID_TABLES, sizeof (uint64_t), 1,
252 &zfsvfs->z_fuid_obj, tx) == 0);
255 VERIFY(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
257 numnodes = avl_numnodes(&zfsvfs->z_fuid_idx);
258 fuids = kmem_alloc(numnodes * sizeof (void *), KM_SLEEP);
/* Build one sub-nvlist per domain node, walking the domain tree in order. */
259 for (i = 0, domnode = avl_first(&zfsvfs->z_fuid_domain); domnode; i++,
260 domnode = AVL_NEXT(&zfsvfs->z_fuid_domain, domnode)) {
261 VERIFY(nvlist_alloc(&fuids[i], NV_UNIQUE_NAME, KM_SLEEP) == 0);
262 VERIFY(nvlist_add_uint64(fuids[i], FUID_IDX,
263 domnode->f_idx) == 0);
/* FUID_OFFSET is written as 0; its meaning isn't used by this code. */
264 VERIFY(nvlist_add_uint64(fuids[i], FUID_OFFSET, 0) == 0);
265 VERIFY(nvlist_add_string(fuids[i], FUID_DOMAIN,
266 domnode->f_ksid->kd_name) == 0);
268 VERIFY(nvlist_add_nvlist_array(nvp, FUID_NVP_ARRAY,
269 fuids, numnodes) == 0);
/* The array nvlists were copied into nvp; free the temporaries. */
270 for (i = 0; i != numnodes; i++)
271 nvlist_free(fuids[i]);
272 kmem_free(fuids, numnodes * sizeof (void *));
273 VERIFY(nvlist_size(nvp, &nvsize, NV_ENCODE_XDR) == 0);
274 packed = kmem_alloc(nvsize, KM_SLEEP);
275 VERIFY(nvlist_pack(nvp, &packed, &nvsize,
276 NV_ENCODE_XDR, KM_SLEEP) == 0);
278 zfsvfs->z_fuid_size = nvsize;
279 dmu_write(zfsvfs->z_os, zfsvfs->z_fuid_obj, 0,
280 zfsvfs->z_fuid_size, packed, tx);
281 kmem_free(packed, zfsvfs->z_fuid_size);
/* Record the new packed size in the FUID object's bonus buffer. */
282 VERIFY(0 == dmu_bonus_hold(zfsvfs->z_os, zfsvfs->z_fuid_obj,
284 dmu_buf_will_dirty(db, tx);
285 *(uint64_t *)db->db_data = zfsvfs->z_fuid_size;
286 dmu_buf_rele(db, FTAG);
288 zfsvfs->z_fuid_dirty = B_FALSE;
289 rw_exit(&zfsvfs->z_fuid_lock);
293 * Query domain table for a given domain.
295 * If domain isn't found and addok is set, it is added to AVL trees and
296 * the zfsvfs->z_fuid_dirty flag will be set to TRUE. It will then be
297 * necessary for the caller or another thread to detect the dirty table
298 * and sync out the changes.
/*
 * Returns the table index for `domain', setting *retdomain to the
 * canonical (shared ksid) domain string.  Index 0 means the empty
 * "nobody" domain.  Insertion upgrades the lock from reader to writer;
 * on a failed tryupgrade the lookup is retried with the writer lock.
 * NOTE(review): the retry goto/label and some returns are elided here.
 */
301 zfs_fuid_find_by_domain(zfsvfs_t *zfsvfs, const char *domain,
302 char **retdomain, boolean_t addok)
304 fuid_domain_t searchnode, *findnode;
306 krw_t rw = RW_READER;
309 * If the dummy "nobody" domain then return an index of 0
310 * to cause the created FUID to be a standard POSIX id
311 * for the user nobody.
313 if (domain[0] == '\0') {
315 *retdomain = nulldomain;
/* Hold the shared ksid domain; released below if the node already exists. */
319 searchnode.f_ksid = ksid_lookupdomain(domain);
321 *retdomain = searchnode.f_ksid->kd_name;
322 if (!zfsvfs->z_fuid_loaded)
323 zfs_fuid_init(zfsvfs);
326 rw_enter(&zfsvfs->z_fuid_lock, rw);
327 findnode = avl_find(&zfsvfs->z_fuid_domain, &searchnode, &loc);
330 rw_exit(&zfsvfs->z_fuid_lock);
/* Drop the extra hold taken for the search; the tree node keeps its own. */
331 ksiddomain_rele(searchnode.f_ksid);
332 return (findnode->f_idx);
334 fuid_domain_t *domnode;
/* Need the writer lock to insert; retry from the top if upgrade fails. */
337 if (rw == RW_READER && !rw_tryupgrade(&zfsvfs->z_fuid_lock)) {
338 rw_exit(&zfsvfs->z_fuid_lock);
343 domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP);
344 domnode->f_ksid = searchnode.f_ksid;
/* Indices are 1-based: 0 is reserved for the POSIX "nobody" domain. */
346 retidx = domnode->f_idx = avl_numnodes(&zfsvfs->z_fuid_idx) + 1;
348 avl_add(&zfsvfs->z_fuid_domain, domnode);
349 avl_add(&zfsvfs->z_fuid_idx, domnode);
/* Caller (or another thread) must notice the dirty flag and sync. */
350 zfsvfs->z_fuid_dirty = B_TRUE;
351 rw_exit(&zfsvfs->z_fuid_lock);
354 rw_exit(&zfsvfs->z_fuid_lock);
360 * Query domain table by index, returning domain string
362 * Returns a pointer from an avl node of the domain string.
/*
 * Index 0 or a non-FUID-capable file system maps to the empty domain;
 * otherwise the tables are loaded on demand and the by-index tree is
 * consulted under the reader lock.
 * NOTE(review): the early-return of nulldomain is elided in this view.
 */
366 zfs_fuid_find_by_idx(zfsvfs_t *zfsvfs, uint32_t idx)
370 if (idx == 0 || !zfsvfs->z_use_fuids)
373 if (!zfsvfs->z_fuid_loaded)
374 zfs_fuid_init(zfsvfs);
376 rw_enter(&zfsvfs->z_fuid_lock, RW_READER);
/* Only search if a table exists on disk or unsynced entries are present. */
378 if (zfsvfs->z_fuid_obj || zfsvfs->z_fuid_dirty)
379 domain = zfs_fuid_idx_domain(&zfsvfs->z_fuid_idx, idx);
382 rw_exit(&zfsvfs->z_fuid_lock);
/*
 * Convenience wrapper: map a znode's stored owner and group FUIDs to
 * POSIX uid/gid in one call.
 */
389 zfs_fuid_map_ids(znode_t *zp, cred_t *cr, uid_t *uidp, uid_t *gidp)
391 *uidp = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_uid, cr, ZFS_OWNER);
392 *gidp = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_gid, cr, ZFS_GROUP);
/*
 * Map a FUID to a POSIX id via the idmap service: the FUID's index
 * selects the domain string and its RID is translated by kidmap.
 * NOTE(review): the index==0 (plain POSIX id) fast path and the final
 * return appear elided in this view.
 */
396 zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid,
397 cred_t *cr, zfs_fuid_type_t type)
399 uint32_t index = FUID_INDEX(fuid);
406 domain = zfs_fuid_find_by_idx(zfsvfs, index);
407 ASSERT(domain != NULL);
/* Owner/user ACE types map to a uid; everything else maps to a gid. */
410 if (type == ZFS_OWNER || type == ZFS_ACE_USER) {
411 (void) kidmap_getuidbysid(crgetzone(cr), domain,
412 FUID_RID(fuid), &id);
414 (void) kidmap_getgidbysid(crgetzone(cr), domain,
415 FUID_RID(fuid), &id);
424 * Add a FUID node to the list of fuid's being created for this
427 * If ACL has multiple domains, then keep only one copy of each unique
/*
 * Record a FUID (domain index + rid) in the per-operation
 * zfs_fuid_info_t used for intent-log replay.  Domains are deduplicated
 * in a linked list; the FUID is log-encoded against the 1-based position
 * of its domain within that list (fuididx), not the global table index.
 * NOTE(review): the "found" short-circuit around the insert and some
 * assignments are elided in this view.
 */
431 zfs_fuid_node_add(zfs_fuid_info_t **fuidpp, const char *domain, uint32_t rid,
432 uint64_t idx, uint64_t id, zfs_fuid_type_t type)
435 zfs_fuid_domain_t *fuid_domain;
436 zfs_fuid_info_t *fuidp;
438 boolean_t found = B_FALSE;
/* Lazily allocate the info struct on first use. */
441 *fuidpp = zfs_fuid_info_alloc();
445 * First find fuid domain index in linked list
447 * If one isn't found then create an entry.
450 for (fuididx = 1, fuid_domain = list_head(&fuidp->z_domains);
451 fuid_domain; fuid_domain = list_next(&fuidp->z_domains,
452 fuid_domain), fuididx++) {
453 if (idx == fuid_domain->z_domidx) {
460 fuid_domain = kmem_alloc(sizeof (zfs_fuid_domain_t), KM_SLEEP);
461 fuid_domain->z_domain = domain;
462 fuid_domain->z_domidx = idx;
463 list_insert_tail(&fuidp->z_domains, fuid_domain);
/* Track total string storage needed when the log record is built. */
464 fuidp->z_domain_str_sz += strlen(domain) + 1;
465 fuidp->z_domain_cnt++;
/* ACE-type FUIDs go on the z_fuids list; owner/group are stored directly. */
468 if (type == ZFS_ACE_USER || type == ZFS_ACE_GROUP) {
471 * Now allocate fuid entry and add it on the end of the list
474 fuid = kmem_alloc(sizeof (zfs_fuid_t), KM_SLEEP);
476 fuid->z_domidx = idx;
477 fuid->z_logfuid = FUID_ENCODE(fuididx, rid);
479 list_insert_tail(&fuidp->z_fuids, fuid);
482 if (type == ZFS_OWNER)
483 fuidp->z_fuid_owner = FUID_ENCODE(fuididx, rid);
485 fuidp->z_fuid_group = FUID_ENCODE(fuididx, rid);
490 * Create a file system FUID, based on information in the users cred
492 * If cred contains KSID_OWNER then it should be used to determine
493 * the uid otherwise cred's uid will be used. By default cred's gid
494 * is used unless it's an ephemeral ID in which case KSID_GROUP will
495 * be used if it exists.
/*
 * Returns either a plain POSIX id (when the id is not ephemeral or
 * FUIDs are unsupported) or an encoded FUID built from the cred's ksid
 * domain+rid, registering the domain in the table and logging the FUID
 * via zfs_fuid_node_add().
 */
498 zfs_fuid_create_cred(zfsvfs_t *zfsvfs, zfs_fuid_type_t type,
499 cred_t *cr, zfs_fuid_info_t **fuidp)
508 VERIFY(type == ZFS_OWNER || type == ZFS_GROUP);
510 ksid = crgetsid(cr, (type == ZFS_OWNER) ? KSID_OWNER : KSID_GROUP);
/* No ksid or no FUID support: fall back to plain POSIX credentials. */
512 if (!zfsvfs->z_use_fuids || (ksid == NULL)) {
513 id = (type == ZFS_OWNER) ? crgetuid(cr) : crgetgid(cr);
/* Ephemeral id with no way to express it: degrade to nobody. */
515 if (IS_EPHEMERAL(id))
516 return ((type == ZFS_OWNER) ? UID_NOBODY : GID_NOBODY);
518 return ((uint64_t)id);
522 * ksid is present and FUID is supported
524 id = (type == ZFS_OWNER) ? ksid_getid(ksid) : crgetgid(cr);
/* Non-ephemeral ids are stored directly as POSIX ids. */
526 if (!IS_EPHEMERAL(id))
527 return ((uint64_t)id);
529 if (type == ZFS_GROUP)
530 id = ksid_getid(ksid);
532 rid = ksid_getrid(ksid);
533 domain = ksid_getdomain(ksid);
/* Register (or look up) the domain, possibly dirtying the FUID table. */
535 idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE);
537 zfs_fuid_node_add(fuidp, kdomain, rid, idx, id, type);
539 return (FUID_ENCODE(idx, rid));
543 * Create a file system FUID for an ACL ace
544 * or a chown/chgrp of the file.
545 * This is similar to zfs_fuid_create_cred, except that
546 * we can't find the domain + rid information in the
547 * cred. Instead we have to query Winchester for the
550 * During replay operations the domain+rid information is
551 * found in the zfs_fuid_info_t that the replay code has
552 * attached to the zfsvfs of the file system.
/*
 * Turn an id into a stored FUID.  Three paths:
 *  - non-ephemeral/already-FUID ids are returned unchanged;
 *  - during ZIL replay the domain+rid come from the attached
 *    zfs_fuid_info_t (selected by `type');
 *  - otherwise kidmap is queried for the id's SID.
 * NOTE(review): the switch statement around the replay cases, the
 * NOBODY fallback, and several locals are elided in this view.
 */
555 zfs_fuid_create(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr,
556 zfs_fuid_type_t type, zfs_fuid_info_t **fuidpp)
560 uint32_t fuid_idx = FUID_INDEX(id);
564 zfs_fuid_t *zfuid = NULL;
565 zfs_fuid_info_t *fuidp = NULL;
568 * If POSIX ID, or entry is already a FUID then
571 * We may also be handed an already FUID'ized id via
/* Fast path: nothing to translate. */
575 if (!zfsvfs->z_use_fuids || !IS_EPHEMERAL(id) || fuid_idx != 0)
578 if (zfsvfs->z_replay) {
579 fuidp = zfsvfs->z_fuid_replay;
582 * If we are passed an ephemeral id, but no
583 * fuid_info was logged then return NOBODY.
584 * This is most likely a result of idmap service
585 * not being available.
590 VERIFY3U(type, >=, ZFS_OWNER);
591 VERIFY3U(type, <=, ZFS_ACE_GROUP);
/* ACE-type FUIDs are consumed in order from the replayed z_fuids list. */
596 zfuid = list_head(&fuidp->z_fuids);
597 rid = FUID_RID(zfuid->z_logfuid);
598 idx = FUID_INDEX(zfuid->z_logfuid);
601 rid = FUID_RID(fuidp->z_fuid_owner);
602 idx = FUID_INDEX(fuidp->z_fuid_owner);
605 rid = FUID_RID(fuidp->z_fuid_group);
606 idx = FUID_INDEX(fuidp->z_fuid_group);
/* Replay indices are 1-based positions into the logged domain table. */
609 domain = fuidp->z_domain_table[idx - 1];
611 if (type == ZFS_OWNER || type == ZFS_ACE_USER)
612 status = kidmap_getsidbyuid(crgetzone(cr), id,
615 status = kidmap_getsidbygid(crgetzone(cr), id,
620 * When returning nobody we will need to
621 * make a dummy fuid table entry for logging
629 idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, B_TRUE);
/* Outside replay, log the FUID; during replay, retire the consumed entry. */
631 if (!zfsvfs->z_replay)
632 zfs_fuid_node_add(fuidpp, kdomain,
634 else if (zfuid != NULL) {
635 list_remove(&fuidp->z_fuids, zfuid);
636 kmem_free(zfuid, sizeof (zfs_fuid_t));
638 return (FUID_ENCODE(idx, rid));
/*
 * Free the in-memory FUID trees at unmount time; a no-op if the tables
 * were never loaded.  Serialized by z_fuid_lock as writer.
 */
642 zfs_fuid_destroy(zfsvfs_t *zfsvfs)
644 rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);
645 if (!zfsvfs->z_fuid_loaded) {
646 rw_exit(&zfsvfs->z_fuid_lock);
649 zfs_fuid_table_destroy(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain);
650 rw_exit(&zfsvfs->z_fuid_lock);
654 * Allocate zfs_fuid_info for tracking FUIDs created during
655 * zfs_mknode, VOP_SETATTR() or VOP_SETSECATTR()
/*
 * Zero-allocate a zfs_fuid_info_t and initialize its two embedded
 * lists (unique domains, and per-ACE FUID entries).
 * NOTE(review): the return statement is elided in this view.
 */
658 zfs_fuid_info_alloc(void)
660 zfs_fuid_info_t *fuidp;
662 fuidp = kmem_zalloc(sizeof (zfs_fuid_info_t), KM_SLEEP);
663 list_create(&fuidp->z_domains, sizeof (zfs_fuid_domain_t),
664 offsetof(zfs_fuid_domain_t, z_next));
665 list_create(&fuidp->z_fuids, sizeof (zfs_fuid_t),
666 offsetof(zfs_fuid_t, z_next));
671 * Release all memory associated with zfs_fuid_info_t
/*
 * Drain and free both lists, the optional replay domain-string table,
 * and finally the info struct itself.
 */
674 zfs_fuid_info_free(zfs_fuid_info_t *fuidp)
677 zfs_fuid_domain_t *zdomain;
679 while ((zfuid = list_head(&fuidp->z_fuids)) != NULL) {
680 list_remove(&fuidp->z_fuids, zfuid);
681 kmem_free(zfuid, sizeof (zfs_fuid_t));
/* z_domain_table is only allocated by the ZIL replay code. */
684 if (fuidp->z_domain_table != NULL)
685 kmem_free(fuidp->z_domain_table,
686 (sizeof (char **)) * fuidp->z_domain_cnt);
688 while ((zdomain = list_head(&fuidp->z_domains)) != NULL) {
689 list_remove(&fuidp->z_domains, zdomain);
690 kmem_free(zdomain, sizeof (zfs_fuid_domain_t));
693 kmem_free(fuidp, sizeof (zfs_fuid_info_t));
697 * Check to see if id is a groupmember. If cred
698 * has ksid info then sidlist is checked first
699 * and if still not found then POSIX groups are checked
701 * Will use a straight FUID compare when possible.
/*
 * Membership test for `id' (POSIX gid or FUID) against cr.  The cred's
 * ksid list is scanned first — by raw id when possible, otherwise by
 * resolving the FUID's domain+rid; falls through to groupmember() on
 * the POSIX groups.
 * NOTE(review): the early returns inside the scan loop are elided in
 * this view.
 */
704 zfs_groupmember(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr)
707 ksid_t *ksid = crgetsid(cr, KSID_GROUP);
708 ksidlist_t *ksidlist = crgetsidlist(cr);
713 if (ksid && ksidlist) {
716 uint32_t idx = FUID_INDEX(id);
717 uint32_t rid = FUID_RID(id);
719 ksid_groups = ksidlist->ksl_sids;
721 for (i = 0; i != ksidlist->ksl_nsid; i++) {
/* Direct id compare; CREATOR GROUP is excluded from this shortcut. */
723 if (id != IDMAP_WK_CREATOR_GROUP_GID &&
724 id == ksid_groups[i].ks_id) {
730 domain = zfs_fuid_find_by_idx(zfsvfs, idx);
731 ASSERT(domain != NULL);
734 IDMAP_WK_CREATOR_SID_AUTHORITY) == 0)
/* Otherwise match on (domain string, rid) pair. */
738 ksid_groups[i].ks_domain->kd_name) == 0) &&
739 rid == ksid_groups[i].ks_rid)
747 * Not found in ksidlist, check posix groups
749 gid = zfs_fuid_map_id(zfsvfs, id, cr, ZFS_GROUP);
750 return (groupmember(gid, cr));
/*
 * Reserve transaction space for a possible FUID-table sync: either the
 * creation of a new FUID object (plus its master-node ZAP entry) or a
 * rewrite of the existing one, sized by FUID_SIZE_ESTIMATE().
 * NOTE(review): the function continues past the end of this view.
 */
754 zfs_fuid_txhold(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
756 if (zfsvfs->z_fuid_obj == 0) {
757 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
758 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
759 FUID_SIZE_ESTIMATE(zfsvfs));
760 dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL);
762 dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj);
763 dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0,
764 FUID_SIZE_ESTIMATE(zfsvfs));