4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24 * Portions Copyright 2011 iXsystems, Inc
25 * Copyright (c) 2013 by Delphix. All rights reserved.
26 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
29 #include <sys/zfs_context.h>
30 #include <sys/types.h>
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/sysmacros.h>
35 #include <sys/dmu_impl.h>
36 #include <sys/dmu_objset.h>
38 #include <sys/dnode.h>
41 #include <sys/sunddi.h>
42 #include <sys/sa_impl.h>
43 #include <sys/dnode.h>
44 #include <sys/errno.h>
45 #include <sys/zfs_context.h>
48 * ZFS System attributes:
50 * A generic mechanism to allow for arbitrary attributes
51 * to be stored in a dnode. The data will be stored in the bonus buffer of
52 * the dnode and if necessary a special "spill" block will be used to handle
53 * overflow situations. The spill block will be sized to fit the data
54 * from 512 - 128K. When a spill block is used the BP (blkptr_t) for the
55 * spill block is stored at the end of the current bonus buffer. Any
56 * attributes that would be in the way of the blkptr_t will be relocated
57 * into the spill block.
59 * Attribute registration:
61 * Stored persistently on a per dataset basis
62 * a mapping between attribute "string" names and their actual attribute
63 * numeric values, length, and byteswap function. The names are only used
64 * during registration. All attributes are known by their unique attribute
65 * id value. If an attribute can have a variable size then the value
66 * 0 will be used to indicate this.
70 * Attribute layouts are a way to compactly store multiple attributes, but
71 * without taking the overhead associated with managing each attribute
72 * individually. Since you will typically have the same set of attributes
73 * stored in the same order a single table will be used to represent that
74 * layout. The ZPL for example will usually have only about 10 different
75 * layouts (regular files, device files, symlinks,
76 * regular files + scanstamp, files/dir with extended attributes, and then
77 * you have the possibility of all of those minus ACL, because it would
78 * be kicked out into the spill block)
80 * Layouts are simply an array of the attributes and their
81 * ordering i.e. [0, 1, 4, 5, 2]
83 * Each distinct layout is given a unique layout number and that is what's
84 * stored in the header at the beginning of the SA data buffer.
86 * A layout only covers a single dbuf (bonus or spill). If a set of
87 * attributes is split up between the bonus buffer and a spill buffer then
88 * two different layouts will be used. This allows us to byteswap the
89 * spill without looking at the bonus buffer and keeps the on disk format of
90 * the bonus and spill buffer the same.
92 * Adding a single attribute will cause the entire set of attributes to
93 * be rewritten and could result in a new layout number being constructed
94 * as part of the rewrite if no such layout exists for the new set of
95 * attributes. The new attribute will be appended to the end of the already
96 * existing attributes.
98 * Both the attribute registration and attribute layout information are
99 * stored in normal ZAP attributes. There should be a small number of
100 * known layouts and the set of attributes is assumed to typically be quite
103 * The registered attributes and layout "table" information is maintained
104 * in core and a special "sa_os_t" is attached to the objset_t.
106 * A special interface is provided to allow for quickly applying
107 * a large set of attributes at once. sa_replace_all_by_template() is
108 * used to set an array of attributes. This is used by the ZPL when
109 * creating a brand new file. The template that is passed into the function
110 * specifies the attribute, size for variable length attributes, location of
111 * data and special "data locator" function if the data isn't in a contiguous
114 * Byteswap implications:
116 * Since the SA attributes are not entirely self describing we can't do
117 * the normal byteswap processing. The special ZAP layout attribute and
118 * attribute registration attributes define the byteswap function and the
119 * size of the attributes, unless it is variable sized.
120 * The normal ZFS byteswapping infrastructure assumes you don't need
121 * to read any objects in order to do the necessary byteswapping. Whereas
122 * SA attributes can only be properly byteswapped if the dataset is opened
123 * and the layout/attribute ZAP attributes are available. Because of this
124 * the SA attributes will be byteswapped when they are first accessed by
125 * the SA code that will read the SA data.
128 typedef void (sa_iterfunc_t)(void *hdr, void *addr, sa_attr_type_t,
129 uint16_t length, int length_idx, boolean_t, void *userp);
131 static int sa_build_index(sa_handle_t *hdl, sa_buf_type_t buftype);
132 static void sa_idx_tab_hold(objset_t *os, sa_idx_tab_t *idx_tab);
133 static void *sa_find_idx_tab(objset_t *os, dmu_object_type_t bonustype,
135 static void sa_idx_tab_rele(objset_t *os, void *arg);
136 static void sa_copy_data(sa_data_locator_t *func, void *start, void *target,
138 static int sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr,
139 sa_data_op_t action, sa_data_locator_t *locator, void *datastart,
140 uint16_t buflen, dmu_tx_t *tx);
142 arc_byteswap_func_t *sa_bswap_table[] = {
143 byteswap_uint64_array,
144 byteswap_uint32_array,
145 byteswap_uint16_array,
146 byteswap_uint8_array,
/*
 * Fast-path copy of attribute data.  When no data-locator function is
 * supplied, 8- and 16-byte payloads (the overwhelmingly common fixed
 * sizes) are copied with direct 64-bit stores; anything else falls back
 * to bcopy().  A locator function always goes through sa_copy_data().
 * NOTE(review): definition restored from a garbled extraction — the
 * opening lines of the macro were missing.
 */
#define	SA_COPY_DATA(f, s, t, l) \
	{ \
		if (f == NULL) { \
			if (l == 8) { \
				*(uint64_t *)t = *(uint64_t *)s; \
			} else if (l == 16) { \
				*(uint64_t *)t = *(uint64_t *)s; \
				*(uint64_t *)((uintptr_t)t + 8) = \
				    *(uint64_t *)((uintptr_t)s + 8); \
			} else { \
				bcopy(s, t, l); \
			} \
		} else { \
			sa_copy_data(f, s, t, l); \
		} \
	}
167 * This table is fixed and cannot be changed. Its purpose is to
168 * allow the SA code to work with both old/new ZPL file systems.
169 * It contains the list of legacy attributes. These attributes aren't
170 * stored in the "attribute" registry zap objects, since older ZPL file systems
171 * won't have the registry. Only objsets of type ZFS_TYPE_FILESYSTEM will
172 * use this static table.
174 sa_attr_reg_t sa_legacy_attrs[] = {
175 {"ZPL_ATIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 0},
176 {"ZPL_MTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 1},
177 {"ZPL_CTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 2},
178 {"ZPL_CRTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 3},
179 {"ZPL_GEN", sizeof (uint64_t), SA_UINT64_ARRAY, 4},
180 {"ZPL_MODE", sizeof (uint64_t), SA_UINT64_ARRAY, 5},
181 {"ZPL_SIZE", sizeof (uint64_t), SA_UINT64_ARRAY, 6},
182 {"ZPL_PARENT", sizeof (uint64_t), SA_UINT64_ARRAY, 7},
183 {"ZPL_LINKS", sizeof (uint64_t), SA_UINT64_ARRAY, 8},
184 {"ZPL_XATTR", sizeof (uint64_t), SA_UINT64_ARRAY, 9},
185 {"ZPL_RDEV", sizeof (uint64_t), SA_UINT64_ARRAY, 10},
186 {"ZPL_FLAGS", sizeof (uint64_t), SA_UINT64_ARRAY, 11},
187 {"ZPL_UID", sizeof (uint64_t), SA_UINT64_ARRAY, 12},
188 {"ZPL_GID", sizeof (uint64_t), SA_UINT64_ARRAY, 13},
189 {"ZPL_PAD", sizeof (uint64_t) * 4, SA_UINT64_ARRAY, 14},
190 {"ZPL_ZNODE_ACL", 88, SA_UINT8_ARRAY, 15},
194 * This is only used for objects of type DMU_OT_ZNODE
196 sa_attr_type_t sa_legacy_zpl_layout[] = {
197 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
201 * Special dummy layout used for buffers with no attributes.
203 sa_attr_type_t sa_dummy_zpl_layout[] = { 0 };
205 static int sa_legacy_attr_count = 16;
206 static kmem_cache_t *sa_cache = NULL;
210 sa_cache_constructor(void *buf, void *unused, int kmflag)
212 sa_handle_t *hdl = buf;
214 mutex_init(&hdl->sa_lock, NULL, MUTEX_DEFAULT, NULL);
220 sa_cache_destructor(void *buf, void *unused)
222 sa_handle_t *hdl = buf;
223 mutex_destroy(&hdl->sa_lock);
229 sa_cache = kmem_cache_create("sa_cache",
230 sizeof (sa_handle_t), 0, sa_cache_constructor,
231 sa_cache_destructor, NULL, NULL, NULL, 0);
238 kmem_cache_destroy(sa_cache);
242 layout_num_compare(const void *arg1, const void *arg2)
244 const sa_lot_t *node1 = arg1;
245 const sa_lot_t *node2 = arg2;
247 if (node1->lot_num > node2->lot_num)
249 else if (node1->lot_num < node2->lot_num)
255 layout_hash_compare(const void *arg1, const void *arg2)
257 const sa_lot_t *node1 = arg1;
258 const sa_lot_t *node2 = arg2;
260 if (node1->lot_hash > node2->lot_hash)
262 if (node1->lot_hash < node2->lot_hash)
264 if (node1->lot_instance > node2->lot_instance)
266 if (node1->lot_instance < node2->lot_instance)
272 sa_layout_equal(sa_lot_t *tbf, sa_attr_type_t *attrs, int count)
276 if (count != tbf->lot_attr_count)
279 for (i = 0; i != count; i++) {
280 if (attrs[i] != tbf->lot_attrs[i])
286 #define SA_ATTR_HASH(attr) (zfs_crc64_table[(-1ULL ^ attr) & 0xFF])
289 sa_layout_info_hash(sa_attr_type_t *attrs, int attr_count)
292 uint64_t crc = -1ULL;
294 for (i = 0; i != attr_count; i++)
295 crc ^= SA_ATTR_HASH(attrs[i]);
301 sa_get_spill(sa_handle_t *hdl)
304 if (hdl->sa_spill == NULL) {
305 if ((rc = dmu_spill_hold_existing(hdl->sa_bonus, NULL,
306 &hdl->sa_spill)) == 0)
307 VERIFY(0 == sa_build_index(hdl, SA_SPILL));
316 * Main attribute lookup/update function
317 * returns 0 for success or non zero for failures
319 * Operates on bulk array, first failure will abort further processing
322 sa_attr_op(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count,
323 sa_data_op_t data_op, dmu_tx_t *tx)
325 sa_os_t *sa = hdl->sa_os->os_sa;
328 sa_buf_type_t buftypes;
333 for (i = 0; i != count; i++) {
334 ASSERT(bulk[i].sa_attr <= hdl->sa_os->os_sa->sa_num_attrs);
336 bulk[i].sa_addr = NULL;
337 /* First check the bonus buffer */
339 if (hdl->sa_bonus_tab && TOC_ATTR_PRESENT(
340 hdl->sa_bonus_tab->sa_idx_tab[bulk[i].sa_attr])) {
341 SA_ATTR_INFO(sa, hdl->sa_bonus_tab,
342 SA_GET_HDR(hdl, SA_BONUS),
343 bulk[i].sa_attr, bulk[i], SA_BONUS, hdl);
344 if (tx && !(buftypes & SA_BONUS)) {
345 dmu_buf_will_dirty(hdl->sa_bonus, tx);
346 buftypes |= SA_BONUS;
349 if (bulk[i].sa_addr == NULL &&
350 ((error = sa_get_spill(hdl)) == 0)) {
351 if (TOC_ATTR_PRESENT(
352 hdl->sa_spill_tab->sa_idx_tab[bulk[i].sa_attr])) {
353 SA_ATTR_INFO(sa, hdl->sa_spill_tab,
354 SA_GET_HDR(hdl, SA_SPILL),
355 bulk[i].sa_attr, bulk[i], SA_SPILL, hdl);
356 if (tx && !(buftypes & SA_SPILL) &&
357 bulk[i].sa_size == bulk[i].sa_length) {
358 dmu_buf_will_dirty(hdl->sa_spill, tx);
359 buftypes |= SA_SPILL;
363 if (error && error != ENOENT) {
364 return ((error == ECKSUM) ? EIO : error);
369 if (bulk[i].sa_addr == NULL)
370 return (SET_ERROR(ENOENT));
371 if (bulk[i].sa_data) {
372 SA_COPY_DATA(bulk[i].sa_data_func,
373 bulk[i].sa_addr, bulk[i].sa_data,
379 /* existing rewrite of attr */
380 if (bulk[i].sa_addr &&
381 bulk[i].sa_size == bulk[i].sa_length) {
382 SA_COPY_DATA(bulk[i].sa_data_func,
383 bulk[i].sa_data, bulk[i].sa_addr,
386 } else if (bulk[i].sa_addr) { /* attr size change */
387 error = sa_modify_attrs(hdl, bulk[i].sa_attr,
388 SA_REPLACE, bulk[i].sa_data_func,
389 bulk[i].sa_data, bulk[i].sa_length, tx);
390 } else { /* adding new attribute */
391 error = sa_modify_attrs(hdl, bulk[i].sa_attr,
392 SA_ADD, bulk[i].sa_data_func,
393 bulk[i].sa_data, bulk[i].sa_length, tx);
404 sa_add_layout_entry(objset_t *os, sa_attr_type_t *attrs, int attr_count,
405 uint64_t lot_num, uint64_t hash, boolean_t zapadd, dmu_tx_t *tx)
407 sa_os_t *sa = os->os_sa;
408 sa_lot_t *tb, *findtb;
412 ASSERT(MUTEX_HELD(&sa->sa_lock));
413 tb = kmem_zalloc(sizeof (sa_lot_t), KM_SLEEP);
414 tb->lot_attr_count = attr_count;
415 tb->lot_attrs = kmem_alloc(sizeof (sa_attr_type_t) * attr_count,
417 bcopy(attrs, tb->lot_attrs, sizeof (sa_attr_type_t) * attr_count);
418 tb->lot_num = lot_num;
420 tb->lot_instance = 0;
425 if (sa->sa_layout_attr_obj == 0) {
426 sa->sa_layout_attr_obj = zap_create_link(os,
427 DMU_OT_SA_ATTR_LAYOUTS,
428 sa->sa_master_obj, SA_LAYOUTS, tx);
431 (void) snprintf(attr_name, sizeof (attr_name),
433 VERIFY(0 == zap_update(os, os->os_sa->sa_layout_attr_obj,
434 attr_name, 2, attr_count, attrs, tx));
437 list_create(&tb->lot_idx_tab, sizeof (sa_idx_tab_t),
438 offsetof(sa_idx_tab_t, sa_next));
440 for (i = 0; i != attr_count; i++) {
441 if (sa->sa_attr_table[tb->lot_attrs[i]].sa_length == 0)
445 avl_add(&sa->sa_layout_num_tree, tb);
447 /* verify we don't have a hash collision */
448 if ((findtb = avl_find(&sa->sa_layout_hash_tree, tb, &loc)) != NULL) {
449 for (; findtb && findtb->lot_hash == hash;
450 findtb = AVL_NEXT(&sa->sa_layout_hash_tree, findtb)) {
451 if (findtb->lot_instance != tb->lot_instance)
456 avl_add(&sa->sa_layout_hash_tree, tb);
461 sa_find_layout(objset_t *os, uint64_t hash, sa_attr_type_t *attrs,
462 int count, dmu_tx_t *tx, sa_lot_t **lot)
464 sa_lot_t *tb, tbsearch;
466 sa_os_t *sa = os->os_sa;
467 boolean_t found = B_FALSE;
469 mutex_enter(&sa->sa_lock);
470 tbsearch.lot_hash = hash;
471 tbsearch.lot_instance = 0;
472 tb = avl_find(&sa->sa_layout_hash_tree, &tbsearch, &loc);
474 for (; tb && tb->lot_hash == hash;
475 tb = AVL_NEXT(&sa->sa_layout_hash_tree, tb)) {
476 if (sa_layout_equal(tb, attrs, count) == 0) {
483 tb = sa_add_layout_entry(os, attrs, count,
484 avl_numnodes(&sa->sa_layout_num_tree), hash, B_TRUE, tx);
486 mutex_exit(&sa->sa_lock);
491 sa_resize_spill(sa_handle_t *hdl, uint32_t size, dmu_tx_t *tx)
497 blocksize = SPA_MINBLOCKSIZE;
498 } else if (size > SPA_OLD_MAXBLOCKSIZE) {
500 return (SET_ERROR(EFBIG));
502 blocksize = P2ROUNDUP_TYPED(size, SPA_MINBLOCKSIZE, uint32_t);
505 error = dbuf_spill_set_blksz(hdl->sa_spill, blocksize, tx);
511 sa_copy_data(sa_data_locator_t *func, void *datastart, void *target, int buflen)
514 bcopy(datastart, target, buflen);
519 void *saptr = target;
524 while (bytes < buflen) {
525 func(&dataptr, &length, buflen, start, datastart);
526 bcopy(dataptr, saptr, length);
527 saptr = (void *)((caddr_t)saptr + length);
535 * Determine several different sizes
536 * first the sa header size
537 * the number of bytes to be stored
538 * if spill would occur the index in the attribute array is returned
540 * the boolean will_spill will be set when spilling is necessary. It
541 * is only set when the buftype is SA_BONUS
544 sa_find_sizes(sa_os_t *sa, sa_bulk_attr_t *attr_desc, int attr_count,
545 dmu_buf_t *db, sa_buf_type_t buftype, int *index, int *total,
546 boolean_t *will_spill)
553 boolean_t done = B_FALSE;
555 if (buftype == SA_BONUS && sa->sa_force_spill) {
558 *will_spill = B_TRUE;
565 if (buftype == SA_BONUS)
566 *will_spill = B_FALSE;
568 hdrsize = (SA_BONUSTYPE_FROM_DB(db) == DMU_OT_ZNODE) ? 0 :
569 sizeof (sa_hdr_phys_t);
571 full_space = (buftype == SA_BONUS) ? DN_MAX_BONUSLEN : db->db_size;
572 ASSERT(IS_P2ALIGNED(full_space, 8));
574 for (i = 0; i != attr_count; i++) {
577 *total = P2ROUNDUP(*total, 8);
578 *total += attr_desc[i].sa_length;
582 is_var_sz = (SA_REGISTERED_LEN(sa, attr_desc[i].sa_attr) == 0);
587 if (is_var_sz && var_size > 1) {
588 if (P2ROUNDUP(hdrsize + sizeof (uint16_t), 8) +
589 *total < full_space) {
591 * Account for header space used by array of
592 * optional sizes of variable-length attributes.
593 * Record the index in case this increase needs
594 * to be reversed due to spill-over.
596 hdrsize += sizeof (uint16_t);
601 if (buftype == SA_BONUS)
602 *will_spill = B_TRUE;
608 * find index of where spill *could* occur.
609 * Then continue to count of remainder attribute
610 * space. The sum is used later for sizing bonus
613 if (buftype == SA_BONUS && *index == -1 &&
614 (*total + P2ROUNDUP(hdrsize, 8)) >
615 (full_space - sizeof (blkptr_t))) {
621 if ((*total + P2ROUNDUP(hdrsize, 8)) > full_space &&
623 *will_spill = B_TRUE;
627 * j holds the index of the last variable-sized attribute for
628 * which hdrsize was increased. Reverse the increase if that
629 * attribute will be relocated to the spill block.
631 if (*will_spill && j == *index)
632 hdrsize -= sizeof (uint16_t);
634 hdrsize = P2ROUNDUP(hdrsize, 8);
638 #define BUF_SPACE_NEEDED(total, header) (total + header)
641 * Find layout that corresponds to ordering of attributes
642 * If not found a new layout number is created and added to
643 * persistent layout tables.
646 sa_build_layouts(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc, int attr_count,
649 sa_os_t *sa = hdl->sa_os->os_sa;
651 sa_buf_type_t buftype;
652 sa_hdr_phys_t *sahdr;
655 sa_attr_type_t *attrs, *attrs_start;
658 int spillhdrsize = 0;
660 dmu_object_type_t bonustype;
666 dmu_buf_will_dirty(hdl->sa_bonus, tx);
667 bonustype = SA_BONUSTYPE_FROM_DB(hdl->sa_bonus);
669 /* first determine bonus header size and sum of all attributes */
670 hdrsize = sa_find_sizes(sa, attr_desc, attr_count, hdl->sa_bonus,
671 SA_BONUS, &i, &used, &spilling);
673 if (used > SPA_OLD_MAXBLOCKSIZE)
674 return (SET_ERROR(EFBIG));
676 VERIFY(0 == dmu_set_bonus(hdl->sa_bonus, spilling ?
677 MIN(DN_MAX_BONUSLEN - sizeof (blkptr_t), used + hdrsize) :
678 used + hdrsize, tx));
680 ASSERT((bonustype == DMU_OT_ZNODE && spilling == 0) ||
681 bonustype == DMU_OT_SA);
683 /* setup and size spill buffer when needed */
687 if (hdl->sa_spill == NULL) {
688 VERIFY(dmu_spill_hold_by_bonus(hdl->sa_bonus, NULL,
689 &hdl->sa_spill) == 0);
691 dmu_buf_will_dirty(hdl->sa_spill, tx);
693 spillhdrsize = sa_find_sizes(sa, &attr_desc[i],
694 attr_count - i, hdl->sa_spill, SA_SPILL, &i,
695 &spill_used, &dummy);
697 if (spill_used > SPA_OLD_MAXBLOCKSIZE)
698 return (SET_ERROR(EFBIG));
700 buf_space = hdl->sa_spill->db_size - spillhdrsize;
701 if (BUF_SPACE_NEEDED(spill_used, spillhdrsize) >
702 hdl->sa_spill->db_size)
703 VERIFY(0 == sa_resize_spill(hdl,
704 BUF_SPACE_NEEDED(spill_used, spillhdrsize), tx));
707 /* setup starting pointers to lay down data */
708 data_start = (void *)((uintptr_t)hdl->sa_bonus->db_data + hdrsize);
709 sahdr = (sa_hdr_phys_t *)hdl->sa_bonus->db_data;
713 buf_space = (sa->sa_force_spill) ?
714 0 : SA_BLKPTR_SPACE - hdrsize;
716 buf_space = hdl->sa_bonus->db_size - hdrsize;
718 attrs_start = attrs = kmem_alloc(sizeof (sa_attr_type_t) * attr_count,
722 for (i = 0, len_idx = 0, hash = -1ULL; i != attr_count; i++) {
725 ASSERT(IS_P2ALIGNED(data_start, 8));
726 ASSERT(IS_P2ALIGNED(buf_space, 8));
727 attrs[i] = attr_desc[i].sa_attr;
728 length = SA_REGISTERED_LEN(sa, attrs[i]);
730 length = attr_desc[i].sa_length;
732 VERIFY(length == attr_desc[i].sa_length);
734 if (buf_space < length) { /* switch to spill buffer */
736 VERIFY(bonustype == DMU_OT_SA);
737 if (buftype == SA_BONUS && !sa->sa_force_spill) {
738 sa_find_layout(hdl->sa_os, hash, attrs_start,
739 lot_count, tx, &lot);
740 SA_SET_HDR(sahdr, lot->lot_num, hdrsize);
747 sahdr = (sa_hdr_phys_t *)hdl->sa_spill->db_data;
748 sahdr->sa_magic = SA_MAGIC;
749 data_start = (void *)((uintptr_t)sahdr +
751 attrs_start = &attrs[i];
752 buf_space = hdl->sa_spill->db_size - spillhdrsize;
755 hash ^= SA_ATTR_HASH(attrs[i]);
756 attr_desc[i].sa_addr = data_start;
757 attr_desc[i].sa_size = length;
758 SA_COPY_DATA(attr_desc[i].sa_data_func, attr_desc[i].sa_data,
760 if (sa->sa_attr_table[attrs[i]].sa_length == 0) {
761 sahdr->sa_lengths[len_idx++] = length;
763 VERIFY((uintptr_t)data_start % 8 == 0);
764 data_start = (void *)P2ROUNDUP(((uintptr_t)data_start +
766 buf_space -= P2ROUNDUP(length, 8);
770 sa_find_layout(hdl->sa_os, hash, attrs_start, lot_count, tx, &lot);
773 * Verify that old znodes always have layout number 0.
774 * Must be DMU_OT_SA for arbitrary layouts
776 VERIFY((bonustype == DMU_OT_ZNODE && lot->lot_num == 0) ||
777 (bonustype == DMU_OT_SA && lot->lot_num > 1));
779 if (bonustype == DMU_OT_SA) {
780 SA_SET_HDR(sahdr, lot->lot_num,
781 buftype == SA_BONUS ? hdrsize : spillhdrsize);
784 kmem_free(attrs, sizeof (sa_attr_type_t) * attr_count);
785 if (hdl->sa_bonus_tab) {
786 sa_idx_tab_rele(hdl->sa_os, hdl->sa_bonus_tab);
787 hdl->sa_bonus_tab = NULL;
789 if (!sa->sa_force_spill)
790 VERIFY(0 == sa_build_index(hdl, SA_BONUS));
792 sa_idx_tab_rele(hdl->sa_os, hdl->sa_spill_tab);
795 * remove spill block that is no longer needed.
797 dmu_buf_rele(hdl->sa_spill, NULL);
798 hdl->sa_spill = NULL;
799 hdl->sa_spill_tab = NULL;
800 VERIFY(0 == dmu_rm_spill(hdl->sa_os,
801 sa_handle_object(hdl), tx));
803 VERIFY(0 == sa_build_index(hdl, SA_SPILL));
811 sa_free_attr_table(sa_os_t *sa)
815 if (sa->sa_attr_table == NULL)
818 for (i = 0; i != sa->sa_num_attrs; i++) {
819 if (sa->sa_attr_table[i].sa_name)
820 kmem_free(sa->sa_attr_table[i].sa_name,
821 strlen(sa->sa_attr_table[i].sa_name) + 1);
824 kmem_free(sa->sa_attr_table,
825 sizeof (sa_attr_table_t) * sa->sa_num_attrs);
827 sa->sa_attr_table = NULL;
831 sa_attr_table_setup(objset_t *os, sa_attr_reg_t *reg_attrs, int count)
833 sa_os_t *sa = os->os_sa;
834 uint64_t sa_attr_count = 0;
835 uint64_t sa_reg_count = 0;
841 int registered_count = 0;
843 dmu_objset_type_t ostype = dmu_objset_type(os);
846 kmem_zalloc(count * sizeof (sa_attr_type_t), KM_SLEEP);
847 sa->sa_user_table_sz = count * sizeof (sa_attr_type_t);
849 if (sa->sa_reg_attr_obj != 0) {
850 error = zap_count(os, sa->sa_reg_attr_obj,
854 * Make sure we retrieved a count and that it isn't zero
856 if (error || (error == 0 && sa_attr_count == 0)) {
858 error = SET_ERROR(EINVAL);
861 sa_reg_count = sa_attr_count;
864 if (ostype == DMU_OST_ZFS && sa_attr_count == 0)
865 sa_attr_count += sa_legacy_attr_count;
867 /* Allocate attribute numbers for attributes that aren't registered */
868 for (i = 0; i != count; i++) {
869 boolean_t found = B_FALSE;
872 if (ostype == DMU_OST_ZFS) {
873 for (j = 0; j != sa_legacy_attr_count; j++) {
874 if (strcmp(reg_attrs[i].sa_name,
875 sa_legacy_attrs[j].sa_name) == 0) {
876 sa->sa_user_table[i] =
877 sa_legacy_attrs[j].sa_attr;
885 if (sa->sa_reg_attr_obj)
886 error = zap_lookup(os, sa->sa_reg_attr_obj,
887 reg_attrs[i].sa_name, 8, 1, &attr_value);
889 error = SET_ERROR(ENOENT);
892 sa->sa_user_table[i] = (sa_attr_type_t)sa_attr_count;
896 sa->sa_user_table[i] = ATTR_NUM(attr_value);
903 sa->sa_num_attrs = sa_attr_count;
904 tb = sa->sa_attr_table =
905 kmem_zalloc(sizeof (sa_attr_table_t) * sa_attr_count, KM_SLEEP);
908 * Attribute table is constructed from requested attribute list,
909 * previously foreign registered attributes, and also the legacy
910 * ZPL set of attributes.
913 if (sa->sa_reg_attr_obj) {
914 for (zap_cursor_init(&zc, os, sa->sa_reg_attr_obj);
915 (error = zap_cursor_retrieve(&zc, &za)) == 0;
916 zap_cursor_advance(&zc)) {
918 value = za.za_first_integer;
921 tb[ATTR_NUM(value)].sa_attr = ATTR_NUM(value);
922 tb[ATTR_NUM(value)].sa_length = ATTR_LENGTH(value);
923 tb[ATTR_NUM(value)].sa_byteswap = ATTR_BSWAP(value);
924 tb[ATTR_NUM(value)].sa_registered = B_TRUE;
926 if (tb[ATTR_NUM(value)].sa_name) {
929 tb[ATTR_NUM(value)].sa_name =
930 kmem_zalloc(strlen(za.za_name) +1, KM_SLEEP);
931 (void) strlcpy(tb[ATTR_NUM(value)].sa_name, za.za_name,
932 strlen(za.za_name) +1);
934 zap_cursor_fini(&zc);
936 * Make sure we processed the correct number of registered
939 if (registered_count != sa_reg_count) {
946 if (ostype == DMU_OST_ZFS) {
947 for (i = 0; i != sa_legacy_attr_count; i++) {
950 tb[i].sa_attr = sa_legacy_attrs[i].sa_attr;
951 tb[i].sa_length = sa_legacy_attrs[i].sa_length;
952 tb[i].sa_byteswap = sa_legacy_attrs[i].sa_byteswap;
953 tb[i].sa_registered = B_FALSE;
955 kmem_zalloc(strlen(sa_legacy_attrs[i].sa_name) +1,
957 (void) strlcpy(tb[i].sa_name,
958 sa_legacy_attrs[i].sa_name,
959 strlen(sa_legacy_attrs[i].sa_name) + 1);
963 for (i = 0; i != count; i++) {
964 sa_attr_type_t attr_id;
966 attr_id = sa->sa_user_table[i];
967 if (tb[attr_id].sa_name)
970 tb[attr_id].sa_length = reg_attrs[i].sa_length;
971 tb[attr_id].sa_byteswap = reg_attrs[i].sa_byteswap;
972 tb[attr_id].sa_attr = attr_id;
973 tb[attr_id].sa_name =
974 kmem_zalloc(strlen(reg_attrs[i].sa_name) + 1, KM_SLEEP);
975 (void) strlcpy(tb[attr_id].sa_name, reg_attrs[i].sa_name,
976 strlen(reg_attrs[i].sa_name) + 1);
979 sa->sa_need_attr_registration =
980 (sa_attr_count != registered_count);
984 kmem_free(sa->sa_user_table, count * sizeof (sa_attr_type_t));
985 sa->sa_user_table = NULL;
986 sa_free_attr_table(sa);
987 return ((error != 0) ? error : EINVAL);
991 sa_setup(objset_t *os, uint64_t sa_obj, sa_attr_reg_t *reg_attrs, int count,
992 sa_attr_type_t **user_table)
997 dmu_objset_type_t ostype = dmu_objset_type(os);
1001 mutex_enter(&os->os_user_ptr_lock);
1003 mutex_enter(&os->os_sa->sa_lock);
1004 mutex_exit(&os->os_user_ptr_lock);
1005 tb = os->os_sa->sa_user_table;
1006 mutex_exit(&os->os_sa->sa_lock);
1011 sa = kmem_zalloc(sizeof (sa_os_t), KM_SLEEP);
1012 mutex_init(&sa->sa_lock, NULL, MUTEX_DEFAULT, NULL);
1013 sa->sa_master_obj = sa_obj;
1016 mutex_enter(&sa->sa_lock);
1017 mutex_exit(&os->os_user_ptr_lock);
1018 avl_create(&sa->sa_layout_num_tree, layout_num_compare,
1019 sizeof (sa_lot_t), offsetof(sa_lot_t, lot_num_node));
1020 avl_create(&sa->sa_layout_hash_tree, layout_hash_compare,
1021 sizeof (sa_lot_t), offsetof(sa_lot_t, lot_hash_node));
1024 error = zap_lookup(os, sa_obj, SA_LAYOUTS,
1025 8, 1, &sa->sa_layout_attr_obj);
1026 if (error != 0 && error != ENOENT)
1028 error = zap_lookup(os, sa_obj, SA_REGISTRY,
1029 8, 1, &sa->sa_reg_attr_obj);
1030 if (error != 0 && error != ENOENT)
1034 if ((error = sa_attr_table_setup(os, reg_attrs, count)) != 0)
1037 if (sa->sa_layout_attr_obj != 0) {
1038 uint64_t layout_count;
1040 error = zap_count(os, sa->sa_layout_attr_obj,
1044 * Layout number count should be > 0
1046 if (error || (error == 0 && layout_count == 0)) {
1048 error = SET_ERROR(EINVAL);
1052 for (zap_cursor_init(&zc, os, sa->sa_layout_attr_obj);
1053 (error = zap_cursor_retrieve(&zc, &za)) == 0;
1054 zap_cursor_advance(&zc)) {
1055 sa_attr_type_t *lot_attrs;
1058 lot_attrs = kmem_zalloc(sizeof (sa_attr_type_t) *
1059 za.za_num_integers, KM_SLEEP);
1061 if ((error = (zap_lookup(os, sa->sa_layout_attr_obj,
1062 za.za_name, 2, za.za_num_integers,
1063 lot_attrs))) != 0) {
1064 kmem_free(lot_attrs, sizeof (sa_attr_type_t) *
1065 za.za_num_integers);
1068 VERIFY(ddi_strtoull(za.za_name, NULL, 10,
1069 (unsigned long long *)&lot_num) == 0);
1071 (void) sa_add_layout_entry(os, lot_attrs,
1072 za.za_num_integers, lot_num,
1073 sa_layout_info_hash(lot_attrs,
1074 za.za_num_integers), B_FALSE, NULL);
1075 kmem_free(lot_attrs, sizeof (sa_attr_type_t) *
1076 za.za_num_integers);
1078 zap_cursor_fini(&zc);
1081 * Make sure layout count matches number of entries added
1084 if (avl_numnodes(&sa->sa_layout_num_tree) != layout_count) {
1090 /* Add special layout number for old ZNODES */
1091 if (ostype == DMU_OST_ZFS) {
1092 (void) sa_add_layout_entry(os, sa_legacy_zpl_layout,
1093 sa_legacy_attr_count, 0,
1094 sa_layout_info_hash(sa_legacy_zpl_layout,
1095 sa_legacy_attr_count), B_FALSE, NULL);
1097 (void) sa_add_layout_entry(os, sa_dummy_zpl_layout, 0, 1,
1100 *user_table = os->os_sa->sa_user_table;
1101 mutex_exit(&sa->sa_lock);
1105 sa_free_attr_table(sa);
1106 if (sa->sa_user_table)
1107 kmem_free(sa->sa_user_table, sa->sa_user_table_sz);
1108 mutex_exit(&sa->sa_lock);
1109 avl_destroy(&sa->sa_layout_hash_tree);
1110 avl_destroy(&sa->sa_layout_num_tree);
1111 mutex_destroy(&sa->sa_lock);
1112 kmem_free(sa, sizeof (sa_os_t));
1113 return ((error == ECKSUM) ? EIO : error);
1117 sa_tear_down(objset_t *os)
1119 sa_os_t *sa = os->os_sa;
1123 kmem_free(sa->sa_user_table, sa->sa_user_table_sz);
1125 /* Free up attr table */
1127 sa_free_attr_table(sa);
1130 while (layout = avl_destroy_nodes(&sa->sa_layout_hash_tree, &cookie)) {
1132 while (tab = list_head(&layout->lot_idx_tab)) {
1133 ASSERT(refcount_count(&tab->sa_refcount));
1134 sa_idx_tab_rele(os, tab);
1139 while (layout = avl_destroy_nodes(&sa->sa_layout_num_tree, &cookie)) {
1140 kmem_free(layout->lot_attrs,
1141 sizeof (sa_attr_type_t) * layout->lot_attr_count);
1142 kmem_free(layout, sizeof (sa_lot_t));
1145 avl_destroy(&sa->sa_layout_hash_tree);
1146 avl_destroy(&sa->sa_layout_num_tree);
1147 mutex_destroy(&sa->sa_lock);
1149 kmem_free(sa, sizeof (sa_os_t));
1154 sa_build_idx_tab(void *hdr, void *attr_addr, sa_attr_type_t attr,
1155 uint16_t length, int length_idx, boolean_t var_length, void *userp)
1157 sa_idx_tab_t *idx_tab = userp;
1160 ASSERT(idx_tab->sa_variable_lengths);
1161 idx_tab->sa_variable_lengths[length_idx] = length;
1163 TOC_ATTR_ENCODE(idx_tab->sa_idx_tab[attr], length_idx,
1164 (uint32_t)((uintptr_t)attr_addr - (uintptr_t)hdr));
1168 sa_attr_iter(objset_t *os, sa_hdr_phys_t *hdr, dmu_object_type_t type,
1169 sa_iterfunc_t func, sa_lot_t *tab, void *userp)
1175 sa_os_t *sa = os->os_sa;
1177 uint16_t *length_start = NULL;
1178 uint8_t length_idx = 0;
1181 search.lot_num = SA_LAYOUT_NUM(hdr, type);
1182 tb = avl_find(&sa->sa_layout_num_tree, &search, &loc);
1186 if (IS_SA_BONUSTYPE(type)) {
1187 data_start = (void *)P2ROUNDUP(((uintptr_t)hdr +
1188 offsetof(sa_hdr_phys_t, sa_lengths) +
1189 (sizeof (uint16_t) * tb->lot_var_sizes)), 8);
1190 length_start = hdr->sa_lengths;
1195 for (i = 0; i != tb->lot_attr_count; i++) {
1196 int attr_length, reg_length;
1199 reg_length = sa->sa_attr_table[tb->lot_attrs[i]].sa_length;
1201 attr_length = reg_length;
1204 attr_length = length_start[length_idx];
1205 idx_len = length_idx++;
1208 func(hdr, data_start, tb->lot_attrs[i], attr_length,
1209 idx_len, reg_length == 0 ? B_TRUE : B_FALSE, userp);
1211 data_start = (void *)P2ROUNDUP(((uintptr_t)data_start +
1218 sa_byteswap_cb(void *hdr, void *attr_addr, sa_attr_type_t attr,
1219 uint16_t length, int length_idx, boolean_t variable_length, void *userp)
1221 sa_handle_t *hdl = userp;
1222 sa_os_t *sa = hdl->sa_os->os_sa;
1224 sa_bswap_table[sa->sa_attr_table[attr].sa_byteswap](attr_addr, length);
1228 sa_byteswap(sa_handle_t *hdl, sa_buf_type_t buftype)
1230 sa_hdr_phys_t *sa_hdr_phys = SA_GET_HDR(hdl, buftype);
1232 sa_os_t *sa = hdl->sa_os->os_sa;
1233 int num_lengths = 1;
1236 ASSERT(MUTEX_HELD(&sa->sa_lock));
1237 if (sa_hdr_phys->sa_magic == SA_MAGIC)
1240 db = SA_GET_DB(hdl, buftype);
1242 if (buftype == SA_SPILL) {
1243 arc_release(db->db_buf, NULL);
1244 arc_buf_thaw(db->db_buf);
1247 sa_hdr_phys->sa_magic = BSWAP_32(sa_hdr_phys->sa_magic);
1248 sa_hdr_phys->sa_layout_info = BSWAP_16(sa_hdr_phys->sa_layout_info);
1251 * Determine number of variable lenghts in header
1252 * The standard 8 byte header has one for free and a
1253 * 16 byte header would have 4 + 1;
1255 if (SA_HDR_SIZE(sa_hdr_phys) > 8)
1256 num_lengths += (SA_HDR_SIZE(sa_hdr_phys) - 8) >> 1;
1257 for (i = 0; i != num_lengths; i++)
1258 sa_hdr_phys->sa_lengths[i] =
1259 BSWAP_16(sa_hdr_phys->sa_lengths[i]);
1261 sa_attr_iter(hdl->sa_os, sa_hdr_phys, DMU_OT_SA,
1262 sa_byteswap_cb, NULL, hdl);
1264 if (buftype == SA_SPILL)
1265 arc_buf_freeze(((dmu_buf_impl_t *)hdl->sa_spill)->db_buf);
/*
 * Build (or find a shared) attribute index table for the handle's bonus
 * or spill buffer, byteswapping the buffer first when the stored magic
 * indicates foreign endianness.  The resulting idx_tab is cached on the
 * handle (sa_bonus_tab or sa_spill_tab).
 */
1269 sa_build_index(sa_handle_t *hdl, sa_buf_type_t buftype)
1271 sa_hdr_phys_t *sa_hdr_phys;
1272 dmu_buf_impl_t *db = SA_GET_DB(hdl, buftype);
1273 dmu_object_type_t bonustype = SA_BONUSTYPE_FROM_DB(db);
1274 sa_os_t *sa = hdl->sa_os->os_sa;
1275 sa_idx_tab_t *idx_tab;
1277 sa_hdr_phys = SA_GET_HDR(hdl, buftype);
1279 mutex_enter(&sa->sa_lock);
1281 /* Do we need to byteswap? */
1283 /* only check if not old znode */
/* magic == 0 means an empty (freshly created) SA buffer; skip swap. */
1284 if (IS_SA_BONUSTYPE(bonustype) && sa_hdr_phys->sa_magic != SA_MAGIC &&
1285 sa_hdr_phys->sa_magic != 0) {
1286 VERIFY(BSWAP_32(sa_hdr_phys->sa_magic) == SA_MAGIC);
1287 sa_byteswap(hdl, buftype);
1290 idx_tab = sa_find_idx_tab(hdl->sa_os, bonustype, sa_hdr_phys);
1292 if (buftype == SA_BONUS)
1293 hdl->sa_bonus_tab = idx_tab;
1295 hdl->sa_spill_tab = idx_tab;
1297 mutex_exit(&sa->sa_lock);
1305 panic("evicting sa dbuf\n");
/*
 * Drop a reference on an attribute index table; on the last release,
 * unlink it from its layout's list and free the table, its variable-
 * length array (if any), and the idx_tab itself.  NULL arg is a no-op.
 */
1309 sa_idx_tab_rele(objset_t *os, void *arg)
1311 sa_os_t *sa = os->os_sa;
1312 sa_idx_tab_t *idx_tab = arg;
1314 if (idx_tab == NULL)
1317 mutex_enter(&sa->sa_lock);
1318 if (refcount_remove(&idx_tab->sa_refcount, NULL) == 0) {
1319 list_remove(&idx_tab->sa_layout->lot_idx_tab, idx_tab);
1320 if (idx_tab->sa_variable_lengths)
1321 kmem_free(idx_tab->sa_variable_lengths,
1323 idx_tab->sa_layout->lot_var_sizes);
1324 refcount_destroy(&idx_tab->sa_refcount);
1325 kmem_free(idx_tab->sa_idx_tab,
1326 sizeof (uint32_t) * sa->sa_num_attrs);
1327 kmem_free(idx_tab, sizeof (sa_idx_tab_t));
1329 mutex_exit(&sa->sa_lock);
1333 sa_idx_tab_hold(objset_t *os, sa_idx_tab_t *idx_tab)
1335 sa_os_t *sa = os->os_sa;
1337 ASSERT(MUTEX_HELD(&sa->sa_lock));
1338 (void) refcount_add(&idx_tab->sa_refcount, NULL);
/*
 * Tear down an SA handle: detach it from its bonus dbuf, release the
 * cached index tables and the bonus/spill buffer holds, then return
 * the handle to its kmem cache.
 */
1342 sa_handle_destroy(sa_handle_t *hdl)
1344 dmu_buf_t *db = hdl->sa_bonus;
1346 mutex_enter(&hdl->sa_lock);
/* Unhook the eviction user record before dropping the buffers. */
1347 (void) dmu_buf_remove_user(db, &hdl->sa_dbu);
1349 if (hdl->sa_bonus_tab)
1350 sa_idx_tab_rele(hdl->sa_os, hdl->sa_bonus_tab);
1352 if (hdl->sa_spill_tab)
1353 sa_idx_tab_rele(hdl->sa_os, hdl->sa_spill_tab);
1355 dmu_buf_rele(hdl->sa_bonus, NULL);
1358 dmu_buf_rele((dmu_buf_t *)hdl->sa_spill, NULL);
1359 mutex_exit(&hdl->sa_lock);
1361 kmem_cache_free(sa_cache, hdl);
/*
 * Return an SA handle for an already-held bonus buffer.  For SHARED
 * handles, reuse the handle cached on the dbuf user record if present;
 * otherwise allocate and initialize a new one, build its bonus index,
 * and (for SHARED) try to install it on the dbuf — losing the install
 * race means freeing our candidate and using the winner's handle.
 * Ownership of db passes to the handle.
 */
1365 sa_handle_get_from_db(objset_t *os, dmu_buf_t *db, void *userp,
1366 sa_handle_type_t hdl_type, sa_handle_t **handlepp)
1369 dmu_object_info_t doi;
1370 sa_handle_t *handle = NULL;
1373 dmu_object_info_from_db(db, &doi);
1374 ASSERT(doi.doi_bonus_type == DMU_OT_SA ||
1375 doi.doi_bonus_type == DMU_OT_ZNODE);
1377 /* find handle, if it exists */
1378 /* if one doesn't exist then create a new one, and initialize it */
1380 if (hdl_type == SA_HDL_SHARED)
1381 handle = dmu_buf_get_user(db);
1383 if (handle == NULL) {
1384 sa_handle_t *winner = NULL;
1386 handle = kmem_cache_alloc(sa_cache, KM_SLEEP);
1387 handle->sa_dbu.dbu_evict_func = NULL;
1388 handle->sa_userp = userp;
1389 handle->sa_bonus = db;
1391 handle->sa_spill = NULL;
1392 handle->sa_bonus_tab = NULL;
1393 handle->sa_spill_tab = NULL;
1395 error = sa_build_index(handle, SA_BONUS);
1397 if (hdl_type == SA_HDL_SHARED) {
1398 dmu_buf_init_user(&handle->sa_dbu, sa_evict, NULL);
1399 winner = dmu_buf_set_user_ie(db, &handle->sa_dbu);
/* Lost the install race: another thread's handle is now on the dbuf. */
1402 if (winner != NULL) {
1403 kmem_cache_free(sa_cache, handle);
1413 sa_handle_get(objset_t *objset, uint64_t objid, void *userp,
1414 sa_handle_type_t hdl_type, sa_handle_t **handlepp)
1419 if (error = dmu_bonus_hold(objset, objid, NULL, &db))
1422 return (sa_handle_get_from_db(objset, db, userp, hdl_type,
1427 sa_buf_hold(objset_t *objset, uint64_t obj_num, void *tag, dmu_buf_t **db)
1429 return (dmu_bonus_hold(objset, obj_num, tag, db));
1433 sa_buf_rele(dmu_buf_t *db, void *tag)
1435 dmu_buf_rele(db, tag);
1439 sa_lookup_impl(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count)
1442 ASSERT(MUTEX_HELD(&hdl->sa_lock));
1443 return (sa_attr_op(hdl, bulk, count, SA_LOOKUP, NULL));
1447 sa_lookup(sa_handle_t *hdl, sa_attr_type_t attr, void *buf, uint32_t buflen)
1450 sa_bulk_attr_t bulk;
1452 bulk.sa_attr = attr;
1454 bulk.sa_length = buflen;
1455 bulk.sa_data_func = NULL;
1458 mutex_enter(&hdl->sa_lock);
1459 error = sa_lookup_impl(hdl, &bulk, 1);
1460 mutex_exit(&hdl->sa_lock);
/*
 * Look up an attribute and copy its value out through a uio (kernel
 * consumer path).  Copies at most MIN(attribute size, uio residual)
 * bytes from the attribute's in-buffer address.
 */
1466 sa_lookup_uio(sa_handle_t *hdl, sa_attr_type_t attr, uio_t *uio)
1469 sa_bulk_attr_t bulk;
/* sa_data stays NULL: we read via bulk.sa_addr after the lookup. */
1471 bulk.sa_data = NULL;
1472 bulk.sa_attr = attr;
1473 bulk.sa_data_func = NULL;
1477 mutex_enter(&hdl->sa_lock);
1478 if ((error = sa_attr_op(hdl, &bulk, 1, SA_LOOKUP, NULL)) == 0) {
1479 error = uiomove((void *)bulk.sa_addr, MIN(bulk.sa_size,
1480 uio->uio_resid), UIO_READ, uio);
1482 mutex_exit(&hdl->sa_lock);
/*
 * Find or create the attribute index table (TOC) matching the given
 * buffer header.  Existing tables on the layout are reused when their
 * variable-length signature matches; otherwise a new table is built
 * via sa_attr_iter()/sa_build_idx_tab and linked onto the layout.
 * Returns the table with a hold for the caller.
 */
1489 sa_find_idx_tab(objset_t *os, dmu_object_type_t bonustype, void *data)
1491 sa_idx_tab_t *idx_tab;
1492 sa_hdr_phys_t *hdr = (sa_hdr_phys_t *)data;
1493 sa_os_t *sa = os->os_sa;
1494 sa_lot_t *tb, search;
1498 * Determine layout number. If SA node and header == 0 then
1499 * force the index table to the dummy "1" empty layout.
1501 * The layout number would only be zero for a newly created file
1502 * that has not added any attributes yet, or with crypto enabled which
1503 * doesn't write any attributes to the bonus buffer.
1506 search.lot_num = SA_LAYOUT_NUM(hdr, bonustype);
1508 tb = avl_find(&sa->sa_layout_num_tree, &search, &loc);
1510 /* Verify header size is consistent with layout information */
1512 ASSERT(IS_SA_BONUSTYPE(bonustype) &&
1513 SA_HDR_SIZE_MATCH_LAYOUT(hdr, tb) || !IS_SA_BONUSTYPE(bonustype) ||
1514 (IS_SA_BONUSTYPE(bonustype) && hdr->sa_layout_info == 0));
1517 * See if any of the already existing TOC entries can be reused?
/* A table is reusable only if every variable length matches the header. */
1520 for (idx_tab = list_head(&tb->lot_idx_tab); idx_tab;
1521 idx_tab = list_next(&tb->lot_idx_tab, idx_tab)) {
1522 boolean_t valid_idx = B_TRUE;
1525 if (tb->lot_var_sizes != 0 &&
1526 idx_tab->sa_variable_lengths != NULL) {
1527 for (i = 0; i != tb->lot_var_sizes; i++) {
1528 if (hdr->sa_lengths[i] !=
1529 idx_tab->sa_variable_lengths[i]) {
1530 valid_idx = B_FALSE;
1536 sa_idx_tab_hold(os, idx_tab);
1541 /* No such luck, create a new entry */
1542 idx_tab = kmem_zalloc(sizeof (sa_idx_tab_t), KM_SLEEP);
1543 idx_tab->sa_idx_tab =
1544 kmem_zalloc(sizeof (uint32_t) * sa->sa_num_attrs, KM_SLEEP);
1545 idx_tab->sa_layout = tb;
1546 refcount_create(&idx_tab->sa_refcount);
1547 if (tb->lot_var_sizes)
1548 idx_tab->sa_variable_lengths = kmem_alloc(sizeof (uint16_t) *
1549 tb->lot_var_sizes, KM_SLEEP);
1551 sa_attr_iter(os, hdr, bonustype, sa_build_idx_tab,
1553 sa_idx_tab_hold(os, idx_tab); /* one hold for consumer */
1554 sa_idx_tab_hold(os, idx_tab); /* one for layout */
1555 list_insert_tail(&tb->lot_idx_tab, idx_tab);
/*
 * Default sa_data_locator_t: the attribute's data is a single
 * contiguous region, so simply hand back the caller-supplied pointer.
 */
1560 sa_default_locator(void **dataptr, uint32_t *len, uint32_t total_len,
1561 boolean_t start, void *userdata)
1565 *dataptr = userdata;
/*
 * Persist any not-yet-registered attributes into the on-disk SA
 * registry ZAP (creating the registry object on first use).  Each
 * entry encodes attr number, length, and byteswap index under the
 * attribute's name.  No-op if nothing is pending or the master SA
 * object does not exist yet.
 */
1570 sa_attr_register_sync(sa_handle_t *hdl, dmu_tx_t *tx)
1572 uint64_t attr_value = 0;
1573 sa_os_t *sa = hdl->sa_os->os_sa;
1574 sa_attr_table_t *tb = sa->sa_attr_table;
1577 mutex_enter(&sa->sa_lock);
1579 if (!sa->sa_need_attr_registration || sa->sa_master_obj == 0) {
1580 mutex_exit(&sa->sa_lock);
/* Lazily create the registry ZAP linked under the master object. */
1584 if (sa->sa_reg_attr_obj == 0) {
1585 sa->sa_reg_attr_obj = zap_create_link(hdl->sa_os,
1586 DMU_OT_SA_ATTR_REGISTRATION,
1587 sa->sa_master_obj, SA_REGISTRY, tx);
1589 for (i = 0; i != sa->sa_num_attrs; i++) {
1590 if (sa->sa_attr_table[i].sa_registered)
1592 ATTR_ENCODE(attr_value, tb[i].sa_attr, tb[i].sa_length,
1594 VERIFY(0 == zap_update(hdl->sa_os, sa->sa_reg_attr_obj,
1595 tb[i].sa_name, 8, 1, &attr_value, tx));
1596 tb[i].sa_registered = B_TRUE;
1598 sa->sa_need_attr_registration = B_FALSE;
1599 mutex_exit(&sa->sa_lock);
1603 * Replace all attributes with attributes specified in template.
1604 * If dnode had a spill buffer then those attributes will
1605 * also be replaced, possibly with just an empty spill block
1607 * This interface is intended to only be used for bulk adding of
1608 * attributes for a new file. It will also be used by the ZPL
1609 * when converting an old formatted znode to native SA support.
1612 sa_replace_all_by_template_locked(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc,
1613 int attr_count, dmu_tx_t *tx)
1615 sa_os_t *sa = hdl->sa_os->os_sa;
1617 if (sa->sa_need_attr_registration)
1618 sa_attr_register_sync(hdl, tx);
1619 return (sa_build_layouts(hdl, attr_desc, attr_count, tx));
1623 sa_replace_all_by_template(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc,
1624 int attr_count, dmu_tx_t *tx)
1628 mutex_enter(&hdl->sa_lock);
1629 error = sa_replace_all_by_template_locked(hdl, attr_desc,
1631 mutex_exit(&hdl->sa_lock);
1636 * Add/remove a single attribute or replace a variable-sized attribute value
1637 * with a value of a different size, and then rewrite the entire set of attributes.
1639 * Same-length attribute value replacement (including fixed-length attributes)
1640 * is handled more efficiently by the upper layers.
/*
 * Add, remove, or (variable-size) replace a single attribute by
 * snapshotting the current bonus (and spill, if present) contents,
 * building a bulk descriptor covering every surviving attribute with
 * pointers into those snapshots, and rewriting the whole layout via
 * sa_build_layouts().  Same-size replacement is handled more cheaply
 * by the callers.  Caller holds hdl->sa_lock.
 */
1643 sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr,
1644 sa_data_op_t action, sa_data_locator_t *locator, void *datastart,
1645 uint16_t buflen, dmu_tx_t *tx)
1647 sa_os_t *sa = hdl->sa_os->os_sa;
1648 dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
1650 sa_bulk_attr_t *attr_desc;
1652 int bonus_attr_count = 0;
1653 int bonus_data_size = 0;
1654 int spill_data_size = 0;
1655 int spill_attr_count = 0;
1658 int i, j, k, length_idx;
1660 sa_idx_tab_t *idx_tab;
1664 ASSERT(MUTEX_HELD(&hdl->sa_lock));
1666 /* First make a copy of the old data */
1670 if (dn->dn_bonuslen != 0) {
1671 bonus_data_size = hdl->sa_bonus->db_size;
1672 old_data[0] = kmem_alloc(bonus_data_size, KM_SLEEP);
1673 bcopy(hdl->sa_bonus->db_data, old_data[0],
1674 hdl->sa_bonus->db_size);
1675 bonus_attr_count = hdl->sa_bonus_tab->sa_layout->lot_attr_count;
1681 /* Bring spill buffer online if it isn't currently */
1683 if ((error = sa_get_spill(hdl)) == 0) {
1684 spill_data_size = hdl->sa_spill->db_size;
1685 old_data[1] = kmem_alloc(spill_data_size, KM_SLEEP);
1686 bcopy(hdl->sa_spill->db_data, old_data[1],
1687 hdl->sa_spill->db_size);
1689 hdl->sa_spill_tab->sa_layout->lot_attr_count;
/* ENOENT just means "no spill block"; any other error aborts. */
1690 } else if (error && error != ENOENT) {
1692 kmem_free(old_data[0], bonus_data_size);
1698 /* build descriptor of all attributes */
1700 attr_count = bonus_attr_count + spill_attr_count;
1701 if (action == SA_ADD)
1703 else if (action == SA_REMOVE)
1706 attr_desc = kmem_zalloc(sizeof (sa_bulk_attr_t) * attr_count, KM_SLEEP);
1709 * loop through bonus and spill buffer if it exists, and
1710 * build up new attr_descriptor to reset the attributes
/* k == 0 walks the bonus snapshot, k == 1 the spill snapshot. */
1713 count = bonus_attr_count;
1714 hdr = SA_GET_HDR(hdl, SA_BONUS);
1715 idx_tab = SA_IDX_TAB_GET(hdl, SA_BONUS);
1716 for (; k != 2; k++) {
1717 /* iterate over each attribute in layout */
1718 for (i = 0, length_idx = 0; i != count; i++) {
1719 sa_attr_type_t attr;
1721 attr = idx_tab->sa_layout->lot_attrs[i];
1722 if (attr == newattr) {
1723 /* duplicate attributes are not allowed */
1724 ASSERT(action == SA_REPLACE ||
1725 action == SA_REMOVE);
1726 /* must be variable-sized to be replaced here */
1727 if (action == SA_REPLACE) {
1728 ASSERT(SA_REGISTERED_LEN(sa, attr) == 0);
1729 SA_ADD_BULK_ATTR(attr_desc, j, attr,
1730 locator, datastart, buflen);
/* Fixed-size attrs use the registered length; others read the header. */
1733 length = SA_REGISTERED_LEN(sa, attr);
1735 length = hdr->sa_lengths[length_idx];
/* Point the descriptor at the attr's bytes inside the snapshot copy. */
1738 SA_ADD_BULK_ATTR(attr_desc, j, attr,
1740 (TOC_OFF(idx_tab->sa_idx_tab[attr]) +
1741 (uintptr_t)old_data[k]), length);
1743 if (SA_REGISTERED_LEN(sa, attr) == 0)
1746 if (k == 0 && hdl->sa_spill) {
1747 hdr = SA_GET_HDR(hdl, SA_SPILL);
1748 idx_tab = SA_IDX_TAB_GET(hdl, SA_SPILL);
1749 count = spill_attr_count;
1754 if (action == SA_ADD) {
1755 length = SA_REGISTERED_LEN(sa, newattr);
1759 SA_ADD_BULK_ATTR(attr_desc, j, newattr, locator,
1762 ASSERT3U(j, ==, attr_count);
1764 error = sa_build_layouts(hdl, attr_desc, attr_count, tx);
1767 kmem_free(old_data[0], bonus_data_size);
1769 kmem_free(old_data[1], spill_data_size);
1770 kmem_free(attr_desc, sizeof (sa_bulk_attr_t) * attr_count);
/*
 * Bulk-update worker; caller holds hdl->sa_lock.  Syncs any pending
 * attribute registrations first, performs the SA_UPDATE op, and on
 * success for non-SA bonus types (old-style znodes) invokes the
 * registered per-objset update callback.
 */
1776 sa_bulk_update_impl(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count,
1780 sa_os_t *sa = hdl->sa_os->os_sa;
1781 dmu_object_type_t bonustype;
1783 bonustype = SA_BONUSTYPE_FROM_DB(SA_GET_DB(hdl, SA_BONUS));
1786 ASSERT(MUTEX_HELD(&hdl->sa_lock));
1788 /* sync out registration table if necessary */
1789 if (sa->sa_need_attr_registration)
1790 sa_attr_register_sync(hdl, tx);
1792 error = sa_attr_op(hdl, bulk, count, SA_UPDATE, tx);
1793 if (error == 0 && !IS_SA_BONUSTYPE(bonustype) && sa->sa_update_cb)
1794 sa->sa_update_cb(hdl, tx);
1800 * update or add new attribute
1803 sa_update(sa_handle_t *hdl, sa_attr_type_t type,
1804 void *buf, uint32_t buflen, dmu_tx_t *tx)
1807 sa_bulk_attr_t bulk;
1809 bulk.sa_attr = type;
1810 bulk.sa_data_func = NULL;
1811 bulk.sa_length = buflen;
1814 mutex_enter(&hdl->sa_lock);
1815 error = sa_bulk_update_impl(hdl, &bulk, 1, tx);
1816 mutex_exit(&hdl->sa_lock);
1821 sa_update_from_cb(sa_handle_t *hdl, sa_attr_type_t attr,
1822 uint32_t buflen, sa_data_locator_t *locator, void *userdata, dmu_tx_t *tx)
1825 sa_bulk_attr_t bulk;
1827 bulk.sa_attr = attr;
1828 bulk.sa_data = userdata;
1829 bulk.sa_data_func = locator;
1830 bulk.sa_length = buflen;
1832 mutex_enter(&hdl->sa_lock);
1833 error = sa_bulk_update_impl(hdl, &bulk, 1, tx);
1834 mutex_exit(&hdl->sa_lock);
1839 * Return size of an attribute
1843 sa_size(sa_handle_t *hdl, sa_attr_type_t attr, int *size)
1845 sa_bulk_attr_t bulk;
1848 bulk.sa_data = NULL;
1849 bulk.sa_attr = attr;
1850 bulk.sa_data_func = NULL;
1853 mutex_enter(&hdl->sa_lock);
1854 if ((error = sa_attr_op(hdl, &bulk, 1, SA_LOOKUP, NULL)) != 0) {
1855 mutex_exit(&hdl->sa_lock);
1858 *size = bulk.sa_size;
1860 mutex_exit(&hdl->sa_lock);
1865 sa_bulk_lookup_locked(sa_handle_t *hdl, sa_bulk_attr_t *attrs, int count)
1868 ASSERT(MUTEX_HELD(&hdl->sa_lock));
1869 return (sa_lookup_impl(hdl, attrs, count));
1873 sa_bulk_lookup(sa_handle_t *hdl, sa_bulk_attr_t *attrs, int count)
1878 mutex_enter(&hdl->sa_lock);
1879 error = sa_bulk_lookup_locked(hdl, attrs, count);
1880 mutex_exit(&hdl->sa_lock);
1885 sa_bulk_update(sa_handle_t *hdl, sa_bulk_attr_t *attrs, int count, dmu_tx_t *tx)
1890 mutex_enter(&hdl->sa_lock);
1891 error = sa_bulk_update_impl(hdl, attrs, count, tx);
1892 mutex_exit(&hdl->sa_lock);
1897 sa_remove(sa_handle_t *hdl, sa_attr_type_t attr, dmu_tx_t *tx)
1901 mutex_enter(&hdl->sa_lock);
1902 error = sa_modify_attrs(hdl, attr, SA_REMOVE, NULL,
1904 mutex_exit(&hdl->sa_lock);
1909 sa_object_info(sa_handle_t *hdl, dmu_object_info_t *doi)
1911 dmu_object_info_from_db((dmu_buf_t *)hdl->sa_bonus, doi);
1915 sa_object_size(sa_handle_t *hdl, uint32_t *blksize, u_longlong_t *nblocks)
1917 dmu_object_size_from_db((dmu_buf_t *)hdl->sa_bonus,
1922 sa_set_userp(sa_handle_t *hdl, void *ptr)
1924 hdl->sa_userp = ptr;
1928 sa_get_db(sa_handle_t *hdl)
1930 return ((dmu_buf_t *)hdl->sa_bonus);
1934 sa_get_userdata(sa_handle_t *hdl)
1936 return (hdl->sa_userp);
1940 sa_register_update_callback_locked(objset_t *os, sa_update_cb_t *func)
1942 ASSERT(MUTEX_HELD(&os->os_sa->sa_lock));
1943 os->os_sa->sa_update_cb = func;
1947 sa_register_update_callback(objset_t *os, sa_update_cb_t *func)
1950 mutex_enter(&os->os_sa->sa_lock);
1951 sa_register_update_callback_locked(os, func);
1952 mutex_exit(&os->os_sa->sa_lock);
1956 sa_handle_object(sa_handle_t *hdl)
1958 return (hdl->sa_bonus->db_object);
/*
 * NOTE(review): despite the name, this returns true when the objset has
 * NO SA state attached (os_sa == NULL) and false once SA is set up —
 * confirm callers expect this inverted sense before relying on it.
 */
1962 sa_enabled(objset_t *os)
1964 return (os->os_sa == NULL);
/*
 * Record the master SA object number for this objset.  Fails (non-zero
 * path) if a master object has already been set; otherwise stores the
 * given object number.
 */
1968 sa_set_sa_object(objset_t *os, uint64_t sa_object)
1970 sa_os_t *sa = os->os_sa;
1972 if (sa->sa_master_obj)
1975 sa->sa_master_obj = sa_object;
1981 sa_hdrsize(void *arg)
1983 sa_hdr_phys_t *hdr = arg;
1985 return (SA_HDR_SIZE(hdr));
1989 sa_handle_lock(sa_handle_t *hdl)
1992 mutex_enter(&hdl->sa_lock);
1996 sa_handle_unlock(sa_handle_t *hdl)
1999 mutex_exit(&hdl->sa_lock);