4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
26 #include <sys/types.h>
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/sysmacros.h>
30 #include <sys/cmn_err.h>
34 #include <sys/zfs_znode.h>
35 #include <sys/zfs_dir.h>
37 #include <sys/zil_impl.h>
38 #include <sys/byteorder.h>
39 #include <sys/policy.h>
44 #include <sys/zfs_fuid.h>
45 #include <sys/dsl_dataset.h>
/*
 * Common prologue for all zfs_log_* functions: if the zilog is in
 * replay context, dirty the dataset in this tx and record the sequence
 * number of the record currently being replayed in the per-txg
 * zl_replayed_seq slot (indexed by txg & TXG_MASK).
 * NOTE(review): the macro continues past this excerpt; callers annotate
 * it with "exits if replay", so the unseen tail presumably returns from
 * the calling function — confirm against the full source.
 */
#define ZFS_HANDLE_REPLAY(zilog, tx) \
    if (zilog->zl_replay) { \
        dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); \
        zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] = \
            zilog->zl_replaying_seq; \
56 * These zfs_log_* functions must be called within a dmu tx, in one
 * of 2 contexts depending on zilog->zl_replay:
61 * We need to record the transaction so that if it is committed to
62 * the Intent Log then it can be replayed. An intent log transaction
63 * structure (itx_t) is allocated and all the information necessary to
64 * possibly replay the transaction is saved in it. The itx is then assigned
65 * a sequence number and inserted in the in-memory list anchored in the zilog.
69 * We need to mark the intent log record as replayed in the log header.
70 * This is done in the same transaction as the replay so that they
/*
 * Map a create variant (file create vs. mkdir) together with the
 * presence of ACL data (vsecp != NULL) and extended attributes
 * (AT_XVATTR set in vap->va_mask) onto the matching ZIL transaction
 * type (TX_CREATE*, TX_MKDIR*).  panic()s on a combination that has
 * no defined transaction type.
 */
zfs_log_create_txtype(zil_create_t type, vsecattr_t *vsecp, vattr_t *vap)
    /* nonzero when the caller supplied xvattr-style extended attributes */
    int isxvattr = (vap->va_mask & AT_XVATTR);
    if (vsecp == NULL && !isxvattr)
    if (vsecp && isxvattr)
        return (TX_CREATE_ACL_ATTR);
    panic("%s:%u: unsupported condition", __func__, __LINE__);
    /* ACL present, no extended attributes */
    return (TX_CREATE_ACL);
    /* extended attributes present, no ACL */
    return (TX_CREATE_ATTR);
    if (vsecp == NULL && !isxvattr)
    if (vsecp && isxvattr)
        return (TX_MKDIR_ACL_ATTR);
    panic("%s:%u: unsupported condition", __func__, __LINE__);
    return (TX_MKDIR_ACL);
    return (TX_MKDIR_ATTR);
    /* fall-through: TX_MAX_TYPE signals "no matching transaction type" */
    return (TX_MAX_TYPE);
114 * build up the log data necessary for logging xvattr_t
 * First the lr_attr_t is initialized.  Following the lr_attr_t
116 * is the mapsize and attribute bitmap copied from the xvattr_t.
117 * Following the bitmap and bitmapsize two 64 bit words are reserved
 * for the create time, which may be set.  Following the create time
 * is a single 64 bit integer which holds the attribute bits to set
 * on replay for the xvattr.
/*
 * Serialize an xvattr_t into the lr_attr_t region of a log record:
 * header, request bitmap, two crtime words, then a packed uint64_t of
 * boolean attribute flags, then (optionally) the AV scanstamp.
 */
zfs_log_xvattr(lr_attr_t *lrattr, xvattr_t *xvap)
    xoap = xva_getxoptattr(xvap);
    /* copy the xva_mapsize-word request bitmap after the header */
    lrattr->lr_attr_masksize = xvap->xva_mapsize;
    bitmap = &lrattr->lr_attr_bitmap;
    for (i = 0; i != xvap->xva_mapsize; i++, bitmap++) {
        *bitmap = xvap->xva_reqattrmap[i];

    /* Now pack the attributes up in a single uint64_t */
    attrs = (uint64_t *)bitmap;
    /* the scanstamp area follows the two 64-bit crtime words */
    scanstamp = (caddr_t)(crtime + 2);
    /*
     * For each requested boolean attribute, OR its replay bit into
     * *attrs when the attribute value is set (the bit constants are
     * on continuation lines outside this excerpt).
     */
    if (XVA_ISSET_REQ(xvap, XAT_READONLY))
        *attrs |= (xoap->xoa_readonly == 0) ? 0 :
    if (XVA_ISSET_REQ(xvap, XAT_HIDDEN))
        *attrs |= (xoap->xoa_hidden == 0) ? 0 :
    if (XVA_ISSET_REQ(xvap, XAT_SYSTEM))
        *attrs |= (xoap->xoa_system == 0) ? 0 :
    if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE))
        *attrs |= (xoap->xoa_archive == 0) ? 0 :
    if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE))
        *attrs |= (xoap->xoa_immutable == 0) ? 0 :
    if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK))
        *attrs |= (xoap->xoa_nounlink == 0) ? 0 :
    if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY))
        *attrs |= (xoap->xoa_appendonly == 0) ? 0 :
    if (XVA_ISSET_REQ(xvap, XAT_OPAQUE))
        *attrs |= (xoap->xoa_opaque == 0) ? 0 :
    if (XVA_ISSET_REQ(xvap, XAT_NODUMP))
        *attrs |= (xoap->xoa_nodump == 0) ? 0 :
    if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED))
        *attrs |= (xoap->xoa_av_quarantined == 0) ? 0 :
    if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED))
        *attrs |= (xoap->xoa_av_modified == 0) ? 0 :
    /* create time is encoded in place only when requested */
    if (XVA_ISSET_REQ(xvap, XAT_CREATETIME))
        ZFS_TIME_ENCODE(&xoap->xoa_createtime, crtime);
    if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
        bcopy(xoap->xoa_av_scanstamp, scanstamp, AV_SCANSTAMP_SZ);
/*
 * Copy the log-centric FUID of every ACE in fuidp->z_fuids into the
 * log record starting at 'start'.  Callers use the return value as the
 * next write position, so it presumably returns the address just past
 * the last FUID written (return statement outside this excerpt).
 */
zfs_log_fuid_ids(zfs_fuid_info_t *fuidp, void *start)
    uint64_t *fuidloc = start;

    /* First copy in the ACE FUIDs */
    for (zfuid = list_head(&fuidp->z_fuids); zfuid;
        zfuid = list_next(&fuidp->z_fuids, zfuid)) {
        *fuidloc++ = zfuid->z_logfuid;
/*
 * Append the NUL-terminated FUID domain strings (if any) to the log
 * record at 'start', advancing past each string including its
 * terminator.  Presumably returns the final position (return statement
 * outside this excerpt); some callers discard the result.
 */
zfs_log_fuid_domains(zfs_fuid_info_t *fuidp, void *start)
    zfs_fuid_domain_t *zdomain;

    /* now copy in the domain info, if any */
    if (fuidp->z_domain_str_sz != 0) {
        for (zdomain = list_head(&fuidp->z_domains); zdomain;
            zdomain = list_next(&fuidp->z_domains, zdomain)) {
            bcopy((void *)zdomain->z_domain, start,
                strlen(zdomain->z_domain) + 1);
            start = (caddr_t)start +
                strlen(zdomain->z_domain) + 1;
219 * zfs_log_create() is used to handle TX_CREATE, TX_CREATE_ATTR, TX_MKDIR,
220 * TX_MKDIR_ATTR and TX_MKXATTR
223 * TX_CREATE and TX_MKDIR are standard creates, but they may have FUID
224 * domain information appended prior to the name. In this case the
225 * uid/gid in the log record will be a log centric FUID.
227 * TX_CREATE_ACL_ATTR and TX_MKDIR_ACL_ATTR handle special creates that
228 * may contain attributes, ACL and optional fuid information.
 * TX_CREATE_ACL and TX_MKDIR_ACL handle special creates that specify
 * an ACL and normal users/groups in the ACEs.
233 * There may be an optional xvattr attribute information similar
234 * to zfs_log_setattr.
236 * Also, after the file name "domain" strings may be appended.
/*
 * Build and assign the ZIL record for a create-style operation.
 *
 *   zilog   - intent log for the objset (may be replaying; see macro)
 *   tx      - open dmu transaction this record belongs to
 *   txtype  - one of the TX_CREATE*/TX_MKDIR*/TX_MKXATTR types
 *   dzp     - directory znode the entry is created in
 *   zp      - newly created znode
 *   name    - entry name (copied into the record, NUL included)
 *   vsecp   - optional ACL data (ACL-flavored txtypes only)
 *   fuidp   - optional FUID info (domains + per-ACE fuids)
 *   vap     - attributes; may actually be an xvattr_t (AT_XVATTR)
 *
 * Record layout: lr_create_t (or lr_acl_create_t) fixed part,
 * optional xvattr data, optional ACL/FUID data, then the name.
 */
zfs_log_create(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
    znode_t *dzp, znode_t *zp, char *name, vsecattr_t *vsecp,
    zfs_fuid_info_t *fuidp, vattr_t *vap)
    lr_acl_create_t *lracl;
    /* vap is treated as an xvattr_t when AT_XVATTR is set in va_mask */
    xvattr_t *xvap = (xvattr_t *)vap;
    size_t namesize = strlen(name) + 1;
    ZFS_HANDLE_REPLAY(zilog, tx); /* exits if replay */
    /*
     * If we have FUIDs present then add in space for
     * domains and ACE fuid's if any.
     */
    fuidsz += fuidp->z_domain_str_sz;
    fuidsz += fuidp->z_fuid_cnt * sizeof (uint64_t);
    if (vap->va_mask & AT_XVATTR)
        xvatsize = ZIL_XVAT_SIZE(xvap->xva_mapsize);
    /*
     * Plain creates use the smaller lr_create_t header; the ACL
     * variants use lr_acl_create_t and reserve space for the ACEs.
     */
    if ((int)txtype == TX_CREATE_ATTR || (int)txtype == TX_MKDIR_ATTR ||
        (int)txtype == TX_CREATE || (int)txtype == TX_MKDIR ||
        (int)txtype == TX_MKXATTR) {
        txsize = sizeof (*lr) + namesize + fuidsz + xvatsize;
        lrsize = sizeof (*lr);
        aclsize = (vsecp) ? vsecp->vsa_aclentsz : 0;
        sizeof (lr_acl_create_t) + namesize + fuidsz +
        ZIL_ACE_LENGTH(aclsize) + xvatsize;
        lrsize = sizeof (lr_acl_create_t);
    itx = zil_itx_create(txtype, txsize);
    lr = (lr_create_t *)&itx->itx_lr;
    lr->lr_doid = dzp->z_id;
    lr->lr_foid = zp->z_id;
    lr->lr_mode = zp->z_phys->zp_mode;
    /*
     * Ephemeral (Windows SID-mapped) ids cannot be logged directly;
     * log the FUID form instead.
     */
    if (!IS_EPHEMERAL(zp->z_phys->zp_uid)) {
        lr->lr_uid = (uint64_t)zp->z_phys->zp_uid;
        lr->lr_uid = fuidp->z_fuid_owner;
    if (!IS_EPHEMERAL(zp->z_phys->zp_gid)) {
        lr->lr_gid = (uint64_t)zp->z_phys->zp_gid;
        lr->lr_gid = fuidp->z_fuid_group;
    lr->lr_gen = zp->z_phys->zp_gen;
    lr->lr_crtime[0] = zp->z_phys->zp_crtime[0];
    lr->lr_crtime[1] = zp->z_phys->zp_crtime[1];
    lr->lr_rdev = zp->z_phys->zp_rdev;
    /*
     * Fill in xvattr info if any
     */
    if (vap->va_mask & AT_XVATTR) {
        zfs_log_xvattr((lr_attr_t *)((caddr_t)lr + lrsize), xvap);
        end = (caddr_t)lr + lrsize + xvatsize;
        end = (caddr_t)lr + lrsize;

    /* Now fill in any ACL info */
    lracl = (lr_acl_create_t *)&itx->itx_lr;
    lracl->lr_aclcnt = vsecp->vsa_aclcnt;
    lracl->lr_acl_bytes = aclsize;
    lracl->lr_domcnt = fuidp ? fuidp->z_domain_cnt : 0;
    lracl->lr_fuidcnt = fuidp ? fuidp->z_fuid_cnt : 0;
    if (vsecp->vsa_aclflags & VSA_ACE_ACLFLAGS)
        lracl->lr_acl_flags = (uint64_t)vsecp->vsa_aclflags;
        lracl->lr_acl_flags = 0;
    /* ACEs are padded to ZIL_ACE_LENGTH alignment in the record */
    bcopy(vsecp->vsa_aclentp, end, aclsize);
    end = (caddr_t)end + ZIL_ACE_LENGTH(aclsize);

    /* drop in FUID info */
    end = zfs_log_fuid_ids(fuidp, end);
    end = zfs_log_fuid_domains(fuidp, end);
    /*
     * Now place file name in log record
     */
    bcopy(name, end, namesize);
    /* hand the itx to the zilog and remember its seq on both znodes */
    seq = zil_itx_assign(zilog, itx, tx);
    dzp->z_last_itx = seq;
    zp->z_last_itx = seq;
350 * zfs_log_remove() handles both TX_REMOVE and TX_RMDIR transactions.
/*
 * Log a TX_REMOVE or TX_RMDIR: record the parent directory object id
 * followed by the removed entry's name (NUL-terminated, directly after
 * the fixed lr_remove_t), then assign the itx to the zilog and remember
 * its sequence number on the directory znode.
 */
zfs_log_remove(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
    znode_t *dzp, char *name)
    size_t namesize = strlen(name) + 1;
    ZFS_HANDLE_REPLAY(zilog, tx); /* exits if replay */

    itx = zil_itx_create(txtype, sizeof (*lr) + namesize);
    lr = (lr_remove_t *)&itx->itx_lr;
    lr->lr_doid = dzp->z_id;
    /* name is stored immediately after the fixed record */
    bcopy(name, (char *)(lr + 1), namesize);
    seq = zil_itx_assign(zilog, itx, tx);
    dzp->z_last_itx = seq;
376 * zfs_log_link() handles TX_LINK transactions.
/*
 * Log a TX_LINK: record the directory object id, the object being
 * linked, and the new entry name (after the fixed lr_link_t), then
 * remember the itx sequence number on both znodes involved.
 */
zfs_log_link(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
    znode_t *dzp, znode_t *zp, char *name)
    size_t namesize = strlen(name) + 1;
    ZFS_HANDLE_REPLAY(zilog, tx); /* exits if replay */

    itx = zil_itx_create(txtype, sizeof (*lr) + namesize);
    lr = (lr_link_t *)&itx->itx_lr;
    lr->lr_doid = dzp->z_id;
    lr->lr_link_obj = zp->z_id;
    bcopy(name, (char *)(lr + 1), namesize);
    seq = zil_itx_assign(zilog, itx, tx);
    dzp->z_last_itx = seq;
    zp->z_last_itx = seq;
404 * zfs_log_symlink() handles TX_SYMLINK transactions.
/*
 * Log a TX_SYMLINK.  The record reuses lr_create_t; the entry name and
 * the link target string are appended back-to-back after the fixed
 * part.  Note that uid/gid are logged directly here with no FUID
 * handling (unlike zfs_log_create).
 */
zfs_log_symlink(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
    znode_t *dzp, znode_t *zp, char *name, char *link)
    size_t namesize = strlen(name) + 1;
    size_t linksize = strlen(link) + 1;
    ZFS_HANDLE_REPLAY(zilog, tx); /* exits if replay */

    itx = zil_itx_create(txtype, sizeof (*lr) + namesize + linksize);
    lr = (lr_create_t *)&itx->itx_lr;
    lr->lr_doid = dzp->z_id;
    lr->lr_foid = zp->z_id;
    lr->lr_mode = zp->z_phys->zp_mode;
    lr->lr_uid = zp->z_phys->zp_uid;
    lr->lr_gid = zp->z_phys->zp_gid;
    lr->lr_gen = zp->z_phys->zp_gen;
    lr->lr_crtime[0] = zp->z_phys->zp_crtime[0];
    lr->lr_crtime[1] = zp->z_phys->zp_crtime[1];
    /* name first, then the symlink target, both NUL-terminated */
    bcopy(name, (char *)(lr + 1), namesize);
    bcopy(link, (char *)(lr + 1) + namesize, linksize);
    seq = zil_itx_assign(zilog, itx, tx);
    dzp->z_last_itx = seq;
    zp->z_last_itx = seq;
440 * zfs_log_rename() handles TX_RENAME transactions.
/*
 * Log a TX_RENAME: source and target directory object ids, followed by
 * the source name then the destination name.  The sequence number is
 * remembered on both directories and on the renamed object (szp).
 */
zfs_log_rename(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
    znode_t *sdzp, char *sname, znode_t *tdzp, char *dname, znode_t *szp)
    size_t snamesize = strlen(sname) + 1;
    size_t dnamesize = strlen(dname) + 1;
    ZFS_HANDLE_REPLAY(zilog, tx); /* exits if replay */

    itx = zil_itx_create(txtype, sizeof (*lr) + snamesize + dnamesize);
    lr = (lr_rename_t *)&itx->itx_lr;
    lr->lr_sdoid = sdzp->z_id;
    lr->lr_tdoid = tdzp->z_id;
    bcopy(sname, (char *)(lr + 1), snamesize);
    bcopy(dname, (char *)(lr + 1) + snamesize, dnamesize);
    seq = zil_itx_assign(zilog, itx, tx);
    sdzp->z_last_itx = seq;
    tdzp->z_last_itx = seq;
    szp->z_last_itx = seq;
471 * zfs_log_write() handles TX_WRITE transactions.
/*
 * Tunable: writes larger than this that are not going to a separate
 * log device (and that fit in one block) are logged indirectly
 * (WR_INDIRECT) rather than copying the data into the log record.
 */
ssize_t zfs_immediate_write_sz = 32768;

/*
 * Largest data payload that fits in a single log block
 * (macro continues on a line outside this excerpt).
 */
#define ZIL_MAX_LOG_DATA (SPA_MAXBLOCKSIZE - sizeof (zil_trailer_t) - \
/*
 * Log a TX_WRITE for [off, off+resid).  Chooses one of three
 * strategies (see the comment below): WR_INDIRECT (block pointer
 * logged, data written via dmu_sync at commit), WR_COPIED (data copied
 * into the record now), or WR_NEED_COPY (data fetched later if the
 * record must be flushed).  Marks the itx synchronous when the file is
 * being fsync'ed or opened with F(D)SYNC.
 */
zfs_log_write(zilog_t *zilog, dmu_tx_t *tx, int txtype,
    znode_t *zp, offset_t off, ssize_t resid, int ioflag)
    itx_wr_state_t write_state;
    /* nothing to log for an unlinked file or when no zilog exists */
    if (zilog == NULL || zp->z_unlinked)
    ZFS_HANDLE_REPLAY(zilog, tx); /* exits if replay */
    /*
     * Writes are handled in three different ways:
     *
     * In this mode, if we need to commit the write later, then the block
     * is immediately written into the file system (using dmu_sync),
     * and a pointer to the block is put into the log record.
     * When the txg commits the block is linked in.
     * This saves additionally writing the data into the log record.
     * There are a few requirements for this to occur:
     * - write is greater than zfs_immediate_write_sz
     * - not using slogs (as slogs are assumed to always be faster
     *   than writing into the main pool)
     * - the write occupies only one block
     *
     * If we know we'll immediately be committing the
     * transaction (FSYNC or FDSYNC), then we allocate a larger
     * log record here for the data and copy the data in.
     *
     * Otherwise we don't allocate a buffer, and *if* we need to
     * flush the write later then a buffer is allocated and
     * we retrieve the data using the dmu.
     */
    slogging = spa_has_slogs(zilog->zl_spa);
    if (resid > zfs_immediate_write_sz && !slogging && resid <= zp->z_blksz)
        write_state = WR_INDIRECT;
    else if (ioflag & (FSYNC | FDSYNC))
        write_state = WR_COPIED;
        write_state = WR_NEED_COPY;
    /* consume one pending fsync credit from thread-specific data */
    if ((fsync_cnt = (uintptr_t)tsd_get(zfs_fsyncer_key)) != 0) {
        (void) tsd_set(zfs_fsyncer_key, (void *)(fsync_cnt - 1));
    /*
     * If the write would overflow the largest block then split it.
     */
    if (write_state != WR_INDIRECT && resid > ZIL_MAX_LOG_DATA)
        len = SPA_MAXBLOCKSIZE >> 1;
    itx = zil_itx_create(txtype, sizeof (*lr) +
        (write_state == WR_COPIED ? len : 0));
    lr = (lr_write_t *)&itx->itx_lr;
    /*
     * If the copy-now read fails, fall back to WR_NEED_COPY with a
     * freshly allocated data-less record.
     */
    if (write_state == WR_COPIED && dmu_read(zp->z_zfsvfs->z_os,
        zp->z_id, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
        kmem_free(itx, offsetof(itx_t, itx_lr) +
            itx->itx_lr.lrc_reclen);
        itx = zil_itx_create(txtype, sizeof (*lr));
        lr = (lr_write_t *)&itx->itx_lr;
        write_state = WR_NEED_COPY;
    itx->itx_wr_state = write_state;
    if (write_state == WR_NEED_COPY)
    lr->lr_foid = zp->z_id;
    BP_ZERO(&lr->lr_blkptr);
    itx->itx_private = zp->z_zfsvfs;
    /* force a synchronous commit for fsync-style activity */
    if ((zp->z_sync_cnt != 0) || (fsync_cnt != 0) ||
        (ioflag & (FSYNC | FDSYNC)))
        itx->itx_sync = B_TRUE;
        itx->itx_sync = B_FALSE;
    zp->z_last_itx = zil_itx_assign(zilog, itx, tx);
576 * zfs_log_truncate() handles TX_TRUNCATE transactions.
/*
 * Log a TX_TRUNCATE for object zp over [off, off+len).  The record is
 * fixed-size (no variable payload); the itx is synchronous only when
 * the file has active sync opens (z_sync_cnt != 0).
 */
zfs_log_truncate(zilog_t *zilog, dmu_tx_t *tx, int txtype,
    znode_t *zp, uint64_t off, uint64_t len)
    if (zilog == NULL || zp->z_unlinked)
    ZFS_HANDLE_REPLAY(zilog, tx); /* exits if replay */

    itx = zil_itx_create(txtype, sizeof (*lr));
    lr = (lr_truncate_t *)&itx->itx_lr;
    lr->lr_foid = zp->z_id;
    itx->itx_sync = (zp->z_sync_cnt != 0);
    seq = zil_itx_assign(zilog, itx, tx);
    zp->z_last_itx = seq;
603 * zfs_log_setattr() handles TX_SETATTR transactions.
/*
 * Log a TX_SETATTR.  mask_applied records which AT_* attributes were
 * actually changed; ephemeral uid/gid values are logged as FUIDs from
 * fuidp.  Optional xvattr data and FUID domain strings are appended
 * after the fixed lr_setattr_t.
 */
zfs_log_setattr(zilog_t *zilog, dmu_tx_t *tx, int txtype,
    znode_t *zp, vattr_t *vap, uint_t mask_applied, zfs_fuid_info_t *fuidp)
    /* vap is an xvattr_t in disguise when AT_XVATTR is set */
    xvattr_t *xvap = (xvattr_t *)vap;
    size_t recsize = sizeof (lr_setattr_t);
    if (zilog == NULL || zp->z_unlinked)
    ZFS_HANDLE_REPLAY(zilog, tx); /* exits if replay */
    /*
     * If XVATTR set, then log record size needs to allow
     * for lr_attr_t + xvattr mask, mapsize and create time
     * plus actual attribute values
     */
    if (vap->va_mask & AT_XVATTR)
        recsize = sizeof (*lr) + ZIL_XVAT_SIZE(xvap->xva_mapsize);
    /* room for FUID domain strings appended at the end */
    recsize += fuidp->z_domain_str_sz;
    itx = zil_itx_create(txtype, recsize);
    lr = (lr_setattr_t *)&itx->itx_lr;
    lr->lr_foid = zp->z_id;
    lr->lr_mask = (uint64_t)mask_applied;
    lr->lr_mode = (uint64_t)vap->va_mode;
    /* ephemeral ids are logged via their FUID mapping */
    if ((mask_applied & AT_UID) && IS_EPHEMERAL(vap->va_uid))
        lr->lr_uid = fuidp->z_fuid_owner;
        lr->lr_uid = (uint64_t)vap->va_uid;
    if ((mask_applied & AT_GID) && IS_EPHEMERAL(vap->va_gid))
        lr->lr_gid = fuidp->z_fuid_group;
        lr->lr_gid = (uint64_t)vap->va_gid;
    lr->lr_size = (uint64_t)vap->va_size;
    ZFS_TIME_ENCODE(&vap->va_atime, lr->lr_atime);
    ZFS_TIME_ENCODE(&vap->va_mtime, lr->lr_mtime);
    /* variable-length data begins right after the fixed record */
    start = (lr_setattr_t *)(lr + 1);
    if (vap->va_mask & AT_XVATTR) {
        zfs_log_xvattr((lr_attr_t *)start, xvap);
        start = (caddr_t)start + ZIL_XVAT_SIZE(xvap->xva_mapsize);
    /*
     * Now stick on domain information if any on end
     */
    (void) zfs_log_fuid_domains(fuidp, start);
    itx->itx_sync = (zp->z_sync_cnt != 0);
    seq = zil_itx_assign(zilog, itx, tx);
    zp->z_last_itx = seq;
670 * zfs_log_acl() handles TX_ACL transactions.
/*
 * Log an ACL change.  Pre-FUID filesystems (version < ZPL_VERSION_FUID)
 * use the legacy TX_ACL_V0 record (raw ACEs only); newer filesystems
 * use TX_ACL, which carries padded ACEs plus FUID ids and domain
 * strings when fuidp is supplied.
 */
zfs_log_acl(zilog_t *zilog, dmu_tx_t *tx, znode_t *zp,
    vsecattr_t *vsecp, zfs_fuid_info_t *fuidp)
    size_t aclbytes = vsecp->vsa_aclentsz;
    if (zilog == NULL || zp->z_unlinked)
    ZFS_HANDLE_REPLAY(zilog, tx); /* exits if replay */
    /* pick record format by on-disk ZPL version */
    txtype = (zp->z_zfsvfs->z_version < ZPL_VERSION_FUID) ?
    if (txtype == TX_ACL)
        lrsize = sizeof (*lr);
        lrsize = sizeof (*lrv0);
    /* TX_ACL pads the ACE area to ZIL_ACE_LENGTH; V0 stores raw bytes */
    ((txtype == TX_ACL) ? ZIL_ACE_LENGTH(aclbytes) : aclbytes) +
    (fuidp ? fuidp->z_domain_str_sz : 0) +
    sizeof (uint64_t) * (fuidp ? fuidp->z_fuid_cnt : 0);
    itx = zil_itx_create(txtype, txsize);
    lr = (lr_acl_t *)&itx->itx_lr;
    lr->lr_foid = zp->z_id;
    if (txtype == TX_ACL) {
        lr->lr_acl_bytes = aclbytes;
        lr->lr_domcnt = fuidp ? fuidp->z_domain_cnt : 0;
        lr->lr_fuidcnt = fuidp ? fuidp->z_fuid_cnt : 0;
        if (vsecp->vsa_mask & VSA_ACE_ACLFLAGS)
            lr->lr_acl_flags = (uint64_t)vsecp->vsa_aclflags;
            lr->lr_acl_flags = 0;
    lr->lr_aclcnt = (uint64_t)vsecp->vsa_aclcnt;
    if (txtype == TX_ACL_V0) {
        lrv0 = (lr_acl_v0_t *)lr;
        bcopy(vsecp->vsa_aclentp, (ace_t *)(lrv0 + 1), aclbytes);
        /* TX_ACL: ACEs, then per-ACE FUIDs, then domain strings */
        void *start = (ace_t *)(lr + 1);
        bcopy(vsecp->vsa_aclentp, start, aclbytes);
        start = (caddr_t)start + ZIL_ACE_LENGTH(aclbytes);
        start = zfs_log_fuid_ids(fuidp, start);
        (void) zfs_log_fuid_domains(fuidp, start);
    itx->itx_sync = (zp->z_sync_cnt != 0);
    seq = zil_itx_assign(zilog, itx, tx);
    zp->z_last_itx = seq;