2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2010 Zheng Liu <lz@freebsd.org>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/types.h>
34 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/vnode.h>
39 #include <sys/endian.h>
44 #include <fs/ext2fs/ext2_mount.h>
45 #include <fs/ext2fs/fs.h>
46 #include <fs/ext2fs/inode.h>
47 #include <fs/ext2fs/ext2fs.h>
48 #include <fs/ext2fs/ext2_extents.h>
49 #include <fs/ext2fs/ext2_extern.h>
/* DTrace (SDT) probe used throughout this file to report extent-tree
 * corruption and other trace messages. */
51 SDT_PROVIDER_DECLARE(ext2fs);
54 * arg0: verbosity. Higher numbers give more verbose messages
55 * arg1: Textual message
57 SDT_PROBE_DEFINE2(ext2fs, , trace, extents, "int", "char*");
/* malloc(9) type for extent-path arrays and node copies allocated below. */
59 static MALLOC_DEFINE(M_EXT2EXTENTS, "ext2_extents", "EXT2 extents");
/* Debug-only extent-tree walker, compiled in with EXT2FS_PRINT_EXTENTS. */
61 #ifdef EXT2FS_PRINT_EXTENTS
/* When true the walker printf()s each header/index/extent it visits. */
62 static const bool print_extents_walk = true;
/* Forward declarations for the walker's mutually recursive helpers. */
64 static int ext4_ext_check_header(struct inode *, struct ext4_extent_header *);
65 static int ext4_ext_walk_header(struct inode *, struct ext4_extent_header *);
66 static inline e4fs_daddr_t ext4_ext_index_pblock(struct ext4_extent_index *);
67 static inline e4fs_daddr_t ext4_ext_extent_pblock(struct ext4_extent *);
/*
 * Sanity-check a physical block number: it must lie at or above the
 * filesystem's first data block and below the total block count.
 */
70 ext4_ext_blk_check(struct inode *ip, e4fs_daddr_t blk)
76 if (blk < fs->e2fs->e2fs_first_dblock || blk >= fs->e2fs_bcount)
/*
 * Print one interior index entry (logical block and 48-bit physical leaf
 * block) and, when do_walk is set, read the child node from disk and
 * recurse into its header via ext4_ext_walk_header().
 */
83 ext4_ext_walk_index(struct inode *ip, struct ext4_extent_index *ex, bool do_walk)
92 if (print_extents_walk)
93 printf(" index %p => (blk %u pblk %ju)\n", ex,
94 le32toh(ex->ei_blk), (uint64_t)le16toh(ex->ei_leaf_hi) << 32 |
95 le32toh(ex->ei_leaf_lo));
/* Validate the child's physical block before issuing the read. */
100 blk = ext4_ext_index_pblock(ex);
101 error = ext4_ext_blk_check(ip, blk);
105 if ((error = bread(ip->i_devvp,
106 fsbtodb(fs, blk), (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) {
/* Recurse into the freshly read child node's extent header. */
111 error = ext4_ext_walk_header(ip, (struct ext4_extent_header *)bp->b_data);
/*
 * Validate a leaf extent's physical start block and print the extent's
 * logical block, length and physical start.
 */
119 ext4_ext_walk_extent(struct inode *ip, struct ext4_extent *ep)
124 blk = ext4_ext_extent_pblock(ep);
125 error = ext4_ext_blk_check(ip, blk);
129 if (print_extents_walk)
130 printf(" ext %p => (blk %u len %u start %ju)\n",
131 ep, le32toh(ep->e_blk), le16toh(ep->e_len),
/*
 * Validate and print one extent-tree node header, then walk each of its
 * entries: index entries when the node is interior (eh_depth != 0),
 * leaf extents otherwise.  Stops on the first error.
 */
138 ext4_ext_walk_header(struct inode *ip, struct ext4_extent_header *eh)
142 error = ext4_ext_check_header(ip, eh);
146 if (print_extents_walk)
147 printf("header %p => (entries %d max %d depth %d gen %d)\n",
148 eh, le16toh(eh->eh_ecount),
149 le16toh(eh->eh_max), le16toh(eh->eh_depth), le32toh(eh->eh_gen));
/* Entries start immediately after the header (eh + 1). */
151 for (i = 0; i < le16toh(eh->eh_ecount) && error == 0; i++)
152 if (eh->eh_depth != 0)
153 error = ext4_ext_walk_index(ip,
154 (struct ext4_extent_index *)(eh + 1 + i), true);
156 error = ext4_ext_walk_extent(ip, (struct ext4_extent *)(eh + 1 + i));
/*
 * Print every level of a previously built extent path (root to leaf),
 * without re-reading child blocks (do_walk == false for indexes).
 */
162 ext4_ext_print_path(struct inode *ip, struct ext4_extent_path *path)
168 if (print_extents_walk)
169 printf("ip=%ju, Path:\n", ip->i_number);
171 for (k = 0; k <= l && error == 0; k++, path++) {
172 if (path->ep_index) {
173 error = ext4_ext_walk_index(ip, path->ep_index, false);
174 } else if (path->ep_ext) {
175 error = ext4_ext_walk_extent(ip, path->ep_ext);
/*
 * Entry point of the debug walker: start from the extent header stored
 * in-inode (i_db) and walk the whole tree.  Inodes without the
 * IN_E4EXTENTS flag are not extent-mapped and are skipped.
 */
183 ext4_ext_walk(struct inode *ip)
185 struct ext4_extent_header *ehp;
187 ehp = (struct ext4_extent_header *)ip->i_db;
189 if (print_extents_walk)
190 printf("Extent status:ip=%ju\n", ip->i_number);
192 if (!(ip->i_flag & IN_E4EXTENTS))
195 return (ext4_ext_walk_header(ip, ehp));
/* Root extent header lives in the inode's block-pointer area (i_db). */
199 static inline struct ext4_extent_header *
200 ext4_ext_inode_header(struct inode *ip)
203 return ((struct ext4_extent_header *)ip->i_db);
/* Extent header at the start of an on-disk tree node's data. */
206 static inline struct ext4_extent_header *
207 ext4_ext_block_header(char *bdata)
210 return ((struct ext4_extent_header *)bdata);
/* Depth of the tree as recorded in the root header (0 == root is a leaf).
 * NOTE(review): this reads via ip->i_data while other helpers use ip->i_db;
 * presumably they alias the same storage — confirm against struct inode. */
213 static inline unsigned short
214 ext4_ext_inode_depth(struct inode *ip)
216 struct ext4_extent_header *ehp;
218 ehp = (struct ext4_extent_header *)ip->i_data;
219 return (le16toh(ehp->eh_depth));
/* Assemble the 48-bit physical block of an index entry from its
 * little-endian lo(32)/hi(16) halves. */
222 static inline e4fs_daddr_t
223 ext4_ext_index_pblock(struct ext4_extent_index *index)
227 blk = le32toh(index->ei_leaf_lo);
228 blk |= (e4fs_daddr_t)le16toh(index->ei_leaf_hi) << 32;
/* Split a 48-bit physical block back into an index entry's halves. */
234 ext4_index_store_pblock(struct ext4_extent_index *index, e4fs_daddr_t pb)
237 index->ei_leaf_lo = htole32(pb & 0xffffffff);
238 index->ei_leaf_hi = htole16((pb >> 32) & 0xffff);
/* Same 48-bit assembly for a leaf extent's start block. */
241 static inline e4fs_daddr_t
242 ext4_ext_extent_pblock(struct ext4_extent *extent)
246 blk = le32toh(extent->e_start_lo);
247 blk |= (e4fs_daddr_t)le16toh(extent->e_start_hi) << 32;
/* Split a 48-bit start block back into a leaf extent's halves. */
253 ext4_ext_store_pblock(struct ext4_extent *ex, e4fs_daddr_t pb)
256 ex->e_start_lo = htole32(pb & 0xffffffff);
257 ex->e_start_hi = htole16((pb >> 32) & 0xffff);
/*
 * Look up lbn in the inode's single-entry extent cache.  On a hit the
 * cached extent is materialized into *ep (fields stored little-endian)
 * and the cache type is returned; EXT4_EXT_CACHE_NO means a miss.
 */
261 ext4_ext_in_cache(struct inode *ip, daddr_t lbn, struct ext4_extent *ep)
263 struct ext4_extent_cache *ecp;
264 int ret = EXT4_EXT_CACHE_NO;
266 ecp = &ip->i_ext_cache;
267 if (ecp->ec_type == EXT4_EXT_CACHE_NO)
/* Hit iff lbn falls inside [ec_blk, ec_blk + ec_len). */
270 if (lbn >= ecp->ec_blk && lbn < ecp->ec_blk + ecp->ec_len) {
271 ep->e_blk = htole32(ecp->ec_blk);
272 ep->e_start_lo = htole32(ecp->ec_start & 0xffffffff);
273 ep->e_start_hi = htole16(ecp->ec_start >> 32 & 0xffff);
274 ep->e_len = htole16(ecp->ec_len);
/*
 * Validate an extent-tree node header: magic number, a non-zero entry
 * capacity, and entry count within capacity.  On failure the textual
 * reason is reported through the SDT trace probe.
 */
281 ext4_ext_check_header(struct inode *ip, struct ext4_extent_header *eh)
288 if (le16toh(eh->eh_magic) != EXT4_EXT_MAGIC) {
289 error_msg = "header: invalid magic";
292 if (eh->eh_max == 0) {
293 error_msg = "header: invalid eh_max";
296 if (le16toh(eh->eh_ecount) > le16toh(eh->eh_max)) {
297 error_msg = "header: invalid eh_entries";
304 SDT_PROBE2(ext2fs, , trace, extents, 1, error_msg);
/*
 * Binary-search the index entries of an interior node for the last entry
 * whose ei_blk <= blk; the result is stored in path->ep_index.  The
 * search starts at the second entry (the first always covers blk).
 */
309 ext4_ext_binsearch_index(struct ext4_extent_path *path, int blk)
311 struct ext4_extent_header *eh;
312 struct ext4_extent_index *r, *l, *m;
314 eh = path->ep_header;
316 KASSERT(le16toh(eh->eh_ecount) <= le16toh(eh->eh_max) &&
317 le16toh(eh->eh_ecount) > 0,
318 ("ext4_ext_binsearch_index: bad args"));
320 l = EXT_FIRST_INDEX(eh) + 1;
321 r = EXT_FIRST_INDEX(eh) + le16toh(eh->eh_ecount) - 1;
324 if (blk < le32toh(m->ei_blk))
/* l ends one past the last entry with ei_blk <= blk. */
330 path->ep_index = l - 1;
/*
 * Binary-search a leaf node for the last extent whose e_blk <= blk;
 * the result is stored in path->ep_ext.  An empty leaf leaves ep_ext
 * untouched (early return).
 */
334 ext4_ext_binsearch_ext(struct ext4_extent_path *path, int blk)
336 struct ext4_extent_header *eh;
337 struct ext4_extent *r, *l, *m;
339 eh = path->ep_header;
341 KASSERT(le16toh(eh->eh_ecount) <= le16toh(eh->eh_max),
342 ("ext4_ext_binsearch_ext: bad args"));
344 if (eh->eh_ecount == 0)
347 l = EXT_FIRST_EXTENT(eh) + 1;
348 r = EXT_FIRST_EXTENT(eh) + le16toh(eh->eh_ecount) - 1;
352 if (blk < le32toh(m->e_blk))
/* l ends one past the last extent with e_blk <= blk. */
358 path->ep_ext = l - 1;
/*
 * Copy a buffer-cache block into a private malloc'ed copy hung off the
 * path entry (ep_data), so the tree node stays usable after brelse().
 */
362 ext4_ext_fill_path_bdata(struct ext4_extent_path *path,
363 struct buf *bp, uint64_t blk)
366 KASSERT(path->ep_data == NULL,
367 ("ext4_ext_fill_path_bdata: bad ep_data"));
369 path->ep_data = malloc(bp->b_bufsize, M_EXT2EXTENTS, M_WAITOK);
370 memcpy(path->ep_data, bp->b_data, bp->b_bufsize);
/*
 * Inverse of the above: copy the (possibly modified) private node copy
 * back into a buffer so it can be written to disk.
 */
377 ext4_ext_fill_path_buf(struct ext4_extent_path *path, struct buf *bp)
380 KASSERT(path->ep_data != NULL,
381 ("ext4_ext_fill_path_buf: bad ep_data"));
383 memcpy(bp->b_data, path->ep_data, bp->b_bufsize);
/*
 * Release the private node copies of every level of a path
 * (free(NULL) is a no-op, so untouched levels are safe).
 */
387 ext4_ext_drop_refs(struct ext4_extent_path *path)
394 depth = path->ep_depth;
395 for (i = 0; i <= depth; i++, path++)
397 free(path->ep_data, M_EXT2EXTENTS);
398 path->ep_data = NULL;
/* Drop all node copies, then free the path array itself. */
403 ext4_ext_path_free(struct ext4_extent_path *path)
409 ext4_ext_drop_refs(path);
410 free(path, M_EXT2EXTENTS);
/*
 * Build the root-to-leaf path for logical block `block`: starting at the
 * in-inode root header, binary-search each interior level, read the
 * child node from disk, keep a private copy (ep_data), and finally
 * binary-search the leaf.  On success *ppath holds the path; on failure
 * all node copies and (when locally allocated) the path array are freed.
 */
414 ext4_ext_find_extent(struct inode *ip, daddr_t block,
415 struct ext4_extent_path **ppath)
418 struct ext4_extent_header *eh;
419 struct ext4_extent_path *path;
422 int error, depth, i, ppos, alloc;
425 eh = ext4_ext_inode_header(ip);
426 depth = ext4_ext_inode_depth(ip);
/* Validate the root header before trusting its depth/entries. */
430 error = ext4_ext_check_header(ip, eh);
/* NOTE(review): allocation is EXT4_EXT_DEPTH_MAX entries, not depth+1 —
 * presumably to tolerate later tree growth; confirm. */
439 path = malloc(EXT4_EXT_DEPTH_MAX *
440 sizeof(struct ext4_extent_path),
441 M_EXT2EXTENTS, M_WAITOK | M_ZERO);
/* Level 0 is the in-inode root: no private copy needed. */
446 path[0].ep_header = eh;
447 path[0].ep_data = NULL;
449 /* Walk through the tree. */
452 ext4_ext_binsearch_index(&path[ppos], block);
453 blk = ext4_ext_index_pblock(path[ppos].ep_index);
454 path[ppos].ep_depth = i;
455 path[ppos].ep_ext = NULL;
457 error = bread(ip->i_devvp, fsbtodb(ip->i_e2fs, blk),
458 ip->i_e2fs->e2fs_bsize, NOCRED, &bp);
/* Defend against a corrupted tree that claims more levels than depth. */
465 SDT_PROBE2(ext2fs, , trace, extents, 1,
466 "ppos > depth => extent corrupted");
472 ext4_ext_fill_path_bdata(&path[ppos], bp, blk);
/* Each on-disk node must pass header and checksum verification. */
475 eh = ext4_ext_block_header(path[ppos].ep_data);
476 if (ext4_ext_check_header(ip, eh) ||
477 ext2_extent_blk_csum_verify(ip, path[ppos].ep_data)) {
482 path[ppos].ep_header = eh;
487 error = ext4_ext_check_header(ip, eh);
/* Leaf level: record it and search for the extent covering `block`. */
492 path[ppos].ep_depth = i;
493 path[ppos].ep_header = eh;
494 path[ppos].ep_ext = NULL;
495 path[ppos].ep_index = NULL;
496 ext4_ext_binsearch_ext(&path[ppos], block);
/* Error path: release node copies and the path array. */
500 ext4_ext_drop_refs(path);
502 free(path, M_EXT2EXTENTS);
/* Number of leaf extents that fit in the in-inode root (i_data minus
 * the header). */
510 ext4_ext_space_root(struct inode *ip)
514 size = sizeof(ip->i_data);
515 size -= sizeof(struct ext4_extent_header);
516 size /= sizeof(struct ext4_extent);
/* Number of leaf extents that fit in one on-disk tree block. */
522 ext4_ext_space_block(struct inode *ip)
529 size = (fs->e2fs_bsize - sizeof(struct ext4_extent_header)) /
530 sizeof(struct ext4_extent);
/* Number of index entries that fit in one on-disk tree block. */
536 ext4_ext_space_block_index(struct inode *ip)
543 size = (fs->e2fs_bsize - sizeof(struct ext4_extent_header)) /
544 sizeof(struct ext4_extent_index);
/*
 * Initialize an empty extent tree in the inode: clear the block-pointer
 * area, write a root header (magic + capacity), invalidate the extent
 * cache and push the inode to disk.
 */
550 ext4_ext_tree_init(struct inode *ip)
552 struct ext4_extent_header *ehp;
554 ip->i_flag |= IN_E4EXTENTS;
/* NOTE(review): memset length is EXT2_NDADDR + EXT2_NIADDR — an entry
 * count, not a byte count; if i_data is an array of 32-bit words this
 * zeroes only a quarter of it.  Looks suspicious — confirm against the
 * declaration of i_data before changing. */
556 memset(ip->i_data, 0, EXT2_NDADDR + EXT2_NIADDR);
557 ehp = (struct ext4_extent_header *)ip->i_data;
558 ehp->eh_magic = htole16(EXT4_EXT_MAGIC);
559 ehp->eh_max = htole16(ext4_ext_space_root(ip));
560 ip->i_ext_cache.ec_type = EXT4_EXT_CACHE_NO;
561 ip->i_flag |= IN_CHANGE | IN_UPDATE;
562 ext2_update(ip->i_vnode, 1);
/*
 * Remember the most recently resolved mapping (logical blk, length,
 * physical start, cache type) in the inode's one-entry extent cache.
 * Values are stored in host byte order (see ext4_ext_in_cache).
 */
566 ext4_ext_put_in_cache(struct inode *ip, uint32_t blk,
567 uint32_t len, uint32_t start, int type)
570 KASSERT(len != 0, ("ext4_ext_put_in_cache: bad input"));
572 ip->i_ext_cache.ec_type = type;
573 ip->i_ext_cache.ec_blk = blk;
574 ip->i_ext_cache.ec_len = len;
575 ip->i_ext_cache.ec_start = start;
/*
 * Choose a preferred physical block for a new allocation at logical
 * `block`: extrapolate from the nearest leaf extent when one exists,
 * else from the index node's own block, else fall back to the start of
 * the inode's block group.
 */
579 ext4_ext_blkpref(struct inode *ip, struct ext4_extent_path *path,
583 struct ext4_extent *ex;
584 e4fs_daddr_t bg_start;
590 depth = path->ep_depth;
591 ex = path[depth].ep_ext;
593 e4fs_daddr_t pblk = ext4_ext_extent_pblock(ex);
594 e2fs_daddr_t blk = le32toh(ex->e_blk);
/* Extrapolate forward or backward from the neighbouring extent. */
597 return (pblk + (block - blk));
599 return (pblk - (blk - block));
602 /* Try to get block from index itself. */
603 if (path[depth].ep_data)
604 return (path[depth].ep_blk);
607 /* Use inode's group. */
608 bg_start = (ip->i_block_group * EXT2_BLOCKS_PER_GROUP(ip->i_e2fs)) +
609 le32toh(fs->e2fs->e2fs_first_dblock);
611 return (bg_start + block);
/*
 * Two extents may merge when ex2 starts logically right after ex1, the
 * combined length stays within EXT4_MAX_LEN, and ex2's physical start
 * is exactly ex1's physical end (contiguous on disk).
 */
615 ext4_can_extents_be_merged(struct ext4_extent *ex1,
616 struct ext4_extent *ex2)
619 if (le32toh(ex1->e_blk) + le16toh(ex1->e_len) != le32toh(ex2->e_blk))
622 if (le16toh(ex1->e_len) + le16toh(ex2->e_len) > EXT4_MAX_LEN)
625 if (ext4_ext_extent_pblock(ex1) + le16toh(ex1->e_len) ==
626 ext4_ext_extent_pblock(ex2))
/*
 * Return the first logical block covered by the leaf following the one
 * in `path`, found by scanning ancestors for a next index entry.
 * EXT4_MAX_BLOCKS means there is no following leaf.
 */
633 ext4_ext_next_leaf_block(struct inode *ip, struct ext4_extent_path *path)
635 int depth = path->ep_depth;
639 return (EXT4_MAX_BLOCKS);
/* A level whose index is not the last one has a next sibling. */
645 if (path[depth].ep_index !=
646 EXT_LAST_INDEX(path[depth].ep_header))
647 return (le32toh(path[depth].ep_index[1].ei_blk));
652 return (EXT4_MAX_BLOCKS);
/*
 * Write one modified path level back: on-disk nodes are copied from the
 * private ep_data copy into a buffer, checksummed and written; the
 * in-inode root is flushed via ext2_update().
 */
656 ext4_ext_dirty(struct inode *ip, struct ext4_extent_path *path)
670 bp = getblk(ip->i_devvp, fsbtodb(fs, blk),
671 fs->e2fs_bsize, 0, 0, 0);
674 ext4_ext_fill_path_buf(path, bp);
675 ext2_extent_blk_csum_set(ip, bp->b_data);
/* Root case: mark the inode changed and synchronously update it. */
678 ip->i_flag |= IN_CHANGE | IN_UPDATE;
679 error = ext2_update(ip->i_vnode, 1);
/*
 * Insert an index entry (lblk -> blk) into the interior node addressed
 * by `path`, keeping entries sorted by logical block.  Rejects a
 * duplicate logical block and a full node as corruption, shifts the
 * tail of the array right with memmove, then dirties the node.
 */
686 ext4_ext_insert_index(struct inode *ip, struct ext4_extent_path *path,
687 uint32_t lblk, e4fs_daddr_t blk)
690 struct ext4_extent_index *idx;
695 if (lblk == le32toh(path->ep_index->ei_blk)) {
696 SDT_PROBE2(ext2fs, , trace, extents, 1,
697 "lblk == index blk => extent corrupted");
701 if (le16toh(path->ep_header->eh_ecount) >=
702 le16toh(path->ep_header->eh_max)) {
703 SDT_PROBE2(ext2fs, , trace, extents, 1,
704 "ecout > maxcount => extent corrupted");
/* Insert after the found index when lblk is larger, else before it. */
708 if (lblk > le32toh(path->ep_index->ei_blk)) {
710 idx = path->ep_index + 1;
713 idx = path->ep_index;
716 len = EXT_LAST_INDEX(path->ep_header) - idx + 1;
718 memmove(idx + 1, idx, len * sizeof(struct ext4_extent_index));
720 if (idx > EXT_MAX_INDEX(path->ep_header)) {
721 SDT_PROBE2(ext2fs, , trace, extents, 1,
722 "index is out of range => extent corrupted");
726 idx->ei_blk = htole32(lblk);
727 ext4_index_store_pblock(idx, blk);
728 path->ep_header->eh_ecount =
729 htole16(le16toh(path->ep_header->eh_ecount) + 1);
731 return (ext4_ext_dirty(ip, path));
/*
 * Allocate one metadata block for the tree, charge it to the inode's
 * block count and push the inode update to disk.
 */
735 ext4_ext_alloc_meta(struct inode *ip)
737 e4fs_daddr_t blk = ext2_alloc_meta(ip);
739 ip->i_blocks += btodb(ip->i_e2fs->e2fs_bsize);
740 ip->i_flag |= IN_CHANGE | IN_UPDATE;
741 ext2_update(ip->i_vnode, 1);
/*
 * Free `count` consecutive blocks starting at `blk`, subtract them from
 * the inode's block count (clamped so it cannot go negative) and push
 * the inode update to disk.
 */
748 ext4_ext_blkfree(struct inode *ip, uint64_t blk, int count, int flags)
751 int i, blocksreleased;
754 blocksreleased = count;
756 for(i = 0; i < count; i++)
757 ext2_blkfree(ip, blk + i, fs->e2fs_bsize);
759 if (ip->i_blocks >= blocksreleased)
760 ip->i_blocks -= (btodb(fs->e2fs_bsize)*blocksreleased);
764 ip->i_flag |= IN_CHANGE | IN_UPDATE;
765 ext2_update(ip->i_vnode, 1);
/*
 * Split the tree at level `at`: allocate (depth - at) new metadata
 * blocks, move the tail entries of the current leaf and of each
 * intermediate index level into them, then insert an index for the new
 * subtree into the level-`at` node.  On failure all newly allocated
 * blocks are freed.  `border` is the first logical block that moves to
 * the new subtree.
 */
769 ext4_ext_split(struct inode *ip, struct ext4_extent_path *path,
770 struct ext4_extent *newext, int at)
774 int depth = ext4_ext_inode_depth(ip);
775 struct ext4_extent_header *neh;
776 struct ext4_extent_index *fidx;
777 struct ext4_extent *ex;
779 e4fs_daddr_t newblk, oldblk;
781 e4fs_daddr_t *ablks = NULL;
788 * We will split at current extent for now.
790 if (path[depth].ep_ext > EXT_MAX_EXTENT(path[depth].ep_header)) {
791 SDT_PROBE2(ext2fs, , trace, extents, 1,
792 "extent is out of range => extent corrupted");
/* Border: next extent's block if one follows, else the new extent's. */
796 if (path[depth].ep_ext != EXT_MAX_EXTENT(path[depth].ep_header))
797 border = le32toh(path[depth].ep_ext[1].e_blk);
799 border = le32toh(newext->e_blk);
801 /* Allocate new blocks. */
802 ablks = malloc(sizeof(e4fs_daddr_t) * depth,
803 M_EXT2EXTENTS, M_WAITOK | M_ZERO);
804 for (a = 0; a < depth - at; a++) {
805 newblk = ext4_ext_alloc_meta(ip);
/* Initialize the new leaf node. */
812 bp = getblk(ip->i_devvp, fsbtodb(fs, newblk), fs->e2fs_bsize, 0, 0, 0);
818 neh = ext4_ext_block_header(bp->b_data);
/* NOTE(review): stores below use le16toh() where other init sites
 * (e.g. the index-node setup later in this function) use htole16().
 * Identical on little-endian machines but inconsistent — confirm on
 * big-endian before relying on it. */
820 neh->eh_max = le16toh(ext4_ext_space_block(ip));
821 neh->eh_magic = le16toh(EXT4_EXT_MAGIC);
823 ex = EXT_FIRST_EXTENT(neh);
/* Split is only legal when the old leaf is completely full. */
825 if (le16toh(path[depth].ep_header->eh_ecount) !=
826 le16toh(path[depth].ep_header->eh_max)) {
827 SDT_PROBE2(ext2fs, , trace, extents, 1,
828 "extents count out of range => extent corrupted");
833 /* Start copy from next extent. */
835 path[depth].ep_ext++;
836 while (path[depth].ep_ext <= EXT_MAX_EXTENT(path[depth].ep_header)) {
837 path[depth].ep_ext++;
/* Move the m tail extents of the old leaf into the new one. */
841 memmove(ex, path[depth].ep_ext - m,
842 sizeof(struct ext4_extent) * m);
843 neh->eh_ecount = htole16(le16toh(neh->eh_ecount) + m);
846 ext2_extent_blk_csum_set(ip, bp->b_data);
/* Shrink the old leaf by the moved entries and write it out. */
852 path[depth].ep_header->eh_ecount =
853 htole16(le16toh(path[depth].ep_header->eh_ecount) - m);
854 ext4_ext_dirty(ip, path + depth);
857 /* Create intermediate indexes. */
859 KASSERT(k >= 0, ("ext4_ext_split: negative k"));
861 /* Insert new index into current index block. */
866 error = bread(ip->i_devvp, fsbtodb(fs, newblk),
867 (int)fs->e2fs_bsize, NOCRED, &bp);
/* New index node: one entry pointing at the previously created child. */
872 neh = (struct ext4_extent_header *)bp->b_data;
873 neh->eh_ecount = htole16(1);
874 neh->eh_magic = htole16(EXT4_EXT_MAGIC);
875 neh->eh_max = htole16(ext4_ext_space_block_index(ip));
876 neh->eh_depth = htole16(depth - i);
877 fidx = EXT_FIRST_INDEX(neh);
878 fidx->ei_blk = htole32(border);
879 ext4_index_store_pblock(fidx, oldblk);
883 while (path[i].ep_index <= EXT_MAX_INDEX(path[i].ep_header)) {
/* Move the m tail index entries of the old node into the new one. */
888 memmove(++fidx, path[i].ep_index - m,
889 sizeof(struct ext4_extent_index) * m);
890 neh->eh_ecount = htole16(le16toh(neh->eh_ecount) + m);
893 ext2_extent_blk_csum_set(ip, bp->b_data);
899 path[i].ep_header->eh_ecount =
900 htole16(le16toh(path[i].ep_header->eh_ecount) - m);
901 ext4_ext_dirty(ip, path + i);
/* Hook the new subtree into the level-at node. */
907 error = ext4_ext_insert_index(ip, path + at, border, newblk);
/* Error path: release every block allocated above. */
914 for (i = 0; i < depth; i++) {
917 ext4_ext_blkfree(ip, ablks[i], 1, 0);
921 free(ablks, M_EXT2EXTENTS);
/*
 * Grow the tree by one level: copy the in-inode root into a newly
 * allocated block, then rewrite the root as a one-entry index pointing
 * at that block and bump eh_depth.
 */
927 ext4_ext_grow_indepth(struct inode *ip, struct ext4_extent_path *path,
928 struct ext4_extent *newext)
931 struct ext4_extent_path *curpath;
932 struct ext4_extent_header *neh;
940 newblk = ext4_ext_alloc_meta(ip);
944 bp = getblk(ip->i_devvp, fsbtodb(fs, newblk), fs->e2fs_bsize, 0, 0, 0);
948 /* Move top-level index/leaf into new block. */
949 memmove(bp->b_data, curpath->ep_header, sizeof(ip->i_data));
951 /* Set size of new block */
952 neh = ext4_ext_block_header(bp->b_data);
953 neh->eh_magic = htole16(EXT4_EXT_MAGIC);
/* Capacity depends on whether the copied root was index or leaf. */
955 if (ext4_ext_inode_depth(ip))
956 neh->eh_max = htole16(ext4_ext_space_block_index(ip));
958 neh->eh_max = htole16(ext4_ext_space_block(ip));
960 ext2_extent_blk_csum_set(ip, bp->b_data);
/* Rebuild the in-inode root as a single index to the new child. */
967 curpath->ep_header->eh_magic = htole16(EXT4_EXT_MAGIC);
968 curpath->ep_header->eh_max = htole16(ext4_ext_space_root(ip));
969 curpath->ep_header->eh_ecount = htole16(1);
970 curpath->ep_index = EXT_FIRST_INDEX(curpath->ep_header);
971 curpath->ep_index->ei_blk = EXT_FIRST_EXTENT(path[0].ep_header)->e_blk;
972 ext4_index_store_pblock(curpath->ep_index, newblk);
974 neh = ext4_ext_inode_header(ip);
975 neh->eh_depth = htole16(path->ep_depth + 1);
976 ext4_ext_dirty(ip, curpath);
/*
 * Make room for newext: walk up looking for an ancestor with a free
 * index slot and split there; if the whole tree is full, grow it one
 * level instead.  The path is rebuilt after either operation, and the
 * leaf may still need a further split afterwards.
 */
984 ext4_ext_create_new_leaf(struct inode *ip, struct ext4_extent_path *path,
985 struct ext4_extent *newext)
987 struct ext4_extent_path *curpath;
991 i = depth = ext4_ext_inode_depth(ip);
993 /* Look for free index entry int the tree */
994 curpath = path + depth;
995 while (i > 0 && !EXT_HAS_FREE_INDEX(curpath)) {
1001 * We use already allocated block for index block,
1002 * so subsequent data blocks should be contiguous.
1004 if (EXT_HAS_FREE_INDEX(curpath)) {
1005 error = ext4_ext_split(ip, path, newext, i);
/* Re-resolve the path: the split moved entries around. */
1010 ext4_ext_drop_refs(path);
1011 error = ext4_ext_find_extent(ip, le32toh(newext->e_blk), &path);
1015 /* Tree is full, do grow in depth. */
1016 error = ext4_ext_grow_indepth(ip, path, newext);
/* Re-resolve the path against the one-level-deeper tree. */
1021 ext4_ext_drop_refs(path);
1022 error = ext4_ext_find_extent(ip, le32toh(newext->e_blk), &path);
1026 /* Check and split tree if required. */
1027 depth = ext4_ext_inode_depth(ip);
1028 if (le16toh(path[depth].ep_header->eh_ecount) ==
1029 le16toh(path[depth].ep_header->eh_max))
/*
 * After the first extent of a leaf changed its logical block, propagate
 * the new starting block up through the ancestor index entries for as
 * long as the modified entry is the first one at its level.
 */
1038 ext4_ext_correct_indexes(struct inode *ip, struct ext4_extent_path *path)
1040 struct ext4_extent_header *eh;
1041 struct ext4_extent *ex;
1045 depth = ext4_ext_inode_depth(ip);
1046 eh = path[depth].ep_header;
1047 ex = path[depth].ep_ext;
1049 if (ex == NULL || eh == NULL)
1055 /* We will correct tree if first leaf got modified only. */
1056 if (ex != EXT_FIRST_EXTENT(eh))
/* Update the parent index with the leaf's new first block. */
1060 border = le32toh(path[depth].ep_ext->e_blk);
1061 path[k].ep_index->ei_blk = htole32(border);
1062 ext4_ext_dirty(ip, path + k);
1064 /* Change all left-side indexes. */
1065 if (path[k+1].ep_index != EXT_FIRST_INDEX(path[k+1].ep_header))
1068 path[k].ep_index->ei_blk = htole32(border);
1069 ext4_ext_dirty(ip, path + k);
/*
 * Insert newext into the tree addressed by `path`.  Strategy, in order:
 * (1) merge into the adjacent found extent; (2) insert into the current
 * leaf if it has room; (3) try the next leaf; (4) create a new leaf
 * (split/grow).  After insertion, merge rightward where possible,
 * correct ancestor indexes, dirty the leaf and invalidate the inode's
 * extent cache.
 */
1076 ext4_ext_insert_extent(struct inode *ip, struct ext4_extent_path *path,
1077 struct ext4_extent *newext)
1079 struct ext4_extent_header * eh;
1080 struct ext4_extent *ex, *nex, *nearex;
1081 struct ext4_extent_path *npath;
1082 int depth, len, error, next;
1084 depth = ext4_ext_inode_depth(ip);
1085 ex = path[depth].ep_ext;
/* Reject a zero-length extent or a path without a leaf header.
 * NOTE(review): the length test uses htole16() on an already
 * little-endian field; harmless for a compare against 0 but
 * inconsistent with the le16toh() used elsewhere. */
1088 if (htole16(newext->e_len) == 0 || path[depth].ep_header == NULL)
1091 /* Insert block into found extent. */
1092 if (ex && ext4_can_extents_be_merged(ex, newext)) {
1093 ex->e_len = htole16(le16toh(ex->e_len) + le16toh(newext->e_len));
1094 eh = path[depth].ep_header;
/* (2) Current leaf still has free slots. */
1100 depth = ext4_ext_inode_depth(ip);
1101 eh = path[depth].ep_header;
1102 if (le16toh(eh->eh_ecount) < le16toh(eh->eh_max))
/* (3) newext lies beyond this leaf: probe the next leaf for space. */
1106 nex = EXT_LAST_EXTENT(eh);
1107 next = ext4_ext_next_leaf_block(ip, path);
1108 if (le32toh(newext->e_blk) > le32toh(nex->e_blk) && next !=
1110 KASSERT(npath == NULL,
1111 ("ext4_ext_insert_extent: bad path"));
1113 error = ext4_ext_find_extent(ip, next, &npath);
1117 if (npath->ep_depth != path->ep_depth) {
1122 eh = npath[depth].ep_header;
1123 if (le16toh(eh->eh_ecount) < le16toh(eh->eh_max)) {
1130 * There is no free space in the found leaf,
1131 * try to add a new leaf to the tree.
1133 error = ext4_ext_create_new_leaf(ip, path, newext);
1137 depth = ext4_ext_inode_depth(ip);
1138 eh = path[depth].ep_header;
1141 nearex = path[depth].ep_ext;
1143 /* Create new extent in the leaf. */
1144 path[depth].ep_ext = EXT_FIRST_EXTENT(eh);
1145 } else if (le32toh(newext->e_blk) > le32toh(nearex->e_blk)) {
/* Insert after nearex: shift the tail right by one slot. */
1146 if (nearex != EXT_LAST_EXTENT(eh)) {
1147 len = EXT_MAX_EXTENT(eh) - nearex;
1148 len = (len - 1) * sizeof(struct ext4_extent);
1149 len = len < 0 ? 0 : len;
1150 memmove(nearex + 2, nearex + 1, len);
1152 path[depth].ep_ext = nearex + 1;
/* Insert before nearex: shift nearex and the tail right by one. */
1154 len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
1155 len = len < 0 ? 0 : len;
1156 memmove(nearex + 1, nearex, len);
1157 path[depth].ep_ext = nearex;
/* Fill the freed slot with newext and bump the entry count. */
1160 eh->eh_ecount = htole16(le16toh(eh->eh_ecount) + 1);
1161 nearex = path[depth].ep_ext;
1162 nearex->e_blk = newext->e_blk;
1163 nearex->e_start_lo = newext->e_start_lo;
1164 nearex->e_start_hi = newext->e_start_hi;
1165 nearex->e_len = newext->e_len;
1168 /* Try to merge extents to the right. */
1169 while (nearex < EXT_LAST_EXTENT(eh)) {
1170 if (!ext4_can_extents_be_merged(nearex, nearex + 1))
1173 /* Merge with next extent. */
1174 nearex->e_len = htole16(le16toh(nearex->e_len) +
1175 le16toh(nearex[1].e_len));
1176 if (nearex + 1 < EXT_LAST_EXTENT(eh)) {
1177 len = (EXT_LAST_EXTENT(eh) - nearex - 1) *
1178 sizeof(struct ext4_extent);
1179 memmove(nearex + 1, nearex + 2, len);
1182 eh->eh_ecount = htole16(le16toh(eh->eh_ecount) - 1);
1183 KASSERT(le16toh(eh->eh_ecount) != 0,
1184 ("ext4_ext_insert_extent: bad ecount"));
1188 * Try to merge extents to the left,
1189 * start from inexes correction.
1191 error = ext4_ext_correct_indexes(ip, path);
1195 ext4_ext_dirty(ip, path + depth);
/* Cleanup: release the next-leaf path if one was built. */
1199 ext4_ext_drop_refs(npath);
1200 free(npath, M_EXT2EXTENTS);
1203 ip->i_ext_cache.ec_type = EXT4_EXT_CACHE_NO;
/*
 * Allocate data blocks for lbn near preference `pref` via ext2_alloc()
 * (currently a single block), record the error in *perror, and push the
 * inode update to disk.
 */
1208 ext4_new_blocks(struct inode *ip, daddr_t lbn, e4fs_daddr_t pref,
1209 struct ucred *cred, unsigned long *count, int *perror)
1211 struct m_ext2fs *fs;
1212 e4fs_daddr_t newblk;
1215 * We will allocate only single block for now.
1221 EXT2_LOCK(ip->i_ump);
1222 *perror = ext2_alloc(ip, lbn, pref, (int)fs->e2fs_bsize, cred, &newblk);
1227 ip->i_flag |= IN_CHANGE | IN_UPDATE;
1228 ext2_update(ip->i_vnode, 1);
/*
 * Map logical block iblk of an extent-mapped inode to a physical block,
 * allocating on a miss.  Order: (1) one-entry extent cache; (2) tree
 * lookup via ext4_ext_find_extent(); (3) allocate via ext4_new_blocks(),
 * insert the new extent and cache it.  On success *nb gets the physical
 * block, *pallocated/allocated report the mapped run, and *bpp (when
 * requested) gets a buffer for the block.
 */
1235 ext4_ext_get_blocks(struct inode *ip, e4fs_daddr_t iblk,
1236 unsigned long max_blocks, struct ucred *cred, struct buf **bpp,
1237 int *pallocated, daddr_t *nb)
1239 struct m_ext2fs *fs;
1240 struct buf *bp = NULL;
1241 struct ext4_extent_path *path;
1242 struct ext4_extent newex, *ex;
1243 e4fs_daddr_t bpref, newblk = 0;
1244 unsigned long allocated = 0;
1245 int error = 0, depth;
/* (1) Fast path: the cached extent covers iblk. */
1253 if ((bpref = ext4_ext_in_cache(ip, iblk, &newex))) {
1254 if (bpref == EXT4_EXT_CACHE_IN) {
1255 /* Block is already allocated. */
1256 newblk = iblk - le32toh(newex.e_blk) +
1257 ext4_ext_extent_pblock(&newex);
1258 allocated = le16toh(newex.e_len) - (iblk - le32toh(newex.e_blk));
/* (2) Tree lookup. */
1266 error = ext4_ext_find_extent(ip, iblk, &path);
/* A non-root level with no leaf extent indicates a corrupted tree. */
1271 depth = ext4_ext_inode_depth(ip);
1272 if (path[depth].ep_ext == NULL && depth != 0) {
1277 if ((ex = path[depth].ep_ext)) {
1278 uint64_t lblk = le32toh(ex->e_blk);
1279 uint16_t e_len = le16toh(ex->e_len);
1280 e4fs_daddr_t e_start = ext4_ext_extent_pblock(ex);
1282 if (e_len > EXT4_MAX_LEN)
1285 /* If we found extent covers block, simply return it. */
1286 if (iblk >= lblk && iblk < lblk + e_len) {
1287 newblk = iblk - lblk + e_start;
1288 allocated = e_len - (iblk - lblk);
1289 ext4_ext_put_in_cache(ip, lblk, e_len,
1290 e_start, EXT4_EXT_CACHE_IN);
1295 /* Allocate the new block. */
1296 if (S_ISREG(ip->i_mode) && (!ip->i_next_alloc_block)) {
1297 ip->i_next_alloc_goal = 0;
1300 bpref = ext4_ext_blkpref(ip, path, iblk);
1301 allocated = max_blocks;
1302 newblk = ext4_new_blocks(ip, iblk, bpref, cred, &allocated, &error);
1306 /* Try to insert new extent into found leaf and return. */
1307 newex.e_blk = htole32(iblk);
1308 ext4_ext_store_pblock(&newex, newblk);
1309 newex.e_len = htole16(allocated);
1310 error = ext4_ext_insert_extent(ip, path, &newex);
/* Insert may have merged newex; re-read its physical start. */
1314 newblk = ext4_ext_extent_pblock(&newex);
1315 ext4_ext_put_in_cache(ip, iblk, allocated, newblk, EXT4_EXT_CACHE_IN);
1319 if (allocated > max_blocks)
1320 allocated = max_blocks;
/* Hand back a buffer for the mapped block when the caller wants one. */
1325 error = bread(ip->i_devvp, fsbtodb(fs, newblk),
1326 fs->e2fs_bsize, cred, &bp);
1336 ext4_ext_drop_refs(path);
1337 free(path, M_EXT2EXTENTS);
/* Actual length of an extent: lengths above EXT_INIT_MAX_LEN encode an
 * unwritten extent, so the bias is subtracted. */
1346 static inline uint16_t
1347 ext4_ext_get_actual_len(struct ext4_extent *ext)
1350 return (le16toh(ext->e_len) <= EXT_INIT_MAX_LEN ?
1351 le16toh(ext->e_len) : (le16toh(ext->e_len) - EXT_INIT_MAX_LEN));
/* Root extent header in the inode (same storage as ext4_ext_inode_header). */
1354 static inline struct ext4_extent_header *
1355 ext4_ext_header(struct inode *ip)
1358 return ((struct ext4_extent_header *)ip->i_db);
/*
 * Free the physical blocks of the [from, to] portion of an extent.
 * Only the tail case (to == last block of the extent) is handled here;
 * head removal is not implemented (see ext4_ext_rm_leaf).
 */
1362 ext4_remove_blocks(struct inode *ip, struct ext4_extent *ex,
1363 unsigned long from, unsigned long to)
1365 unsigned long num, start;
1367 if (from >= le32toh(ex->e_blk) &&
1368 to == le32toh(ex->e_blk) + ext4_ext_get_actual_len(ex) - 1) {
/* num blocks at the end of the extent, starting at `start`. */
1370 num = le32toh(ex->e_blk) + ext4_ext_get_actual_len(ex) - from;
1371 start = ext4_ext_extent_pblock(ex) +
1372 ext4_ext_get_actual_len(ex) - num;
1373 ext4_ext_blkfree(ip, start, num, 0);
/*
 * Remove the index entry `path` points at: decrement the parent's entry
 * count, write the parent out, then free the child block the entry
 * referenced.
 */
1380 ext4_ext_rm_index(struct inode *ip, struct ext4_extent_path *path)
1384 /* Free index block. */
1386 leaf = ext4_ext_index_pblock(path->ep_index);
1387 KASSERT(path->ep_header->eh_ecount != 0,
1388 ("ext4_ext_rm_index: bad ecount"));
1389 path->ep_header->eh_ecount =
1390 htole16(le16toh(path->ep_header->eh_ecount) - 1);
1391 ext4_ext_dirty(ip, path);
1392 ext4_ext_blkfree(ip, leaf, 1, 0);
/*
 * Remove from a leaf every extent (or extent tail) at or beyond logical
 * block `start`, walking the leaf backwards from its last extent.
 * Partial head removal is not implemented.  Ancestor indexes are fixed
 * up when the first extent changed, and an emptied non-root leaf is
 * unlinked from its parent.
 */
1397 ext4_ext_rm_leaf(struct inode *ip, struct ext4_extent_path *path,
1400 struct ext4_extent_header *eh;
1401 struct ext4_extent *ex;
1402 unsigned int a, b, block, num;
1403 unsigned long ex_blk;
1404 unsigned short ex_len;
1406 int error, correct_index;
/* Lazily point ep_header at the private node copy when unset. */
1408 depth = ext4_ext_inode_depth(ip);
1409 if (!path[depth].ep_header) {
1410 if (path[depth].ep_data == NULL)
1412 path[depth].ep_header =
1413 (struct ext4_extent_header* )path[depth].ep_data;
1416 eh = path[depth].ep_header;
1418 SDT_PROBE2(ext2fs, , trace, extents, 1,
1419 "bad header => extent corrupted");
/* Walk from the last extent toward the first. */
1423 ex = EXT_LAST_EXTENT(eh);
1424 ex_blk = le32toh(ex->e_blk);
1425 ex_len = ext4_ext_get_actual_len(ex);
1429 while (ex >= EXT_FIRST_EXTENT(eh) && ex_blk + ex_len > start) {
1430 path[depth].ep_ext = ex;
/* [a, b] is the removal range clipped to this extent. */
1431 a = ex_blk > start ? ex_blk : start;
1432 b = (uint64_t)ex_blk + ex_len - 1 <
1433 EXT4_MAX_BLOCKS ? ex_blk + ex_len - 1 : EXT4_MAX_BLOCKS;
1435 if (a != ex_blk && b != ex_blk + ex_len - 1)
1437 else if (a != ex_blk) {
1438 /* Remove tail of the extent. */
1441 } else if (b != ex_blk + ex_len - 1) {
1442 /* Remove head of the extent, not implemented. */
1445 /* Remove whole extent. */
1450 if (ex == EXT_FIRST_EXTENT(eh))
1453 error = ext4_remove_blocks(ip, ex, a, b);
/* Fully removed: clear the slot and drop the entry count. */
1458 ext4_ext_store_pblock(ex, 0);
1459 eh->eh_ecount = htole16(le16toh(eh->eh_ecount) - 1);
1462 ex->e_blk = htole32(block);
1463 ex->e_len = htole16(num);
1465 ext4_ext_dirty(ip, path + depth);
/* Step to the previous extent.
 * NOTE(review): htole32() here where line 1424 above uses le32toh()
 * for the same load; identical on little-endian but inconsistent —
 * confirm on big-endian. */
1468 ex_blk = htole32(ex->e_blk);
1469 ex_len = ext4_ext_get_actual_len(ex);
1472 if (correct_index && le16toh(eh->eh_ecount))
1473 error = ext4_ext_correct_indexes(ip, path);
1476 * If this leaf is free, we should
1477 * remove it from index block above.
1479 if (error == 0 && eh->eh_ecount == 0 &&
1480 path[depth].ep_data != NULL)
1481 error = ext4_ext_rm_index(ip, path + depth);
/*
 * Read one tree node at physical block pblk and validate it: its header
 * must pass ext4_ext_check_header() and record the expected depth.
 * Returns the buffer on success (caller releases it), NULL-ish on
 * failure (error path not fully visible here).
 */
1488 ext4_read_extent_tree_block(struct inode *ip, e4fs_daddr_t pblk,
1489 int depth, int flags)
1491 struct m_ext2fs *fs;
1492 struct ext4_extent_header *eh;
1497 error = bread(ip->i_devvp, fsbtodb(fs, pblk),
1498 fs->e2fs_bsize, NOCRED, &bp);
1503 eh = ext4_ext_block_header(bp->b_data);
1504 if (le16toh(eh->eh_depth) != depth) {
1505 SDT_PROBE2(ext2fs, , trace, extents, 1,
1506 "unexpected eh_depth");
1510 error = ext4_ext_check_header(ip, eh);
/*
 * During truncation: decide whether the current index level still has
 * entries left to visit (the removal loop walks indexes backwards and
 * counts visits in path->index_count).
 */
1523 ext4_ext_more_to_rm(struct ext4_extent_path *path)
1526 KASSERT(path->ep_index != NULL,
1527 ("ext4_ext_more_to_rm: bad index from path"));
/* Walked past the first entry: nothing left at this level. */
1529 if (path->ep_index < EXT_FIRST_INDEX(path->ep_header))
/* All entries of this node have been visited. */
1532 if (le16toh(path->ep_header->eh_ecount) == path->index_count)
1539 ext4_ext_remove_space(struct inode *ip, off_t length, int flags,
1540 struct ucred *cred, struct thread *td)
1543 struct ext4_extent_header *ehp;
1544 struct ext4_extent_path *path;
1548 ehp = (struct ext4_extent_header *)ip->i_db;
1549 depth = ext4_ext_inode_depth(ip);
1551 error = ext4_ext_check_header(ip, ehp);
1555 path = malloc(sizeof(struct ext4_extent_path) * (depth + 1),
1556 M_EXT2EXTENTS, M_WAITOK | M_ZERO);
1557 path[0].ep_header = ehp;
1558 path[0].ep_depth = depth;
1560 while (error == 0 && i >= 0) {
1563 error = ext4_ext_rm_leaf(ip, path, length);
1566 free(path[i].ep_data, M_EXT2EXTENTS);
1567 path[i].ep_data = NULL;
1572 /* This is index. */
1573 if (!path[i].ep_header)
1575 (struct ext4_extent_header *)path[i].ep_data;
1577 if (!path[i].ep_index) {
1578 /* This level hasn't touched yet. */
1579 path[i].ep_index = EXT_LAST_INDEX(path[i].ep_header);
1580 path[i].index_count =
1581 le16toh(path[i].ep_header->eh_ecount) + 1;
1583 /* We've already was here, see at next index. */
1587 if (ext4_ext_more_to_rm(path + i)) {
1588 memset(path + i + 1, 0, sizeof(*path));
1589 bp = ext4_read_extent_tree_block(ip,
1590 ext4_ext_index_pblock(path[i].ep_index),
1591 path[0].ep_depth - (i + 1), 0);
1597 ext4_ext_fill_path_bdata(&path[i+1], bp,
1598 ext4_ext_index_pblock(path[i].ep_index));
1600 path[i].index_count =
1601 le16toh(path[i].ep_header->eh_ecount);
1604 if (path[i].ep_header->eh_ecount == 0 && i > 0) {
1605 /* Index is empty, remove it. */
1606 error = ext4_ext_rm_index(ip, path + i);
1608 free(path[i].ep_data, M_EXT2EXTENTS);
1609 path[i].ep_data = NULL;
1614 if (path->ep_header->eh_ecount == 0) {
1616 * Truncate the tree to zero.
1618 ext4_ext_header(ip)->eh_depth = 0;
1619 ext4_ext_header(ip)->eh_max = htole16(ext4_ext_space_root(ip));
1620 ext4_ext_dirty(ip, path);
1623 ext4_ext_drop_refs(path);
1624 free(path, M_EXT2EXTENTS);
1626 ip->i_ext_cache.ec_type = EXT4_EXT_CACHE_NO;