/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010 Zheng Liu <lz@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/types.h>
34 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/vnode.h>
43 #include <fs/ext2fs/ext2_mount.h>
44 #include <fs/ext2fs/fs.h>
45 #include <fs/ext2fs/inode.h>
46 #include <fs/ext2fs/ext2fs.h>
47 #include <fs/ext2fs/ext2_extents.h>
48 #include <fs/ext2fs/ext2_extern.h>
50 SDT_PROVIDER_DECLARE(ext2fs);
53 * arg0: verbosity. Higher numbers give more verbose messages
54 * arg1: Textual message
56 SDT_PROBE_DEFINE2(ext2fs, , trace, extents, "int", "char*");
58 static MALLOC_DEFINE(M_EXT2EXTENTS, "ext2_extents", "EXT2 extents");
#ifdef EXT2FS_PRINT_EXTENTS
/*
 * Debug-only helpers (compiled under EXT2FS_PRINT_EXTENTS) that dump the
 * on-disk extent tree of an inode to the console.
 */

/* Print a single leaf extent: logical block, length and physical start. */
static void
ext4_ext_print_extent(struct ext4_extent *ep)
{

	printf("    ext %p => (blk %u len %u start %ju)\n",
	    ep, ep->e_blk, ep->e_len,
	    (uintmax_t)((uint64_t)ep->e_start_hi << 32 | ep->e_start_lo));
}

static void ext4_ext_print_header(struct inode *ip, struct ext4_extent_header *ehp);

/*
 * Print an index entry; when do_walk is set, read the child block it points
 * to and recursively print that subtree.
 */
static void
ext4_ext_print_index(struct inode *ip, struct ext4_extent_index *ex, int do_walk)
{
	struct m_ext2fs *fs;
	struct buf *bp;
	int error;

	fs = ip->i_e2fs;

	printf("    index %p => (blk %u pblk %ju)\n",
	    ex, ex->ei_blk,
	    (uintmax_t)((uint64_t)ex->ei_leaf_hi << 32 | ex->ei_leaf_lo));

	if (!do_walk)
		return;

	if ((error = bread(ip->i_devvp,
	    fsbtodb(fs, ((uint64_t)ex->ei_leaf_hi << 32 | ex->ei_leaf_lo)),
	    (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) {
		brelse(bp);
		return;
	}

	ext4_ext_print_header(ip, (struct ext4_extent_header *)bp->b_data);

	brelse(bp);
}

/* Print a tree node header and every entry below it (indexes or extents). */
static void
ext4_ext_print_header(struct inode *ip, struct ext4_extent_header *ehp)
{
	int i;

	printf("header %p => (magic 0x%x entries %d max %d depth %d gen %d)\n",
	    ehp, ehp->eh_magic, ehp->eh_ecount, ehp->eh_max, ehp->eh_depth,
	    ehp->eh_gen);

	for (i = 0; i < ehp->eh_ecount; i++)
		if (ehp->eh_depth != 0)
			/* Interior node: entries are indexes; walk children. */
			ext4_ext_print_index(ip,
			    (struct ext4_extent_index *)(ehp + 1 + i), 1);
		else
			ext4_ext_print_extent((struct ext4_extent *)(ehp + 1 + i));
}

/* Print the lookup path recorded by ext4_ext_find_extent(). */
static void
ext4_ext_print_path(struct inode *ip, struct ext4_extent_path *path)
{
	int k, l;

	l = path->ep_depth;

	printf("ip=%ju, Path:\n", (uintmax_t)ip->i_number);
	for (k = 0; k <= l; k++, path++) {
		if (path->ep_index) {
			ext4_ext_print_index(ip, path->ep_index, 0);
		} else if (path->ep_ext) {
			ext4_ext_print_extent(path->ep_ext);
		}
	}
}

/* Dump the whole extent tree of an inode, rooted in its i_db area. */
void
ext4_ext_print_extent_tree_status(struct inode *ip)
{
	struct ext4_extent_header *ehp;

	ehp = (struct ext4_extent_header *)(char *)ip->i_db;

	printf("Extent status:ip=%ju\n", (uintmax_t)ip->i_number);
	if (!(ip->i_flag & IN_E4EXTENTS))
		return;

	ext4_ext_print_header(ip, ehp);
}
#endif
151 static inline struct ext4_extent_header *
152 ext4_ext_inode_header(struct inode *ip)
155 return ((struct ext4_extent_header *)ip->i_db);
158 static inline struct ext4_extent_header *
159 ext4_ext_block_header(char *bdata)
162 return ((struct ext4_extent_header *)bdata);
165 static inline unsigned short
166 ext4_ext_inode_depth(struct inode *ip)
168 struct ext4_extent_header *ehp;
170 ehp = (struct ext4_extent_header *)ip->i_data;
171 return (ehp->eh_depth);
174 static inline e4fs_daddr_t
175 ext4_ext_index_pblock(struct ext4_extent_index *index)
179 blk = index->ei_leaf_lo;
180 blk |= (e4fs_daddr_t)index->ei_leaf_hi << 32;
186 ext4_index_store_pblock(struct ext4_extent_index *index, e4fs_daddr_t pb)
189 index->ei_leaf_lo = pb & 0xffffffff;
190 index->ei_leaf_hi = (pb >> 32) & 0xffff;
194 static inline e4fs_daddr_t
195 ext4_ext_extent_pblock(struct ext4_extent *extent)
199 blk = extent->e_start_lo;
200 blk |= (e4fs_daddr_t)extent->e_start_hi << 32;
206 ext4_ext_store_pblock(struct ext4_extent *ex, e4fs_daddr_t pb)
209 ex->e_start_lo = pb & 0xffffffff;
210 ex->e_start_hi = (pb >> 32) & 0xffff;
214 ext4_ext_in_cache(struct inode *ip, daddr_t lbn, struct ext4_extent *ep)
216 struct ext4_extent_cache *ecp;
217 int ret = EXT4_EXT_CACHE_NO;
219 ecp = &ip->i_ext_cache;
220 if (ecp->ec_type == EXT4_EXT_CACHE_NO)
223 if (lbn >= ecp->ec_blk && lbn < ecp->ec_blk + ecp->ec_len) {
224 ep->e_blk = ecp->ec_blk;
225 ep->e_start_lo = ecp->ec_start & 0xffffffff;
226 ep->e_start_hi = ecp->ec_start >> 32 & 0xffff;
227 ep->e_len = ecp->ec_len;
234 ext4_ext_check_header(struct inode *ip, struct ext4_extent_header *eh)
241 if (eh->eh_magic != EXT4_EXT_MAGIC) {
242 error_msg = "header: invalid magic";
245 if (eh->eh_max == 0) {
246 error_msg = "header: invalid eh_max";
249 if (eh->eh_ecount > eh->eh_max) {
250 error_msg = "header: invalid eh_entries";
257 SDT_PROBE2(ext2fs, , trace, extents, 1, error_msg);
262 ext4_ext_binsearch_index(struct ext4_extent_path *path, int blk)
264 struct ext4_extent_header *eh;
265 struct ext4_extent_index *r, *l, *m;
267 eh = path->ep_header;
269 KASSERT(eh->eh_ecount <= eh->eh_max && eh->eh_ecount > 0,
270 ("ext4_ext_binsearch_index: bad args"));
272 l = EXT_FIRST_INDEX(eh) + 1;
273 r = EXT_FIRST_INDEX(eh) + eh->eh_ecount - 1;
282 path->ep_index = l - 1;
286 ext4_ext_binsearch_ext(struct ext4_extent_path *path, int blk)
288 struct ext4_extent_header *eh;
289 struct ext4_extent *r, *l, *m;
291 eh = path->ep_header;
293 KASSERT(eh->eh_ecount <= eh->eh_max,
294 ("ext4_ext_binsearch_ext: bad args"));
296 if (eh->eh_ecount == 0)
299 l = EXT_FIRST_EXTENT(eh) + 1;
300 r = EXT_FIRST_EXTENT(eh) + eh->eh_ecount - 1;
310 path->ep_ext = l - 1;
314 ext4_ext_fill_path_bdata(struct ext4_extent_path *path,
315 struct buf *bp, uint64_t blk)
318 KASSERT(path->ep_data == NULL,
319 ("ext4_ext_fill_path_bdata: bad ep_data"));
321 path->ep_data = malloc(bp->b_bufsize, M_EXT2EXTENTS, M_WAITOK);
325 memcpy(path->ep_data, bp->b_data, bp->b_bufsize);
332 ext4_ext_fill_path_buf(struct ext4_extent_path *path, struct buf *bp)
335 KASSERT(path->ep_data != NULL,
336 ("ext4_ext_fill_path_buf: bad ep_data"));
338 memcpy(bp->b_data, path->ep_data, bp->b_bufsize);
342 ext4_ext_drop_refs(struct ext4_extent_path *path)
349 depth = path->ep_depth;
350 for (i = 0; i <= depth; i++, path++)
352 free(path->ep_data, M_EXT2EXTENTS);
353 path->ep_data = NULL;
358 ext4_ext_path_free(struct ext4_extent_path *path)
364 ext4_ext_drop_refs(path);
365 free(path, M_EXT2EXTENTS);
369 ext4_ext_find_extent(struct inode *ip, daddr_t block,
370 struct ext4_extent_path **ppath)
373 struct ext4_extent_header *eh;
374 struct ext4_extent_path *path;
377 int error, depth, i, ppos, alloc;
380 eh = ext4_ext_inode_header(ip);
381 depth = ext4_ext_inode_depth(ip);
385 error = ext4_ext_check_header(ip, eh);
394 path = malloc(EXT4_EXT_DEPTH_MAX *
395 sizeof(struct ext4_extent_path),
396 M_EXT2EXTENTS, M_WAITOK | M_ZERO);
404 path[0].ep_header = eh;
405 path[0].ep_data = NULL;
407 /* Walk through the tree. */
410 ext4_ext_binsearch_index(&path[ppos], block);
411 blk = ext4_ext_index_pblock(path[ppos].ep_index);
412 path[ppos].ep_depth = i;
413 path[ppos].ep_ext = NULL;
415 error = bread(ip->i_devvp, fsbtodb(ip->i_e2fs, blk),
416 ip->i_e2fs->e2fs_bsize, NOCRED, &bp);
423 SDT_PROBE2(ext2fs, , trace, extents, 1,
424 "ppos > depth => extent corrupted");
430 ext4_ext_fill_path_bdata(&path[ppos], bp, blk);
433 eh = ext4_ext_block_header(path[ppos].ep_data);
434 if (ext4_ext_check_header(ip, eh) ||
435 ext2_extent_blk_csum_verify(ip, path[ppos].ep_data)) {
440 path[ppos].ep_header = eh;
445 error = ext4_ext_check_header(ip, eh);
450 path[ppos].ep_depth = i;
451 path[ppos].ep_header = eh;
452 path[ppos].ep_ext = NULL;
453 path[ppos].ep_index = NULL;
454 ext4_ext_binsearch_ext(&path[ppos], block);
458 ext4_ext_drop_refs(path);
460 free(path, M_EXT2EXTENTS);
468 ext4_ext_space_root(struct inode *ip)
472 size = sizeof(ip->i_data);
473 size -= sizeof(struct ext4_extent_header);
474 size /= sizeof(struct ext4_extent);
480 ext4_ext_space_block(struct inode *ip)
487 size = (fs->e2fs_bsize - sizeof(struct ext4_extent_header)) /
488 sizeof(struct ext4_extent);
494 ext4_ext_space_block_index(struct inode *ip)
501 size = (fs->e2fs_bsize - sizeof(struct ext4_extent_header)) /
502 sizeof(struct ext4_extent_index);
508 ext4_ext_tree_init(struct inode *ip)
510 struct ext4_extent_header *ehp;
512 ip->i_flag |= IN_E4EXTENTS;
514 memset(ip->i_data, 0, EXT2_NDADDR + EXT2_NIADDR);
515 ehp = (struct ext4_extent_header *)ip->i_data;
516 ehp->eh_magic = EXT4_EXT_MAGIC;
517 ehp->eh_max = ext4_ext_space_root(ip);
518 ip->i_ext_cache.ec_type = EXT4_EXT_CACHE_NO;
519 ip->i_flag |= IN_CHANGE | IN_UPDATE;
520 ext2_update(ip->i_vnode, 1);
524 ext4_ext_put_in_cache(struct inode *ip, uint32_t blk,
525 uint32_t len, uint32_t start, int type)
528 KASSERT(len != 0, ("ext4_ext_put_in_cache: bad input"));
530 ip->i_ext_cache.ec_type = type;
531 ip->i_ext_cache.ec_blk = blk;
532 ip->i_ext_cache.ec_len = len;
533 ip->i_ext_cache.ec_start = start;
537 ext4_ext_blkpref(struct inode *ip, struct ext4_extent_path *path,
541 struct ext4_extent *ex;
542 e4fs_daddr_t bg_start;
548 depth = path->ep_depth;
549 ex = path[depth].ep_ext;
551 e4fs_daddr_t pblk = ext4_ext_extent_pblock(ex);
552 e2fs_daddr_t blk = ex->e_blk;
555 return (pblk + (block - blk));
557 return (pblk - (blk - block));
560 /* Try to get block from index itself. */
561 if (path[depth].ep_data)
562 return (path[depth].ep_blk);
565 /* Use inode's group. */
566 bg_start = (ip->i_block_group * EXT2_BLOCKS_PER_GROUP(ip->i_e2fs)) +
567 fs->e2fs->e2fs_first_dblock;
569 return (bg_start + block);
573 ext4_can_extents_be_merged(struct ext4_extent *ex1,
574 struct ext4_extent *ex2)
577 if (ex1->e_blk + ex1->e_len != ex2->e_blk)
580 if (ex1->e_len + ex2->e_len > EXT4_MAX_LEN)
583 if (ext4_ext_extent_pblock(ex1) + ex1->e_len ==
584 ext4_ext_extent_pblock(ex2))
591 ext4_ext_next_leaf_block(struct inode *ip, struct ext4_extent_path *path)
593 int depth = path->ep_depth;
597 return (EXT4_MAX_BLOCKS);
603 if (path[depth].ep_index !=
604 EXT_LAST_INDEX(path[depth].ep_header))
605 return (path[depth].ep_index[1].ei_blk);
610 return (EXT4_MAX_BLOCKS);
614 ext4_ext_dirty(struct inode *ip, struct ext4_extent_path *path)
628 bp = getblk(ip->i_devvp, fsbtodb(fs, blk),
629 fs->e2fs_bsize, 0, 0, 0);
632 ext4_ext_fill_path_buf(path, bp);
633 ext2_extent_blk_csum_set(ip, bp->b_data);
636 ip->i_flag |= IN_CHANGE | IN_UPDATE;
637 error = ext2_update(ip->i_vnode, 1);
644 ext4_ext_insert_index(struct inode *ip, struct ext4_extent_path *path,
645 uint32_t lblk, e4fs_daddr_t blk)
648 struct ext4_extent_index *idx;
653 if (lblk == path->ep_index->ei_blk) {
654 SDT_PROBE2(ext2fs, , trace, extents, 1,
655 "lblk == index blk => extent corrupted");
659 if (path->ep_header->eh_ecount >= path->ep_header->eh_max) {
660 SDT_PROBE2(ext2fs, , trace, extents, 1,
661 "ecout > maxcount => extent corrupted");
665 if (lblk > path->ep_index->ei_blk) {
667 idx = path->ep_index + 1;
670 idx = path->ep_index;
673 len = EXT_LAST_INDEX(path->ep_header) - idx + 1;
675 memmove(idx + 1, idx, len * sizeof(struct ext4_extent_index));
677 if (idx > EXT_MAX_INDEX(path->ep_header)) {
678 SDT_PROBE2(ext2fs, , trace, extents, 1,
679 "index is out of range => extent corrupted");
684 ext4_index_store_pblock(idx, blk);
685 path->ep_header->eh_ecount++;
687 return (ext4_ext_dirty(ip, path));
691 ext4_ext_alloc_meta(struct inode *ip)
693 e4fs_daddr_t blk = ext2_alloc_meta(ip);
695 ip->i_blocks += btodb(ip->i_e2fs->e2fs_bsize);
696 ip->i_flag |= IN_CHANGE | IN_UPDATE;
697 ext2_update(ip->i_vnode, 1);
704 ext4_ext_blkfree(struct inode *ip, uint64_t blk, int count, int flags)
707 int i, blocksreleased;
710 blocksreleased = count;
712 for(i = 0; i < count; i++)
713 ext2_blkfree(ip, blk + i, fs->e2fs_bsize);
715 if (ip->i_blocks >= blocksreleased)
716 ip->i_blocks -= (btodb(fs->e2fs_bsize)*blocksreleased);
720 ip->i_flag |= IN_CHANGE | IN_UPDATE;
721 ext2_update(ip->i_vnode, 1);
725 ext4_ext_split(struct inode *ip, struct ext4_extent_path *path,
726 struct ext4_extent *newext, int at)
730 int depth = ext4_ext_inode_depth(ip);
731 struct ext4_extent_header *neh;
732 struct ext4_extent_index *fidx;
733 struct ext4_extent *ex;
735 e4fs_daddr_t newblk, oldblk;
737 e4fs_daddr_t *ablks = NULL;
744 * We will split at current extent for now.
746 if (path[depth].ep_ext > EXT_MAX_EXTENT(path[depth].ep_header)) {
747 SDT_PROBE2(ext2fs, , trace, extents, 1,
748 "extent is out of range => extent corrupted");
752 if (path[depth].ep_ext != EXT_MAX_EXTENT(path[depth].ep_header))
753 border = path[depth].ep_ext[1].e_blk;
755 border = newext->e_blk;
757 /* Allocate new blocks. */
758 ablks = malloc(sizeof(e4fs_daddr_t) * depth,
759 M_EXT2EXTENTS, M_WAITOK | M_ZERO);
762 for (a = 0; a < depth - at; a++) {
763 newblk = ext4_ext_alloc_meta(ip);
770 bp = getblk(ip->i_devvp, fsbtodb(fs, newblk), fs->e2fs_bsize, 0, 0, 0);
776 neh = ext4_ext_block_header(bp->b_data);
778 neh->eh_max = ext4_ext_space_block(ip);
779 neh->eh_magic = EXT4_EXT_MAGIC;
781 ex = EXT_FIRST_EXTENT(neh);
783 if (path[depth].ep_header->eh_ecount != path[depth].ep_header->eh_max) {
784 SDT_PROBE2(ext2fs, , trace, extents, 1,
785 "extents count out of range => extent corrupted");
790 /* Start copy from next extent. */
792 path[depth].ep_ext++;
793 while (path[depth].ep_ext <= EXT_MAX_EXTENT(path[depth].ep_header)) {
794 path[depth].ep_ext++;
798 memmove(ex, path[depth].ep_ext - m,
799 sizeof(struct ext4_extent) * m);
800 neh->eh_ecount = neh->eh_ecount + m;
803 ext2_extent_blk_csum_set(ip, bp->b_data);
809 path[depth].ep_header->eh_ecount =
810 path[depth].ep_header->eh_ecount - m;
811 ext4_ext_dirty(ip, path + depth);
814 /* Create intermediate indexes. */
816 KASSERT(k >= 0, ("ext4_ext_split: negative k"));
818 /* Insert new index into current index block. */
823 error = bread(ip->i_devvp, fsbtodb(fs, newblk),
824 (int)fs->e2fs_bsize, NOCRED, &bp);
829 neh = (struct ext4_extent_header *)bp->b_data;
831 neh->eh_magic = EXT4_EXT_MAGIC;
832 neh->eh_max = ext4_ext_space_block_index(ip);
833 neh->eh_depth = depth - i;
834 fidx = EXT_FIRST_INDEX(neh);
835 fidx->ei_blk = border;
836 ext4_index_store_pblock(fidx, oldblk);
840 while (path[i].ep_index <= EXT_MAX_INDEX(path[i].ep_header)) {
845 memmove(++fidx, path[i].ep_index - m,
846 sizeof(struct ext4_extent_index) * m);
847 neh->eh_ecount = neh->eh_ecount + m;
850 ext2_extent_blk_csum_set(ip, bp->b_data);
856 path[i].ep_header->eh_ecount =
857 path[i].ep_header->eh_ecount - m;
858 ext4_ext_dirty(ip, path + i);
864 error = ext4_ext_insert_index(ip, path + at, border, newblk);
871 for (i = 0; i < depth; i++) {
874 ext4_ext_blkfree(ip, ablks[i], 1, 0);
878 free(ablks, M_EXT2EXTENTS);
884 ext4_ext_grow_indepth(struct inode *ip, struct ext4_extent_path *path,
885 struct ext4_extent *newext)
888 struct ext4_extent_path *curpath;
889 struct ext4_extent_header *neh;
897 newblk = ext4_ext_alloc_meta(ip);
901 bp = getblk(ip->i_devvp, fsbtodb(fs, newblk), fs->e2fs_bsize, 0, 0, 0);
905 /* Move top-level index/leaf into new block. */
906 memmove(bp->b_data, curpath->ep_header, sizeof(ip->i_data));
908 /* Set size of new block */
909 neh = ext4_ext_block_header(bp->b_data);
910 neh->eh_magic = EXT4_EXT_MAGIC;
912 if (ext4_ext_inode_depth(ip))
913 neh->eh_max = ext4_ext_space_block_index(ip);
915 neh->eh_max = ext4_ext_space_block(ip);
917 ext2_extent_blk_csum_set(ip, bp->b_data);
924 curpath->ep_header->eh_magic = EXT4_EXT_MAGIC;
925 curpath->ep_header->eh_max = ext4_ext_space_root(ip);
926 curpath->ep_header->eh_ecount = 1;
927 curpath->ep_index = EXT_FIRST_INDEX(curpath->ep_header);
928 curpath->ep_index->ei_blk = EXT_FIRST_EXTENT(path[0].ep_header)->e_blk;
929 ext4_index_store_pblock(curpath->ep_index, newblk);
931 neh = ext4_ext_inode_header(ip);
932 neh->eh_depth = path->ep_depth + 1;
933 ext4_ext_dirty(ip, curpath);
941 ext4_ext_create_new_leaf(struct inode *ip, struct ext4_extent_path *path,
942 struct ext4_extent *newext)
944 struct ext4_extent_path *curpath;
948 i = depth = ext4_ext_inode_depth(ip);
950 /* Look for free index entry int the tree */
951 curpath = path + depth;
952 while (i > 0 && !EXT_HAS_FREE_INDEX(curpath)) {
958 * We use already allocated block for index block,
959 * so subsequent data blocks should be contiguous.
961 if (EXT_HAS_FREE_INDEX(curpath)) {
962 error = ext4_ext_split(ip, path, newext, i);
967 ext4_ext_drop_refs(path);
968 error = ext4_ext_find_extent(ip, newext->e_blk, &path);
972 /* Tree is full, do grow in depth. */
973 error = ext4_ext_grow_indepth(ip, path, newext);
978 ext4_ext_drop_refs(path);
979 error = ext4_ext_find_extent(ip, newext->e_blk, &path);
983 /* Check and split tree if required. */
984 depth = ext4_ext_inode_depth(ip);
985 if (path[depth].ep_header->eh_ecount ==
986 path[depth].ep_header->eh_max)
995 ext4_ext_correct_indexes(struct inode *ip, struct ext4_extent_path *path)
997 struct ext4_extent_header *eh;
998 struct ext4_extent *ex;
1002 depth = ext4_ext_inode_depth(ip);
1003 eh = path[depth].ep_header;
1004 ex = path[depth].ep_ext;
1006 if (ex == NULL || eh == NULL)
1012 /* We will correct tree if first leaf got modified only. */
1013 if (ex != EXT_FIRST_EXTENT(eh))
1017 border = path[depth].ep_ext->e_blk;
1018 path[k].ep_index->ei_blk = border;
1019 ext4_ext_dirty(ip, path + k);
1021 /* Change all left-side indexes. */
1022 if (path[k+1].ep_index != EXT_FIRST_INDEX(path[k+1].ep_header))
1025 path[k].ep_index->ei_blk = border;
1026 ext4_ext_dirty(ip, path + k);
1033 ext4_ext_insert_extent(struct inode *ip, struct ext4_extent_path *path,
1034 struct ext4_extent *newext)
1036 struct ext4_extent_header * eh;
1037 struct ext4_extent *ex, *nex, *nearex;
1038 struct ext4_extent_path *npath;
1039 int depth, len, error, next;
1041 depth = ext4_ext_inode_depth(ip);
1042 ex = path[depth].ep_ext;
1045 if (newext->e_len == 0 || path[depth].ep_header == NULL)
1048 /* Insert block into found extent. */
1049 if (ex && ext4_can_extents_be_merged(ex, newext)) {
1050 ex->e_len = ex->e_len + newext->e_len;
1051 eh = path[depth].ep_header;
1057 depth = ext4_ext_inode_depth(ip);
1058 eh = path[depth].ep_header;
1059 if (eh->eh_ecount < eh->eh_max)
1063 nex = EXT_LAST_EXTENT(eh);
1064 next = ext4_ext_next_leaf_block(ip, path);
1065 if (newext->e_blk > nex->e_blk && next != EXT4_MAX_BLOCKS) {
1066 KASSERT(npath == NULL,
1067 ("ext4_ext_insert_extent: bad path"));
1069 error = ext4_ext_find_extent(ip, next, &npath);
1073 if (npath->ep_depth != path->ep_depth) {
1078 eh = npath[depth].ep_header;
1079 if (eh->eh_ecount < eh->eh_max) {
1086 * There is no free space in the found leaf,
1087 * try to add a new leaf to the tree.
1089 error = ext4_ext_create_new_leaf(ip, path, newext);
1093 depth = ext4_ext_inode_depth(ip);
1094 eh = path[depth].ep_header;
1097 nearex = path[depth].ep_ext;
1099 /* Create new extent in the leaf. */
1100 path[depth].ep_ext = EXT_FIRST_EXTENT(eh);
1101 } else if (newext->e_blk > nearex->e_blk) {
1102 if (nearex != EXT_LAST_EXTENT(eh)) {
1103 len = EXT_MAX_EXTENT(eh) - nearex;
1104 len = (len - 1) * sizeof(struct ext4_extent);
1105 len = len < 0 ? 0 : len;
1106 memmove(nearex + 2, nearex + 1, len);
1108 path[depth].ep_ext = nearex + 1;
1110 len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
1111 len = len < 0 ? 0 : len;
1112 memmove(nearex + 1, nearex, len);
1113 path[depth].ep_ext = nearex;
1116 eh->eh_ecount = eh->eh_ecount + 1;
1117 nearex = path[depth].ep_ext;
1118 nearex->e_blk = newext->e_blk;
1119 nearex->e_start_lo = newext->e_start_lo;
1120 nearex->e_start_hi = newext->e_start_hi;
1121 nearex->e_len = newext->e_len;
1124 /* Try to merge extents to the right. */
1125 while (nearex < EXT_LAST_EXTENT(eh)) {
1126 if (!ext4_can_extents_be_merged(nearex, nearex + 1))
1129 /* Merge with next extent. */
1130 nearex->e_len = nearex->e_len + nearex[1].e_len;
1131 if (nearex + 1 < EXT_LAST_EXTENT(eh)) {
1132 len = (EXT_LAST_EXTENT(eh) - nearex - 1) *
1133 sizeof(struct ext4_extent);
1134 memmove(nearex + 1, nearex + 2, len);
1137 eh->eh_ecount = eh->eh_ecount - 1;
1138 KASSERT(eh->eh_ecount != 0,
1139 ("ext4_ext_insert_extent: bad ecount"));
1143 * Try to merge extents to the left,
1144 * start from inexes correction.
1146 error = ext4_ext_correct_indexes(ip, path);
1150 ext4_ext_dirty(ip, path + depth);
1154 ext4_ext_drop_refs(npath);
1155 free(npath, M_EXT2EXTENTS);
1158 ip->i_ext_cache.ec_type = EXT4_EXT_CACHE_NO;
1163 ext4_new_blocks(struct inode *ip, daddr_t lbn, e4fs_daddr_t pref,
1164 struct ucred *cred, unsigned long *count, int *perror)
1166 struct m_ext2fs *fs;
1167 e4fs_daddr_t newblk;
1170 * We will allocate only single block for now.
1176 EXT2_LOCK(ip->i_ump);
1177 *perror = ext2_alloc(ip, lbn, pref, (int)fs->e2fs_bsize, cred, &newblk);
1182 ip->i_flag |= IN_CHANGE | IN_UPDATE;
1183 ext2_update(ip->i_vnode, 1);
1190 ext4_ext_get_blocks(struct inode *ip, e4fs_daddr_t iblk,
1191 unsigned long max_blocks, struct ucred *cred, struct buf **bpp,
1192 int *pallocated, daddr_t *nb)
1194 struct m_ext2fs *fs;
1195 struct buf *bp = NULL;
1196 struct ext4_extent_path *path;
1197 struct ext4_extent newex, *ex;
1198 e4fs_daddr_t bpref, newblk = 0;
1199 unsigned long allocated = 0;
1200 int error = 0, depth;
1208 if ((bpref = ext4_ext_in_cache(ip, iblk, &newex))) {
1209 if (bpref == EXT4_EXT_CACHE_IN) {
1210 /* Block is already allocated. */
1211 newblk = iblk - newex.e_blk +
1212 ext4_ext_extent_pblock(&newex);
1213 allocated = newex.e_len - (iblk - newex.e_blk);
1221 error = ext4_ext_find_extent(ip, iblk, &path);
1226 depth = ext4_ext_inode_depth(ip);
1227 if (path[depth].ep_ext == NULL && depth != 0) {
1232 if ((ex = path[depth].ep_ext)) {
1233 uint64_t lblk = ex->e_blk;
1234 uint16_t e_len = ex->e_len;
1235 e4fs_daddr_t e_start = ext4_ext_extent_pblock(ex);
1237 if (e_len > EXT4_MAX_LEN)
1240 /* If we found extent covers block, simply return it. */
1241 if (iblk >= lblk && iblk < lblk + e_len) {
1242 newblk = iblk - lblk + e_start;
1243 allocated = e_len - (iblk - lblk);
1244 ext4_ext_put_in_cache(ip, lblk, e_len,
1245 e_start, EXT4_EXT_CACHE_IN);
1250 /* Allocate the new block. */
1251 if (S_ISREG(ip->i_mode) && (!ip->i_next_alloc_block)) {
1252 ip->i_next_alloc_goal = 0;
1255 bpref = ext4_ext_blkpref(ip, path, iblk);
1256 allocated = max_blocks;
1257 newblk = ext4_new_blocks(ip, iblk, bpref, cred, &allocated, &error);
1261 /* Try to insert new extent into found leaf and return. */
1263 ext4_ext_store_pblock(&newex, newblk);
1264 newex.e_len = allocated;
1265 error = ext4_ext_insert_extent(ip, path, &newex);
1269 newblk = ext4_ext_extent_pblock(&newex);
1270 ext4_ext_put_in_cache(ip, iblk, allocated, newblk, EXT4_EXT_CACHE_IN);
1274 if (allocated > max_blocks)
1275 allocated = max_blocks;
1280 error = bread(ip->i_devvp, fsbtodb(fs, newblk),
1281 fs->e2fs_bsize, cred, &bp);
1291 ext4_ext_drop_refs(path);
1292 free(path, M_EXT2EXTENTS);
1301 static inline uint16_t
1302 ext4_ext_get_actual_len(struct ext4_extent *ext)
1305 return (ext->e_len <= EXT_INIT_MAX_LEN ?
1306 ext->e_len : (ext->e_len - EXT_INIT_MAX_LEN));
1309 static inline struct ext4_extent_header *
1310 ext4_ext_header(struct inode *ip)
1313 return ((struct ext4_extent_header *)ip->i_db);
1317 ext4_remove_blocks(struct inode *ip, struct ext4_extent *ex,
1318 unsigned long from, unsigned long to)
1320 unsigned long num, start;
1322 if (from >= ex->e_blk &&
1323 to == ex->e_blk + ext4_ext_get_actual_len(ex) - 1) {
1325 num = ex->e_blk + ext4_ext_get_actual_len(ex) - from;
1326 start = ext4_ext_extent_pblock(ex) +
1327 ext4_ext_get_actual_len(ex) - num;
1328 ext4_ext_blkfree(ip, start, num, 0);
1335 ext4_ext_rm_index(struct inode *ip, struct ext4_extent_path *path)
1339 /* Free index block. */
1341 leaf = ext4_ext_index_pblock(path->ep_index);
1342 KASSERT(path->ep_header->eh_ecount != 0,
1343 ("ext4_ext_rm_index: bad ecount"));
1344 path->ep_header->eh_ecount--;
1345 ext4_ext_dirty(ip, path);
1346 ext4_ext_blkfree(ip, leaf, 1, 0);
1351 ext4_ext_rm_leaf(struct inode *ip, struct ext4_extent_path *path,
1354 struct ext4_extent_header *eh;
1355 struct ext4_extent *ex;
1356 unsigned int a, b, block, num;
1357 unsigned long ex_blk;
1358 unsigned short ex_len;
1360 int error, correct_index;
1362 depth = ext4_ext_inode_depth(ip);
1363 if (!path[depth].ep_header) {
1364 if (path[depth].ep_data == NULL)
1366 path[depth].ep_header =
1367 (struct ext4_extent_header* )path[depth].ep_data;
1370 eh = path[depth].ep_header;
1372 SDT_PROBE2(ext2fs, , trace, extents, 1,
1373 "bad header => extent corrupted");
1377 ex = EXT_LAST_EXTENT(eh);
1379 ex_len = ext4_ext_get_actual_len(ex);
1383 while (ex >= EXT_FIRST_EXTENT(eh) && ex_blk + ex_len > start) {
1384 path[depth].ep_ext = ex;
1385 a = ex_blk > start ? ex_blk : start;
1386 b = (uint64_t)ex_blk + ex_len - 1 <
1387 EXT4_MAX_BLOCKS ? ex_blk + ex_len - 1 : EXT4_MAX_BLOCKS;
1389 if (a != ex_blk && b != ex_blk + ex_len - 1)
1391 else if (a != ex_blk) {
1392 /* Remove tail of the extent. */
1395 } else if (b != ex_blk + ex_len - 1) {
1396 /* Remove head of the extent, not implemented. */
1399 /* Remove whole extent. */
1404 if (ex == EXT_FIRST_EXTENT(eh))
1407 error = ext4_remove_blocks(ip, ex, a, b);
1412 ext4_ext_store_pblock(ex, 0);
1419 ext4_ext_dirty(ip, path + depth);
1423 ex_len = ext4_ext_get_actual_len(ex);
1426 if (correct_index && eh->eh_ecount)
1427 error = ext4_ext_correct_indexes(ip, path);
1430 * If this leaf is free, we should
1431 * remove it from index block above.
1433 if (error == 0 && eh->eh_ecount == 0 && path[depth].ep_data != NULL)
1434 error = ext4_ext_rm_index(ip, path + depth);
1441 ext4_read_extent_tree_block(struct inode *ip, e4fs_daddr_t pblk,
1442 int depth, int flags)
1444 struct m_ext2fs *fs;
1445 struct ext4_extent_header *eh;
1450 error = bread(ip->i_devvp, fsbtodb(fs, pblk),
1451 fs->e2fs_bsize, NOCRED, &bp);
1456 eh = ext4_ext_block_header(bp->b_data);
1457 if (eh->eh_depth != depth) {
1458 SDT_PROBE2(ext2fs, , trace, extents, 1,
1459 "unexpected eh_depth");
1463 error = ext4_ext_check_header(ip, eh);
1476 ext4_ext_more_to_rm(struct ext4_extent_path *path)
1479 KASSERT(path->ep_index != NULL,
1480 ("ext4_ext_more_to_rm: bad index from path"));
1482 if (path->ep_index < EXT_FIRST_INDEX(path->ep_header))
1485 if (path->ep_header->eh_ecount == path->index_count)
1492 ext4_ext_remove_space(struct inode *ip, off_t length, int flags,
1493 struct ucred *cred, struct thread *td)
1496 struct ext4_extent_header *ehp;
1497 struct ext4_extent_path *path;
1501 ehp = (struct ext4_extent_header *)ip->i_db;
1502 depth = ext4_ext_inode_depth(ip);
1504 error = ext4_ext_check_header(ip, ehp);
1508 path = malloc(sizeof(struct ext4_extent_path) * (depth + 1),
1509 M_EXT2EXTENTS, M_WAITOK | M_ZERO);
1513 path[0].ep_header = ehp;
1514 path[0].ep_depth = depth;
1516 while (error == 0 && i >= 0) {
1519 error = ext4_ext_rm_leaf(ip, path, length);
1522 free(path[i].ep_data, M_EXT2EXTENTS);
1523 path[i].ep_data = NULL;
1528 /* This is index. */
1529 if (!path[i].ep_header)
1531 (struct ext4_extent_header *)path[i].ep_data;
1533 if (!path[i].ep_index) {
1534 /* This level hasn't touched yet. */
1535 path[i].ep_index = EXT_LAST_INDEX(path[i].ep_header);
1536 path[i].index_count = path[i].ep_header->eh_ecount + 1;
1538 /* We've already was here, see at next index. */
1542 if (ext4_ext_more_to_rm(path + i)) {
1543 memset(path + i + 1, 0, sizeof(*path));
1544 bp = ext4_read_extent_tree_block(ip,
1545 ext4_ext_index_pblock(path[i].ep_index),
1546 path[0].ep_depth - (i + 1), 0);
1552 ext4_ext_fill_path_bdata(&path[i+1], bp,
1553 ext4_ext_index_pblock(path[i].ep_index));
1555 path[i].index_count = path[i].ep_header->eh_ecount;
1558 if (path[i].ep_header->eh_ecount == 0 && i > 0) {
1559 /* Index is empty, remove it. */
1560 error = ext4_ext_rm_index(ip, path + i);
1562 free(path[i].ep_data, M_EXT2EXTENTS);
1563 path[i].ep_data = NULL;
1568 if (path->ep_header->eh_ecount == 0) {
1570 * Truncate the tree to zero.
1572 ext4_ext_header(ip)->eh_depth = 0;
1573 ext4_ext_header(ip)->eh_max = ext4_ext_space_root(ip);
1574 ext4_ext_dirty(ip, path);
1577 ext4_ext_drop_refs(path);
1578 free(path, M_EXT2EXTENTS);