2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2019 Google LLC
5 * Copyright (C) 1995, 1996, 1997 Wolfgang Solfrank
6 * Copyright (c) 1995 Martin Husemann
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include <sys/cdefs.h>
32 __RCSID("$NetBSD: fat.c,v 1.18 2006/06/05 16:51:18 christos Exp $");
33 static const char rcsid[] =
37 #include <sys/endian.h>
38 #include <sys/queue.h>
39 #include <sys/limits.h>
41 #include <sys/param.h>
54 static int _readfat(struct fat_descriptor *);
55 static inline struct bootblock* boot_of_(struct fat_descriptor *);
56 static inline int fd_of_(struct fat_descriptor *);
57 static inline bool valid_cl(struct fat_descriptor *, cl_t);
/*
 * NOTE(review): this listing is an incomplete extract; return types,
 * braces and some statements of the bitmap helpers were lost and must
 * be restored from the original file before compiling.
 */
61 * Head bitmap for FAT scanning.
63 * FAT32 has up to 2^28 = 256M entries, and FAT16/12 have much less.
64 * For each cluster, we use 1 bit to represent if it's a head cluster
65 * (the first cluster of a cluster chain).
69 * Initially, we set all bits to 1. In readfat(), we traverse the
70 * whole FAT and mark each cluster identified as a "next" cluster as
71 * 0. After the scan, we have a bitmap with 1's to indicate the
72 * corresponding cluster was a "head" cluster.
74 * We use the head bitmap to identify lost chains: a head cluster that
75 * was not claimed by any file or directory is the head cluster of
78 * Handling of lost chains
79 * =====================
80 * At the end of scanning, we can easily find all lost chain's heads
81 * by finding out the 1's in the head bitmap.
84 typedef struct long_bitmap {
86 	size_t count; /* Total set bits in the map */
/* Clear bit 'cl'; the assert documents that it must currently be set. */
90 bitmap_clear(long_bitmap_t *lbp, cl_t cl)
92 	cl_t i = cl / LONG_BIT;
93 	unsigned long clearmask = ~(1UL << (cl % LONG_BIT));
95 	assert((lbp->map[i] & ~clearmask) != 0);
96 	lbp->map[i] &= clearmask;
/* Return whether bit 'cl' is set. */
101 bitmap_get(long_bitmap_t *lbp, cl_t cl)
103 	cl_t i = cl / LONG_BIT;
104 	unsigned long usedbit = 1UL << (cl % LONG_BIT);
106 	return ((lbp->map[i] & usedbit) == usedbit);
/*
 * Return true when no bit is set in the LONG_BIT-wide word that
 * contains bit 'cl' (callers pass 'cl' aligned to LONG_BIT).
 */
110 bitmap_none_in_range(long_bitmap_t *lbp, cl_t cl)
112 	cl_t i = cl / LONG_BIT;
114 	return (lbp->map[i] == 0);
/* Total number of set bits; presumably returns lbp->count -- body lost. */
118 bitmap_count(long_bitmap_t *lbp)
/*
 * Allocate storage for 'bits' bits, rounded up to whole unsigned longs.
 * When 'allone' is set, start with every bit set (memset 0xff);
 * calloc() failure handling lines were lost in this extract.
 */
124 bitmap_ctor(long_bitmap_t *lbp, size_t bits, bool allone)
126 	size_t bitmap_size = roundup2(bits, LONG_BIT) / (LONG_BIT / 8);
129 	lbp->map = calloc(1, bitmap_size);
130 	if (lbp->map == NULL)
134 	memset(lbp->map, 0xff, bitmap_size);
/* Release the map storage. */
143 bitmap_dtor(long_bitmap_t *lbp)
150 * FAT32 can be as big as 256MiB (2^26 entries * 4 bytes); when we
151 * cannot ask the kernel to manage the access (mmap failed), use a
152 * simple LRU cache with chunk size of 128 KiB to manage it.
/* One cached 128 KiB window of the FAT, linked on the LRU list. */
154 struct fat32_cache_entry {
155 	TAILQ_ENTRY(fat32_cache_entry) entries;
156 	uint8_t *chunk; /* pointer to chunk */
157 	off_t addr; /* offset */
158 	bool dirty; /* dirty bit */
161 static const size_t fat32_cache_chunk_size = 131072; /* MAXPHYS */
162 static const size_t fat32_cache_size = 4194304;
163 static const size_t fat32_cache_entries = 32; /* XXXgcc: cache_size / cache_chunk_size */
166 * FAT table descriptor, represents a FAT table that is already loaded
169 struct fat_descriptor {
170 	struct bootblock *boot;
/* Width-specific accessors, installed by readfat() per ClustMask. */
172 	cl_t (*get)(struct fat_descriptor *, cl_t);
173 	int (*set)(struct fat_descriptor *, cl_t, cl_t);
/* 1 bit per cluster: candidate chain heads (see comment above). */
174 	long_bitmap_t headbitmap;
/* FAT32 LRU cache state; only meaningful when use_cache is true. */
180 	size_t fat32_cached_chunks;
181 	TAILQ_HEAD(cachehead, fat32_cache_entry) fat32_cache_head;
182 	struct fat32_cache_entry *fat32_cache_allentries;
184 	off_t fat32_lastaddr;
/* Mark cluster 'cl' as no longer being a candidate chain head. */
188 fat_clear_cl_head(struct fat_descriptor *fat, cl_t cl)
190 	bitmap_clear(&fat->headbitmap, cl);
/* Is 'cl' still a candidate chain head? */
194 fat_is_cl_head(struct fat_descriptor *fat, cl_t cl)
196 	return (bitmap_get(&fat->headbitmap, cl));
/* Any head candidate in the LONG_BIT-wide word containing 'cl'? */
200 fat_is_cl_head_in_range(struct fat_descriptor *fat, cl_t cl)
202 	return (!(bitmap_none_in_range(&fat->headbitmap, cl)));
/* Number of remaining head candidates. */
206 fat_get_head_count(struct fat_descriptor *fat)
208 	return (bitmap_count(&fat->headbitmap));
214 * FAT12s are sufficiently small, expect it to always fit in the RAM.
/* Each 12-bit entry occupies 1.5 bytes: entry 'cl' starts at cl + cl/2. */
216 static inline uint8_t *
217 fat_get_fat12_ptr(struct fat_descriptor *fat, cl_t cl)
219 	return (fat->fatbuf + ((cl + (cl >> 1))));
/*
 * Decode the 12-bit 'next' value for 'cl'; special values (>= BAD)
 * are sign-extended into the full cl_t space so they compare equal
 * to the CLUST_* constants.  Even/odd nibble-extraction lines were
 * lost in this extract.
 */
223 fat_get_fat12_next(struct fat_descriptor *fat, cl_t cl)
228 	p = fat_get_fat12_ptr(fat, cl);
230 	/* Odd cluster: lower 4 bits belong to the subsequent cluster */
233 	retval &= CLUST12_MASK;
235 	if (retval >= (CLUST_BAD & CLUST12_MASK))
236 		retval |= ~CLUST12_MASK;
/*
 * Encode 'nextcl' into the 12-bit slot for 'cl', preserving the 4 bits
 * that belong to the neighbouring entry sharing the middle byte.
 */
242 fat_set_fat12_next(struct fat_descriptor *fat, cl_t cl, cl_t nextcl)
246 	/* Truncate 'nextcl' value, if needed */
247 	nextcl &= CLUST12_MASK;
249 	p = fat_get_fat12_ptr(fat, cl);
252 	 * Read in the 4 bits from the subsequent (for even clusters)
253 	 * or the preceding (for odd clusters) cluster and combine
254 	 * it to the nextcl value for encoding
257 	nextcl |= ((p[1] & 0xf0) << 8);
260 	nextcl |= (p[0] & 0x0f);
263 	le16enc(p, (uint16_t)nextcl);
271 * FAT16s are sufficiently small, expect it to always fit in the RAM.
/* 2 bytes per entry. */
273 static inline uint8_t *
274 fat_get_fat16_ptr(struct fat_descriptor *fat, cl_t cl)
276 	return (fat->fatbuf + (cl << 1));
/* Decode 16-bit 'next'; sign-extend special values (>= BAD) into cl_t. */
280 fat_get_fat16_next(struct fat_descriptor *fat, cl_t cl)
285 	p = fat_get_fat16_ptr(fat, cl);
286 	retval = le16dec(p) & CLUST16_MASK;
288 	if (retval >= (CLUST_BAD & CLUST16_MASK))
289 		retval |= ~CLUST16_MASK;
/* Store 'nextcl' (truncated to 16 bits) little-endian. */
295 fat_set_fat16_next(struct fat_descriptor *fat, cl_t cl, cl_t nextcl)
299 	/* Truncate 'nextcl' value, if needed */
300 	nextcl &= CLUST16_MASK;
302 	p = fat_get_fat16_ptr(fat, cl);
304 	le16enc(p, (uint16_t)nextcl);
/* 4 bytes per entry; used when the whole FAT is in memory (mmap/malloc). */
312 static inline uint8_t *
313 fat_get_fat32_ptr(struct fat_descriptor *fat, cl_t cl)
315 	return (fat->fatbuf + (cl << 2));
/* Decode 28 significant bits; sign-extend special values into cl_t. */
319 fat_get_fat32_next(struct fat_descriptor *fat, cl_t cl)
324 	p = fat_get_fat32_ptr(fat, cl);
325 	retval = le32dec(p) & CLUST32_MASK;
327 	if (retval >= (CLUST_BAD & CLUST32_MASK))
328 		retval |= ~CLUST32_MASK;
/*
 * Store 'nextcl' masked to 28 bits.  NOTE(review): the upper 4 reserved
 * bits of the on-disk entry are overwritten here rather than preserved
 * -- confirm against the original file whether that is intended.
 */
334 fat_set_fat32_next(struct fat_descriptor *fat, cl_t cl, cl_t nextcl)
338 	/* Truncate 'nextcl' value, if needed */
339 	nextcl &= CLUST32_MASK;
341 	p = fat_get_fat32_ptr(fat, cl);
343 	le32enc(p, (uint32_t)nextcl);
/*
 * I/O size for the cache chunk at 'address': a full chunk, except for
 * the final (possibly partial) chunk of the FAT.
 * NOTE(review): if fatsize is an exact multiple of the chunk size this
 * computes 0 for the last chunk -- verify how fat32_lastaddr is set up
 * by the caller before relying on this.
 */
349 fat_get_iosize(struct fat_descriptor *fat, off_t address)
352 	if (address == fat->fat32_lastaddr) {
353 		return (fat->fatsize & ((off_t)fat32_cache_chunk_size - 1));
355 	return (fat32_cache_chunk_size);
/*
 * Write a dirty cache chunk back to the first FAT on disk, then clear
 * its dirty bit.  The dirty-bit precheck and error-return lines were
 * lost in this extract.
 */
360 fat_flush_fat32_cache_entry(struct fat_descriptor *fat,
361     struct fat32_cache_entry *entry)
372 	writesize = fat_get_iosize(fat, entry->addr);
374 	fat_addr = fat->fat32_offset + entry->addr;
375 	if (lseek(fd, fat_addr, SEEK_SET) != fat_addr ||
376 	    (size_t)write(fd, entry->chunk, writesize) != writesize) {
377 		pfatal("Unable to write FAT");
381 	entry->dirty = false;
/*
 * Return the cache entry holding the chunk that covers FAT offset
 * 'addr', faulting it in from disk on a miss.  The LRU discipline is:
 * hits move the entry to the list head; misses evict (flush) the tail
 * entry and reuse it.
 */
385 static struct fat32_cache_entry *
386 fat_get_fat32_cache_entry(struct fat_descriptor *fat, off_t addr,
390 	struct fat32_cache_entry *entry, *first;
/* Round down to the chunk boundary. */
394 	addr &= ~(fat32_cache_chunk_size - 1);
396 	first = TAILQ_FIRST(&fat->fat32_cache_head);
399 	 * Cache hit: if we already have the chunk, move it to list head
401 	TAILQ_FOREACH(entry, &fat->fat32_cache_head, entries) {
402 		if (entry->addr == addr) {
406 			if (entry != first) {
408 				TAILQ_REMOVE(&fat->fat32_cache_head, entry, entries);
409 				TAILQ_INSERT_HEAD(&fat->fat32_cache_head, entry, entries);
416 	 * Cache miss: detach the chunk at tail of list, overwrite with
417 	 * the located chunk, and populate with data from disk.
419 	entry = TAILQ_LAST(&fat->fat32_cache_head, cachehead);
420 	TAILQ_REMOVE(&fat->fat32_cache_head, entry, entries);
/* Evicted chunk may be dirty: write it back before reuse. */
421 	if (fat_flush_fat32_cache_entry(fat, entry) != FSOK) {
425 	rwsize = fat_get_iosize(fat, addr);
426 	fat_addr = fat->fat32_offset + addr;
429 	if (lseek(fd, fat_addr, SEEK_SET) != fat_addr ||
430 	    (size_t)read(fd, entry->chunk, rwsize) != rwsize) {
431 		pfatal("Unable to read FAT");
437 	TAILQ_INSERT_HEAD(&fat->fat32_cache_head, entry, entries);
/*
 * Return a pointer into the cached chunk for entry 'cl'; 'writing'
 * presumably marks the chunk dirty -- the line doing so was lost.
 */
442 static inline uint8_t *
443 fat_get_fat32_cached_ptr(struct fat_descriptor *fat, cl_t cl, bool writing)
446 	struct fat32_cache_entry *entry;
449 	entry = fat_get_fat32_cache_entry(fat, addr, writing);
452 	off = addr & (fat32_cache_chunk_size - 1);
453 	return (entry->chunk + off);
/* Cached variant of fat_get_fat32_next(): same decoding, via the LRU cache. */
461 fat_get_fat32_cached_next(struct fat_descriptor *fat, cl_t cl)
466 	p = fat_get_fat32_cached_ptr(fat, cl, false);
468 	retval = le32dec(p) & CLUST32_MASK;
469 	if (retval >= (CLUST_BAD & CLUST32_MASK))
470 		retval |= ~CLUST32_MASK;
/* Cached variant of fat_set_fat32_next(); 'true' requests a writable chunk. */
479 fat_set_fat32_cached_next(struct fat_descriptor *fat, cl_t cl, cl_t nextcl)
483 	/* Truncate 'nextcl' value, if needed */
484 	nextcl &= CLUST32_MASK;
486 	p = fat_get_fat32_cached_ptr(fat, cl, true);
488 	le32enc(p, (uint32_t)nextcl);
495 cl_t fat_get_cl_next(struct fat_descriptor *fat, cl_t cl)
498 if (!valid_cl(fat, cl)) {
499 pfatal("Invalid cluster: %ud", cl);
503 return (fat->get(fat, cl));
506 int fat_set_cl_next(struct fat_descriptor *fat, cl_t cl, cl_t nextcl)
510 pwarn(" (NO WRITE)\n");
514 if (!valid_cl(fat, cl)) {
515 pfatal("Invalid cluster: %ud", cl);
519 return (fat->set(fat, cl, nextcl));
/* Trivial accessor: the descriptor's bootblock. */
522 static inline struct bootblock*
523 boot_of_(struct fat_descriptor *fat) {
/* Public wrapper around boot_of_(). */
529 fat_get_boot(struct fat_descriptor *fat) {
531 	return (boot_of_(fat));
/* Trivial accessor: the descriptor's file descriptor (body lost). */
535 fd_of_(struct fat_descriptor *fat)
541 fat_get_fd(struct fat_descriptor * fat)
543 	return (fd_of_(fat));
547 * Whether a cl is in the valid data range.
550 fat_is_valid_cl(struct fat_descriptor *fat, cl_t cl)
553 	return (valid_cl(fat, cl));
/* Data clusters run from CLUST_FIRST (2) up to NumClusters, exclusive. */
557 valid_cl(struct fat_descriptor *fat, cl_t cl)
559 	const struct bootblock *boot = boot_of_(fat);
561 	return (cl >= CLUST_FIRST && cl < boot->NumClusters);
565 * The first 2 FAT entries contain pseudo-cluster numbers with the following
568 * 31...... ........ ........ .......0
569 * rrrr1111 11111111 11111111 mmmmmmmm FAT32 entry 0
570 * rrrrsh11 11111111 11111111 11111xxx FAT32 entry 1
572 * 11111111 mmmmmmmm FAT16 entry 0
573 * sh111111 11111xxx FAT16 entry 1
576 * m = BPB media ID byte
577 * s = clean flag (1 = dismounted; 0 = still mounted)
578 * h = hard error flag (1 = ok; 0 = I/O error)
/*
 * Report whether the filesystem was left dirty (clean/error flags in
 * the first FAT sector).  FAT12 has no such flags and is skipped.
 * Reads one bpbBytesPerSec-sized sector at the FAT offset.
 */
582 checkdirty(int fs, struct bootblock *boot)
589 	if (boot->ClustMask != CLUST16_MASK && boot->ClustMask != CLUST32_MASK)
592 	off = boot->bpbResSectors;
593 	off *= boot->bpbBytesPerSec;
595 	buffer = malloc(len = boot->bpbBytesPerSec);
596 	if (buffer == NULL) {
597 		perr("No space for FAT sectors (%zu)", len);
601 	if (lseek(fs, off, SEEK_SET) != off) {
602 		perr("Unable to read FAT");
606 	if ((size_t)read(fs, buffer, boot->bpbBytesPerSec) !=
607 	    boot->bpbBytesPerSec) {
608 		perr("Unable to read FAT");
613 	 * If we don't understand the FAT, then the file system must be
614 	 * assumed to be unclean.
616 	if (buffer[0] != boot->bpbMedia || buffer[1] != 0xff)
618 	if (boot->ClustMask == CLUST16_MASK) {
619 		if ((buffer[2] & 0xf8) != 0xf8 || (buffer[3] & 0x3f) != 0x3f)
622 		if (buffer[2] != 0xff || (buffer[3] & 0x0f) != 0x0f
623 		    || (buffer[4] & 0xf8) != 0xf8 || buffer[5] != 0xff
624 		    || buffer[6] != 0xff || (buffer[7] & 0x03) != 0x03)
629 	 * Now check the actual clean flag (and the no-error flag).
631 	if (boot->ClustMask == CLUST16_MASK) {
/* Both s and h set (0xc0 in byte 3 / 0x0c in byte 7) == clean shutdown. */
632 		if ((buffer[3] & 0xc0) == 0xc0)
635 		if ((buffer[7] & 0x0c) == 0x0c)
/*
 * Set the clean-shutdown flag in FAT entry 1 on disk (FAT16/FAT32
 * only).  Reads the first FAT sector, sets the flag bits (the lines
 * doing the bit-set were lost in this extract), writes it back.
 * Returns FSERROR on I/O or allocation failure.
 */
645 cleardirty(struct fat_descriptor *fat)
647 	int fd, ret = FSERROR;
648 	struct bootblock *boot;
653 	boot = boot_of_(fat);
656 	if (boot->ClustMask != CLUST16_MASK && boot->ClustMask != CLUST32_MASK)
659 	off = boot->bpbResSectors;
660 	off *= boot->bpbBytesPerSec;
662 	buffer = malloc(len = boot->bpbBytesPerSec);
663 	if (buffer == NULL) {
664 		perr("No memory for FAT sectors (%zu)", len);
668 	if ((size_t)pread(fd, buffer, len, off) != len) {
669 		perr("Unable to read FAT");
673 	if (boot->ClustMask == CLUST16_MASK) {
679 	if ((size_t)pwrite(fd, buffer, len, off) != len) {
680 		perr("Unable to write FAT");
692 * Read a FAT from disk. Returns 1 if successful, 0 otherwise.
695 _readfat(struct fat_descriptor *fat)
701 	struct bootblock *boot;
702 	struct fat32_cache_entry *entry;
704 	boot = boot_of_(fat);
706 	fat->fatsize = boot->FATsecs * boot->bpbBytesPerSec;
708 	off = boot->bpbResSectors;
709 	off *= boot->bpbBytesPerSec;
711 	fat->is_mmapped = false;
712 	fat->use_cache = false;
714 	/* Attempt to mmap() first */
716 	fat->fatbuf = mmap(NULL, fat->fatsize,
717 	    PROT_READ | (rdonly ? 0 : PROT_WRITE),
718 	    MAP_SHARED, fd_of_(fat), off);
719 	if (fat->fatbuf != MAP_FAILED) {
720 		fat->is_mmapped = true;
726 	 * Unfortunately, we were unable to mmap().
728 	 * Only use the cache manager when it's necessary, that is,
729 	 * when the FAT is sufficiently large; in that case, only
730 	 * read in the first 4 MiB of FAT into memory, and split the
731 	 * buffer into chunks and insert to the LRU queue to populate
732 	 * the cache with data.
734 	if (boot->ClustMask == CLUST32_MASK &&
735 	    fat->fatsize >= fat32_cache_size) {
736 		readsize = fat32_cache_size;
737 		fat->use_cache = true;
739 		fat->fat32_offset = boot->bpbResSectors * boot->bpbBytesPerSec;
/*
 * BUG(review): '~(fat32_cache_chunk_size)' only clears bit 17; to round
 * the FAT size down to a chunk boundary this should almost certainly be
 * '~(fat32_cache_chunk_size - 1)'.  As written, fat32_lastaddr keeps the
 * sub-chunk remainder, so fat_get_iosize() may never recognize the last
 * chunk and I/O can run past the end of the FAT.
 */
740 		fat->fat32_lastaddr = fat->fatsize & ~(fat32_cache_chunk_size);
742 		readsize = fat->fatsize;
744 	fat->fatbuf = malloc(readsize);
745 	if (fat->fatbuf == NULL) {
746 		perr("No space for FAT (%zu)", readsize);
750 	if (lseek(fd, off, SEEK_SET) != off) {
751 		perr("Unable to read FAT");
754 	if ((size_t)read(fd, fat->fatbuf, readsize) != readsize) {
755 		perr("Unable to read FAT");
760 	 * When cache is used, split the buffer into chunks, and
761 	 * connect the buffer into the cache.
763 	if (fat->use_cache) {
764 		TAILQ_INIT(&fat->fat32_cache_head);
765 		entry = calloc(fat32_cache_entries, sizeof(*entry));
/* NOTE(review): 'sizeof(entry)' prints the pointer size; 'sizeof(*entry)'
 * was probably intended (message-only, harmless). */
767 			perr("No space for FAT cache (%zu of %zu)",
768 			    fat32_cache_entries, sizeof(entry));
771 		for (i = 0; i < fat32_cache_entries; i++) {
772 			entry[i].addr = fat32_cache_chunk_size * i;
773 			entry[i].chunk = &fat->fatbuf[entry[i].addr];
774 			TAILQ_INSERT_TAIL(&fat->fat32_cache_head,
777 		fat->fat32_cache_allentries = entry;
/*
 * Release all FAT resources: the mmap'd or malloc'd buffer, the FAT32
 * cache entry array, and the head bitmap.
 */
789 releasefat(struct fat_descriptor *fat)
791 	if (fat->is_mmapped) {
792 		munmap(fat->fatbuf, fat->fatsize);
794 	if (fat->use_cache) {
795 		free(fat->fat32_cache_allentries);
796 		fat->fat32_cache_allentries = NULL;
801 	bitmap_dtor(&fat->headbitmap);
805 * Read or map a FAT and populate head bitmap
808 readfat(int fs, struct bootblock *boot, struct fat_descriptor **fp)
810 struct fat_descriptor *fat;
815 boot->NumFree = boot->NumBad = 0;
817 fat = calloc(1, sizeof(struct fat_descriptor));
819 perr("No space for FAT descriptor");
826 if (!_readfat(fat)) {
830 buffer = fat->fatbuf;
832 /* Populate accessors */
833 switch(boot->ClustMask) {
835 fat->get = fat_get_fat12_next;
836 fat->set = fat_set_fat12_next;
839 fat->get = fat_get_fat16_next;
840 fat->set = fat_set_fat16_next;
843 if (fat->is_mmapped || !fat->use_cache) {
844 fat->get = fat_get_fat32_next;
845 fat->set = fat_set_fat32_next;
847 fat->get = fat_get_fat32_cached_next;
848 fat->set = fat_set_fat32_cached_next;
852 pfatal("Invalid ClustMask: %d", boot->ClustMask);
858 if (bitmap_ctor(&fat->headbitmap, boot->NumClusters,
860 perr("No space for head bitmap for FAT clusters (%zu)",
861 (size_t)boot->NumClusters);
867 if (buffer[0] != boot->bpbMedia
868 || buffer[1] != 0xff || buffer[2] != 0xff
869 || (boot->ClustMask == CLUST16_MASK && buffer[3] != 0xff)
870 || (boot->ClustMask == CLUST32_MASK
871 && ((buffer[3]&0x0f) != 0x0f
872 || buffer[4] != 0xff || buffer[5] != 0xff
873 || buffer[6] != 0xff || (buffer[7]&0x0f) != 0x0f))) {
875 /* Windows 95 OSR2 (and possibly any later) changes
876 * the FAT signature to 0xXXffff7f for FAT16 and to
877 * 0xXXffff0fffffff07 for FAT32 upon boot, to know that the
878 * file system is dirty if it doesn't reboot cleanly.
879 * Check this special condition before errorring out.
881 if (buffer[0] == boot->bpbMedia && buffer[1] == 0xff
883 && ((boot->ClustMask == CLUST16_MASK && buffer[3] == 0x7f)
884 || (boot->ClustMask == CLUST32_MASK
885 && buffer[3] == 0x0f && buffer[4] == 0xff
886 && buffer[5] == 0xff && buffer[6] == 0xff
887 && buffer[7] == 0x07)))
890 /* just some odd byte sequence in FAT */
892 switch (boot->ClustMask) {
894 pwarn("%s (%02x%02x%02x%02x%02x%02x%02x%02x)\n",
895 "FAT starts with odd byte sequence",
896 buffer[0], buffer[1], buffer[2], buffer[3],
897 buffer[4], buffer[5], buffer[6], buffer[7]);
900 pwarn("%s (%02x%02x%02x%02x)\n",
901 "FAT starts with odd byte sequence",
902 buffer[0], buffer[1], buffer[2], buffer[3]);
905 pwarn("%s (%02x%02x%02x)\n",
906 "FAT starts with odd byte sequence",
907 buffer[0], buffer[1], buffer[2]);
911 if (ask(1, "Correct")) {
915 *p++ = (u_char)boot->bpbMedia;
918 switch (boot->ClustMask) {
937 * Traverse the FAT table and populate head map. Initially, we
938 * consider all clusters as possible head cluster (beginning of
939 * a file or directory), and traverse the whole allocation table
940 * by marking every non-head nodes as such (detailed below) and
941 * fix obvious issues while we walk.
943 * For each "next" cluster, the possible values are:
945 * a) CLUST_FREE or CLUST_BAD. The *current* cluster can't be a
947 * b) An out-of-range value. The only fix would be to truncate at
949 * c) A valid cluster. It means that cluster (nextcl) is not a
950 * head cluster. Note that during the scan, every cluster is
951 * expected to be seen for at most once, and when we saw them
952 * twice, it means a cross-linked chain which should be
953 * truncated at the current cluster.
955 * After scan, the remaining set bits indicates all possible
956 * head nodes, because they were never claimed by any other
957 * node as the next node, but we do not know if these chains
958 * would end with a valid EOF marker. We will check that in
959 * checkchain() at a later time when checking directories,
960 * where these head nodes would be marked as non-head.
962 * In the final pass, all head nodes should be cleared, and if
963 * there is still head nodes, these would be leaders of lost
966 for (cl = CLUST_FIRST; cl < boot->NumClusters; cl++) {
967 nextcl = fat_get_cl_next(fat, cl);
969 /* Check if the next cluster number is valid */
970 if (nextcl == CLUST_FREE) {
971 /* Save a hint for next free cluster */
972 if (boot->FSNext == 0) {
975 if (fat_is_cl_head(fat, cl)) {
976 fat_clear_cl_head(fat, cl);
979 } else if (nextcl == CLUST_BAD) {
980 if (fat_is_cl_head(fat, cl)) {
981 fat_clear_cl_head(fat, cl);
984 } else if (!valid_cl(fat, nextcl) && nextcl < CLUST_RSRVD) {
985 pwarn("Cluster %u continues with out of range "
986 "cluster number %u\n",
988 nextcl & boot->ClustMask);
989 if (ask(0, "Truncate")) {
990 ret |= fat_set_cl_next(fat, cl, CLUST_EOF);
993 } else if (valid_cl(fat, nextcl)) {
994 if (fat_is_cl_head(fat, nextcl)) {
995 fat_clear_cl_head(fat, nextcl);
997 pwarn("Cluster %u crossed another chain at %u\n",
999 if (ask(0, "Truncate")) {
1000 ret |= fat_set_cl_next(fat, cl, CLUST_EOF);
1008 if (ret & FSFATAL) {
1018 * Get type of reserved cluster
/* Returns a printable name ("free", "bad", ...) for special cluster values. */
1021 rsrvdcltype(cl_t cl)
1023 	if (cl == CLUST_FREE)
1033 * Examine a cluster chain for errors and count its size.
1036 checkchain(struct fat_descriptor *fat, cl_t head, size_t *chainsize)
1038 	cl_t prev_cl, current_cl, next_cl;
1042 	 * We expect the caller to give us a real, unvisited 'head'
1043 	 * cluster, and it must be a valid cluster. While scanning the
1044 	 * FAT table, we already excluded all clusters that were claimed
1045 	 * as a "next" cluster. Assert all the three conditions.
1047 	assert(valid_cl(fat, head));
1048 	assert(fat_is_cl_head(fat, head));
1051 	 * Immediately mark the 'head' cluster that we are about to visit.
1053 	fat_clear_cl_head(fat, head);
1056 	 * The allocation of a non-zero sized file or directory is
1057 	 * represented as a singly linked list, and the tail node
1058 	 * would be the EOF marker (>=CLUST_EOFS).
1060 	 * With a valid head node at hand, we expect all subsequent
1061 	 * clusters to be either a not yet seen and valid cluster (we
1062 	 * would continue counting), or the EOF marker (we conclude
1063 	 * the scan of this chain).
1065 	 * For all other cases, the chain is invalid, and the only
1066 	 * viable fix would be to truncate at the current node (mark
1067 	 * it as EOF) when the next node violates that.
/* Walk while 'next' stays a valid data cluster; count is in lost lines. */
1070 	prev_cl = current_cl = head;
1071 	for (next_cl = fat_get_cl_next(fat, current_cl);
1072 	    valid_cl(fat, next_cl);
1073 	    prev_cl = current_cl, current_cl = next_cl, next_cl = fat_get_cl_next(fat, current_cl))
/* Normal termination: chain ends with an EOF marker. */
1077 	if (next_cl >= CLUST_EOFS) {
1083 	 * The chain ended with an out-of-range cluster number.
1085 	 * If the current node is e.g. CLUST_FREE, CLUST_BAD, etc.,
1086 	 * it should not be present in a chain and we have to truncate
1087 	 * at the previous node.
1089 	 * If the current cluster points to an invalid cluster, the
1090 	 * current cluster might have useful data and we truncate at
1091 	 * the current cluster instead.
1093 	if (next_cl == CLUST_FREE || next_cl >= CLUST_RSRVD) {
1094 		pwarn("Cluster chain starting at %u ends with cluster marked %s\n",
1095 		    head, rsrvdcltype(next_cl));
1096 		current_cl = prev_cl;
1098 		pwarn("Cluster chain starting at %u ends with cluster out of range (%u)\n",
1100 		    next_cl & boot_of_(fat)->ClustMask);
/* Non-empty chain: truncate (EOF); empty: free the single cluster. */
1104 	if (*chainsize > 0) {
1106 		next_cl = CLUST_EOF;
1109 		next_cl = CLUST_FREE;
1111 	if (ask(0, "%s", op)) {
1112 		return (fat_set_cl_next(fat, current_cl, next_cl) | FSFATMOD);
1119 * Clear cluster chain from head.
/* Walks the chain freeing each cluster; NumFree bookkeeping lines lost. */
1122 clearchain(struct fat_descriptor *fat, cl_t head)
1124 	cl_t current_cl, next_cl;
1125 	struct bootblock *boot = boot_of_(fat);
1129 	while (valid_cl(fat, current_cl)) {
/* Fetch 'next' before overwriting the entry with CLUST_FREE. */
1130 		next_cl = fat_get_cl_next(fat, current_cl);
1131 		(void)fat_set_cl_next(fat, current_cl, CLUST_FREE);
1133 		current_cl = next_cl;
1139 * Overwrite the n-th FAT with FAT0
/*
 * Copy FAT0 to backup FAT 'n' in fat32_cache_size-sized blobs, reusing
 * fat->fatbuf as the bounce buffer; the final blob is the (possibly
 * shorter) tail.  Errors are reported but the '&& !rdonly'-style
 * conditions were lost in this extract.
 */
1142 copyfat(struct fat_descriptor *fat, int n)
1144 	size_t rwsize, tailsize, blobs, i;
1145 	off_t dst_off, src_off;
1146 	struct bootblock *boot;
1151 	boot = boot_of_(fat);
1153 	blobs = howmany(fat->fatsize, fat32_cache_size);
1154 	tailsize = fat->fatsize % fat32_cache_size;
1155 	if (tailsize == 0) {
1156 		tailsize = fat32_cache_size;
1158 	rwsize = fat32_cache_size;
1160 	src_off = fat->fat32_offset;
/* NOTE(review): 'n * boot->FATsecs' multiplies before widening to off_t;
 * overflow is implausible for sane FAT sizes but worth confirming. */
1161 	dst_off = boot->bpbResSectors + n * boot->FATsecs;
1162 	dst_off *= boot->bpbBytesPerSec;
1164 	for (i = 0; i < blobs;
1165 	    i++, src_off += fat32_cache_size, dst_off += fat32_cache_size) {
1166 		if (i == blobs - 1) {
1169 		if ((lseek(fd, src_off, SEEK_SET) != src_off ||
1170 		    (size_t)read(fd, fat->fatbuf, rwsize) != rwsize) &&
1172 			perr("Unable to read FAT0");
1176 		if ((lseek(fd, dst_off, SEEK_SET) != dst_off ||
1177 		    (size_t)write(fd, fat->fatbuf, rwsize) != rwsize) &&
1179 			perr("Unable to write FAT %d", n);
/*
 * Write the in-memory FAT back to disk: flush the FAT32 LRU cache (if
 * in use), then propagate FAT0 to every backup FAT.  For the mmap'd
 * case FAT0 itself needs no rewrite (i starts at 1).
 */
1190 writefat(struct fat_descriptor *fat)
1196 	struct bootblock *boot;
1197 	struct fat32_cache_entry *entry;
1199 	boot = boot_of_(fat);
1202 	if (fat->use_cache) {
1204 		 * Attempt to flush all in-flight cache, and bail out
1205 		 * if we encountered an error (but only emit error
1206 		 * message once). Stop proceeding with copyfat()
1207 		 * if any flush failed.
1209 		TAILQ_FOREACH(entry, &fat->fat32_cache_head, entries) {
1210 			if (fat_flush_fat32_cache_entry(fat, entry) != FSOK) {
1212 					perr("Unable to write FAT");
1220 		/* Update backup copies of FAT, error is not fatal */
1221 		for (i = 1; i < boot->bpbFATs; i++) {
1222 			if (copyfat(fat, i) != FSOK)
1226 		writesz = fat->fatsize;
1228 		for (i = fat->is_mmapped ? 1 : 0; i < boot->bpbFATs; i++) {
1229 			dst_base = boot->bpbResSectors + i * boot->FATsecs;
1230 			dst_base *= boot->bpbBytesPerSec;
1231 			if ((lseek(fd, dst_base, SEEK_SET) != dst_base ||
1232 			    (size_t)write(fd, fat->fatbuf, writesz) != writesz) &&
1234 				perr("Unable to write FAT %d", i);
/* Failing to write FAT0 is fatal; failing a backup copy is not. */
1235 				ret = ((i == 0) ? FSFATAL : FSERROR);
1244 * Check a complete in-memory FAT for lost cluster chains
/*
 * After all directories were traversed, every bit still set in the
 * head bitmap is the head of a lost chain.  Offer to reconnect or
 * clear each one, then reconcile the FSInfo free-count and
 * next-free-cluster hints.
 */
1247 checklost(struct fat_descriptor *fat)
1252 	size_t chains, chainlength;
1253 	struct bootblock *boot;
1255 	dosfs = fd_of_(fat);
1256 	boot = boot_of_(fat);
1259 	 * At this point, we have already traversed all directories.
1260 	 * All remaining chain heads in the bitmap are heads of lost
1263 	chains = fat_get_head_count(fat);
1264 	for (head = CLUST_FIRST;
1265 	    chains > 0 && head < boot->NumClusters;
1268 		 * We expect the bitmap to be very sparse, so skip if
1269 		 * the range is full of 0's
1271 		if (head % LONG_BIT == 0 &&
1272 		    !fat_is_cl_head_in_range(fat, head)) {
1276 		if (fat_is_cl_head(fat, head)) {
1277 			ret = checkchain(fat, head, &chainlength);
1278 			if (ret != FSERROR && chainlength > 0) {
/* NOTE(review): "%zd" with a size_t argument; "%zu" is the correct
 * conversion for an unsigned size. */
1279 				pwarn("Lost cluster chain at cluster %u\n"
1280 				    "%zd Cluster(s) lost\n",
1282 				mod |= ret = reconnect(fat, head,
1287 			if (ret == FSERROR && ask(0, "Clear")) {
1288 				clearchain(fat, head);
1298 	if (boot->bpbFSInfo) {
/* 0xffffffff means "unknown" in FSInfo and is left alone. */
1300 		if (boot->FSFree != 0xffffffffU &&
1301 		    boot->FSFree != boot->NumFree) {
1302 			pwarn("Free space in FSInfo block (%u) not correct (%u)\n",
1303 			    boot->FSFree, boot->NumFree);
1304 			if (ask(1, "Fix")) {
1305 				boot->FSFree = boot->NumFree;
1309 		if (boot->FSNext != 0xffffffffU &&
1310 		    (boot->FSNext >= boot->NumClusters ||
1311 		    (boot->NumFree && fat_get_cl_next(fat, boot->FSNext) != CLUST_FREE))) {
1312 			pwarn("Next free cluster in FSInfo block (%u) %s\n",
1314 			    (boot->FSNext >= boot->NumClusters) ? "invalid" : "not free");
/* Linear scan for the first genuinely free cluster as the new hint. */
1316 				for (head = CLUST_FIRST; head < boot->NumClusters; head++)
1317 					if (fat_get_cl_next(fat, head) == CLUST_FREE) {
1318 						boot->FSNext = head;
1324 			mod |= writefsinfo(dosfs, boot);