2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 1980, 1991, 1993
5 * The Regents of the University of California. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
/*
 * File-scope identification strings, includes, and tape-writer state.
 * NOTE(review): this listing is missing intermediate source lines
 * (e.g. the rcsid initializer string); code left byte-identical.
 */
34 static char sccsid[] = "@(#)tape.c 8.4 (Berkeley) 5/1/95";
36 static const char rcsid[] =
40 #include <sys/param.h>
41 #include <sys/socket.h>
45 #include <ufs/ufs/dinode.h>
46 #include <ufs/ffs/fs.h>
48 #include <protocols/dumprestore.h>
64 ino_t curino; /* current inumber; used globally */
65 int newtape; /* new tape flag */
66 union u_spcl u_spcl; /* mapping of variables in a control block */
68 static int tapefd; /* tape file descriptor */
69 static long asize; /* number of 0.1" units written on cur tape */
70 static int writesize; /* size of malloc()ed buffer for tape */
71 static int64_t lastspclrec = -1; /* tape block number of last written header */
72 static int trecno = 0; /* next record to write in current block */
73 static long blocksthisvol; /* number of blocks on current output file */
74 static char *nexttape; /* remaining comma-separated tape names, if any */
75 static FILE *popenfp = NULL; /* stream from popen() when -P output pipeline is used */
/* Prototypes for the worker/IPC machinery defined later in this file. */
77 static int atomic_read(int, void *, int);
78 static int atomic_write(int, const void *, int);
79 static void worker(int, int);
80 static void create_workers(void);
81 static void flushtape(void);
82 static void killall(void);
83 static void rollforward(void);
86 * Concurrent dump mods (Caltech) - disk block reading and tape writing
87 * are exported to several worker processes. While one worker writes the
88 * tape, the others read disk blocks; they pass control of the tape in
89 * a ring via signals. The parent process traverses the file system and
90 * sends writeheader()'s and lists of daddr's to the workers via pipes.
91 * The following structure defines the instruction packets sent to workers.
99 #define WORKERS 3 /* 1 worker writing, 1 reading, 1 for slack */
/*
 * Per-worker bookkeeping; workers[WORKERS] is an extra slot used as
 * scratch by rollforward() when replaying blocks onto a new tape.
 */
100 static struct worker {
101 int64_t tapea; /* header number at start of this chunk */
102 int64_t firstrec; /* record number of this block */
103 int count; /* count to next header (used for TS_TAPE */
105 int inode; /* inode that we are currently dealing with */
106 int fd; /* FD for this worker */
107 int pid; /* PID for this worker */
108 int sent; /* 1 == we've sent this worker requests */
109 char (*tblock)[TP_BSIZE]; /* buffer for data blocks */
110 struct req *req; /* buffer for requests */
111 } workers[WORKERS+1];
112 static struct worker *wp; /* worker currently being filled by the master */
114 static char (*nextblock)[TP_BSIZE]; /* next free TP_BSIZE record slot in wp->tblock */
116 static int master; /* pid of master, for sending error signals */
117 static int tenths; /* length of tape used per block written */
118 static volatile sig_atomic_t caught; /* have we caught the signal to proceed? */
119 static volatile sig_atomic_t ready; /* reached the lock point without having */
120 /* received the SIGUSR2 signal from the prev worker? */
121 static jmp_buf jmpbuf; /* where to jump to if we are ready when the */
122 /* SIGUSR2 arrives from the previous worker */
/*
 * alloctape (fragment): size and allocate the per-worker request+data
 * buffers, page-aligned so the tape write() is fast, and estimate tape
 * consumption per block for the end-of-tape heuristic.
 * NOTE(review): the function header and several interior lines are
 * missing from this listing; code left byte-identical.
 */
127 int pgoff = getpagesize() - 1;
131 writesize = ntrec * TP_BSIZE;
132 reqsiz = (ntrec + 1) * sizeof(struct req);
134 * CDC 92181's and 92185's make 0.8" gaps in 1600-bpi start/stop mode
135 * (see DEC TU80 User's Guide). The shorter gaps of 6250-bpi require
136 * repositioning after stopping, i.e, streaming mode, where the gap is
137 * variable, 0.30" to 0.45". The gap is maximal when the tape stops.
139 if (blocksperfile == 0 && !unlimited)
140 tenths = writesize / density +
141 (cartridge ? 16 : density == 625 ? 5 : 8);
143 * Allocate tape buffer contiguous with the array of instruction
144 * packets, so flushtape() can write them together with one write().
145 * Align tape buffer on page boundary to speed up tape write().
147 for (i = 0; i <= WORKERS; i++) {
149 malloc((unsigned)(reqsiz + writesize + pgoff + TP_BSIZE));
/* tblock is rounded up to the next page boundary past the req array. */
152 workers[i].tblock = (char (*)[TP_BSIZE])
153 (((long)&buf[ntrec + 1] + pgoff) &~ pgoff);
154 workers[i].req = (struct req *)workers[i].tblock - ntrec - 1;
160 nextblock = wp->tblock;
/*
 * writerec (fragment): queue one TP_BSIZE header/special record into the
 * current worker's buffer; dblk == 0 marks "data supplied inline" for the
 * worker.  NOTE(review): interior lines missing; code left byte-identical.
 */
165 writerec(char *dp, int isspcl)
168 wp->req[trecno].dblk = (ufs2_daddr_t)0;
169 wp->req[trecno].count = 1;
170 /* Can't do a structure assignment due to alignment problems */
171 bcopy(dp, *(nextblock)++, sizeof (union u_spcl));
/* Remember where the last header went so flushtape() can count to it. */
173 lastspclrec = spcl.c_tapea;
/*
 * dumpblock (fragment): translate a file-system block into disk addresses
 * and queue up to ntrec tape records per request packet, splitting across
 * flushes when the current block fills.
 * NOTE(review): interior lines missing; code left byte-identical.
 */
181 dumpblock(ufs2_daddr_t blkno, int size)
186 dblkno = fsbtodb(sblock, blkno);
187 tpblks = size >> tp_bshift;
188 while ((avail = MIN(tpblks, ntrec - trecno)) > 0) {
189 wp->req[trecno].dblk = dblkno;
190 wp->req[trecno].count = avail;
192 spcl.c_tapea += avail;
/* Advance the disk address by the tape records consumed. */
195 dblkno += avail << (tp_bshift - dev_bshift);
/*
 * tperror (fragment): SIGUSR1 handler — a worker hit a tape write error.
 * Unrecoverable when writing to a pipe; otherwise offer to restart the
 * volume on new media.
 * NOTE(review): interior lines (pipeout test, restart path) missing from
 * this listing; code left byte-identical.
 */
203 tperror(int signo __unused)
207 msg("write error on %s\n", tape);
208 quit("Cannot recover\n");
211 msg("write error %ld blocks into volume %d\n", blocksthisvol, tapeno);
212 broadcast("DUMP WRITE ERROR!\n");
213 if (!query("Do you want to restart?"))
215 msg("Closing this volume. Prepare to restart with new media;\n");
216 msg("this dump volume will be rewritten.\n");
/* sigpipe: SIGPIPE handler — the output pipeline went away; abort. */
224 sigpipe(int signo __unused)
227 quit("Broken pipe\n");
/*
 * flushtape (fragment): hand the filled request+data buffer to the current
 * worker, rotate to the next worker in the ring, collect that worker's
 * previous write result, and detect end-of-tape / per-file block limits.
 * NOTE(review): many interior lines missing; code left byte-identical.
 */
234 int64_t lastfirstrec;
236 int siz = (char *)nextblock - (char *)wp->req;
238 wp->req[trecno].count = 0; /* Sentinel */
240 if (atomic_write(wp->fd, (const void *)wp->req, siz) != siz)
241 quit("error writing command pipe: %s\n", strerror(errno));
242 wp->sent = 1; /* we sent a request, read the response later */
244 lastfirstrec = wp->firstrec;
/* Ring rotation: wrap back to workers[0] after the last slot. */
246 if (++wp >= &workers[WORKERS])
249 /* Read results back from next worker */
251 if (atomic_read(wp->fd, (void *)&got, sizeof got)
253 perror(" DUMP: error reading command pipe in master");
258 /* Check for end of tape */
259 if (got < writesize) {
260 msg("End of tape detected\n");
263 * Drain the results, don't care what the values were.
264 * If we read them here then trewind won't...
266 for (i = 0; i < WORKERS; i++) {
267 if (workers[i].sent) {
268 if (atomic_read(workers[i].fd,
269 (void *)&got, sizeof got)
271 perror(" DUMP: error reading command pipe in master");
/* Count data records that follow the last header (TS_TAPE bookkeeping). */
285 if (spcl.c_type != TS_END && spcl.c_type != TS_CLRI &&
286 spcl.c_type != TS_BITS) {
287 assert(spcl.c_count <= TP_NINDIR);
288 for (i = 0; i < spcl.c_count; i++)
289 if (spcl.c_addr[i] != 0)
292 wp->count = lastspclrec + blks + 1 - spcl.c_tapea;
293 wp->tapea = spcl.c_tapea;
294 wp->firstrec = lastfirstrec + ntrec;
296 nextblock = wp->tblock;
299 blockswritten += ntrec;
300 blocksthisvol += ntrec;
/* Volume full: either blocksperfile reached or size estimate exceeded. */
301 if (!pipeout && !unlimited && (blocksperfile ?
302 (blocksthisvol >= blocksperfile) : (asize > tsize))) {
/*
 * trewind (fragment): drain outstanding worker results (EOT here is fatal
 * since the data cannot be replayed), reap the workers, then close and
 * rewind the output tape/pipeline.
 * NOTE(review): many interior lines missing; code left byte-identical.
 */
316 for (f = 0; f < WORKERS; f++) {
318 * Drain the results, but unlike EOT we DO (or should) care
319 * what the return values were, since if we detect EOT after
320 * we think we've written the last blocks to the tape anyway,
321 * we have to replay those blocks with rollforward.
323 * fixme: punt for now.
325 if (workers[f].sent) {
326 if (atomic_read(workers[f].fd, (void *)&got, sizeof got)
328 perror(" DUMP: error reading command pipe in master");
332 if (got != writesize) {
333 msg("EOT detected in last 2 tape records!\n");
334 msg("Use a longer tape, decrease the size estimate\n");
335 quit("or use no size estimate at all.\n");
338 (void) close(workers[f].fd);
340 while (wait((int *)NULL) >= 0) /* wait for any signals from workers */
346 msg("Closing %s\n", tape);
350 (void)pclose(popenfp);
/* Re-open read-only to force the drive to rewind (remote or local). */
357 while (rmtopen(tape, 0) < 0)
363 if (fstat(tapefd, &sb) == 0 && S_ISFIFO(sb.st_mode)) {
367 (void) close(tapefd);
368 while ((f = open(tape, 0)) < 0)
/*
 * Volume-change fragment (presumably close_rewind — TODO confirm):
 * prompt the operator for the next volume and exclude the time spent
 * waiting from the write-rate statistics.
 * NOTE(review): interior lines missing; code left byte-identical.
 */
376 time_t tstart_changevol, tend_changevol;
381 (void)time((time_t *)&(tstart_changevol));
383 msg("Change Volumes: Mount volume #%d\n", tapeno+1);
384 broadcast("CHANGE DUMP VOLUMES!\a\a\n");
386 while (!query("Is the new volume mounted and ready to go?"))
387 if (query("Do you want to abort?")) {
391 (void)time((time_t *)&(tend_changevol));
/* Credit the operator wait back to the writing-start timestamp. */
392 if ((tstart_changevol != (time_t)-1) && (tend_changevol != (time_t)-1))
393 tstart_writing += (tend_changevol - tstart_changevol);
/*
 * rollforward (fragment): after EOT, replay each worker's pending request
 * list onto the new tape.  The spare workers[WORKERS] buffers are used to
 * rebuild request packets; the first record of each replayed chunk is the
 * tail of the previous chunk's last disk block (or a copied header).
 * NOTE(review): many interior lines missing; code left byte-identical.
 */
399 struct req *p, *q, *prev;
403 union u_spcl *ntb, *otb;
404 twp = &workers[WORKERS];
405 ntb = (union u_spcl *)twp->tblock[1];
408 * Each of the N workers should have requests that need to
409 * be replayed on the next tape. Use the extra worker buffers
410 * (workers[WORKERS]) to construct request lists to be sent to
411 * each worker in turn.
413 for (i = 0; i < WORKERS; i++) {
415 otb = (union u_spcl *)wp->tblock;
418 * For each request in the current worker, copy it to twp.
422 for (p = wp->req; p->count > 0; p += p->count) {
425 *ntb++ = *otb++; /* copy the datablock also */
430 quit("rollforward: protocol botch");
442 nextblock = twp->tblock;
/* Temporarily rewind spcl.c_tapea so the replayed header numbers match. */
443 savedtapea = spcl.c_tapea;
444 spcl.c_tapea = wp->tapea;
446 spcl.c_tapea = savedtapea;
447 lastspclrec = savedtapea - 1;
449 size = (char *)ntb - (char *)q;
450 if (atomic_write(wp->fd, (const void *)q, size) != size) {
451 perror(" DUMP: error writing command pipe");
455 if (++wp >= &workers[WORKERS])
460 if (prev->dblk != 0) {
462 * If the last one was a disk block, make the
463 * first of this one be the last bit of that disk
466 q->dblk = prev->dblk +
467 prev->count * (TP_BSIZE / DEV_BSIZE);
468 ntb = (union u_spcl *)twp->tblock;
471 * It wasn't a disk block. Copy the data to its
472 * new location in the buffer.
475 *((union u_spcl *)twp->tblock) = *ntb;
476 ntb = (union u_spcl *)twp->tblock[1];
480 nextblock = wp->tblock;
486 * Clear the first workers' response. One hopes that it
487 * worked ok, otherwise the tape is much too short!
490 if (atomic_read(wp->fd, (void *)&got, sizeof got)
492 perror(" DUMP: error reading command pipe in master");
497 if (got != writesize) {
498 quit("EOT detected at start of the tape!\n");
504 * We implement taking and restoring checkpoints on the tape level.
505 * When each tape is opened, a new process is created by forking; this
506 * saves all of the necessary context in the parent. The child
507 * continues the dump; the parent waits around, saving the context.
508 * If the child returns X_REWRITE, then it had problems writing that tape;
509 * this causes the parent to fork again, duplicating the context, and
510 * everything continues as if nothing had happened.
/*
 * startnewtape (fragment): fork a checkpoint, open the next output
 * (stdout, popen pipeline, remote, or local file), spin up workers, and
 * write the TS_TAPE volume header.
 * NOTE(review): many interior lines missing; code left byte-identical.
 */
513 startnewtape(int top)
519 sig_t interrupt_save;
521 interrupt_save = signal(SIGINT, SIG_IGN);
522 parentpid = getpid();
525 (void)signal(SIGINT, interrupt_save);
527 * All signals are inherited...
529 setproctitle(NULL); /* Restore the proctitle. */
532 msg("Context save fork fails in parent %d\n", parentpid);
538 * save the context by waiting
539 * until the child doing all of the work returns.
540 * don't catch the interrupt
542 signal(SIGINT, SIG_IGN);
544 msg("Tape: %d; parent process: %d child process %d\n",
545 tapeno+1, parentpid, childpid);
547 if (waitpid(childpid, &status, 0) == -1)
548 msg("Waiting for child %d: %s\n", childpid,
551 msg("Child %d returns LOB status %o\n",
552 childpid, status&0xFF);
/* Extract the child's exit code from the wait status. */
554 status = (status >> 8) & 0xFF;
558 msg("Child %d finishes X_FINOK\n", childpid);
561 msg("Child %d finishes X_ABORT\n", childpid);
564 msg("Child %d finishes X_REWRITE\n", childpid);
567 msg("Child %d finishes unknown %d\n",
/* X_REWRITE: fork again and redo this volume from the checkpoint. */
578 goto restore_check_point;
580 msg("Bad return code from dump: %d\n", status);
584 } else { /* we are the child; just continue */
586 sleep(4); /* allow time for parent's message to get out */
587 msg("Child on Tape %d has parent %d, my pid = %d\n",
588 tapeno+1, parentpid, getpid());
591 * If we have a name like "/dev/rmt0,/dev/rmt1",
592 * use the name before the comma first, and save
593 * the remaining names for subsequent volumes.
595 tapeno++; /* current tape sequence */
596 if (nexttape || strchr(tape, ',')) {
597 if (nexttape && *nexttape)
599 if ((p = strchr(tape, ',')) != NULL) {
604 msg("Dumping volume %d on %s\n", tapeno, tape);
607 tapefd = STDOUT_FILENO;
608 } else if (popenout) {
609 char volno[sizeof("2147483647")];
/* Export the 1-based volume number for the -P pipeline command. */
611 (void)sprintf(volno, "%d", spcl.c_volume + 1);
612 if (setenv("DUMP_VOLUME", volno, 1) == -1) {
613 msg("Cannot set $DUMP_VOLUME.\n");
616 popenfp = popen(popenout, "w");
617 if (popenfp == NULL) {
618 msg("Cannot open output pipeline \"%s\".\n",
622 tapefd = fileno(popenfp);
625 while ((tapefd = (host ? rmtopen(tape, 2) :
626 open(tape, O_WRONLY|O_CREAT, 0666))) < 0)
629 open(tape, O_WRONLY|O_CREAT, 0666)) < 0)
632 msg("Cannot open output \"%s\".\n", tape);
633 if (!query("Do you want to retry the open?"))
638 create_workers(); /* Share open tape file descriptor with workers */
640 close(tapefd); /* Give up our copy of it. */
641 signal(SIGINFO, infosch);
646 newtape++; /* new tape signal */
647 spcl.c_count = wp->count;
649 * measure firstrec in TP_BSIZE units since restore doesn't
650 * know the correct ntrec value...
652 spcl.c_firstrec = wp->firstrec;
654 spcl.c_type = TS_TAPE;
655 writeheader((ino_t)wp->inode);
657 msg("Volume %d begins with blocks from inode %d\n",
/*
 * dumpabort (fragment): abort the entire dump.  Workers forward the
 * abort to the master via SIGTERM rather than cleaning up themselves.
 * NOTE(review): interior lines missing; the final msg() line below
 * appears to belong to Exit() — TODO confirm.  Code left byte-identical.
 */
663 dumpabort(int signo __unused)
666 if (master != 0 && master != getpid())
667 /* Signals master to call dumpabort */
668 (void) kill(master, SIGTERM);
671 msg("The ENTIRE dump is aborted.\n");
684 msg("pid = %d exits with status %d\n", getpid(), status);
690 * proceed - handler for SIGUSR2, used to synchronize IO between the workers.
/* NOTE(review): handler body missing from this listing. */
693 proceed(int signo __unused)
/*
 * create_workers (fragment): install the master's signal handlers, fork
 * one worker per slot connected via a socketpair, then tell each worker
 * the pid of its successor so they can pass the tape token with SIGUSR2.
 * NOTE(review): interior lines missing; code left byte-identical.
 */
709 signal(SIGTERM, dumpabort); /* Worker sends SIGTERM on dumpabort() */
710 signal(SIGPIPE, sigpipe);
711 signal(SIGUSR1, tperror); /* Worker sends SIGUSR1 on tape errors */
712 signal(SIGUSR2, proceed); /* Worker sends SIGUSR2 to next worker */
714 for (i = 0; i < WORKERS; i++) {
715 if (i == wp - &workers[0]) {
721 if (socketpair(AF_UNIX, SOCK_STREAM, 0, cmd) < 0 ||
722 (workers[i].pid = fork()) < 0)
723 quit("too many workers, %d (recompile smaller): %s\n",
726 workers[i].fd = cmd[1];
728 if (workers[i].pid == 0) { /* Worker starts up here */
/* Child closes the master-side descriptors it inherited so far. */
729 for (j = 0; j <= i; j++)
730 (void) close(workers[j].fd);
731 signal(SIGINT, SIG_IGN); /* Master handles this */
/* Send each worker the pid of the next worker in the ring. */
737 for (i = 0; i < WORKERS; i++)
738 (void) atomic_write(workers[i].fd,
739 (const void *) &workers[(i + 1) % WORKERS].pid,
740 sizeof workers[0].pid);
/* killall (fragment): forcibly terminate every live worker process. */
750 for (i = 0; i < WORKERS; i++)
751 if (workers[i].pid > 0) {
752 (void) kill(workers[i].pid, SIGKILL);
758 * Synchronization - each process has a lockfile, and shares file
759 * descriptors to the following process's lockfile. When our write
760 * completes, we release our lock on the following process's lock-
761 * file, allowing the following process to lock it and proceed. We
762 * get the lock back for the next cycle by swapping descriptors.
/*
 * worker (fragment): worker main loop — reopen the disk for a private
 * seek pointer, learn the next worker's pid, then repeatedly read a
 * request list, fill the tape buffer (from disk or inline data), wait
 * for the token, write the tape, report the size back, and pass the
 * token on with SIGUSR2.
 * NOTE(review): many interior lines missing; code left byte-identical.
 */
765 worker(int cmd, int worker_number)
768 int nextworker, size, wrote, eot_count;
771 * Need our own seek pointer.
773 (void) close(diskfd);
774 if ((diskfd = open(disk, O_RDONLY)) < 0)
775 quit("worker couldn't reopen disk: %s\n", strerror(errno));
778 * Need the pid of the next worker in the loop...
780 if ((nread = atomic_read(cmd, (void *)&nextworker, sizeof nextworker))
781 != sizeof nextworker) {
782 quit("master/worker protocol botched - didn't get pid of next worker.\n");
786 * Get list of blocks to dump, read the blocks into tape buffer
788 while ((nread = atomic_read(cmd, (void *)wp->req, reqsiz)) == reqsiz) {
789 struct req *p = wp->req;
791 for (trecno = 0; trecno < ntrec;
792 trecno += p->count, p += p->count) {
/* dblk != 0: read from disk; dblk == 0: one inline record follows. */
794 blkread(p->dblk, wp->tblock[trecno],
795 p->count * TP_BSIZE);
797 if (p->count != 1 || atomic_read(cmd,
798 (void *)wp->tblock[trecno],
799 TP_BSIZE) != TP_BSIZE)
800 quit("master/worker protocol botched.\n");
/* Wait for the SIGUSR2 token from the previous worker. */
803 if (setjmp(jmpbuf) == 0) {
811 /* Try to write the data... */
816 while (eot_count < 10 && size < writesize) {
819 wrote = rmtwrite(wp->tblock[0]+size,
823 wrote = write(tapefd, wp->tblock[0]+size,
826 printf("worker %d wrote %d\n", worker_number, wrote);
836 if (size != writesize)
837 printf("worker %d only wrote %d out of %d bytes and gave up.\n",
838 worker_number, size, writesize);
842 * Handle ENOSPC as an EOT condition.
844 if (wrote < 0 && errno == ENOSPC) {
/* Unrecoverable write error: tell the master via SIGUSR1. */
853 (void) kill(master, SIGUSR1);
858 * pass size of write back to master
861 (void)atomic_write(cmd, (const void *)&size,
866 * If partial write, don't want next worker to go.
867 * Also jolts him awake.
869 (void) kill(nextworker, SIGUSR2);
872 quit("error reading command pipe: %s\n", strerror(errno));
/*
 * Since a read from a pipe may not return all we asked for,
 * loop until the count is satisfied (or error).
 *
 * Returns count on full success, a short (non-negative) byte count if
 * EOF was reached early, or the negative return of read() on error.
 */
static int
atomic_read(int fd, void *buf, int count)
{
	int got, need = count;

	/*
	 * The listing was missing the loop body: without advancing buf,
	 * each retry would overwrite the start of the caller's buffer.
	 */
	while ((got = read(fd, buf, need)) > 0 && (need -= got) > 0)
		buf = (char *)buf + got;
	return (got < 0 ? got : count - need);
}
/*
 * Since a write to a pipe may not write all we ask if we get a signal,
 * loop until the count is satisfied (or error).
 *
 * Returns count on full success, a short (non-negative) byte count if
 * write() returned 0, or the negative return of write() on error.
 */
static int
atomic_write(int fd, const void *buf, int count)
{
	int got, need = count;

	/*
	 * The listing was missing the loop body: without advancing buf,
	 * each retry would resend the start of the caller's buffer.
	 */
	while ((got = write(fd, buf, need)) > 0 && (need -= got) > 0)
		buf = (const char *)buf + got;
	return (got < 0 ? got : count - need);
}