3 # Common code used to run regression tests for usr.bin/make.
8 # Output a message and exit with an error.
# Diagnostics go to stderr via the POSIX fd-duplication form ">&2";
# "/dev/stderr" is a platform-specific device node and is not guaranteed
# to exist (or to be writable) everywhere this harness runs.
12 echo "fatal: $*" >&2
17 # This test is not very reliable but works for now: the old fmake
18 # does have a -v option while bmake doesn't.
# Returns grep's exit status: 0 when the "cannot open" diagnostic matches
# (old fmake), non-zero otherwise (bmake).
19 ${MAKE_PROG} -f Makefile.non-existent -v 2>&1 | \
20 grep -q "cannot open.*non-existent"
24 # Check whether the working directory exists - it must.
# NOTE(review): ${WORK_DIR} is expanded unquoted here (and throughout the
# file); safe only while WORK_BASE contains no whitespace — confirm the
# WORK_BASE default guarantees that.
28 if [ ! -d ${WORK_DIR} ] ; then
29 fatal "working directory ${WORK_DIR} does not exist."
34 # Make sure all tests have been run
# NOTE(review): fragment — the TEST_N default, loop increment and the FAIL
# handling body fall outside this excerpt. A test counts as "run" only when
# all three capture files (status/stdout/stderr) exist.
38 if [ -z "${TEST_N}" ] ; then
44 while [ ${N} -le ${TEST_N} ] ; do
45 if ! skip_test ${N} ; then
46 if [ ! -f ${OUTPUT_DIR}/status.${N} -o \
47 ! -f ${OUTPUT_DIR}/stdout.${N} -o \
48 ! -f ${OUTPUT_DIR}/stderr.${N} ] ; then
49 echo "Test ${SUBDIR}/${N} not yet run"
56 if [ ! -z "${FAIL}" ] ; then
62 # Output usage message.
# NOTE(review): the option flags appear before "$0" in the usage string —
# this looks transposed (expected something like "sh $0 -v -m <path>
# -w <dir>"); confirm against the getopt call before changing runtime text.
66 echo "Usage: sh -v -m <path> -w <dir> $0 command(s)"
67 echo " setup - setup working directory"
68 echo " run - run the tests"
69 echo " show - show test results"
70 echo " compare - compare actual and expected results"
71 echo " diff - diff actual and expected results"
72 echo " reset - reset the test to its initial state"
73 echo " clean - delete working and output directory"
74 echo " test - setup + run + compare"
75 echo " prove - setup + run + compare + clean"
76 echo " desc - print short description"
77 echo " update - update the expected results with the current results"
78 echo " help - show this information"
82 # Return 0 if we should skip the test. 1 otherwise
# $1 is the test number; eval is needed to read the dynamically-named
# per-test override variable TEST_<n>_SKIP.
86 eval skip=\${TEST_${1}_SKIP}
87 if [ -z "${skip}" ] ; then
95 # Common function for setup and reset.
100 # If a Makefile exists in the source directory - copy it over
102 if [ -e Makefile -a ! -e ${WORK_DIR}/Makefile ] ; then
103 cp Makefile ${WORK_DIR}/Makefile
107 # If the TEST_MAKE_DIRS variable is set, create those directories
# Consumed as (directory, mode) pairs; the shift advancing past each pair
# is outside this excerpt (implied by the "$# -ne 0" loop condition).
109 set -- ${TEST_MAKE_DIRS}
110 while [ $# -ne 0 ] ; do
111 if [ ! -d ${WORK_DIR}/${1} ] ; then
112 mkdir -p -m ${2} ${WORK_DIR}/${1}
# Directory already exists (e.g. on reset): just enforce the mode.
114 chmod ${2} ${WORK_DIR}/${1}
120 # If the TEST_COPY_FILES variable is set, copy those files over to
121 # the working directory. The value is assumed to be pairs of
122 # filenames and modes.
124 set -- ${TEST_COPY_FILES}
125 while [ $# -ne 0 ] ; do
126 if [ ! -e ${WORK_DIR}/${1} ] ; then
127 cp ${1} ${WORK_DIR}/${1}
129 chmod ${2} ${WORK_DIR}/${1}
134 # If the TEST_TOUCH variable is set, it is taken to be a list
135 # of pairs of filenames and arguments to touch(1). The arguments
136 # to touch must be surrounded by single quotes if there are more
# "eval set --" re-splits the list while honouring those single quotes;
# the inner eval lets the quoted touch arguments expand as words.
139 eval set -- ${TEST_TOUCH}
140 while [ $# -ne 0 ] ; do
141 eval touch ${2} ${WORK_DIR}/${1}
# TEST_LINKS is consumed the same way, as (existing, new) filename pairs.
148 eval set -- ${TEST_LINKS}
149 while [ $# -ne 0 ] ; do
150 eval ln ${WORK_DIR}/${1} ${WORK_DIR}/${2}
156 # Setup the test. This creates the working and output directories and
157 # populates it with files. If there is a setup_test() function - call it.
162 # Check whether the working directory exists. If it does exit
163 # fatally so that we don't clobber a test the user is working on.
165 if [ -d ${WORK_DIR} ] ; then
166 fatal "working directory ${WORK_DIR} already exists."
170 # Now create it and the output directory
# mkdir -p creates WORK_DIR implicitly when OUTPUT_DIR lives beside it.
174 mkdir -p ${OUTPUT_DIR}
182 # Now after all execute the user's setup function if it exists.
188 # Default setup_test function does nothing. This may be overridden by
196 # Reset the test. Here we need to rely on information from the test.
197 # We execute the same steps as in the setup, but try not to clobber existing
199 # All files and directories that are listed on the TEST_CLEAN_FILES
200 # variable are removed. Then the TEST_TOUCH list is executed and finally
201 # the reset_test() function called if it exists.
208 # Clean the output directory
210 rm -rf ${OUTPUT_DIR}/*
# TEST_CLEAN_FILES entries are relative to the working directory.
220 for f in ${TEST_CLEAN_FILES} ; do
221 rm -rf ${WORK_DIR}/${f}
225 # Execute test's function
231 # Default reset_test function does nothing. This may be overridden by
239 # Clean the test. This simply removes the working and output directories.
244 # If you have special cleaning needs, provide a 'cleanup' shell script.
# TEST_CLEANUP enables the per-test cleanup hook; NO_TEST_CLEANUP suppresses
# deletion — presumably for post-mortem inspection of results (confirm).
246 if [ -n "${TEST_CLEANUP}" ] ; then
249 if [ -z "${NO_TEST_CLEANUP}" ] ; then
262 if [ -z "${TEST_N}" ] ; then
267 while [ ${N} -le ${TEST_N} ] ; do
268 if ! skip_test ${N} ; then
# Redirect this shell's stdout/stderr into the per-test capture files;
# presumably executed in a subshell so the caller's fds survive — confirm
# against the surrounding (elided) lines.
270 exec 1>${OUTPUT_DIR}/stdout.${N} 2>${OUTPUT_DIR}/stderr.${N}
272 echo $? >${OUTPUT_DIR}/status.${N}
280 # Default run_test() function. It can be replaced by the
281 # user specified regression test. The argument to this function is
# TEST_<n> may override the make target list; defaults to "test<n>".
286 eval args=\${TEST_${1}-test${1}}
297 if [ -z "${TEST_N}" ] ; then
302 while [ ${N} -le ${TEST_N} ] ; do
303 if ! skip_test ${N} ; then
# Dump the captured status/stdout/stderr for each non-skipped test.
304 echo "=== Test ${N} Status =================="
305 cat ${OUTPUT_DIR}/status.${N}
306 echo ".......... Stdout .................."
307 cat ${OUTPUT_DIR}/stdout.${N}
308 echo ".......... Stderr .................."
309 cat ${OUTPUT_DIR}/stderr.${N}
316 # Compare results with expected results
# Emits TAP-style "ok N ..." lines; do_compare returns non-zero on a
# mismatch and the failing stream names accumulate in $fail.
323 if [ -z "${TEST_N}" ] ; then
329 while [ ${N} -le ${TEST_N} ] ; do
333 if ! skip_test ${N} ; then
334 do_compare stdout ${N} || fail="${fail}stdout "
335 do_compare stderr ${N} || fail="${fail}stderr "
336 do_compare status ${N} || fail="${fail}status "
# Per-test TODO/SKIP annotations are read from dynamically-named variables.
337 eval todo=\${TEST_${N}_TODO}
339 eval skip=\${TEST_${N}_SKIP}
341 if [ ! -z "$fail" ]; then
# NOTE(review): "echo -n" is not portable POSIX sh; acceptable for the
# FreeBSD /bin/sh this harness targets.
344 echo -n "ok ${N} ${SUBDIR}/${N}"
345 if [ ! -z "$fail" -o ! -z "$todo" -o ! -z "$skip" ]; then
348 if [ ! -z "$skip" ] ; then
349 echo -n "skip $skip; "
351 if [ ! -z "$todo" ] ; then
352 echo -n "TODO $todo; "
354 if [ ! -z "$fail" ] ; then
355 echo "reason: ${fail}"
363 # Check if the test result is the same as the expected result.
# $1 = stream name (stdout/stderr/status), $2 = test number.
# Returns diff's status when the expected file exists; the behaviour for a
# missing expected file is outside this excerpt.
# NOTE(review): "local" is a widespread extension, not strict POSIX.
370 local EXPECTED RESULT
371 EXPECTED="expected.$1.$2"
372 RESULT="${OUTPUT_DIR}/$1.$2"
374 if [ -f $EXPECTED ]; then
375 diff -q $EXPECTED $RESULT 1>/dev/null 2>/dev/null
383 # Diff current and expected results
# Iterates the tests like the other commands; the per-test do_diff calls
# fall outside this excerpt.
390 if [ -z "${TEST_N}" ] ; then
395 while [ ${N} -le ${TEST_N} ] ; do
396 if ! skip_test ${N} ; then
407 # Check if the test result is the same as the expected result.
# $1 = stream name (stdout/stderr/status), $2 = test number.
# Prints the diff command first so the output is self-describing.
414 local EXPECTED RESULT
415 EXPECTED="expected.$1.$2"
416 RESULT="${OUTPUT_DIR}/$1.$2"
418 echo diff -u $EXPECTED $RESULT
419 if [ -f $EXPECTED ]; then
420 diff -u $EXPECTED $RESULT
422 echo "${EXPECTED} does not exist"
427 # Update expected results
# Copies the currently captured output over the checked-in expected files;
# run only after verifying the new behaviour really is correct.
434 if [ -z "${TEST_N}" ] ; then
440 while [ ${N} -le ${TEST_N} ] ; do
441 if ! skip_test ${N} ; then
442 cp ${OUTPUT_DIR}/stdout.${N} expected.stdout.${N}
443 cp ${OUTPUT_DIR}/stderr.${N} expected.stderr.${N}
444 cp ${OUTPUT_DIR}/status.${N} expected.status.${N}
# Short one-line description of the test directory (desc command).
455 echo "${SUBDIR}: ${DESC}"
469 # Run the test for prove(1)
480 # Main function. Execute the command(s) on the command line.
484 if [ $# -eq 0 ] ; then
485 # if no arguments given default to 'prove'
# These tests require the old fmake; mark every test skipped under bmake.
# NOTE(review): jot(1) is BSD-specific — fine for FreeBSD, absent on Linux.
489 if ! make_is_fmake ; then
490 for i in $(jot ${TEST_N:-1}) ; do
491 eval TEST_${i}_SKIP=\"make is not fmake\"
# Dispatch: only whitelisted command words are executed as functions.
499 setup | run | compare | diff | clean | reset | show | \
500 test | prove | desc | update)
510 ##############################################################################
516 # Parse command line arguments.
# NOTE(review): legacy getopt(1) mishandles arguments containing spaces;
# tolerable here since only -m/-w path options are parsed.
518 args=`getopt m:w:v $*`
548 # Determine our sub-directory. Argh.
# NOTE(review): walks upward until common.sh is found — this loop never
# terminates if the script is run from outside the test tree; confirm
# callers guarantee a common.sh ancestor.
551 SRC_BASE=`while [ ! -f common.sh ] ; do cd .. ; done ; pwd`
552 SUBDIR=`echo ${SRC_DIR} | sed "s@${SRC_BASE}/@@"`
555 # Construct working directory
557 WORK_BASE=${WORK_BASE:-"/tmp/$USER.make.test"}
558 WORK_DIR=${WORK_BASE}/${SUBDIR}
559 OUTPUT_DIR=${WORK_DIR}.OUTPUT
# Default make binary; override by exporting MAKE_PROG in the environment.
564 MAKE_PROG=${MAKE_PROG:-/usr/bin/make}