3 # Common code used to run regression tests for usr.bin/make.
6 # Output a message and exit with an error.
10 echo "fatal: $*" >/dev/stderr
15 # This test is not very reliable but works for now: the old fmake
16 # does have a -v option while bmake doesn't.
17 ${MAKE_PROG} -f Makefile.non-existent -v 2>&1 | \
18 grep -q "cannot open.*non-existent"
22 # Check whether the working directory exists - it must.
26 if [ ! -d ${WORK_DIR} ] ; then
27 fatal "working directory ${WORK_DIR} does not exist."
32 # Make sure all tests have been run
36 if [ -z "${TEST_N}" ] ; then
42 while [ ${N} -le ${TEST_N} ] ; do
43 if ! skip_test ${N} ; then
44 if [ ! -f ${OUTPUT_DIR}/status.${N} -o \
45 ! -f ${OUTPUT_DIR}/stdout.${N} -o \
46 ! -f ${OUTPUT_DIR}/stderr.${N} ] ; then
47 echo "Test ${SUBDIR}/${N} no yet run"
54 if [ ! -z "${FAIL}" ] ; then
60 # Output usage message.
64 echo "Usage: sh -v -m <path> -w <dir> $0 command(s)"
65 echo " setup - setup working directory"
66 echo " run - run the tests"
67 echo " show - show test results"
68 echo " compare - compare actual and expected results"
69 echo " diff - diff actual and expected results"
70 echo " reset - reset the test to its initial state"
71 echo " clean - delete working and output directory"
72 echo " test - setup + run + compare"
73 echo " prove - setup + run + compare + clean"
74 echo " desc - print short description"
75 echo " update - update the expected results with the current results"
76 echo " help - show this information"
80 # Return 0 if we should skip the test. 1 otherwise
84 eval skip=\${TEST_${1}_SKIP}
85 if [ -z "${skip}" ] ; then
93 # Common function for setup and reset.
98 # If a Makefile exists in the source directory - copy it over
100 if [ -e ${SRC_DIR}/Makefile.test -a ! -e ${WORK_DIR}/Makefile ] ; then
101 cp ${SRC_DIR}/Makefile.test ${WORK_DIR}/Makefile
105 # If the TEST_MAKE_DIRS variable is set, create those directories
107 set -- ${TEST_MAKE_DIRS}
108 while [ $# -ne 0 ] ; do
109 if [ ! -d ${WORK_DIR}/${1} ] ; then
110 mkdir -p -m ${2} ${WORK_DIR}/${1}
112 chmod ${2} ${WORK_DIR}/${1}
118 # If the TEST_COPY_FILES variable is set, copy those files over to
119 # the working directory. The value is assumed to be pairs of
120 # filenames and modes.
122 set -- ${TEST_COPY_FILES}
123 while [ $# -ne 0 ] ; do
124 local dstname="$(echo ${1} | sed -e 's,Makefile.test,Makefile,')"
125 if [ ! -e ${WORK_DIR}/${dstname} ] ; then
126 cp ${SRC_DIR}/${1} ${WORK_DIR}/${dstname}
128 chmod ${2} ${WORK_DIR}/${dstname}
133 # If the TEST_TOUCH variable is set, it is taken to be a list
134 # of pairs of filenames and arguments to touch(1). The arguments
135 # to touch must be surrounded by single quotes if there are more
138 eval set -- ${TEST_TOUCH}
139 while [ $# -ne 0 ] ; do
140 eval touch ${2} ${WORK_DIR}/${1}
147 eval set -- ${TEST_LINKS}
148 while [ $# -ne 0 ] ; do
149 eval ln ${WORK_DIR}/${1} ${WORK_DIR}/${2}
155 # Setup the test. This creates the working and output directories and
156 # populates it with files. If there is a setup_test() function - call it.
161 # Check whether the working directory exists. If it does exit
162 # fatally so that we don't clobber a test the user is working on.
164 if [ -d ${WORK_DIR} ] ; then
165 fatal "working directory ${WORK_DIR} already exists."
169 # Now create it and the output directory
173 mkdir -p ${OUTPUT_DIR}
181 # Now after all execute the user's setup function if it exists.
187 # Default setup_test function does nothing. This may be overridden by
195 # Reset the test. Here we need to rely on information from the test.
196 # We execute the same steps as in the setup, but try not to clobber existing
198 # All files and directories that are listed on the TEST_CLEAN_FILES
199 # variable are removed. Then the TEST_TOUCH list is executed and finally
200 # the reset_test() function called if it exists.
207 # Clean the output directory
209 rm -rf ${OUTPUT_DIR}/*
219 for f in ${TEST_CLEAN_FILES} ; do
220 rm -rf ${WORK_DIR}/${f}
224 # Execute test's function
230 # Default reset_test function does nothing. This may be overridden by
238 # Clean the test. This simply removes the working and output directories.
243 # If you have special cleaning needs, provide a 'cleanup' shell script.
245 if [ -n "${TEST_CLEANUP}" ] ; then
248 if [ -z "${NO_TEST_CLEANUP}" ] ; then
261 if [ -z "${TEST_N}" ] ; then
266 while [ ${N} -le ${TEST_N} ] ; do
267 if ! skip_test ${N} ; then
269 exec 1>${OUTPUT_DIR}/stdout.${N} 2>${OUTPUT_DIR}/stderr.${N}
271 echo $? >${OUTPUT_DIR}/status.${N}
279 # Default run_test() function. It can be replaced by the
280 # user specified regression test. The argument to this function is
285 eval args=\${TEST_${1}-test${1}}
296 if [ -z "${TEST_N}" ] ; then
301 while [ ${N} -le ${TEST_N} ] ; do
302 if ! skip_test ${N} ; then
303 echo "=== Test ${N} Status =================="
304 cat ${OUTPUT_DIR}/status.${N}
305 echo ".......... Stdout .................."
306 cat ${OUTPUT_DIR}/stdout.${N}
307 echo ".......... Stderr .................."
308 cat ${OUTPUT_DIR}/stderr.${N}
315 # Compare results with expected results
322 if [ -z "${TEST_N}" ] ; then
328 while [ ${N} -le ${TEST_N} ] ; do
332 if ! skip_test ${N} ; then
333 do_compare stdout ${N} || fail="${fail}stdout "
334 do_compare stderr ${N} || fail="${fail}stderr "
335 do_compare status ${N} || fail="${fail}status "
336 eval todo=\${TEST_${N}_TODO}
338 eval skip=\${TEST_${N}_SKIP}
341 if [ ! -z "$fail" ]; then
344 msg="${msg}ok ${N} ${SUBDIR}/${N}"
345 if [ ! -z "$fail" -o ! -z "$todo" -o ! -z "$skip" ]; then
348 if [ ! -z "$skip" ] ; then
349 msg="${msg}skip ${skip}; "
351 if [ ! -z "$todo" ] ; then
352 msg="${msg}TODO ${todo}; "
354 if [ ! -z "$fail" ] ; then
355 msg="${msg}reason: ${fail}"
363 # Check if the test result is the same as the expected result.
370 local EXPECTED RESULT
371 EXPECTED="${SRC_DIR}/expected.$1.$2"
372 RESULT="${OUTPUT_DIR}/$1.$2"
374 if [ -f $EXPECTED ]; then
375 cat $RESULT | sed -e "s,^$(basename $MAKE_PROG):,make:," | \
377 #diff -q $EXPECTED - 1>/dev/null 2>/dev/null
385 # Diff current and expected results
392 if [ -z "${TEST_N}" ] ; then
397 while [ ${N} -le ${TEST_N} ] ; do
398 if ! skip_test ${N} ; then
409 # Check if the test result is the same as the expected result.
416 local EXPECTED RESULT
417 EXPECTED="${SRC_DIR}/expected.$1.$2"
418 RESULT="${OUTPUT_DIR}/$1.$2"
420 echo diff -u $EXPECTED $RESULT
421 if [ -f $EXPECTED ]; then
422 diff -u $EXPECTED $RESULT
424 echo "${EXPECTED} does not exist"
429 # Update expected results
436 if [ -z "${TEST_N}" ] ; then
442 while [ ${N} -le ${TEST_N} ] ; do
443 if ! skip_test ${N} ; then
444 cp ${OUTPUT_DIR}/stdout.${N} expected.stdout.${N}
445 cp ${OUTPUT_DIR}/stderr.${N} expected.stderr.${N}
446 cp ${OUTPUT_DIR}/status.${N} expected.status.${N}
457 echo "${SUBDIR}: ${DESC}"
471 # Run the test for prove(1)
482 # Main function. Execute the command(s) on the command line.
486 if [ $# -eq 0 ] ; then
487 # if no arguments given default to 'prove'
491 if ! make_is_fmake ; then
492 for i in $(jot ${TEST_N:-1}) ; do
493 eval TEST_${i}_SKIP=\"make is not fmake\"
501 setup | run | compare | diff | clean | reset | show | \
502 test | prove | desc | update)
512 ##############################################################################
518 # Determine our sub-directory. Argh.
520 SRC_DIR=$(dirname $0)
521 SRC_BASE=`cd ${SRC_DIR} ; while [ ! -f common.sh ] ; do cd .. ; done ; pwd`
522 SUBDIR=`echo ${SRC_DIR} | sed "s@${SRC_BASE}/@@"`
525 # Construct working directory
527 WORK_DIR=$(pwd)/work/${SUBDIR}
528 OUTPUT_DIR=${WORK_DIR}.OUTPUT
533 MAKE_PROG=${MAKE_PROG:-/usr/bin/make}