]> CyberLeo.Net >> Repos - FreeBSD/stable/10.git/blob - tools/regression/usr.bin/make/common.sh
MFC: r264832
[FreeBSD/stable/10.git] / tools / regression / usr.bin / make / common.sh
1 #!/bin/sh
2 #
3 # Common code used run regression tests for usr.bin/make.
4 #
5 # $FreeBSD$
6
7 #
8 # Output a message and exit with an error.
9 #
fatal()
{
        # Print the message on stderr and abort.
        # Use >&2 rather than /dev/stderr: the device node is not required
        # to exist (e.g. in chroots/jails) while fd 2 always is.
        echo "fatal: $*" >&2
        exit 1
}
15
make_is_fmake() {
        # Heuristic: the old fmake accepts -v and complains that the named
        # makefile cannot be opened; bmake rejects -v, so the grep fails.
        if ${MAKE_PROG} -f Makefile.non-existent -v 2>&1 | \
            grep -q "cannot open.*non-existent" ; then
                return 0
        fi
        return 1
}
22
23 #
24 # Check whether the working directory exists - it must.
25 #
ensure_workdir()
{
        # The working directory must already exist (created by 'setup');
        # abort otherwise so later steps do not operate on a missing tree.
        # WORK_DIR is quoted: the unquoted form silently passed the test
        # when the variable was empty.
        if [ ! -d "${WORK_DIR}" ] ; then
                fatal "working directory ${WORK_DIR} does not exist."
        fi
}
32
33 #
34 # Make sure all tests have been run
35 #
ensure_run()
{
        # Verify that every non-skipped test has produced its status,
        # stdout and stderr files; exit with an error if any has not.
        if [ -z "${TEST_N}" ] ; then
                TEST_N=1
        fi

        FAIL=
        N=1
        while [ "${N}" -le "${TEST_N}" ] ; do
                if ! skip_test "${N}" ; then
                        # Obsolescent '-o' test operator replaced by '||'.
                        if [ ! -f "${OUTPUT_DIR}/status.${N}" ] || \
                           [ ! -f "${OUTPUT_DIR}/stdout.${N}" ] || \
                           [ ! -f "${OUTPUT_DIR}/stderr.${N}" ] ; then
                                # Fixed message typo ("no yet" -> "not yet").
                                echo "Test ${SUBDIR}/${N} not yet run"
                                FAIL=yes
                        fi
                fi
                N=$((N + 1))
        done

        if [ ! -z "${FAIL}" ] ; then
                exit 1
        fi
}
60
61 #
62 # Output usage messsage.
63 #
print_usage()
{
        # Emit the whole usage text in one here-document; $0 expands as in
        # the original echo version.
        cat <<EOF
Usage: sh -v -m <path> -w <dir> $0 command(s)
 setup    - setup working directory
 run      - run the tests
 show     - show test results
 compare  - compare actual and expected results
 diff     - diff actual and expected results
 reset    - reset the test to its initial state
 clean    - delete working and output directory
 test     - setup + run + compare
 prove    - setup + run + compare + clean
 desc     - print short description
 update   - update the expected results with the current results
 help     - show this information
EOF
}
80
81 #
82 # Return 0 if we should skip the test. 1 otherwise
83 #
skip_test()
{
        # A test is skipped when TEST_<n>_SKIP is non-empty; the test
        # status of [ -n ] is returned directly (0 = skip, 1 = run).
        eval skip=\${TEST_${1}_SKIP}
        [ -n "${skip}" ]
}
93
94 #
95 # Common function for setup and reset.
96 #
common_setup()
{
        # Populate ${WORK_DIR}; shared by eval_setup() and eval_reset().
        # Existing files are never clobbered so reset keeps user edits.

        #
        # If a Makefile exists in the source directory - copy it over
        # (obsolescent '-a' test operator replaced by '&&').
        #
        if [ -e Makefile ] && [ ! -e "${WORK_DIR}/Makefile" ] ; then
                cp Makefile "${WORK_DIR}/Makefile"
        fi

        #
        # If the TEST_MAKE_DIRS variable is set, create those directories.
        # The value is pairs of directory names and modes.
        #
        set -- ${TEST_MAKE_DIRS}
        while [ $# -ne 0 ] ; do
                if [ ! -d "${WORK_DIR}/${1}" ] ; then
                        mkdir -p -m "${2}" "${WORK_DIR}/${1}"
                else
                        chmod "${2}" "${WORK_DIR}/${1}"
                fi
                shift ; shift
        done

        #
        # If the TEST_COPY_FILES variable is set, copy those files over to
        # the working directory. The value is assumed to be pairs of
        # filenames and modes.
        #
        set -- ${TEST_COPY_FILES}
        while [ $# -ne 0 ] ; do
                if [ ! -e "${WORK_DIR}/${1}" ] ; then
                        cp "${1}" "${WORK_DIR}/${1}"
                fi
                chmod "${2}" "${WORK_DIR}/${1}"
                shift ; shift
        done

        #
        # If the TEST_TOUCH variable is set, it is taken to be a list
        # of pairs of filenames and arguments to touch(1). The arguments
        # to touch must be surrounded by single quotes if there are more
        # than one argument.
        #
        # The unquoted evals below are deliberate: they re-split the
        # quoted pair elements, so they are kept exactly as-is.
        eval set -- ${TEST_TOUCH}
        while [ $# -ne 0 ] ; do
                eval touch ${2} ${WORK_DIR}/${1}
                shift ; shift
        done

        #
        # Now create links (pairs of existing name and new link name).
        #
        eval set -- ${TEST_LINKS}
        while [ $# -ne 0 ] ; do
                eval ln ${WORK_DIR}/${1} ${WORK_DIR}/${2}
                shift ; shift
        done
}
154
155 #
156 # Setup the test. This creates the working and output directories and
157 # populates it with files. If there is a setup_test() function - call it.
158 #
eval_setup()
{
        #
        # Check whether the working directory exists. If it does exit
        # fatally so that we don't clobber a test the user is working on.
        #
        if [ -d "${WORK_DIR}" ] ; then
                fatal "working directory ${WORK_DIR} already exists."
        fi

        #
        # Now create it and a fresh output directory.
        #
        mkdir -p "${WORK_DIR}"
        rm -rf "${OUTPUT_DIR}"
        mkdir -p "${OUTPUT_DIR}"

        # Shared population of the working directory.
        common_setup

        # Finally run the test's own setup hook (no-op by default).
        setup_test
}
186
187 #
188 # Default setup_test function does nothing. This may be overriden by
189 # the test.
190 #
setup_test()
{
        # Default hook: nothing to do.  The ':' is required because an
        # empty compound command is a syntax error in sh.
        :
}
194
195 #
196 # Reset the test. Here we need to rely on information from the test.
197 # We executed the same steps as in the setup, by try not to clobber existing
198 # files.
199 # All files and directories that are listed on the TEST_CLEAN_FILES
200 # variable are removed. Then the TEST_TOUCH list is executed and finally
201 # the reset_test() function called if it exists.
202 #
eval_reset()
{
        ensure_workdir

        #
        # Clean the output directory.  The ':?' guard aborts if
        # OUTPUT_DIR is unset/empty instead of expanding to 'rm -rf /*'.
        #
        rm -rf "${OUTPUT_DIR:?}"/*

        # Re-populate the working directory without clobbering files.
        common_setup

        #
        # Remove files listed in TEST_CLEAN_FILES (relative to WORK_DIR;
        # deliberately unquoted so the list word-splits).
        #
        for f in ${TEST_CLEAN_FILES} ; do
                rm -rf "${WORK_DIR}/${f}"
        done

        # Execute the test's own reset hook (no-op by default).
        reset_test
}
229
230 #
231 # Default reset_test function does nothing. This may be overriden by
232 # the test.
233 #
reset_test()
{
        # Default hook: nothing to do.  The ':' is required because an
        # empty compound command is a syntax error in sh.
        :
}
237
238 #
239 # Clean the test. This simply removes the working and output directories.
240 #
eval_clean()
{
        # Remove the working and output directories unless the user asked
        # to keep them by setting NO_TEST_CLEANUP.

        #
        # If you have special cleaning needs, provide a 'cleanup' shell script.
        #
        if [ -n "${TEST_CLEANUP}" ] ; then
                . "${SRC_DIR}/cleanup"
        fi
        if [ -z "${NO_TEST_CLEANUP}" ] ; then
                # Quoted so odd characters in the paths cannot expand into
                # unrelated rm targets.
                rm -rf "${WORK_DIR}"
                rm -rf "${OUTPUT_DIR}"
        fi
}
254
255 #
256 # Run the test.
257 #
eval_run()
{
        ensure_workdir

        if [ -z "${TEST_N}" ] ; then
                TEST_N=1
        fi

        N=1
        while [ "${N}" -le "${TEST_N}" ] ; do
                if ! skip_test "${N}" ; then
                        # Run each test in a subshell so neither the cd nor
                        # the redirections leak into this shell.  The cd is
                        # now checked: previously a failed cd silently ran
                        # the test in the wrong directory.
                        ( cd "${WORK_DIR}" || exit 1
                          exec 1>"${OUTPUT_DIR}/stdout.${N}" 2>"${OUTPUT_DIR}/stderr.${N}"
                          run_test "${N}"
                          echo $? >"${OUTPUT_DIR}/status.${N}"
                        )
                fi
                N=$((N + 1))
        done
}
278
279 #
280 # Default run_test() function.  It can be replaced by the
281 # user specified regression test. The argument to this function is
282 # the test number.
283 #
run_test()
{
        # Execute test number $1: the make arguments come from TEST_<n>
        # when that variable is set, otherwise the default target test<n>
        # is built.  The expansion is left unquoted on purpose so the
        # arguments word-split.
        eval _args=\${TEST_${1}-test${1}}
        ${MAKE_PROG} ${_args}
}
289
290 #
291 # Show test results.
292 #
eval_show()
{
        ensure_workdir

        # Default to a single test when the test file did not set TEST_N.
        : "${TEST_N:=1}"

        N=1
        while [ "${N}" -le "${TEST_N}" ] ; do
                # Dump status, stdout and stderr of every non-skipped test.
                skip_test "${N}" || {
                        echo "=== Test ${N} Status =================="
                        cat ${OUTPUT_DIR}/status.${N}
                        echo ".......... Stdout .................."
                        cat ${OUTPUT_DIR}/stdout.${N}
                        echo ".......... Stderr .................."
                        cat ${OUTPUT_DIR}/stderr.${N}
                }
                N=$((N + 1))
        done
}
314
315 #
316 # Compare results with expected results
317 #
eval_compare()
{
        ensure_workdir
        ensure_run

        # Default to a single test if the test file did not set TEST_N.
        if [ -z "${TEST_N}" ] ; then
                TEST_N=1
        fi

        # Emit TAP (Test Anything Protocol) output for prove(1): first the
        # plan line, then one "ok"/"not ok" line per test.
        echo "1..${TEST_N}"
        N=1
        while [ ${N} -le ${TEST_N} ] ; do
                fail=
                todo=
                skip=
                if ! skip_test ${N} ; then
                        # Accumulate the names of every mismatching stream
                        # so the reason lists all of them.
                        do_compare stdout ${N} || fail="${fail}stdout "
                        do_compare stderr ${N} || fail="${fail}stderr "
                        do_compare status ${N} || fail="${fail}status "
                        eval todo=\${TEST_${N}_TODO}
                else
                        eval skip=\${TEST_${N}_SKIP}
                fi
                # NOTE(review): 'echo -n' is not portable POSIX sh, but it
                # works in FreeBSD's /bin/sh which this suite targets.
                if [ ! -z "$fail" ]; then
                        echo -n "not "
                fi
                echo -n "ok ${N} ${SUBDIR}/${N}"
                # A TAP directive separator is needed if any annotation
                # (skip/TODO/failure reason) follows.
                if [ ! -z "$fail" -o ! -z "$todo" -o ! -z "$skip" ]; then
                        echo -n " # "
                fi
                if [ ! -z "$skip" ] ; then
                        echo -n "skip $skip; "
                fi
                if [ ! -z "$todo" ] ; then
                        echo -n "TODO $todo; "
                fi
                if [ ! -z "$fail" ] ; then
                        echo "reason: ${fail}"
                fi
                # Terminate the result line (the reason branch above also
                # prints a newline, yielding a blank line after failures).
                echo
                N=$((N + 1))
        done
}
361
362 #
363 # Check if the test result is the same as the expected result.
364 #
365 # $1    Input file
366 # $2    Test number
367 #
do_compare()
{
        # Compare stream $1 ("stdout"/"stderr"/"status") of test $2
        # against its expected.* reference file.  Returns diff's status;
        # a missing reference file counts as a failure.
        local EXPECTED RESULT
        EXPECTED="expected.$1.$2"
        RESULT="${OUTPUT_DIR}/$1.$2"

        if [ -f "$EXPECTED" ]; then
                diff -q "$EXPECTED" "$RESULT" >/dev/null 2>&1
                return $?
        else
                return 1        # FAIL
        fi
}
381
382 #
383 # Diff current and expected results
384 #
eval_diff()
{
        # Show unified diffs between expected and actual results for all
        # non-skipped tests.
        ensure_workdir
        ensure_run

        if [ -z "${TEST_N}" ] ; then
                TEST_N=1
        fi

        N=1
        while [ "${N}" -le "${TEST_N}" ] ; do
                if ! skip_test "${N}" ; then
                        # Dead 'FAIL=' assignment removed: nothing in this
                        # function ever read it.
                        do_diff stdout "${N}"
                        do_diff stderr "${N}"
                        do_diff status "${N}"
                fi
                N=$((N + 1))
        done
}
405
406 #
407 # Check if the test result is the same as the expected result.
408 #
409 # $1    Input file
410 # $2    Test number
411 #
do_diff()
{
        # Print a unified diff between the expected and the actual output
        # for stream $1 of test $2, prefixed by the diff command line.
        local EXPECTED RESULT
        EXPECTED="expected.$1.$2"
        RESULT="${OUTPUT_DIR}/$1.$2"

        echo diff -u "$EXPECTED" "$RESULT"
        if [ -f "$EXPECTED" ]; then
                diff -u "$EXPECTED" "$RESULT"
        else
                echo "${EXPECTED} does not exist"
        fi
}
425
426 #
427 # Update expected results
428 #
eval_update()
{
        # Overwrite the expected.* reference files with the results of the
        # last run for every non-skipped test.
        ensure_workdir
        ensure_run

        if [ -z "${TEST_N}" ] ; then
                TEST_N=1
        fi

        # Dead 'FAIL=' assignment removed: nothing here ever read it.
        N=1
        while [ "${N}" -le "${TEST_N}" ] ; do
                if ! skip_test "${N}" ; then
                        cp "${OUTPUT_DIR}/stdout.${N}" "expected.stdout.${N}"
                        cp "${OUTPUT_DIR}/stderr.${N}" "expected.stderr.${N}"
                        cp "${OUTPUT_DIR}/status.${N}" "expected.status.${N}"
                fi
                N=$((N + 1))
        done
}
449
450 #
451 # Print description
452 #
eval_desc()
{
        # One-line listing entry: "<subdir>: <description>".
        printf '%s: %s\n' "${SUBDIR}" "${DESC}"
}
457
458 #
459 # Run the test
460 #
eval_test()
{
        # 'test' = setup + run + compare, leaving the working and output
        # directories in place for inspection.
        for _step in setup run compare ; do
                eval_${_step}
        done
}
467
468 #
469 # Run the test for prove(1)
470 #
eval_prove()
{
        # Full cycle for prove(1): build, execute, check, then tidy up.
        for _step in setup run compare clean ; do
                eval_${_step}
        done
}
478
479 #
480 # Main function. Execute the command(s) on the command line.
481 #
eval_cmd()
{
        if [ $# -eq 0 ] ; then
                # No arguments given - default to 'prove'.
                set -- prove
        fi

        # These tests exercise fmake-only behaviour; under bmake mark
        # every test as skipped.
        # NOTE(review): jot(1) is BSD-specific - assumed available on the
        # FreeBSD hosts this suite runs on.
        if ! make_is_fmake ; then
                for i in $(jot ${TEST_N:-1}) ; do
                        eval TEST_${i}_SKIP=\"make is not fmake\"
                done
        fi

        for i
        do
                case $i in

                setup | run | compare | diff | clean | reset | show | \
                test | prove | desc | update)
                        eval eval_$i
                        ;;
                help | *)
                        # 'help' listed before the catch-all: in the
                        # original '* | help' pattern the 'help' word was
                        # unreachable dead text after '*'.
                        print_usage
                        ;;
                esac
        done
}
509
##############################################################################
#
# Main code
#

#
# Parse command line arguments.
# NOTE(review): this is the legacy getopt(1) and arguments are passed as
# unquoted $* - option values containing whitespace (e.g. a -w path with a
# space) will be mangled; assumed acceptable for this suite.
#
args=`getopt m:w:v $*`
if [ $? != 0 ]; then
        echo 'Usage: ...'
        exit 2
fi
set -- $args
for i; do
        case "$i" in
        -m)
                # Path of the make(1) binary under test.
                MAKE_PROG="$2"
                shift
                shift
                ;;
        -w)
                # Base directory under which per-test work dirs are built.
                WORK_BASE="$2"
                shift
                shift
                ;;
        -v)
                VERBOSE=1
                shift
                ;;
        --)
                shift
                break
                ;;
        esac
done

#
# Determine our sub-directory. Argh.
# Walk up from the current directory until common.sh is found; that
# directory is the suite root, and SUBDIR is our path relative to it.
# NOTE(review): the walk never terminates if common.sh is absent all the
# way to / - this assumes the script is always sourced from within the
# test tree.
#
SRC_DIR=`pwd`
SRC_BASE=`while [ ! -f common.sh ] ; do cd .. ; done ; pwd`
SUBDIR=`echo ${SRC_DIR} | sed "s@${SRC_BASE}/@@"`

#
# Construct working directory: one work dir and one output dir per test
# sub-directory, both derived from WORK_BASE (overridable with -w).
#
WORK_BASE=${WORK_BASE:-"/tmp/$USER.make.test"}
WORK_DIR=${WORK_BASE}/${SUBDIR}
OUTPUT_DIR=${WORK_DIR}.OUTPUT

#
# Make to use (overridable with -m).
#
MAKE_PROG=${MAKE_PROG:-/usr/bin/make}