]> CyberLeo.Net >> Repos - FreeBSD/stable/10.git/blob - tools/regression/usr.bin/make/common.sh
Copy head (r256279) to stable/10 as part of the 10.0-RELEASE cycle.
[FreeBSD/stable/10.git] / tools / regression / usr.bin / make / common.sh
1 #!/bin/sh
2 #
# Common code used to run regression tests for usr.bin/make.
4 #
5 # $FreeBSD$
6
7 #
8 # Output a message and exit with an error.
9 #
fatal()
{
	# Print a diagnostic on stderr and abort the test run.
	# ">&2" duplicates the shell's stderr directly and works even
	# where /dev/stderr does not exist (e.g. a chroot without devfs).
	echo "fatal: $*" >&2
	exit 1
}
15
16 #
17 # Check whether the working directory exists - it must.
18 #
ensure_workdir()
{
	# Abort unless ${WORK_DIR} exists.  The expansion is quoted so a
	# path containing whitespace (or an empty/unset WORK_DIR) does not
	# word-split into a malformed test(1) expression.
	if [ ! -d "${WORK_DIR}" ] ; then
		fatal "working directory ${WORK_DIR} does not exist."
	fi
}
25
26 #
27 # Make sure all tests have been run
28 #
#
# Make sure all tests have been run.  For every non-skipped test number
# the three result files (status, stdout, stderr) must exist in
# ${OUTPUT_DIR}; otherwise report it and exit with an error.
#
ensure_run()
{
	# Tests default to a single test when TEST_N is not set.
	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	FAIL=
	N=1
	while [ "${N}" -le "${TEST_N}" ] ; do
		if ! skip_test "${N}" ; then
			# All three per-test result files must be present.
			if [ ! -f "${OUTPUT_DIR}/status.${N}" -o \
			     ! -f "${OUTPUT_DIR}/stdout.${N}" -o \
			     ! -f "${OUTPUT_DIR}/stderr.${N}" ] ; then
				# Fixed typo: "no yet run" -> "not yet run".
				echo "Test ${SUBDIR}/${N} not yet run"
				FAIL=yes
			fi
		fi
		N=$((N + 1))
	done

	if [ ! -z "${FAIL}" ] ; then
		exit 1
	fi
}
53
54 #
# Output usage message.
56 #
print_usage()
{
	# Emit the usage text as a single here-document rather than a
	# sequence of echo(1) calls; the output is byte-identical.
	cat <<EOF
Usage: sh -v -m <path> -w <dir> $0 command(s)
 setup    - setup working directory
 run      - run the tests
 show     - show test results
 compare  - compare actual and expected results
 diff     - diff actual and expected results
 reset    - reset the test to its initial state
 clean    - delete working and output directory
 test     - setup + run + compare
 prove    - setup + run + compare + clean
 desc     - print short description
 update   - update the expected results with the current results
 help     - show this information
EOF
}
73
74 #
75 # Return 0 if we should skip the test. 1 otherwise
76 #
skip_test()
{
	# TEST_<n>_SKIP, when set to any non-empty value, marks test <n>
	# as skipped.  Return 0 (skip) when it is set, 1 otherwise; the
	# test's own exit status replaces the original if/else chain.
	eval skip=\${TEST_${1}_SKIP}
	[ -n "${skip}" ]
}
86
87 #
88 # Common function for setup and reset.
89 #
common_setup()
{
	#
	# If a Makefile exists in the source directory - copy it over
	# (a Makefile already present in the working directory is kept).
	#
	if [ -e Makefile -a ! -e ${WORK_DIR}/Makefile ] ; then
		cp Makefile ${WORK_DIR}/Makefile
	fi

	#
	# If the TEST_MAKE_DIRS variable is set, create those directories.
	# The value is deliberately left unquoted so it word-splits into
	# pairs of directory name and mode.
	#
	set -- ${TEST_MAKE_DIRS}
	while [ $# -ne 0 ] ; do
		if [ ! -d ${WORK_DIR}/${1} ] ; then
			mkdir -p -m ${2} ${WORK_DIR}/${1}
		else
			# Directory already exists - just enforce the mode.
			chmod ${2} ${WORK_DIR}/${1}
		fi
		# Consume one name/mode pair.
		shift ; shift
	done

	#
	# If the TEST_COPY_FILES variable is set, copy those files over to
	# the working directory. The value is assumed to be pairs of
	# filenames and modes.  Existing files are not overwritten, but
	# their mode is (re)applied.
	#
	set -- ${TEST_COPY_FILES}
	while [ $# -ne 0 ] ; do
		if [ ! -e ${WORK_DIR}/${1} ] ; then
			cp ${1} ${WORK_DIR}/${1}
		fi
		chmod ${2} ${WORK_DIR}/${1}
		shift ; shift
	done

	#
	# If the TEST_TOUCH variable is set, it is taken to be a list
	# of pairs of filenames and arguments to touch(1). The arguments
	# to touch must be surrounded by single quotes if there are more
	# than one argument.  The outer "eval set" honours that quoting;
	# the inner eval re-splits the touch arguments.
	#
	eval set -- ${TEST_TOUCH}
	while [ $# -ne 0 ] ; do
		eval touch ${2} ${WORK_DIR}/${1}
		shift ; shift
	done

	#
	# Now create links.  TEST_LINKS is a list of source/target pairs
	# relative to the working directory.
	#
	eval set -- ${TEST_LINKS}
	while [ $# -ne 0 ] ; do
		eval ln ${WORK_DIR}/${1} ${WORK_DIR}/${2}
		shift ; shift
	done
}
147
148 #
149 # Setup the test. This creates the working and output directories and
150 # populates it with files. If there is a setup_test() function - call it.
151 #
eval_setup()
{
	#
	# Check whether the working directory exists. If it does exit
	# fatally so that we don't clobber a test the user is working on.
	#
	if [ -d "${WORK_DIR}" ] ; then
		fatal "working directory ${WORK_DIR} already exists."
	fi

	#
	# Now create it and a fresh output directory.  Paths are quoted
	# so directories containing whitespace work.
	#
	mkdir -p "${WORK_DIR}"
	rm -rf "${OUTPUT_DIR}"
	mkdir -p "${OUTPUT_DIR}"

	#
	# Populate the working directory from the TEST_* variables.
	#
	common_setup

	#
	# Now after all execute the user's setup function if it exists.
	#
	setup_test
}
179
180 #
# Default setup_test function does nothing. This may be overridden by
182 # the test.
183 #
setup_test()
{
	# An empty compound command "{ }" is a syntax error in POSIX sh
	# and bash; use the no-op utility ':' as the placeholder body.
	:
}
187
188 #
189 # Reset the test. Here we need to rely on information from the test.
# We execute the same steps as in the setup, but try not to clobber existing
191 # files.
192 # All files and directories that are listed on the TEST_CLEAN_FILES
193 # variable are removed. Then the TEST_TOUCH list is executed and finally
194 # the reset_test() function called if it exists.
195 #
eval_reset()
{
	ensure_workdir

	#
	# Clean the output directory (the glob stays outside the quotes
	# so it still expands; the directory itself is kept).
	#
	rm -rf "${OUTPUT_DIR}"/*

	#
	# Re-run the common population steps; they avoid clobbering
	# files that already exist.
	#
	common_setup

	#
	# Remove files and directories listed in TEST_CLEAN_FILES.
	#
	for f in ${TEST_CLEAN_FILES} ; do
		rm -rf "${WORK_DIR}/${f}"
	done

	#
	# Execute the test's reset function.
	#
	reset_test
}
222
223 #
# Default reset_test function does nothing. This may be overridden by
225 # the test.
226 #
reset_test()
{
	# An empty compound command "{ }" is a syntax error in POSIX sh
	# and bash; use the no-op utility ':' as the placeholder body.
	:
}
230
231 #
232 # Clean the test. This simply removes the working and output directories.
233 #
eval_clean()
{
	#
	# If you have special cleaning needs, provide a 'cleanup' shell
	# script next to the test; it is sourced, not executed, so it may
	# use the variables set up here.
	#
	if [ -n "${TEST_CLEANUP}" ] ; then
		. "${SRC_DIR}/cleanup"
	fi
	# NO_TEST_CLEANUP allows keeping the directories for debugging.
	if [ -z "${NO_TEST_CLEANUP}" ] ; then
		rm -rf "${WORK_DIR}"
		rm -rf "${OUTPUT_DIR}"
	fi
}
247
248 #
249 # Run the test.
250 #
eval_run()
{
	ensure_workdir

	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	N=1
	while [ "${N}" -le "${TEST_N}" ] ; do
		if ! skip_test "${N}" ; then
			# Run each test in a subshell so the cd and the
			# redirections do not leak into this shell; the
			# exit status is recorded in status.<N>.
			( cd "${WORK_DIR}" ;
			  exec 1>"${OUTPUT_DIR}/stdout.${N}" 2>"${OUTPUT_DIR}/stderr.${N}"
			  run_test "${N}"
			  echo $? >"${OUTPUT_DIR}/status.${N}"
			)
		fi
		N=$((N + 1))
	done
}
271
272 #
273 # Default run_test() function.  It can be replaced by the
274 # user specified regression test. The argument to this function is
275 # the test number.
276 #
run_test()
{
	# TEST_<n>, when set, holds the make(1) arguments for test <n>;
	# otherwise default to the single target "test<n>".
	eval args=\${TEST_${1}-test${1}}
	# $args is intentionally unquoted so multiple arguments split
	# into separate words.
	${MAKE_PROG} $args
}
282
283 #
284 # Show test results.
285 #
eval_show()
{
	ensure_workdir

	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	N=1
	while [ "${N}" -le "${TEST_N}" ] ; do
		if ! skip_test "${N}" ; then
			# Dump the three recorded result files per test.
			echo "=== Test ${N} Status =================="
			cat "${OUTPUT_DIR}/status.${N}"
			echo ".......... Stdout .................."
			cat "${OUTPUT_DIR}/stdout.${N}"
			echo ".......... Stderr .................."
			cat "${OUTPUT_DIR}/stderr.${N}"
		fi
		N=$((N + 1))
	done
}
307
308 #
309 # Compare results with expected results
310 #
#
# Compare results with expected results, emitting TAP-style
# "ok"/"not ok" lines (one per test, after a "1..N" plan).
#
eval_compare()
{
	ensure_workdir
	ensure_run

	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	echo "1..${TEST_N}"
	N=1
	while [ "${N}" -le "${TEST_N}" ] ; do
		fail=
		todo=
		if ! skip_test "${N}" ; then
			do_compare stdout "${N}" || fail="${fail}stdout "
			do_compare stderr "${N}" || fail="${fail}stderr "
			do_compare status "${N}" || fail="${fail}status "
			eval todo=\${TEST_${N}_TODO}
		fi
		# printf(1) replaces the non-portable 'echo -n'; the data
		# goes through '%s' so '%' in names cannot be interpreted.
		if [ -n "$fail" ]; then
			printf '%s' "not "
		fi
		printf '%s' "ok ${N} ${SUBDIR}/${N}"
		if [ -n "$fail" ] || [ -n "$todo" ]; then
			printf '%s' " # "
		fi
		if [ -n "$todo" ] ; then
			printf '%s' "TODO $todo; "
		fi
		if [ -n "$fail" ] ; then
			echo "reason: ${fail}"
		fi
		echo
		N=$((N + 1))
	done
}
348
349 #
350 # Check if the test result is the same as the expected result.
351 #
352 # $1    Input file
353 # $2    Test number
354 #
do_compare()
{
	local EXPECTED RESULT
	EXPECTED="expected.$1.$2"
	RESULT="${OUTPUT_DIR}/$1.$2"

	# A missing expected file counts as a failure so new tests cannot
	# silently pass.  Both streams are discarded; only the exit
	# status of diff(1) matters here.
	if [ -f "$EXPECTED" ]; then
		diff -q "$EXPECTED" "$RESULT" >/dev/null 2>&1
		return $?
	else
		return 1	# FAIL
	fi
}
368
369 #
370 # Diff current and expected results
371 #
eval_diff()
{
	ensure_workdir
	ensure_run

	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	# The original set FAIL= inside the loop but never used it;
	# the dead assignment has been removed.
	N=1
	while [ "${N}" -le "${TEST_N}" ] ; do
		if ! skip_test "${N}" ; then
			do_diff stdout "${N}"
			do_diff stderr "${N}"
			do_diff status "${N}"
		fi
		N=$((N + 1))
	done
}
392
393 #
# Print a unified diff between the expected and the actual result.
395 #
396 # $1    Input file
397 # $2    Test number
398 #
do_diff()
{
	local EXPECTED RESULT
	EXPECTED="expected.$1.$2"
	RESULT="${OUTPUT_DIR}/$1.$2"

	# Print the command first so the output identifies which pair of
	# files is being compared, even when they are identical.
	echo diff -u "$EXPECTED" "$RESULT"
	if [ -f "$EXPECTED" ]; then
		diff -u "$EXPECTED" "$RESULT"
	else
		echo "${EXPECTED} does not exist"
	fi
}
412
413 #
414 # Update expected results
415 #
eval_update()
{
	ensure_workdir
	ensure_run

	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	# The original set FAIL= before the loop but never used it;
	# the dead assignment has been removed.
	N=1
	while [ "${N}" -le "${TEST_N}" ] ; do
		if ! skip_test "${N}" ; then
			# Overwrite the checked-in expected results with
			# the results of the current run.
			cp "${OUTPUT_DIR}/stdout.${N}" "expected.stdout.${N}"
			cp "${OUTPUT_DIR}/stderr.${N}" "expected.stderr.${N}"
			cp "${OUTPUT_DIR}/status.${N}" "expected.status.${N}"
		fi
		N=$((N + 1))
	done
}
436
437 #
438 # Print description
439 #
eval_desc()
{
	# One-line description in the form "<subdir>: <description>".
	printf '%s: %s\n' "${SUBDIR}" "${DESC}"
}
444
445 #
446 # Run the test
447 #
eval_test()
{
	# 'test' command: setup + run + compare, leaving the working and
	# output directories in place for later inspection.
	eval_setup
	eval_run
	eval_compare
}
454
455 #
456 # Run the test for prove(1)
457 #
eval_prove()
{
	# 'prove' command: like 'test' but cleans up afterwards, making
	# the test self-contained as prove(1) expects.
	eval_setup
	eval_run
	eval_compare
	eval_clean
}
465
466 #
467 # Main function. Execute the command(s) on the command line.
468 #
eval_cmd()
{
	# With no arguments, behave as if 'prove' had been requested.
	if [ $# -eq 0 ] ; then
		set -- prove
	fi

	# Dispatch each command word to its eval_* handler; anything
	# unrecognized (including 'help') prints the usage text.
	for cmd
	do
		case "${cmd}" in

		setup | run | compare | diff | clean | reset | show | \
		test | prove | desc | update)
			eval_${cmd}
			;;
		* | help)
			print_usage
			;;
		esac
	done
}
490
##############################################################################
#
# Main code
#

#
# Parse command line arguments.
#   -m <path>  make(1) binary to test (default /usr/bin/make)
#   -w <dir>   base directory for the working tree
#   -v         verbose flag (sets VERBOSE=1)
# NOTE(review): legacy getopt(1) with the unquoted $* cannot handle
# arguments containing whitespace; kept as-is for compatibility.
#
args=`getopt m:w:v $*`
if [ $? != 0 ]; then
	echo 'Usage: ...'
	exit 2
fi
set -- $args
# The loop iterates over the snapshot taken by 'for i' while the shifts
# advance the positional parameters, so "$2" is the current option's
# argument when the option is seen.
for i; do
	case "$i" in
	-m)
		MAKE_PROG="$2"
		shift
		shift
		;;
	-w)
		WORK_BASE="$2"
		shift
		shift
		;;
	-v)
		VERBOSE=1
		shift
		;;
	--)
		# End of options; the remaining words are the commands.
		shift
		break
		;;
	esac
done

#
# Determine our sub-directory. Argh.
# Walk up until the directory containing common.sh is found; the path
# below it becomes SUBDIR (used in the TAP output and in WORK_DIR).
#
SRC_DIR=`pwd`
SRC_BASE=`while [ ! -f common.sh ] ; do cd .. ; done ; pwd`
SUBDIR=`echo ${SRC_DIR} | sed "s@${SRC_BASE}/@@"`

#
# Construct working directory (a per-user tree under /tmp by default;
# override the base with -w).
#
WORK_BASE=${WORK_BASE:-"/tmp/$USER.make.test"}
WORK_DIR=${WORK_BASE}/${SUBDIR}
OUTPUT_DIR=${WORK_DIR}.OUTPUT

#
# Make to use (override with -m).
#
MAKE_PROG=${MAKE_PROG:-/usr/bin/make}