# Source: FreeBSD releng/9.2 — tools/regression/usr.bin/make/common.sh
# (Copy of stable/9 made as part of the 9.2-RELEASE cycle.)
1 #!/bin/sh
2 #
3 # Common code used run regression tests for usr.bin/make.
4 #
5 # $FreeBSD$
6
#
# Output a message on stderr and terminate with a non-zero exit status.
#
fatal()
{
	# Use >&2 instead of >/dev/stderr: the latter requires that device
	# node to exist and be writable, which is not guaranteed in
	# chroot'ed or otherwise restricted environments.
	echo "fatal: $*" >&2
	exit 1
}
15
#
# Check whether the working directory exists - it must.
# Calls fatal() (which exits) when it does not.
#
ensure_workdir()
{
	# Quote the expansion so a path containing whitespace does not
	# break the test (and an empty WORK_DIR fails cleanly).
	if [ ! -d "${WORK_DIR}" ] ; then
		fatal "working directory ${WORK_DIR} does not exist."
	fi
}
25
#
# Make sure all tests have been run: for every non-skipped test number
# the status, stdout and stderr capture files must exist in OUTPUT_DIR.
# Exits with status 1 if anything is missing.
#
ensure_run()
{
	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	FAIL=
	N=1
	while [ "${N}" -le "${TEST_N}" ] ; do
		if ! skip_test "${N}" ; then
			if [ ! -f "${OUTPUT_DIR}/status.${N}" -o \
			     ! -f "${OUTPUT_DIR}/stdout.${N}" -o \
			     ! -f "${OUTPUT_DIR}/stderr.${N}" ] ; then
				# Fixed typo: was "no yet run".
				echo "Test ${SUBDIR}/${N} not yet run"
				FAIL=yes
			fi
		fi
		N=$((N + 1))
	done

	if [ ! -z "${FAIL}" ] ; then
		exit 1
	fi
}
53
54 #
55 # Output usage messsage.
56 #
57 print_usage()
58 {
59         echo "Usage: sh -v -m <path> -w <dir> $0 command(s)"
60         echo " setup    - setup working directory"
61         echo " run      - run the tests"
62         echo " show     - show test results"
63         echo " compare  - compare actual and expected results"
64         echo " diff     - diff actual and expected results"
65         echo " reset    - reset the test to its initial state"
66         echo " clean    - delete working and output directory"
67         echo " test     - setup + run + compare"
68         echo " prove    - setup + run + compare + clean"
69         echo " desc     - print short description"
70         echo " update   - update the expected results with the current results"
71         echo " help     - show this information"
72 }
73
#
# Return 0 (success) when test number $1 is marked to be skipped via a
# non-empty TEST_<n>_SKIP variable; return 1 otherwise.
#
skip_test()
{
	# Indirect lookup of TEST_<n>_SKIP for the given test number.
	eval _skip=\${TEST_${1}_SKIP}
	# Non-empty value -> status 0 ("skip"), empty -> status 1.
	[ -n "${_skip}" ]
}
86
#
# Common function for setup and reset.
# Driven entirely by the TEST_* variables set by the individual test;
# each list variable is walked pairwise via the positional parameters.
#
common_setup()
{
	#
	# If a Makefile exists in the source directory - copy it over,
	# but never clobber one already present in the work directory.
	#
	if [ -e Makefile -a ! -e ${WORK_DIR}/Makefile ] ; then
		cp Makefile ${WORK_DIR}/Makefile
	fi

	#
	# If the TEST_MAKE_DIRS variable is set, create those directories.
	# The value is assumed to be pairs of directory names and modes.
	#
	set -- ${TEST_MAKE_DIRS}
	while [ $# -ne 0 ] ; do
		if [ ! -d ${WORK_DIR}/${1} ] ; then
			mkdir -p -m ${2} ${WORK_DIR}/${1}
		else
			# Directory already exists - just enforce the mode.
			chmod ${2} ${WORK_DIR}/${1}
		fi
		shift ; shift
	done

	#
	# If the TEST_COPY_FILES variable is set, copy those files over to
	# the working directory. The value is assumed to be pairs of
	# filenames and modes. Existing files are kept, but the mode is
	# (re-)applied either way.
	#
	set -- ${TEST_COPY_FILES}
	while [ $# -ne 0 ] ; do
		if [ ! -e ${WORK_DIR}/${1} ] ; then
			cp ${1} ${WORK_DIR}/${1}
		fi
		chmod ${2} ${WORK_DIR}/${1}
		shift ; shift
	done

	#
	# If the TEST_TOUCH variable is set, it is taken to be a list
	# of pairs of filenames and arguments to touch(1). The arguments
	# to touch must be surrounded by single quotes if there are more
	# than one argument. (The first eval splits the quoted pairs into
	# positional parameters; the second expands the touch arguments.)
	#
	eval set -- ${TEST_TOUCH}
	while [ $# -ne 0 ] ; do
		eval touch ${2} ${WORK_DIR}/${1}
		shift ; shift
	done

	#
	# Now create links: TEST_LINKS holds pairs of existing-file and
	# link names, both relative to the work directory.
	#
	eval set -- ${TEST_LINKS}
	while [ $# -ne 0 ] ; do
		eval ln ${WORK_DIR}/${1} ${WORK_DIR}/${2}
		shift ; shift
	done
}
147
#
# Setup the test. This creates the working and output directories and
# populates them with files. If there is a setup_test() function - call it.
#
eval_setup()
{
	#
	# Check whether the working directory exists. If it does, exit
	# fatally so that we don't clobber a test the user is working on.
	#
	if [ -d "${WORK_DIR}" ] ; then
		fatal "working directory ${WORK_DIR} already exists."
	fi

	#
	# Now create it and (re-)create the output directory.
	# Quoted expansions keep paths with whitespace intact.
	#
	mkdir -p "${WORK_DIR}"
	rm -rf "${OUTPUT_DIR}"
	mkdir -p "${OUTPUT_DIR}"

	#
	# Common stuff shared with eval_reset().
	#
	common_setup

	#
	# Now after all execute the user's setup function (default no-op).
	#
	setup_test
}
179
#
# Default setup_test function does nothing. This may be overridden by
# the test.
#
setup_test()
{
	# An empty compound command is a syntax error in POSIX sh and
	# bash; ':' is the portable no-op.
	:
}
187
#
# Reset the test. Here we need to rely on information from the test.
# We execute the same steps as in the setup, but try not to clobber
# existing files.
# All files and directories that are listed in the TEST_CLEAN_FILES
# variable are removed. Then the common setup is re-run and finally
# the reset_test() function is called.
#
eval_reset()
{
	ensure_workdir

	#
	# Clean the output directory (the directory itself is kept;
	# the glob must stay outside the quotes to expand).
	#
	rm -rf "${OUTPUT_DIR}"/*

	#
	# Common stuff shared with eval_setup().
	#
	common_setup

	#
	# Remove files produced by a previous run.
	#
	for f in ${TEST_CLEAN_FILES} ; do
		rm -rf "${WORK_DIR}/${f}"
	done

	#
	# Execute the test's own reset function (default no-op).
	#
	reset_test
}
222
#
# Default reset_test function does nothing. This may be overridden by
# the test.
#
reset_test()
{
	# An empty compound command is a syntax error in POSIX sh and
	# bash; ':' is the portable no-op.
	:
}
230
#
# Clean the test. This simply removes the working and output directories.
#
eval_clean()
{
	#
	# If you have special cleaning needs, provide a 'cleanup' shell
	# script in SRC_DIR and set TEST_CLEANUP to have it sourced here.
	#
	if [ -n "${TEST_CLEANUP}" ] ; then
		. "${SRC_DIR}/cleanup"
	fi
	rm -rf "${WORK_DIR}"
	rm -rf "${OUTPUT_DIR}"
}
245
#
# Run the tests: for every non-skipped test number, execute run_test
# in a subshell inside the work directory, capturing stdout, stderr
# and the exit status into the output directory.
#
eval_run()
{
	ensure_workdir

	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	N=1
	while [ "${N}" -le "${TEST_N}" ] ; do
		if ! skip_test "${N}" ; then
			# Subshell so the cd and the exec redirections do
			# not leak into the driver's environment.
			( cd "${WORK_DIR}" ;
			  exec 1>"${OUTPUT_DIR}/stdout.${N}" 2>"${OUTPUT_DIR}/stderr.${N}"
			  run_test "${N}"
			  echo $? >"${OUTPUT_DIR}/status.${N}"
			)
		fi
		N=$((N + 1))
	done
}
269
#
# Default run_test() function.  It can be replaced by the
# user specified regression test. The argument to this function is
# the test number.
#
run_test()
{
	# Indirect expansion: use TEST_<n> if set, otherwise fall back to
	# the make target "test<n>".  Note '-' (not ':-'): an explicitly
	# empty TEST_<n> is honoured and yields no arguments.
	eval args=\${TEST_${1}-test${1}}
	# $args is intentionally unquoted - it may contain several
	# whitespace-separated make(1) arguments.
	${MAKE_PROG} $args
}
280
#
# Show test results: dump the captured status, stdout and stderr of
# every non-skipped test to stdout.
#
eval_show()
{
	ensure_workdir

	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	N=1
	while [ "${N}" -le "${TEST_N}" ] ; do
		if ! skip_test "${N}" ; then
			echo "=== Test ${N} Status =================="
			cat "${OUTPUT_DIR}/status.${N}"
			echo ".......... Stdout .................."
			cat "${OUTPUT_DIR}/stdout.${N}"
			echo ".......... Stderr .................."
			cat "${OUTPUT_DIR}/stderr.${N}"
		fi
		N=$((N + 1))
	done
}
305
#
# Compare results with expected results and emit TAP-style output:
# a "1..N" plan followed by one "ok"/"not ok" line per test.
#
eval_compare()
{
	ensure_workdir
	ensure_run

	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	echo "1..${TEST_N}"
	N=1
	while [ "${N}" -le "${TEST_N}" ] ; do
		fail=
		todo=
		if ! skip_test "${N}" ; then
			# Collect the names of all mismatching streams.
			do_compare stdout "${N}" || fail="${fail}stdout "
			do_compare stderr "${N}" || fail="${fail}stderr "
			do_compare status "${N}" || fail="${fail}status "
			# Indirect lookup of a possible TODO annotation.
			eval todo=\${TEST_${N}_TODO}
		fi
		# printf replaces the non-portable 'echo -n'.
		if [ ! -z "$fail" ]; then
			printf "not "
		fi
		printf "ok %s %s" "${N}" "${SUBDIR}/${N}"
		if [ ! -z "$fail" -o ! -z "$todo" ]; then
			printf " # "
		fi
		if [ ! -z "$todo" ] ; then
			printf "TODO %s; " "$todo"
		fi
		if [ ! -z "$fail" ] ; then
			echo "reason: ${fail}"
		fi
		echo
		N=$((N + 1))
	done
}
346
#
# Check if the test result is the same as the expected result.
#
# $1	Input file name ("stdout", "stderr" or "status")
# $2	Test number
#
# Returns 0 when the captured output matches the expected file,
# 1 when the expected file is missing or the contents differ.
#
do_compare()
{
	local EXPECTED RESULT
	EXPECTED="expected.$1.$2"
	RESULT="${OUTPUT_DIR}/$1.$2"

	if [ -f "$EXPECTED" ]; then
		diff -q "$EXPECTED" "$RESULT" 1>/dev/null 2>/dev/null
		return $?
	else
		return 1	# FAIL - no expected output recorded
	fi
}
366
#
# Diff current and expected results for every non-skipped test.
#
eval_diff()
{
	ensure_workdir
	ensure_run

	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	N=1
	while [ "${N}" -le "${TEST_N}" ] ; do
		if ! skip_test "${N}" ; then
			# (The old per-iteration "FAIL=" assignment was dead
			# code - nothing in this function read FAIL.)
			do_diff stdout "${N}"
			do_diff stderr "${N}"
			do_diff status "${N}"
		fi
		N=$((N + 1))
	done
}
390
#
# Show a unified diff between the expected and the actual result.
#
# $1	Input file name ("stdout", "stderr" or "status")
# $2	Test number
#
do_diff()
{
	local EXPECTED RESULT
	EXPECTED="expected.$1.$2"
	RESULT="${OUTPUT_DIR}/$1.$2"

	# Always announce the comparison so the output is self-describing.
	echo diff -u "$EXPECTED" "$RESULT"
	if [ -f "$EXPECTED" ]; then
		diff -u "$EXPECTED" "$RESULT"
	else
		echo "${EXPECTED} does not exist"
	fi
}
410
#
# Update expected results: copy the captured output of every non-skipped
# test over the corresponding expected.* files in the source directory.
#
eval_update()
{
	ensure_workdir
	ensure_run

	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	# (The old "FAIL=" assignment was dead code - nothing here read it.)
	N=1
	while [ "${N}" -le "${TEST_N}" ] ; do
		if ! skip_test "${N}" ; then
			cp "${OUTPUT_DIR}/stdout.${N}" "expected.stdout.${N}"
			cp "${OUTPUT_DIR}/stderr.${N}" "expected.stderr.${N}"
			cp "${OUTPUT_DIR}/status.${N}" "expected.status.${N}"
		fi
		N=$((N + 1))
	done
}
434
#
# Print a one-line description: the sub-directory name followed by the
# test's DESC string.
#
eval_desc()
{
	printf '%s: %s\n' "${SUBDIR}" "${DESC}"
}
442
#
# Run the full test sequence: setup + run + compare.
# The working and output directories are left behind for inspection;
# use 'clean' (or 'prove') to remove them.
#
eval_test()
{
	eval_setup
	eval_run
	eval_compare
}
452
#
# Run the test for prove(1): like 'test' but also removes the working
# and output directories afterwards, leaving only the TAP output.
#
eval_prove()
{
	eval_setup
	eval_run
	eval_compare
	eval_clean
}
463
#
# Main dispatcher. Execute the command(s) given on the command line by
# calling the matching eval_* function; unknown commands (and 'help')
# print the usage message.
#
eval_cmd()
{
	if [ $# -eq 0 ] ; then
		# if no arguments given default to 'prove'
		set -- prove
	fi

	for i
	do
		case $i in

		setup | run | compare | diff | clean | reset | show | \
		test | prove | desc | update)
			# Direct call - the former 'eval' was unnecessary.
			eval_$i
			;;
		help | *)
			# 'help' now listed before '*'; in the old
			# "* | help" order it was dead (never reached).
			print_usage
			;;
		esac
	done
}
488
##############################################################################
#
# Main code
#

#
# Parse command line arguments.
# NOTE(review): this uses the legacy getopt(1) utility together with an
# unquoted $* and 'set -- $args', which deliberately relies on word
# splitting - it breaks on arguments containing whitespace, a known
# limitation of this idiom.
#
args=`getopt m:w:v $*`
if [ $? != 0 ]; then
	echo 'Usage: ...'
	exit 2
fi
set -- $args
for i; do
	case "$i" in
	-m)
		# Path to the make(1) binary under test.
		MAKE_PROG="$2"
		shift
		shift
		;;
	-w)
		# Base directory under which work directories are created.
		WORK_BASE="$2"
		shift
		shift
		;;
	-v)
		VERBOSE=1
		shift
		;;
	--)
		# End of options; the remaining words are the commands.
		shift
		break
		;;
	esac
done

#
# Determine our sub-directory: walk up until common.sh is found (the
# test-suite root), then express the current directory relative to it.
# Argh.
#
SRC_DIR=`pwd`
SRC_BASE=`while [ ! -f common.sh ] ; do cd .. ; done ; pwd`
SUBDIR=`echo ${SRC_DIR} | sed "s@${SRC_BASE}/@@"`

#
# Construct working directory: a per-user default under /tmp unless -w
# was given, mirrored by an .OUTPUT directory for captured results.
#
WORK_BASE=${WORK_BASE:-"/tmp/$USER.make.test"}
WORK_DIR=${WORK_BASE}/${SUBDIR}
OUTPUT_DIR=${WORK_DIR}.OUTPUT

#
# Make to use (default: the system make, unless -m was given).
#
MAKE_PROG=${MAKE_PROG:-/usr/bin/make}