update test reporting #428

Merged
merged 1 commit into from Apr 10, 2020
update test reporting
apcraig committed Apr 3, 2020
commit 0d9278f573b3a990aacec26e8c8749e345dda290
51 changes: 36 additions & 15 deletions cice.setup
@@ -941,9 +941,6 @@ EOF2
exit -1
endif

# # Initial test_output file
# echo "#---" >! test_output
# echo "PEND ${testname_noid} " >> test_output
endif

#------------------------------------------------------------
@@ -1004,31 +1001,55 @@ EOF0
# Add code to results.csh to count the number of failures
cat >> ${tsdir}/results.csh << EOF
cat ./results.log
set pends = \`cat ./results.log | grep PEND | wc -l\`
set failures = \`cat ./results.log | grep FAIL | wc -l\`
set success = \`cat ./results.log | grep 'PASS\|COPY' | wc -l\`
set comments = \`cat ./results.log | grep "#" | wc -l\`
set alltotal = \`cat ./results.log | wc -l\`
set pends = \`cat ./results.log | grep PEND | wc -l\`
set misses = \`cat ./results.log | grep MISS | wc -l\`
set failures = \`cat ./results.log | grep FAIL | wc -l\`
set failbuild = \`cat ./results.log | grep FAIL | grep " build " | wc -l\`
set failrun = \`cat ./results.log | grep FAIL | grep " run " | wc -l\`
set failtest = \`cat ./results.log | grep FAIL | grep " test " | wc -l\`
set failcomp = \`cat ./results.log | grep FAIL | grep " compare " | wc -l\`
set failbfbc = \`cat ./results.log | grep FAIL | grep " bfbcomp " | wc -l\`
set failgen = \`cat ./results.log | grep FAIL | grep " generate " | wc -l\`
set success = \`cat ./results.log | grep 'PASS\|COPY' | wc -l\`
set comments = \`cat ./results.log | grep "#" | wc -l\`
set alltotal = \`cat ./results.log | wc -l\`
@ total = \$alltotal - \$comments
@ chkcnt = \$pends + \$misses + \$failures + \$success

echo "#------- " >> results.log
echo " " >> results.log
echo "#totl = \$total" >> results.log
echo "#totl = \$total total" >> results.log
echo "#chkd = \$chkcnt checked" >> results.log
echo "#pass = \$success" >> results.log
echo "#fail = \$failures" >> results.log
echo "#pend = \$pends" >> results.log
echo "#miss = \$misses" >> results.log
echo "#fail = \$failures" >> results.log
echo " #failbuild = \$failbuild" >> results.log
echo " #failrun = \$failrun" >> results.log
echo " #failtest = \$failtest" >> results.log
echo " #failcomp = \$failcomp" >> results.log
echo " #failbfbc = \$failbfbc" >> results.log
echo " #failgen = \$failgen" >> results.log

echo ""
echo "Descriptors:"
echo " PASS - successful completion"
echo " COPY - previously compiled code was copied for new test"
echo " MISS - comparison data is missing"
echo " PEND - run has been submitted to queue and is waiting or failed submission"
echo " FAIL - test is still executing, did not complete, or completed and failed"
echo " PEND - status is undertermined; test may still be queued, running, or timed out"
echo " FAIL - test failed"
echo ""
echo "\$success of \$total tests PASSED"
echo "\$failures of \$total tests FAILED"
echo "\$pends of \$total tests PENDING"
echo "\$chkcnt measured results of \$total total results"
echo "\$success of \$chkcnt tests PASSED"
echo "\$pends of \$chkcnt tests PENDING"
echo "\$misses of \$chkcnt tests MISSING data"
echo "\$failures of \$chkcnt tests FAILED"
#echo " \$failbuild of \$failures FAILED build"
#echo " \$failrun of \$failures FAILED run"
#echo " \$failtest of \$failures FAILED test"
#echo " \$failcomp of \$failures FAILED compare"
#echo " \$failbfbc of \$failures FAILED bfbcomp"
#echo " \$failgen of \$failures FAILED generate"
exit \$failures
EOF
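
As an illustration of the new summary (counts hypothetical; the exact lines depend on the contents of results.log): for a results.log holding two PASS entries, one PEND entry, and one FAIL entry, the block above would append lines such as

   #totl = 4 total
   #chkd = 4 checked
   #pass = 2
   #pend = 1
   #miss = 0
   #fail = 1

report "4 measured results of 4 total results" on stdout, and exit with status 1, i.e. the number of FAIL lines.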

7 changes: 7 additions & 0 deletions configuration/scripts/cice.test.setup.csh
@@ -40,6 +40,13 @@ if ( ! -f ${ICE_RUNDIR}/cice ) then
exit 99
endif

# Initialize test results and reset test results for a rerun
mv -f ${ICE_CASEDIR}/test_output ${ICE_CASEDIR}/test_output.prev
echo "#---" >! ${ICE_CASEDIR}/test_output
cat ${ICE_CASEDIR}/test_output.prev | grep -i "${ICE_TESTNAME} build" >> ${ICE_CASEDIR}/test_output
echo "PEND ${ICE_TESTNAME} run" >> ${ICE_CASEDIR}/test_output
rm -f ${ICE_CASEDIR}/test_output.prev

EOF2
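
A quick sketch of the reset behavior (test name hypothetical): if a previous submission left test_output containing

   #---
   PASS mytest_abc123 build
   FAIL mytest_abc123 run

this block rewrites it as

   #---
   PASS mytest_abc123 build
   PEND mytest_abc123 run

carrying the build status forward and resetting the run status to PEND before the job is resubmitted.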

if ( -f ${ICE_SCRIPTS}/tests/test_${ICE_TEST}.script) then
31 changes: 27 additions & 4 deletions configuration/scripts/tests/report_results.csh
@@ -1,5 +1,19 @@
#!/bin/csh -f

if ($#argv == 0) then
  echo "${0}: Running results.csh"
  ./results.csh >& /dev/null
else if ($#argv == 1) then
  if ("$argv[1]" =~ "-n") then
    #continue
  else
    echo "$0 usage:"
    echo "$0 [-n]"
    echo " -n : do NOT run results.csh (by default it does)"
    exit -1
  endif
endif

if (! -e results.log) then
echo " "
echo "${0}: ERROR results.log does not exist, try running results.csh"
@@ -79,12 +93,21 @@ unset noglob

foreach compiler ( ${compilers} )

set ofile = "${shhash}.${mach}.${compiler}.${xcdat}.${xctim}"
set outfile = "${wikiname}/${tsubdir}/${ofile}.md"
set cnt = 0
set found = 1
while ($found == 1)
  set ofile = "${shhash}.${mach}.${compiler}.${xcdat}.${xctim}.$cnt"
  set outfile = "${wikiname}/${tsubdir}/${ofile}.md"
  if (-e ${outfile}) then
    @ cnt = $cnt + 1
  else
    set found = 0
  endif
end
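
This loop increments the trailing counter until it finds an unused filename, so regenerated reports no longer overwrite earlier ones. For example (hash, machine, and timestamp values hypothetical):

   abc1234.conrad.cray.200403.120000.0.md   <- first report
   abc1234.conrad.cray.200403.120000.1.md   <- report regenerated from the same results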

mkdir -p ${wikiname}/${tsubdir}
echo "${0}: writing to ${outfile}"

if (-e ${outfile}) rm -f ${outfile}

cat >! ${outfile} << EOF

@@ -103,7 +126,7 @@ EOF
foreach case ( ${cases} )
if ( ${case} =~ *_${compiler}_* ) then

# check thata case results are meaningful
# check that case results are meaningful
set fbuild = `grep " ${case} " results.log | grep " build" | cut -c 1-4`
set frun = `grep " ${case} " results.log | grep " run" | cut -c 1-4`
set ftest = `grep " ${case} " results.log | grep " test" | cut -c 1-4`
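
For reference, these greps assume the results.log line format written by the test scripts, e.g. (case name hypothetical)

   PASS case_conrad_cray_smoke_gx3_4x1 run

from which cut -c 1-4 yields frun = PASS; if no matching line exists, the variable is left empty.
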
5 changes: 0 additions & 5 deletions configuration/scripts/tests/test_logbfb.script
@@ -4,11 +4,6 @@
# This is identical to a smoke test, but triggers bfbcompare with log files instead of restarts
# cice.run returns -1 if run did not complete successfully

mv -f ${ICE_CASEDIR}/test_output ${ICE_CASEDIR}/test_output.prev
cat ${ICE_CASEDIR}/test_output.prev | grep -iv "${ICE_TESTNAME} run" >! ${ICE_CASEDIR}/test_output
rm -f ${ICE_CASEDIR}/test_output.prev
echo "RUN ${ICE_TESTNAME} run " >> ${ICE_CASEDIR}/test_output

./cice.run
set res="$status"

10 changes: 0 additions & 10 deletions configuration/scripts/tests/test_restart.script
@@ -7,14 +7,6 @@ cp ice_in ice_in.0
${ICE_CASEDIR}/casescripts/parse_namelist.sh ice_in ${ICE_CASEDIR}/casescripts/test_nml.restart1
cp ice_in ice_in.1

mv -f ${ICE_CASEDIR}/test_output ${ICE_CASEDIR}/test_output.prev
cat ${ICE_CASEDIR}/test_output.prev | grep -iv "${ICE_TESTNAME} run" >! ${ICE_CASEDIR}/test_output
mv -f ${ICE_CASEDIR}/test_output ${ICE_CASEDIR}/test_output.prev
cat ${ICE_CASEDIR}/test_output.prev | grep -iv "${ICE_TESTNAME} test" >! ${ICE_CASEDIR}/test_output
rm -f ${ICE_CASEDIR}/test_output.prev
echo "RUN ${ICE_TESTNAME} run " >> ${ICE_CASEDIR}/test_output
echo "PEND ${ICE_TESTNAME} test " >> ${ICE_CASEDIR}/test_output

./cice.run
set res="$status"

@@ -27,8 +19,6 @@ if ( $res != 0 ) then
echo "FAIL ${ICE_TESTNAME} run" >> ${ICE_CASEDIR}/test_output
echo "FAIL ${ICE_TESTNAME} test " >> ${ICE_CASEDIR}/test_output
exit 99
else
echo "PASS ${ICE_TESTNAME} initialrun" >> ${ICE_CASEDIR}/test_output
endif

# Prepend 'base_' to the final restart file to save for comparison
5 changes: 0 additions & 5 deletions configuration/scripts/tests/test_smoke.script
@@ -3,11 +3,6 @@
# Run the CICE model
# cice.run returns -1 if run did not complete successfully

mv -f ${ICE_CASEDIR}/test_output ${ICE_CASEDIR}/test_output.prev
cat ${ICE_CASEDIR}/test_output.prev | grep -iv "${ICE_TESTNAME} run" >! ${ICE_CASEDIR}/test_output
rm -f ${ICE_CASEDIR}/test_output.prev
echo "RUN ${ICE_TESTNAME} run " >> ${ICE_CASEDIR}/test_output

./cice.run
set res="$status"

6 changes: 5 additions & 1 deletion doc/source/user_guide/ug_testing.rst
@@ -645,7 +645,11 @@ To post results, once a test suite is complete, run ``results.csh`` and
./results.csh
./report_results.csh

The reporting can also be automated by adding ``--report`` to ``cice.setup``
``report_results.csh`` runs ``results.csh`` automatically by default, but we
recommend running it manually first to verify results before publishing
them. ``report_results.csh -n`` turns off the automatic run of ``results.csh``.
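
For example, one might verify the results by hand and then post them without triggering another ``results.csh`` run
::

   ./results.csh
   ./report_results.csh -n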

The reporting can also be automated in a test suite by adding ``--report`` to ``cice.setup``
::

./cice.setup --suite base_suite --mach conrad --env cray --testid v01a --report