Index: Makefile ================================================================== --- Makefile +++ Makefile @@ -154,16 +154,16 @@ $(PREFIX)/bin/newdashboard $(PREFIX)/bin/mdboard $(PREFIX)/bin/.$(ARCHSTR) : mkdir -p $(PREFIX)/bin/.$(ARCHSTR) -test: tests/tests.scm tests/.fslckout - cd tests;csi -I .. -b -n tests.scm +test: ext-tests/.fslckout + cd ext-tests;csi -I .. -b -n tests.scm -tests/.fslckout tests/tests.scm : $(MTQA_FOSSIL) - mkdir -p tests - cd tests;fossil open --nested $(MTQA_FOSSIL) +ext-tests/.fslckout : $(MTQA_FOSSIL) + mkdir -p ext-tests + cd ext-tests;fossil open --nested $(MTQA_FOSSIL) $(MTQA_FOSSIL) : fossil clone https://www.kiatoa.com/fossils/megatest_qa $(MTQA_FOSSIL) clean : ADDED tests/Makefile Index: tests/Makefile ================================================================== --- /dev/null +++ tests/Makefile @@ -0,0 +1,211 @@ +# +# run some tests + +BINPATH = $(shell readlink -m $(PWD)/../bin) +MEGATEST = $(BINPATH)/megatest +DASHBOARD = $(BINPATH)/dashboard +PATH := $(BINPATH):$(PATH) +RUNNAME := $(shell date +w%V.%u.%H.%M) +IPADDR := "-" +RUNID := 1 +SERVER = +DEBUG = 1 +LOGGING = +ROWS = 20 + +OS = $(shell grep ID /etc/*-release|cut -d= -f2) +FS = $(shell df -T .|tail -1|awk '{print $$2}') +VER = $(shell fsl info|grep checkout|awk '{print $$2}'|cut -c 1-5) + +# The NEWTARGET causes some tests to fail. Do not use until this is fixed. 
+NEWTARGET = "$(OS)/$(FS)/$(VER)" +TARGET = "ubuntu/nfs/none" + +all : build unit test1 test2 test3 test4 test5 test6 test7 test8 test9 + +unit : basicserver.log runs.log misc.log + +rel : + cd release;dashboard -rows 25 & + +## basicserver.log : unittests/basicserver.scm +## script -c "./rununittest.sh basicserver $(DEBUG)" basicserver.log + +%.log : build unittests/%.scm + script -c "./rununittest.sh $* $(DEBUG)" $*.log + if logpro unit.logpro $*.html < $*.log > /dev/null;then echo ALLPASS;else echo ALLFAIL;mv $*.log $*.log.FAIL;fi + +server : + cd fullrun;$(MEGATEST) -server - -debug $(DEBUG) -run-id $(RUNID) + +stopserver : + cd fullrun;$(MEGATEST) -stop-server 0 + +repl : + cd fullrun;$(MEGATEST) -:b -repl + +test0 : cleanprep + cd simplerun ; $(MEGATEST) -server - -debug $(DEBUG) + +test1 : cleanprep + +test2 : fullprep + cd fullrun;$(MEGATEST) -preclean -runtests ez_pass,runfirst/a/% -reqtarg ubuntu/nfs/none :runname $(RUNNAME) -debug $(DEBUG) $(LOGGING) + cd fullrun;megatest -preclean -runtests % -target ubuntu/nfs/none :runname $(RUNNAME)_01 -testpatt %/,%/ai -debug $(DEBUG) + cd fullrun;megatest -preclean -runtests %/,%/ai -target ubuntu/nfs/none :runname $(RUNAME)_02 -debug $(DEBUG) + cd fullrun;megatest -preclean -runtests runfirst/%,%/ai -target ubuntu/nfs/none :runname $(RUNNAME)_02 -debug $(DEBUG) + cd fullrun;megatest -runtests %/,%/winter -target ubuntu/nfs/none :runname $(RUNNAME)_03 -debug $(DEBUG) + sleep 40;cd fullrun;megatest -target ubuntu/nfs/none :runname $(RUNNAME) -set-state-status COMPLETED,FORCED :state COMPLETED :status PASS -testpatt ez_p%s,runfirst/ -debug $(DEBUG) $(LOGGING) + + +test3 : fullprep test3a test3b + +test3a : + @echo Run runfirst and any waitons. 
+ cd fullrun;$(MEGATEST) -preclean -runtests runfirst -reqtarg ubuntu/nfs/none :runname $(RUNNAME)_b + +test3b : + @echo Run all_toplevel and all waitons + cd fullrun;$(MEGATEST) -preclean -runtests all_toplevel -reqtarg ubuntu/nfs/none :runname $(RUNNAME)_c + +test4 : cleanprep + @echo "WARNING: No longer running fullprep, test converage may be lessened" + cd fullrun;time $(MEGATEST) -debug $(DEBUG) -run-wait -runtests % -reqtarg ubuntu/nfs/none :runname $(RUNNAME)_b -m "This is a comment specific to a run" -v $(LOGGING) + +test4a : cleanprep + cd fullrun;time $(MEGATEST) -debug $(DEBUG) -preclean -runtests all_toplevel -reqtarg ubuntu/nfs/none :runname $(RUNNAME)_b -m "This is a comment specific to a run" -v $(LOGGING) + +# NOTE: Only one instance can be a server +test5 : cleanprep + rm -f fullrun/a*.log fullrun/logs/* + @echo "WARNING: No longer running fullprep, test converage may be lessened" + cd fullrun;sleep 0;$(MEGATEST) -preclean -runtests % -target $(TARGET) :runname $(RUNNAME)_aa -debug $(DEBUG) $(LOGGING) > aa.log 2> aa.log & + cd fullrun;sleep 0;$(MEGATEST) -preclean -runtests % -target ubuntu/nfs/sleep1 :runname $(RUNNAME)_ae -debug $(DEBUG) $(LOGGING) > ae.log 2> ae.log & + cd fullrun;sleep 0;$(MEGATEST) -preclean -runtests % -target ubuntu/nfs/sleep10 :runname $(RUNNAME)_ab -debug $(DEBUG) $(LOGGING) > ab.log 2> ab.log & + cd fullrun;sleep 5;$(MEGATEST) -preclean -runtests % -target ubuntu/nfs/sleep60 :runname $(RUNNAME)_ac -debug $(DEBUG) $(LOGGING) > ac.log 2> ac.log & + cd fullrun;sleep 8;$(MEGATEST) -preclean -runtests % -target ubuntu/nfs/sleep240 :runname $(RUNNAME)_ad -debug $(DEBUG) $(LOGGING) > ad.log 2> ad.log & +# cd fullrun;sleep 0;$(MEGATEST) -preclean -runtests % -target $(TARGET) :runname $(RUNNAME)_af -debug $(DEBUG) $(LOGGING) > af.log 2> af.log & + +# MUST ADD THIS BACK IN ASAP!!!! 
+ # cd fullrun;sleep 10;$(MEGATEST) -run-wait -target $(TARGET) :runname % -testpatt % :state RUNNING,LAUNCHED,NOT_STARTED,REMOTEHOSTSTART;echo ALL DONE + +test6: fullprep + cd fullrun;$(MEGATEST) -preclean -runtests runfirst -testpatt %/1 -reqtarg ubuntu/nfs/none :runname $(RUNNAME)_itempatt -v + cd fullrun;$(MEGATEST) -preclean -runtests runfirst -testpatt %blahha% -reqtarg ubuntu/nfs/none :runname $(RUNNAME)_itempatt -debug 10 + cd fullrun;$(MEGATEST) -rollup :runname newrun -target ubuntu/nfs/none -debug 10 + +test7: + @echo Only a/c testname c should remain. If there is a run a/b/c then there is a cache issue. + cd simplerun;$(DASHBOARD) & + (cd simplerun; \ + $(MEGATEST) -server - -daemonize; \ + $(MEGATEST) -remove-runs -target %/% :runname % -testpatt %; \ + $(MEGATEST) -preclean -runtests % -target a/b :runname c; sleep 5; \ + $(MEGATEST) -remove-runs -target a/c :runname c; \ + $(MEGATEST) -preclean -runtests % -target a/c :runname c; \ + $(MEGATEST) -remove-runs -target a/b :runname c -testpatt % ; \ + $(MEGATEST) -preclean -runtests % -target a/d :runname c;$(MEGATEST) -list-runs %|egrep ^Run:) > test7.log 2> test7.log + logpro test7.logpro test7.html < test7.log + @echo + @echo Run \"firefox test7.html\" to see the results. + +# This one failed with v1.55 +test8a : + cd fullrun;$(MEGATEST) -preclean -runtests priority_10_waiton_1 -target ubuntu/nfs/none :runname $(RUNNAME)_waiton_single + +test8 : test8a + cd fullrun;$(MEGATEST) -preclean -runtests lineitem_fail 1 -target ubuntu/nfs/none :runname $(RUNNAME)_singletest + cd fullrun;$(MEGATEST) -preclean -runtests runfirst/fall 1 -target ubuntu/nfs/none :runname $(RUNNAME)_singleitem + cd fullrun;$(MEGATEST) -preclean -runtests test_mt_vars/2 -target ubuntu/nfs/none :runname $(RUNNAME)_singleitem_waiton + +# Some simple checks for bootstrapping and run loop logic + +test9 : minsetup test9a test9b test9c test9d test9e + +test9a : + @echo Run super-simple mintest e, no waitons. 
+ cd mintest;$(DASHBOARD)& + cd mintest;$(MEGATEST) -preclean -runtests e -target $(VER) -runname $(shell date +%H.%M.%S) -debug $(DEBUG) + +test9b : + @echo Run simple mintest d with one waiton c + cd mintest;$(MEGATEST) -preclean -runtests d -target $(VER) -runname `date +%H.%M.%S` -debug $(DEBUG) + +test9c : + @echo Run mintest a with full waiton chain a -> b -> c -> d -> e + cd mintest;$(MEGATEST) -preclean -runtests a -target $(VER) -runname `date +%H.%M.%S` -debug $(DEBUG) + +test9d : + @echo Run an itemized test with no items + cd mintest;$(MEGATEST) -preclean -runtests g -target $(VER) -runname `date +%H.%M.%S` -debug $(DEBUG) + +test9e : + @echo Run mintest a1 with full waiton chain with d1fail: a1 -> b1 -> c1 -> d1fail -> e1 + cd mintest;$(MEGATEST) -preclean -runtests a1 -target $(VER) -runname `date +%H.%M.%S` -debug $(DEBUG) + +test10 : + @echo Run a bunch of different targets simultaneously + (cd fullrun;$(MEGATEST) -server - ;sleep 2)& + for targ in mint/btrfs/mintdir sunos/sshfs/loc; do \ + (cd fullrun;$(MEGATEST) -preclean -runtests priority_10_waiton_1 -target $$targ :runname $(RUNNAME) &); done + for sys in ubuntu suse redhat debian;do \ + for fs in afs nfs zfs; do \ + for dpath in none tmp; do \ + (cd fullrun;$(MEGATEST) -preclean -runtests priority_10_waiton_1 -target $$sys/$$fs/$$dpath :runname $(RUNNAME) &);\ + done;done;done + +test11 : + cd fullrun;time (for a in 1 2 3 4 5 6 7 8 9 10 1 2 3 4 5 6 7 8 9 10 1 2 3 4 5 6 7 8 9 10 1 2 3 4 5 6 7 8 9 10 ;do (megatest -test-paths -target %/%/% > /dev/null ) & done; wait; ) + +build : ../*.scm + cd ..;make -j && make install + touch build + +cleanstart : + if killall mtest -v ;then sleep 5;killall mtest -v -9;fi;true + killall mtest -v;if [ ! $$? 
];then sleep 5;killall mtest -v -9;fi + +minsetup : build + mkdir -p mintest/runs mintest/links + cd mintest;$(MEGATEST) -stop-server 0 + cd mintest;$(MEGATEST) -server - -debug $(DEBUG) > server.log 2> server.log & + sleep 3 + cd mintest;$(DASHBOARD) -rows 18 & + +cleanprep : ../*.scm Makefile */*.config build + mkdir -p fullrun/tmp/mt_runs fullrun/tmp/mt_links /tmp/$(USER)/adisk1 + rm -f */logging.db + touch cleanprep + +fullprep : cleanprep + cd fullrun;$(MEGATEST) -remove-runs :runname $(RUNNAME)% -target %/%/% -testpatt %/% + cd fullrun;$(BINPATH)/dashboard -rows 15 & + +dashboard : cleanprep + cd fullrun && $(BINPATH)/dashboard -rows $(ROWS) & + +newdashboard : cleanprep + cd fullrun && $(BINPATH)/newdashboard & + +mdboard : cleanprep + cd fullrun && $(BINPATH)/mdboard & + +remove : + cd fullrun;$(MEGATEST) -remove-runs :runname $(RUN) -testpatt % -itempatt % :sysname % :fsname % :datapath % + +clean : + rm cleanprep + +kill : + killall -v mtest main.sh dboard || true + rm -rf /tmp/.$(USER)-portlogger.db *run/db/* */megatest.db */logging.db */monitor.db fullrun/tmp/mt_*/* fullrun/tmp/mt_*/.db* fullrun/logs/*.log fullrun/*.log || true + killall -v mtest dboard || true + +hardkill : kill + sleep 2;killall -v mtest main.sh dboard -9 + +listservers : + cd fullrun;$(MEGATEST) -list-servers + +runforever : + while(ls); do runname=`date +%F-%R:%S`;(cd fullrun;$(MEGATEST) -runall -target ubuntu/nfs/none :runname $$runname;/home/matt/data/megatest/megatest -runall -target ubuntu/nfs/none :runname $$runname;/home/matt/data/megatest/megatest -runall -target ubuntu/nfs/none :runname $$runname);done ADDED tests/dep-tests/common.testconfig Index: tests/dep-tests/common.testconfig ================================================================== --- /dev/null +++ tests/dep-tests/common.testconfig @@ -0,0 +1,34 @@ +[ezsteps] +delay sleep $SPEED;echo "Delayed $SPEED seconds" + +# lookup table for waitons +# +[std] +genlib setup +test1 genlib +aggregate test1 +test2 aggregate 
+results test2 + +# simple removes the challenging "aggregate" dependency between test1 and test2. +# and the itempatt irregularity from genlib -> test1 +# +[simple] +test1 setup +test2 test1 +results test2 + +[test_meta] +author matt +owner matt +description This is a common testconfig shared by all the tests + +[logpro] +delay ;; Delay step logpro + (expect:required in "LogFileBody" > 0 "Delayed message" #/Delayed \d+ seconds/) + +reviewed 09/10/2011, by Matt + +[requirements] +mode itemwait + ADDED tests/dep-tests/common_itemstable.testconfig Index: tests/dep-tests/common_itemstable.testconfig ================================================================== --- /dev/null +++ tests/dep-tests/common_itemstable.testconfig @@ -0,0 +1,4 @@ +[itemstable] +VIEW layout layout layout schematic schematic schematic +CELL ntran ptran diode ntran ptran diode + ADDED tests/dep-tests/megatest.config Index: tests/dep-tests/megatest.config ================================================================== --- /dev/null +++ tests/dep-tests/megatest.config @@ -0,0 +1,67 @@ +[fields] +# this field changes the dep tree +DEPS TEXT + +# this field changes the test run time; 0 .. N or random +SPEED TEXT + +[dashboard] +pre-command xterm -geometry 180x20 -e " +post-command |& tee results.log ;echo Press any key to continue;bash -c 'read -n 1 -s'" & +testsort -event_time + +[misc] +home #{shell readlink -f $MT_RUN_AREA_HOME} +parent #{shell readlink -f $MT_RUN_AREA_HOME/..} + +[setup] +linktree #{get misc parent}/links +max_concurrent_jobs 100000 +# It is possible (but not recommended) to override the rsync command used +# to populate the test directories. For test development the following +# example can be useful +# +testcopycmd cp --remove-destination -rsv TEST_SRC_PATH/. TEST_TARG_PATH/. >> TEST_TARG_PATH/mt_launch.log 2>> TEST_TARG_PATH/mt_launch.log + +# or for hard links + +# testcopycmd cp --remove-destination -rlv TEST_SRC_PATH/. TEST_TARG_PATH/. 
+ +# override the logview command +# +logviewer (%MTCMD%) 2> /dev/null > /dev/null + +# override the html viewer launch command +# +# htmlviewercmd firefox -new-window +htmlviewercmd arora + +[env-override] +# MT_XTERM_CMD overrides the terminal command +# MT_XTERM_CMD xterm -bg lightgreen -fg black + +## disks are: +## name host:/path/to/area +## -or- +## name /path/to/area +[disks] +disk0 #{get misc parent}/runs + +#====================================================================== +# Machine flavors +# +# These specify lists of hosts or scripts to use or call for various +# flavors of task. +# +#====================================================================== + +[flavors] + +plain hosts: xena, phoebe +strong command: NBFAKE_HOST=zeus nbfake +arm hosts: cubian + +# Uncomment these to emulate a job queue with a long time (look in bin/sleeprunner for the time) +[jobtools] +launcher nbfake +maxload 2.0 ADDED tests/dep-tests/runconfigs.config Index: tests/dep-tests/runconfigs.config ================================================================== --- /dev/null +++ tests/dep-tests/runconfigs.config @@ -0,0 +1,8 @@ +[default] + +# [DEPS/SPEED] + +[simple/0] + +[std/0] + ADDED tests/dep-tests/tests/aggregate/testconfig Index: tests/dep-tests/tests/aggregate/testconfig ================================================================== --- /dev/null +++ tests/dep-tests/tests/aggregate/testconfig @@ -0,0 +1,4 @@ +[include #{getenv MT_RUN_AREA_HOME}/common.testconfig] + +[requirements] +waiton #{get #{getenv DEPS} aggregate} ADDED tests/dep-tests/tests/genlib/testconfig Index: tests/dep-tests/tests/genlib/testconfig ================================================================== --- /dev/null +++ tests/dep-tests/tests/genlib/testconfig @@ -0,0 +1,8 @@ +[include #{getenv MT_RUN_AREA_HOME}/common.testconfig] + +[itemstable] +VIEWTYPE layout schematic + +[requirements] +waiton #{get #{getenv DEPS} genlib} +# itemmap /.* ADDED 
tests/dep-tests/tests/results/testconfig Index: tests/dep-tests/tests/results/testconfig ================================================================== --- /dev/null +++ tests/dep-tests/tests/results/testconfig @@ -0,0 +1,5 @@ +[include #{getenv MT_RUN_AREA_HOME}/common.testconfig] + +[requirements] +waiton #{get #{getenv DEPS} results} + ADDED tests/dep-tests/tests/setup/testconfig Index: tests/dep-tests/tests/setup/testconfig ================================================================== --- /dev/null +++ tests/dep-tests/tests/setup/testconfig @@ -0,0 +1,2 @@ +[include #{getenv MT_RUN_AREA_HOME}/common.testconfig] + ADDED tests/dep-tests/tests/test1/testconfig Index: tests/dep-tests/tests/test1/testconfig ================================================================== --- /dev/null +++ tests/dep-tests/tests/test1/testconfig @@ -0,0 +1,11 @@ +[include #{getenv MT_RUN_AREA_HOME}/common.testconfig] + +[include #{getenv MT_RUN_AREA_HOME}/common_itemstable.testconfig] + +[requirements] +waiton #{get #{getenv DEPS} test1} + +# itemmap maps these items back to previous test +# NB// mapping is in reverse - NOT forwards! 
+# +itemmap /.* ADDED tests/dep-tests/tests/test2/testconfig Index: tests/dep-tests/tests/test2/testconfig ================================================================== --- /dev/null +++ tests/dep-tests/tests/test2/testconfig @@ -0,0 +1,7 @@ +[include #{getenv MT_RUN_AREA_HOME}/common.testconfig] + +[include #{getenv MT_RUN_AREA_HOME}/common_itemstable.testconfig] + +[requirements] +waiton #{get #{getenv DEPS} test2} + ADDED tests/dynamic-waiton-example/common.testconfig Index: tests/dynamic-waiton-example/common.testconfig ================================================================== --- /dev/null +++ tests/dynamic-waiton-example/common.testconfig @@ -0,0 +1,16 @@ +[ezsteps] +delay sleep $SPEED;echo "Delayed $SPEED seconds" + +[requirements] +#{getenv WAITON_#{getenv MT_TEST_NAME}} + +[test_meta] +author matt +owner matt +description This is a common testconfig shared by all the tests + +[logpro] +delay ;; Delay step logpro + (expect:required in "LogFileBody" > 0 "Delayed message" #/Delayed \d+ seconds/) + +reviewed 09/10/2011, by Matt ADDED tests/dynamic-waiton-example/common_itemstable.testconfig Index: tests/dynamic-waiton-example/common_itemstable.testconfig ================================================================== --- /dev/null +++ tests/dynamic-waiton-example/common_itemstable.testconfig @@ -0,0 +1,4 @@ +[itemstable] +VIEW layout layout layout schematic schematic schematic +CELL ntran ptran diode ntran ptran diode + ADDED tests/dynamic-waiton-example/megatest.config Index: tests/dynamic-waiton-example/megatest.config ================================================================== --- /dev/null +++ tests/dynamic-waiton-example/megatest.config @@ -0,0 +1,67 @@ +[fields] +# this field changes the dep tree +DEPS TEXT + +# this field changes the test run time; 0 .. 
N or random +SPEED TEXT + +[dashboard] +pre-command xterm -geometry 180x20 -e " +post-command |& tee results.log ;echo Press any key to continue;bash -c 'read -n 1 -s'" & +testsort -event_time + +[misc] +home #{shell readlink -f $MT_RUN_AREA_HOME} +parent #{shell readlink -f $MT_RUN_AREA_HOME/..} + +[setup] +linktree #{get misc parent}/links +max_concurrent_jobs 100000 +# It is possible (but not recommended) to override the rsync command used +# to populate the test directories. For test development the following +# example can be useful +# +testcopycmd cp --remove-destination -rsv TEST_SRC_PATH/. TEST_TARG_PATH/. >> TEST_TARG_PATH/mt_launch.log 2>> TEST_TARG_PATH/mt_launch.log + +# or for hard links + +# testcopycmd cp --remove-destination -rlv TEST_SRC_PATH/. TEST_TARG_PATH/. + +# override the logview command +# +logviewer (%MTCMD%) 2> /dev/null > /dev/null + +# override the html viewer launch command +# +# htmlviewercmd firefox -new-window +htmlviewercmd arora + +[env-override] +# MT_XTERM_CMD overrides the terminal command +# MT_XTERM_CMD xterm -bg lightgreen -fg black + +## disks are: +## name host:/path/to/area +## -or- +## name /path/to/area +[disks] +disk0 #{get misc parent}/runs + +#====================================================================== +# Machine flavors +# +# These specify lists of hosts or scripts to use or call for various +# flavors of task. 
+# +#====================================================================== + +[flavors] + +plain hosts: xena, phoebe +strong command: NBFAKE_HOST=zeus nbfake +arm hosts: cubian + +# Uncomment these to emulate a job queue with a long time (look in bin/sleeprunner for the time) +[jobtools] +launcher nbfake +maxload 2.0 ADDED tests/dynamic-waiton-example/runconfigs.config Index: tests/dynamic-waiton-example/runconfigs.config ================================================================== --- /dev/null +++ tests/dynamic-waiton-example/runconfigs.config @@ -0,0 +1,11 @@ +[default] +WAITON_setup +WAITON_genlib waiton setup +WAITON_test1 waiton genlib +WAITON_aggregate waiton test1 +WAITON_test2 waiton aggregate + +# [DEPS/SPEED] + +[std/0] + ADDED tests/dynamic-waiton-example/tests/aggregate/testconfig Index: tests/dynamic-waiton-example/tests/aggregate/testconfig ================================================================== --- /dev/null +++ tests/dynamic-waiton-example/tests/aggregate/testconfig @@ -0,0 +1,2 @@ +[include #{getenv MT_RUN_AREA_HOME}/common.testconfig] + ADDED tests/dynamic-waiton-example/tests/genlib/testconfig Index: tests/dynamic-waiton-example/tests/genlib/testconfig ================================================================== --- /dev/null +++ tests/dynamic-waiton-example/tests/genlib/testconfig @@ -0,0 +1,5 @@ +[include #{getenv MT_RUN_AREA_HOME}/common.testconfig] + +[itemstable] +VIEWTYPE layout schematic + ADDED tests/dynamic-waiton-example/tests/results/testconfig Index: tests/dynamic-waiton-example/tests/results/testconfig ================================================================== --- /dev/null +++ tests/dynamic-waiton-example/tests/results/testconfig @@ -0,0 +1,2 @@ +[include #{getenv MT_RUN_AREA_HOME}/common.testconfig] + ADDED tests/dynamic-waiton-example/tests/setup/testconfig Index: tests/dynamic-waiton-example/tests/setup/testconfig ================================================================== --- /dev/null +++ 
tests/dynamic-waiton-example/tests/setup/testconfig @@ -0,0 +1,2 @@ +[include #{getenv MT_RUN_AREA_HOME}/common.testconfig] + ADDED tests/dynamic-waiton-example/tests/test1/testconfig Index: tests/dynamic-waiton-example/tests/test1/testconfig ================================================================== --- /dev/null +++ tests/dynamic-waiton-example/tests/test1/testconfig @@ -0,0 +1,3 @@ +[include #{getenv MT_RUN_AREA_HOME}/common.testconfig] + +[include #{getenv MT_RUN_AREA_HOME}/common_itemstable.testconfig] ADDED tests/dynamic-waiton-example/tests/test2/testconfig Index: tests/dynamic-waiton-example/tests/test2/testconfig ================================================================== --- /dev/null +++ tests/dynamic-waiton-example/tests/test2/testconfig @@ -0,0 +1,3 @@ +[include #{getenv MT_RUN_AREA_HOME}/common.testconfig] + +[include #{getenv MT_RUN_AREA_HOME}/common_itemstable.testconfig] ADDED tests/fdktestqa/fdk.config Index: tests/fdktestqa/fdk.config ================================================================== --- /dev/null +++ tests/fdktestqa/fdk.config @@ -0,0 +1,36 @@ +[fields] +SYSTEM TEXT +RELEASE TEXT + +[setup] +# Adjust max_concurrent_jobs to limit how much you load your machines +# max_concurrent_jobs 150 +max_concurrent_jobs 1000 + +# This is your link path, you can move it but it is generally better to keep it stable +linktree #{shell readlink -f #{getenv MT_RUN_AREA_HOME}/../simplelinks} + +[include testqa/configs/megatest.abc.config] + +# timeout 0.025 + +[jobtools] +maxload 4 +launcher nbfake + +[server] +# timeout 0.01 +# homehost xena +# homehost 143.182.225.38 + +# force server +server-query-threshold 0 + + +[jobtools] +# launcher nbq -P ch_vp -C SLES11_EM64T_4G -Q /ciaf/fdk +launcher nbfake +maxload 4 + +# launcher bsub -q priority -o $MT_TEST_RUN_DIR/openlava.log + ADDED tests/fdktestqa/testqa/Makefile Index: tests/fdktestqa/testqa/Makefile ================================================================== --- /dev/null 
+++ tests/fdktestqa/testqa/Makefile @@ -0,0 +1,46 @@ +BINDIR = $(PWD)/../../../bin +PATH := $(BINDIR):$(PATH) +MEGATEST = $(BINDIR)/megatest +DASHBOARD = $(BINDIR)/dashboard +NEWDASHBOARD = $(BINDIR)/newdashboard +RUNNAME = a +NUMTESTS = 20 + +all : + $(MEGATEST) -remove-runs -target a/b :runname c -testpatt %/% + $(MEGATEST) -runtests % -target a/b :runname c + +bigbig : + for tn in a b c d;do \ + ($(MEGATEST) -runtests % -target a/b :runname $tn & ) ; \ + done + +waitonpatt : + megatest -remove-runs -runname waitonpatt -target a/b -testpatt % + NUMTESTS=15 megatest -run -target a/b -runname waitonpatt -testpatt bigrun3/%8 + +waitonall : + megatest -remove-runs -runname waitonall -target a/b -testpatt % + NUMTESTS=20 megatest -run -target a/b -runname waitonall -testpatt alltop + +bigrun : + NUMTESTS=$(NUMTESTS) $(MEGATEST) -runtests bigrun -target a/bigrun :runname a$(shell date +%V) + +bigrun2 : + NUMTESTS=$(NUMTESTS) $(MEGATEST) -runtests bigrun2 -target a/bigrun2 :runname a$(shell date +%V) + +bigrun3 : + NUMTESTS=$(NUMTESTS) $(MEGATEST) -runtests bigrun3 -target a/bigrun3 :runname $(RUNNAME) + +dashboard : + mkdir -p ../simpleruns + $(DASHBOARD) -rows 20 & + +newdashboard : + $(NEWDASHBOARD) & + +compile : + (cd ../../..;make -j && make install) + +clean : + rm -rf ../simple*/*/* megatest.db db/* ../simple*/.db/* logs/* monitor.db ADDED tests/fdktestqa/testqa/README Index: tests/fdktestqa/testqa/README ================================================================== --- /dev/null +++ tests/fdktestqa/testqa/README @@ -0,0 +1,1 @@ +set NUMTESTS to set the number of tests that will be run. A small number (say 20) illustrates itemwait well. 
ADDED tests/fdktestqa/testqa/configs/megatest.abc.config Index: tests/fdktestqa/testqa/configs/megatest.abc.config ================================================================== --- /dev/null +++ tests/fdktestqa/testqa/configs/megatest.abc.config @@ -0,0 +1,9 @@ +# Valid values for state and status for steps, NB// It is not recommended you use this +[validvalues] +state start end completed + +# Job tools are more advanced ways to control how your jobs are launched +[jobtools] +# useshell yes + +[include megatest.def.config] ADDED tests/fdktestqa/testqa/configs/megatest.def.config Index: tests/fdktestqa/testqa/configs/megatest.def.config ================================================================== --- /dev/null +++ tests/fdktestqa/testqa/configs/megatest.def.config @@ -0,0 +1,8 @@ +# You can override environment variables for all your tests here +[env-override] +EXAMPLE_VAR example value + +# As you run more tests you may need to add additional disks, the names are arbitrary but must be unique +[disks] +disk0 #{scheme (nice-path "#{getenv MT_RUN_AREA_HOME}/../simpleruns")} + ADDED tests/fdktestqa/testqa/megatest.config Index: tests/fdktestqa/testqa/megatest.config ================================================================== --- /dev/null +++ tests/fdktestqa/testqa/megatest.config @@ -0,0 +1,11 @@ +[setup] +testcopycmd cp --remove-destination -rlv TEST_SRC_PATH/. TEST_TARG_PATH/. 
>> TEST_TARG_PATH/mt_launch.log 2>> TEST_TARG_PATH/mt_launch.log +# launchwait no + +# All these are overridden in ../fdk.config +# [jobtools] +# launcher nbfake +# launcher bsub -q priority -o $MT_TEST_RUN_DIR/openlava.log + +[include ../fdk.config] + ADDED tests/fdktestqa/testqa/runconfigs.config Index: tests/fdktestqa/testqa/runconfigs.config ================================================================== --- /dev/null +++ tests/fdktestqa/testqa/runconfigs.config @@ -0,0 +1,6 @@ +[default] +ALLTESTS see this variable + +# Your variables here are grouped by targets [SYSTEM/RELEASE] +[SYSTEM_val/RELEASE_val] +ANOTHERVAR only defined if target is SYSTEM_val/RELEASE_val ADDED tests/fdktestqa/testqa/runsuite.sh Index: tests/fdktestqa/testqa/runsuite.sh ================================================================== --- /dev/null +++ tests/fdktestqa/testqa/runsuite.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +(cd ../../..;make && make install) || exit 1 +export PATH=$PWD/../../../bin:$PATH + +for i in a b c d e f;do + # g h i j k l m n o p q r s t u v w x y z;do + megatest -runtests % -target a/b :runname $i & +done + +echo "" > num-running.log +while true; do + foo=`megatest -list-runs % | grep RUNNING | wc -l` + echo "Num running at `date` $foo" + echo "$foo at `date`" >> num-running.log + # to make the test go at a reasonable clip only gather this info ever minute + sleep 1m +done ADDED tests/fdktestqa/testqa/tests/alltop/testconfig Index: tests/fdktestqa/testqa/tests/alltop/testconfig ================================================================== --- /dev/null +++ tests/fdktestqa/testqa/tests/alltop/testconfig @@ -0,0 +1,19 @@ +# Add additional steps here. 
Format is "stepname script" +[vars] +step1var step1.sh + +[ezsteps] +step1 megatest -list-runs $MT_RUNNAME -target $MT_TARGET -itempatt % + +# Test requirements are specified here +[requirements] +waiton setup bigrun bigrun3 bigrun2 +priority 0 + +# test_meta is a section for storing additional data on your test +[test_meta] +author matt +owner matt +description An example test +tags tagone,tagtwo +reviewed never ADDED tests/fdktestqa/testqa/tests/bigrun/step1.sh Index: tests/fdktestqa/testqa/tests/bigrun/step1.sh ================================================================== --- /dev/null +++ tests/fdktestqa/testqa/tests/bigrun/step1.sh @@ -0,0 +1,13 @@ +#!/bin/bash +if [ $NUMBER -lt 10 ];then + sleep 20 + sleep `echo 4 * $NUMBER | bc` +else + sleep 130 +fi + +if [[ $RANDOM -lt 10000 ]];then + exit 1 +else + exit 0 +fi ADDED tests/fdktestqa/testqa/tests/bigrun/testconfig Index: tests/fdktestqa/testqa/tests/bigrun/testconfig ================================================================== --- /dev/null +++ tests/fdktestqa/testqa/tests/bigrun/testconfig @@ -0,0 +1,23 @@ +# Add additional steps here. 
Format is "stepname script" +[vars] +step1var step1.sh + +[ezsteps] +step1 #{get vars step1var} + +# Test requirements are specified here +[requirements] +waiton setup +priority 0 + +# Iteration for your tests are controlled by the items section +[items] +NUMBER #{scheme (string-intersperse (map number->string (sort (let loop ((a 0)(res '()))(if (<= a (or (any->number (get-environment-variable "NUMTESTS")) 2500))(loop (+ a 1)(cons a res)) res)) <)) " ")} + +# test_meta is a section for storing additional data on your test +[test_meta] +author matt +owner matt +description An example test +tags tagone,tagtwo +reviewed never ADDED tests/fdktestqa/testqa/tests/bigrun2/step1.sh Index: tests/fdktestqa/testqa/tests/bigrun2/step1.sh ================================================================== --- /dev/null +++ tests/fdktestqa/testqa/tests/bigrun2/step1.sh @@ -0,0 +1,9 @@ +#!/bin/sh +# prev_test=`$MT_MEGATEST -test-paths -target $MT_TARGET :runname $MT_RUNNAME -testpatt bigrun/$NUMBER` +# if [ -e $prev_test/testconfig ]; then +# exit 0 +# else +# exit 1 +# fi + +exit 0 ADDED tests/fdktestqa/testqa/tests/bigrun2/testconfig Index: tests/fdktestqa/testqa/tests/bigrun2/testconfig ================================================================== --- /dev/null +++ tests/fdktestqa/testqa/tests/bigrun2/testconfig @@ -0,0 +1,26 @@ +# Add additional steps here. 
Format is "stepname script" +[ezsteps] +step1 step1.sh + +# Test requirements are specified here +[requirements] +waiton bigrun +priority 0 +mode itemwait +itemmap .*/ + +# Iteration for your tests are controlled by the items section +[items] +NUMBER #{scheme (string-intersperse (map (lambda (x)(conc "blah/" x)) \ + (map number->string (sort (let loop ((a 0)(res '())) \ + (if (<= a (or (any->number (get-environment-variable "NUMTESTS")) 2500)) \ + (loop (+ a 1)(cons a res)) res)) <))) " ")} + + +# test_meta is a section for storing additional data on your test +[test_meta] +author matt +owner matt +description An example test +tags tagone,tagtwo +reviewed never ADDED tests/fdktestqa/testqa/tests/bigrun3/step1.sh Index: tests/fdktestqa/testqa/tests/bigrun3/step1.sh ================================================================== --- /dev/null +++ tests/fdktestqa/testqa/tests/bigrun3/step1.sh @@ -0,0 +1,9 @@ +#!/bin/sh +# prev_test=`$MT_MEGATEST -test-paths -target $MT_TARGET :runname $MT_RUNNAME -testpatt bigrun/$NUMBER` +# if [ -e $prev_test/testconfig ]; then +# exit 0 +# else +# exit 1 +# fi + +exit 0 ADDED tests/fdktestqa/testqa/tests/bigrun3/testconfig Index: tests/fdktestqa/testqa/tests/bigrun3/testconfig ================================================================== --- /dev/null +++ tests/fdktestqa/testqa/tests/bigrun3/testconfig @@ -0,0 +1,33 @@ +# Add additional steps here. 
Format is "stepname script" +[ezsteps] +step1 step1.sh + +# Test requirements are specified here +[requirements] +waiton bigrun2 +priority 0 +mode itemwait +# pattern replacement +# +# Remove everything up to the last / +# itemmap .*/ +# +# Replace foo/ with bar/ +# itemmap foo/ bar/ +# +itemmap .*/ + +# Iteration for your tests are controlled by the items section +[items] +NUMBER #{scheme (string-intersperse (map (lambda (x)(conc "blah/" x)) \ + (map number->string (sort (let loop ((a 0)(res '())) \ + (if (<= a (or (any->number (get-environment-variable "NUMTESTS")) 2500)) \ + (loop (+ a 1)(cons a res)) res)) <))) " ")} + +# test_meta is a section for storing additional data on your test +[test_meta] +author matt +owner matt +description An example test +tags tagone,tagtwo +reviewed never ADDED tests/fixpath.csh Index: tests/fixpath.csh ================================================================== --- /dev/null +++ tests/fixpath.csh @@ -0,0 +1,1 @@ +setenv PATH `readlink -f ../bin`:$PATH ADDED tests/fixpath.sh Index: tests/fixpath.sh ================================================================== --- /dev/null +++ tests/fixpath.sh @@ -0,0 +1,1 @@ +export PATH=$(readlink -f ../bin):$PATH ADDED tests/fslsync/megatest.config Index: tests/fslsync/megatest.config ================================================================== --- /dev/null +++ tests/fslsync/megatest.config @@ -0,0 +1,20 @@ +[fields] +YEAR TEXT +WEEKNUM TEXT +DAY TEXT + +[setup] +# Adjust max_concurrent_jobs to limit how much you load your machines +max_concurrent_jobs 50 + +# This is your link path, you can move it but it is generally better to keep it stable +linktree #{shell readlink -f #{getenv MT_RUN_AREA_HOME}/fslsynclinks} + +# Job tools are more advanced ways to control how your jobs are launched +[jobtools] +useshell yes +launcher nbfind + +# As you run more tests you may need to add additional disks, the names are arbitrary but must be unique +[disks] +disk0 #{shell readlink -f 
#{getenv MT_RUN_AREA_HOME}/fslsyncruns} ADDED tests/fslsync/runconfigs.config Index: tests/fslsync/runconfigs.config ================================================================== --- /dev/null +++ tests/fslsync/runconfigs.config @@ -0,0 +1,5 @@ +[default] +WORKAREA /tmp/#{getenv USER}/fslsync +FSLSAREA /tmp/#{getenv USER}/fsls +AREANAMES code data +SITENAMES #{shell cat $MT_RUN_AREA_HOME/sites.dat} ADDED tests/fslsync/sites.dat.template Index: tests/fslsync/sites.dat.template ================================================================== --- /dev/null +++ tests/fslsync/sites.dat.template @@ -0,0 +1,1 @@ +site1 ADDED tests/fslsync/tests/setup/mkdirs.logpro Index: tests/fslsync/tests/setup/mkdirs.logpro ================================================================== --- /dev/null +++ tests/fslsync/tests/setup/mkdirs.logpro @@ -0,0 +1,8 @@ +;; You should have at least one expect:required. This ensures that your process ran +(expect:required in "LogFileBody" > 0 "done" #/done/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! 
+(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/)) ;; but disallow any other errors ADDED tests/fslsync/tests/setup/mkdirs.sh Index: tests/fslsync/tests/setup/mkdirs.sh ================================================================== --- /dev/null +++ tests/fslsync/tests/setup/mkdirs.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +# Create needed directories both local and remote + +# Remote +ssh $SITENAME mkdir -vp $WORKAREA/$SITENAME/$AREANAME + +# Local +mkdir -vp $WORKAREA/$SITENAME/$AREANAME + +echo done ADDED tests/fslsync/tests/setup/seedcache.logpro Index: tests/fslsync/tests/setup/seedcache.logpro ================================================================== --- /dev/null +++ tests/fslsync/tests/setup/seedcache.logpro @@ -0,0 +1,8 @@ +;; You should have at least one expect:required. This ensures that your process ran +(expect:required in "LogFileBody" > 0 "done" #/done/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/)) ;; but disallow any other errors ADDED tests/fslsync/tests/setup/seedcache.sh Index: tests/fslsync/tests/setup/seedcache.sh ================================================================== --- /dev/null +++ tests/fslsync/tests/setup/seedcache.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +# Copy any non-existent files to the cache before doing the rsync +# in the hopes of saving some time. 
+ +echo done ADDED tests/fslsync/tests/setup/testconfig Index: tests/fslsync/tests/setup/testconfig ================================================================== --- /dev/null +++ tests/fslsync/tests/setup/testconfig @@ -0,0 +1,21 @@ +# Add additional steps here. Format is "stepname script" +[ezsteps] +mkdirs mkdirs.sh +seedcache seedcache.sh + +# Test requirements are specified here +[requirements] +priority 0 + +# Iteration for your tests are controlled by the items section +[items] +AREANAME #{getenv AREANAMES} +SITENAME #{getenv SITENAMES} + +# test_meta is a section for storing additional data on your test +[test_meta] +author matt +owner matt +description Setup needed directories and seed the caches +tags tagone,tagtwo +reviewed never ADDED tests/fslsync/tests/sync/fsync.logpro Index: tests/fslsync/tests/sync/fsync.logpro ================================================================== --- /dev/null +++ tests/fslsync/tests/sync/fsync.logpro @@ -0,0 +1,8 @@ +;; You should have at least one expect:required. This ensures that your process ran +(expect:required in "LogFileBody" > 0 "done" #/done/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! 
+(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/)) ;; but disallow any other errors ADDED tests/fslsync/tests/sync/fsync.sh Index: tests/fslsync/tests/sync/fsync.sh ================================================================== --- /dev/null +++ tests/fslsync/tests/sync/fsync.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +# Get the list of fossils from the cache + +FILES=$(ls $FSLSAREA/$AREANAME|grep fossil) + +# Do the remote sync from CACHE to FOSSILS +ssh $SITENAME /bin/bash < 0 "done" #/done/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/)) ;; but disallow any other errors ADDED tests/fslsync/tests/sync/rsync.sh Index: tests/fslsync/tests/sync/rsync.sh ================================================================== --- /dev/null +++ tests/fslsync/tests/sync/rsync.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +# Sync to remote cache +rsync -avz $FSLSAREA/$AREANAME/ $SITENAME:$WORKAREA/$SITENAME/$AREANAME/ & +# Sync to local cache +rsync -avz $SITENAME:$FSLSAREA/$AREANAME/ $WORKAREA/$SITENAME/$AREANAME/ & + +# Wait until rsyncs complete +wait + +echo done ADDED tests/fslsync/tests/sync/testconfig Index: tests/fslsync/tests/sync/testconfig ================================================================== --- /dev/null +++ tests/fslsync/tests/sync/testconfig @@ -0,0 +1,22 @@ +# Add additional steps here. 
Format is "stepname script" +[ezsteps] +rsync rsync.sh +fsync fsync.sh + +# Test requirements are specified here +[requirements] +waiton setup +priority 0 + +# Iteration for your tests are controlled by the items section +[items] +AREANAME #{getenv AREANAMES} +SITENAME #{getenv SITENAMES} + +# test_meta is a section for storing additional data on your test +[test_meta] +author matt +owner matt +description Sync fossils to remote +tags tagone,tagtwo +reviewed never ADDED tests/fullrun/afs.config Index: tests/fullrun/afs.config ================================================================== --- /dev/null +++ tests/fullrun/afs.config @@ -0,0 +1,1 @@ +TESTSTORUN priority_6 sqlitespeed/ag ADDED tests/fullrun/common_runconfigs.config Index: tests/fullrun/common_runconfigs.config ================================================================== --- /dev/null +++ tests/fullrun/common_runconfigs.config @@ -0,0 +1,17 @@ +[default] +FOOBARBAZZZZ not a useful value +BIGBOB $FOOBARBAZZZZ/bobby +FREDDY $sysname/$fsname +TOMMY [system pwd] + +[/tmp/mrwellan/env/ubuntu/afs] +BOGOUS Bob + +[default/ubuntu/nfs] +CURRENT /blah +ALT_VAR we should not see this one + +[ubuntu/nfs/none] +CURRENT /tmp/nada +UNIQUEVAR this one should be set + ADDED tests/fullrun/configs/mt_include_1.config Index: tests/fullrun/configs/mt_include_1.config ================================================================== --- /dev/null +++ tests/fullrun/configs/mt_include_1.config @@ -0,0 +1,23 @@ +[setup] +# exectutable /path/to/megatest +max_concurrent_jobs 250 + +linktree #{getenv MT_RUN_AREA_HOME}/tmp/mt_links + +[jobtools] +useshell yes +# ## launcher launches jobs, the job is managed on the target host +## by megatest, comment out launcher to run local +# workhosts localhost hermes +# launcher exec nbfake + +launcher nbfake +# launcher echo + +# launcher nbfind +# launcher nodanggood + +## use "xterm -e csi -- " as a launcher to examine the launch environment. 
+## exit with (exit) +## get a shell with (system "bash") +# launcher xterm -e csi -- ADDED tests/fullrun/configs/mt_include_2.config Index: tests/fullrun/configs/mt_include_2.config ================================================================== --- /dev/null +++ tests/fullrun/configs/mt_include_2.config @@ -0,0 +1,2 @@ +[disks] +disk0 #{getenv MT_RUN_AREA_HOME}/tmp/mt_runs ADDED tests/fullrun/ez_pass_linked/testconfig Index: tests/fullrun/ez_pass_linked/testconfig ================================================================== --- /dev/null +++ tests/fullrun/ez_pass_linked/testconfig @@ -0,0 +1,13 @@ +[setup] + +[ezsteps] +lookittmp ls /tmp +lookithome ls /home + +[test_meta] +author matt +owner bob +description This test runs a single ezstep which is expected to pass, no logpro file. + +tags first,single +reviewed 09/10/2011, by Matt ADDED tests/fullrun/megatest.config Index: tests/fullrun/megatest.config ================================================================== --- /dev/null +++ tests/fullrun/megatest.config @@ -0,0 +1,298 @@ +[fields] +sysname TEXT +fsname TEXT +datapath TEXT + +# refareas can be searched to find previous runs +# the path points to where megatest.db exists +[refareas] +area1 /tmp/oldarea/megatest + +[include ./configs/mt_include_1.config] + +[dashboard] +pre-command xterm -geometry 180x20 -e " +post-command |& tee results.log ;echo Press any key to continue;bash -c 'read -n 1 -s'" & +testsort -event_time + +[misc] +home #{shell readlink -f $MT_RUN_AREA_HOME} +parent #{shell readlink -f $MT_RUN_AREA_HOME/..} +testsuite #{shell basename $MT_RUN_AREA_HOME} + +[tests-paths] +1 #{get misc parent}/simplerun/tests + +[setup] + +# turn off faststart, put monitor.db in MT_RUN_AREA_HOME/db +# and set the dbdir to /var/tmp/$USER/mt_db to enable keeping +# the raw db in /var/tmp/$USER +# +faststart no +monitordir #{getenv MT_RUN_AREA_HOME}/db +dbdir #{getenv MT_RUN_AREA_HOME}/db + +# sync more aggressively to megatest-db +megatest-db yes + 
+# Set launchwait to no to use the more aggressive code that does not wait for the launch to complete before proceeding +# this may save a few milliseconds on launching tests +# launchwait no +waivercommentpatt ^WW\d+ [a-z].* +incomplete-timeout 1 + +# wait for runs to completely complete. yes, anything else is no +run-wait yes + +# If set to "default" the old code is used. Otherwise defaults to 200 or uses +# numeric value given. +# +runqueue 20 + +# Default runtimelim 1d 1h 1m 10s +# +runtimelim 20m + +# Deadtime - when to consider tests dead (i.e. haven't heard from them in too long) +# Number in seconds, set to 600 seconds here. Default is +# 1800 +# +deadtime 600 + +# It is possible (but not recommended) to override the rsync command used +# to populate the test directories. For test development the following +# example can be useful +# +testcopycmd cp --remove-destination -rsv TEST_SRC_PATH/. TEST_TARG_PATH/. >> TEST_TARG_PATH/mt_launch.log 2>> TEST_TARG_PATH/mt_launch.log + +# or for hard links + +# testcopycmd cp --remove-destination -rlv TEST_SRC_PATH/. TEST_TARG_PATH/. + +# FULL or 2, NORMAL or 1, OFF or 0 +synchronous 0 +# Throttle roughly scales the db access milliseconds to seconds delay +throttle 0.2 +# Max retries allows megatest to re-check that a tests status has changed +# as tests can have transient FAIL status occasionally +maxretries 20 + +# Setup continued. 
+[setup] + +# override the logview command +# +logviewer (%MTCMD%) 2> /dev/null > /dev/null + +# override the html viewer launch command +# +# htmlviewercmd firefox -new-window +htmlviewercmd arora + +# -runtests automatically deletes the records for tests with the listed states on starting up a run allowing them to re-run +# (nb// this is in addition to NOT_STARTED which is automatically re-run) +# +allow-auto-rerun INCOMPLETE ZERO_ITEMS +# could add: STUCK STUCK/DEAD UNKNOWN KILLED KILLREQ PREQ_DISCARD + +[validvalues] +state start end 0 1 - 2 +status pass fail n/a 0 1 running - 2 + +# These are set before all tests, override them +# in the testconfig [pre-launch-env-overrides] section +[env-override] + + +ALL_TOPLEVEL_TESTS exit_0 exit_1 ez_exit2_fail ez_fail ez_pass ezlog_fail \ + ezlog_fail_then_pass ezlog_pass ezlog_warn lineitem_fail lineitem_pass logpro_required_fail \ + manual_example neverrun priority_1 priority_10 priority_10_waiton_1 \ + priority_3 priority_4 priority_5 priority_6 priority_7 priority_8 \ + priority_9 runfirst singletest singletest2 sqlitespeed test_mt_vars \ + ez_fail_quick test1 test2 + +# This variable is honored by the loadrunner script. The value is in percent +MAX_ALLOWED_LOAD 200 + +# MT_XTERM_CMD overrides the terminal command +# MT_XTERM_CMD xterm -bg lightgreen -fg black + +SPECIAL_ENV_VARS overide them here - should be seen at launch and in the runs +TESTVAR [system readlink -f .] 
+DEADVAR [system ls] +VARWITHDOLLAR $HOME/.zshrc +WACKYVAR #{system ls > /dev/null} +WACKYVAR2 #{get validvalues state} +WACKYVAR3 #{getenv USER} +WACKYVAR4 #{scheme (+ 5 6 7)} +WACKYVAR5 #{getenv sysname}/#{getenv fsname}/#{getenv datapath} +WACKYVAR6 #{scheme (args:get-arg "-target")} +PREDICTABLE the_ans +MRAH MT_RUN_AREA_HOME=#{getenv MT_RUN_AREA_HOME} +# The empty var should have a definition with null string +EMPTY_VAR + +WRAPPEDVAR This var should have the work blah thrice: \ +blah \ +blah + +MYRUNNAME1 /this/is/#{getenv MT_RUNNAME}/my/runname +MYRUNNAME2 /this/is/[system echo $MT_RUNNAME]/my/runname + + +# XTERM [system xterm] +# RUNDEAD [system exit 56] + +[server] + +# force use of server always +required yes + +# Use http instead of direct filesystem access +transport http +# transport fs +# transport nmsg + +synchronous 0 + +# If the server can't be started on this port it will try the next port until +# it succeeds +port 9080 + +# This server will keep running this number of hours after last access. +# Three minutes is 0.05 hours +# timeout 0.025 +timeout 0.061 + +# faststart; unless no, start server but proceed with writes until server started +faststart no +# faststart yes + +# Start server when average query takes longer than this +# server-query-threshold 55500 +server-query-threshold 1000 +timeout 0.01 + +# daemonize yes +# hostname #{scheme (get-host-name)} + +## disks are: +## name host:/path/to/area +## -or- +## name /path/to/area +[disks] +disk0 /foobarbazz +disk1 not-a-disk + +[include ./configs/mt_include_2.config] + +[include #{getenv USER}_testing.config] + +[jobgroups] + +# NOTE: job groups will falsely count the toplevel test as a job. 
If possible add N +# to your jobgroups where N is the number of parallel runs you are likely to see +# +sqlite3 6 +blockz 10 +# to your jobgroups where N is the number of parallel runs you are likely to see +# + +#====================================================================== +# Machine flavors +# +# These specify lists of hosts or scripts to use or call for various +# flavors of task. +# +#====================================================================== + +[flavors] + +plain hosts: xena, phoebe +strong command: NBFAKE_HOST=zeus nbfake +arm hosts: cubian + +[archive] + +# where to get bup executable +# bup /path/to/bup + +# use machines of these flavor +useflavors plain +targsize 2G + +# minimum space required on an archive disk before allowing archiving to start (MB) +minspace 10 + +[archive-disks] + +# Archives will be organised under these paths like this: +# / +# Within the archive the data is structured like this: +# /// +disk0 /tmp/#{getenv USER}/adisk1 + +# Uncomment these to emulate a job queue with a long time (look in bin/sleeprunner for the time) +[jobtools] +launcher #{scheme (case (string->symbol (conc (getenv "datapath"))) \ + ((none) "nbfake") \ + ((openlava) "bsub -o $MT_LINKTREE/$MT_TARGET/$MT_RUNNAME.$MT_TESTNAME-$MT_ITEM_PATH.log") \ + ((sleeprunner) "sleeprunner") \ + (else "nbfake"))} + +# launcher bsub -q priority -o $MT_TEST_RUN_DIR/openlava.log + +# launcher #{shell if which bsub > /dev/null;then echo bsub -q priority -o openlava.log;else echo sleeprunner;fi} +# launcher nbfake + +[configf:settings trim-trailing-spaces yes] + +# Override the rollup for specific tests +[testrollup] +runfirst ls + +[test] +# VAL1 has trailing spaces +VAL1 Foo +VAL2 ==>#{get test VAL1}Bar<== no spaces between Foo and Bar to pass + +ltest #{scheme (case (string->symbol (conc (getenv "datapath"))) \ + ((none) "nbfake") \ + ((openlava) "bsub -o $MT_LINKTREE/$MT_TARGET/$MT_RUNNAME.$MT_TESTNAME-$MT_ITEM_PATH.log") \ + (else "sleeprunner"))} + 
+#================================================================ +# Flexi-launcher +#================================================================ +# +# [host-types] +# general ssh #{getbgesthost general} +# nbgeneral nbjob run JOBCOMMAND -log $MT_LINKTREE/$MT_TARGET/$MT_RUNNAME.$MT_TESTNAME-$MT_ITEM_PATH.lgo +# +# [hosts] +# general cubian xena +# +# [launchers] +# envsetup general +# xor/%/n 4C16G +# % nbgeneral +# +# [jobtools] +# launcher bsub +# # if defined and not "no" flexi-launcher will bypass launcher unless there is no +# # match. +# flexi-launcher yes + +[jobtools] +flexi-launcher yes + +[host-types] +general bsub +alt #{get jobtools launcher} +local nbfake +remote #{get jobtools launcher} + +[launchers] +runfirst/sum% remote +% general ADDED tests/fullrun/multi-dboard-load-all.scm Index: tests/fullrun/multi-dboard-load-all.scm ================================================================== --- /dev/null +++ tests/fullrun/multi-dboard-load-all.scm @@ -0,0 +1,13 @@ + +(require-library margs) +(load "../../common.scm") +(load "../../common_records.scm") +(load "../../margs.scm") +(load "../../megatest-version.scm") +(load "../../portlogger.scm") +(load "../../tasks.scm") +(load "../../db.scm") +(load "../../configf.scm") +(load "../../keys.scm") +(load "../../tree.scm") +(load "../../multi-dboard.scm") ADDED tests/fullrun/multi-dboard.sh Index: tests/fullrun/multi-dboard.sh ================================================================== --- /dev/null +++ tests/fullrun/multi-dboard.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +csi -I ../.. 
multi-dboard-load-all.scm ADDED tests/fullrun/nfs.config Index: tests/fullrun/nfs.config ================================================================== --- /dev/null +++ tests/fullrun/nfs.config @@ -0,0 +1,1 @@ +TESTSTORUN priority_4 test_mt_vars ADDED tests/fullrun/run-each-proc.sh Index: tests/fullrun/run-each-proc.sh ================================================================== --- /dev/null +++ tests/fullrun/run-each-proc.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +for x in `cat all-db-procs.txt`;do + cat > ~/.megatestrc <' '-_g'` + megatest -runtests sqlitespeed,test2,ez% -target ubuntu/nfs/none :runname $fname > $fname.log +done + + ADDED tests/fullrun/runconfigs.config Index: tests/fullrun/runconfigs.config ================================================================== --- /dev/null +++ tests/fullrun/runconfigs.config @@ -0,0 +1,46 @@ +[default] +SOMEVAR This should show up in SOMEVAR3 + +# target based getting of config file, look at afs.config and nfs.config +[include #{getenv fsname}.config] + +[include #{getenv MT_RUN_AREA_HOME}/common_runconfigs.config] + +# #{system echo 'VACKYVAR #{shell pwd}' > $MT_RUN_AREA_HOME/configs/$USER.config} +[include ./configs/#{getenv USER}.config] + + +WACKYVAR0 #{get ubuntu/nfs/none CURRENT} +WACKYVAR1 #{scheme (args:get-arg "-target")} + +[default/ubuntu/nfs] +WACKYVAR2 #{runconfigs-get CURRENT} + +[ubuntu/nfs/none] +WACKYVAR2 #{runconfigs-get CURRENT} +SOMEVAR2 This should show up in SOMEVAR4 if the target is ubuntu/nfs/none +VARWITHDOLLARSIGNS The$USER/signs/should/be/replaced/with/variable + +[default] +SOMEVAR3 #{rget SOMEVAR} +SOMEVAR4 #{rget SOMEVAR2} +SOMEVAR5 #{runconfigs-get SOMEVAR2} + +[ubuntu/nfs/all_toplevel] +TESTPATT all_toplevel + +[this/a/test] +BLAHFOO 123 + +[ubuntu/nfs/sleep1] +SLEEPRUNNER 1 + +[ubuntu/nfs/sleep10] +SLEEPRUNNER 10 + +[ubuntu/nfs/sleep60] +SLEEPRUNNER 60 + +[ubuntu/nfs/sleep240] +SLEEPRUNNER 240 + ADDED tests/fullrun/tests/all_toplevel/calcresults.logpro Index: 
tests/fullrun/tests/all_toplevel/calcresults.logpro ================================================================== --- /dev/null +++ tests/fullrun/tests/all_toplevel/calcresults.logpro @@ -0,0 +1,140 @@ +;; (c) 2006,2007,2008,2009 Matthew Welland matt@kiatoa.com +;; +;; License GPL. + +(define logbody "LogFileBody") + +(define pass-specs '( ;; testname num-expected max-runtime + ("exit_0" 1 20) + ("ezlog_fail_then_pass" 1 20) + ("ezlog_pass" 1 20) + ("ez_pass" 1 20) + ("lineitem_pass" 1 20) + ("priority_1" 1 20) + ("priority_10" 1 20) + ("priority_10_waiton_1" 1 20) + ("priority_3" 1 20) + ("priority_4" 1 20) + ;; ("priority_5" 1 20) + ("priority_6" 1 20) +;; ("priority_7" 1 20) + ("priority_8" 1 20) + ("priority_9" 1 20) + ("runfirst" 7 20) + ("singletest" 1 20) + ("singletest2" 1 20) + ("special" 1 20) + ("sqlitespeed" 10 20) + ("test1" 1 20) + ("test2" 6 20) + ("test_mt_vars" 6 20) + )) + +(define fail-specs '( ;; testname num-expected max-runtime + ("exit_1" 1 20) + ("ez_exit2_fail" 1 20) + ("ez_fail" 1 20) + ("ez_fail_quick" 1 20) + ("ezlog_fail" 1 20) + ("lineitem_fail" 1 20) + ("logpro_required_fail" 1 20) + ("manual_example" 1 20) + ("neverrun" 1 20))) + +(define warn-specs '(("ezlog_warn" 1 20))) + +(define nost-specs '(("wait_no_items1" 1 20) + ("wait_no_items2" 1 20) + ("wait_no_items3" 1 20) + ("wait_no_items4" 1 20) + ;; ("no_items" 1 20) + )) + +(define (check-one-test estate estatus testname count runtime) + (let* ((rxe (regexp (conc "^\\s+Test: " testname "(\\(.*|\\s+)\\s+State: " estate "\\s+Status: " estatus "\\s+Runtime:\\s+(\\d+)s"))) + (msg1 (conc testname " expecting count of " count)) + (msg2 (conc testname " expecting runtime less than " runtime))) + (expect:required in logbody = count msg1 rxe) + ;;(expect:value in logbody count < msg2 rxe) + )) + +;; Special cases +;; +(expect:ignore in logbody >= 0 "db_sync test might not have run" #/Test: db_sync/) +(expect:ignore in logbody >= 0 "all_toplevel may not yet be done" #/Test: 
all_toplevel/) +(expect:error in logbody = 0 "tests left in RUNNING state" #/State: RUNNING/) +(expect:required in logbody = 1 "priority_2 is KILLED" #/Test: priority_2\s+State: KILLED\s+Status: KILLED/) +(expect:required in logbody = 1 "priority_5 is either PASS or SKIP" #/Test: priority_5\s+State: COMPLETED\s+Status: (SKIP|PASS)/) +(expect:required in logbody = 1 "priority_7 is either PASS or SKIP" #/Test: priority_7\s+State: COMPLETED\s+Status: (SKIP|PASS)/) +(expect:required in logbody = 1 "testxz has 1 NOT_STARTED test" #/Test: testxz\s+State: NOT_STARTED/) +(expect:required in logbody = 1 "no items" #/Test: no_items\s+State: NOT_STARTED\s+Status: ZERO_ITEMS/) +(expect:warning in logbody = 1 "dynamic waiton" #/Test: dynamic_waiton/) +(expect:required in logbody = 29 "blocktestxz has 29 tests" #/Test: blocktestxz/) + +;; General cases +;; +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "PASS" testdat)) + pass-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "FAIL" testdat)) + fail-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "WARN" testdat)) + warn-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "NOT_STARTED" "PREQ_DISCARDED" testdat)) + nost-specs) + +;; Catch all. 
+;; +(expect:error in logbody = 0 "Tests not accounted for" #/Test: /) + + +;; ;; define your hooks +;; (hook:first-error "echo \"Error hook activated: #{escaped errmsg}\"") +;; (hook:first-warning "echo \"Got warning: #{escaped warnmsg}\"") +;; (hook:value "echo \"Value hook activated: expected=#{expected}, measured=#{measured}, tolerance=#{tolerance}, message=#{message}\"") +;; +;; ;; first ensure your run at least started +;; ;; +;; (trigger "Init" #/This is a header/) +;; (trigger "InitEnd" #/^\s*$/) +;; (section "Init" "Init" "InitEnd") +;; +;; (trigger "Body" #/^.*$/) ;; anything starts the body +;; ;; (trigger "EndBody" #/This had better never match/) +;; +;; (section "Body" "Body" "EndBody") +;; +;; (trigger "Blah2" #/^begin Blah2/) +;; (trigger "Blah2End" #/^end Blah2/) +;; (section "Blah2" "Blah2" "Blah2End") +;; +;; (expect:required in "Init" = 1 "Header" #/This is a header/) +;; (expect:required in "LogFileBody" > 0 "Something required but not found" #/This is required but not found/) +;; (expect:value in "LogFileBody" 1.9 0.1 "Output voltage" #/Measured voltage output:\s*([\d\.\+\-e]+)v/) +;; (expect:value in "LogFileBody" 0.5 0.1 "Output current" #/Measured output current:\s*([\d\.\+\-e]+)mA/) +;; (expect:value in "LogFileBody" 110e9 2e9 "A big number (first)" #/Freq:\s*([\d\.\+\-e]+)\s+Hz/) +;; (expect:value in "LogFileBody" 110e9 1e9 "A big number (second), hook not called" #/Freq:\s*([\d\.\+\-e]+)Hz/) +;; (expect:value in "LogFileBody" 110e9 1e9 "A big number (never activated)" #/Freq:\s*([\d\.\+\-e]+)zH/) +;; +;; ;; Using match number +;; (expect:value in "LogFileBody" 1.9 0.1 "Time Voltage" #/out: (\d+)\s+(\d+)/ match: 2) +;; +;; ;; Comparison instead of tolerance +;; (expect:value in "LogFileBody" 1.9 > "Time voltage" #/out: (\d+)\s+(\d+)/ match: 2) +;; +;; (expect:ignore in "Blah2" < 99 "FALSE ERROR" #/ERROR/) +;; (expect:ignore in "Body" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +;; (expect:warning in "Body" = 0 "Any warning" 
#/WARNING/) +;; (expect:error in "Body" = 0 "ERROR BLAH" (list #/ERROR/ #/error/)) ;; but disallow any other errors +;; +;; ;(expect in "Init" < 1 "Junk" #/This is bogus/) ADDED tests/fullrun/tests/all_toplevel/testconfig Index: tests/fullrun/tests/all_toplevel/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/all_toplevel/testconfig @@ -0,0 +1,8 @@ +[ezsteps] +calcresults megatest -list-runs $MT_RUNNAME -target $MT_TARGET + +[requirements] +waiton #{getenv ALL_TOPLEVEL_TESTS} + +# This is a "toplevel" test, it does not require waitons to be non-FAIL to run +mode toplevel ADDED tests/fullrun/tests/blocktestxz/main.sh Index: tests/fullrun/tests/blocktestxz/main.sh ================================================================== --- /dev/null +++ tests/fullrun/tests/blocktestxz/main.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +$MT_MEGATEST -test-status :state $THESTATE :status $THESTATUS -setlog "nada.html" + +# By exiting with non-zero we tell Megatest to preserve the state and status +exit 1 ADDED tests/fullrun/tests/blocktestxz/testconfig Index: tests/fullrun/tests/blocktestxz/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/blocktestxz/testconfig @@ -0,0 +1,22 @@ +[setup] +runscript main.sh + +[items] +THESTATE UNKNOWN INCOMPLETE KILLED KILLREQ STUCK BOZZLEBLONKED STUCK/DEAD +THESTATUS PASS FAIL STUCK/DEAD SKIP + +[requirements] +waiton sqlitespeed + +[test_meta] +author matt +owner bob +description This test will fail causing the dependent test "testxz"\ + to never run. This triggers the code that must determine\ + that a test will never be run and thus remove it from\ + the queue of tests to be run. 
+ +tags first,single +reviewed 1/1/1965 + +jobgroup blockz ADDED tests/fullrun/tests/db_sync/calcresults.logpro Index: tests/fullrun/tests/db_sync/calcresults.logpro ================================================================== --- /dev/null +++ tests/fullrun/tests/db_sync/calcresults.logpro @@ -0,0 +1,44 @@ +;; (c) 2006,2007,2008,2009 Matthew Welland matt@kiatoa.com +;; +;; License GPL. + +;; ;; define your hooks +;; (hook:first-error "echo \"Error hook activated: #{escaped errmsg}\"") +;; (hook:first-warning "echo \"Got warning: #{escaped warnmsg}\"") +;; (hook:value "echo \"Value hook activated: expected=#{expected}, measured=#{measured}, tolerance=#{tolerance}, message=#{message}\"") +;; +;; ;; first ensure your run at least started +;; ;; +;; (trigger "Init" #/This is a header/) +;; (trigger "InitEnd" #/^\s*$/) +;; (section "Init" "Init" "InitEnd") +;; +;; (trigger "Body" #/^.*$/) ;; anything starts the body +;; ;; (trigger "EndBody" #/This had better never match/) +;; +;; (section "Body" "Body" "EndBody") +;; +;; (trigger "Blah2" #/^begin Blah2/) +;; (trigger "Blah2End" #/^end Blah2/) +;; (section "Blah2" "Blah2" "Blah2End") +;; +;; (expect:required in "Init" = 1 "Header" #/This is a header/) +;; (expect:required in "LogFileBody" > 0 "Something required but not found" #/This is required but not found/) +;; (expect:value in "LogFileBody" 1.9 0.1 "Output voltage" #/Measured voltage output:\s*([\d\.\+\-e]+)v/) +;; (expect:value in "LogFileBody" 0.5 0.1 "Output current" #/Measured output current:\s*([\d\.\+\-e]+)mA/) +;; (expect:value in "LogFileBody" 110e9 2e9 "A big number (first)" #/Freq:\s*([\d\.\+\-e]+)\s+Hz/) +;; (expect:value in "LogFileBody" 110e9 1e9 "A big number (second), hook not called" #/Freq:\s*([\d\.\+\-e]+)Hz/) +;; (expect:value in "LogFileBody" 110e9 1e9 "A big number (never activated)" #/Freq:\s*([\d\.\+\-e]+)zH/) +;; +;; ;; Using match number +;; (expect:value in "LogFileBody" 1.9 0.1 "Time Voltage" #/out: (\d+)\s+(\d+)/ match: 2) +;; +;; 
;; Comparison instead of tolerance +;; (expect:value in "LogFileBody" 1.9 > "Time voltage" #/out: (\d+)\s+(\d+)/ match: 2) +;; +;; (expect:ignore in "Blah2" < 99 "FALSE ERROR" #/ERROR/) +;; (expect:ignore in "Body" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +;; (expect:warning in "Body" = 0 "Any warning" #/WARNING/) +;; (expect:error in "Body" = 0 "ERROR BLAH" (list #/ERROR/ #/error/)) ;; but disallow any other errors +;; +;; ;(expect in "Init" < 1 "Junk" #/This is bogus/) ADDED tests/fullrun/tests/db_sync/dbdelta.scm Index: tests/fullrun/tests/db_sync/dbdelta.scm ================================================================== --- /dev/null +++ tests/fullrun/tests/db_sync/dbdelta.scm @@ -0,0 +1,44 @@ + +(use sql-de-lite) + +(define megatest.db (conc (get-environment-variable "MT_RUN_AREA_HOME") "/megatest.db")) + +(define runsquery "sysname||'/'||fsname||'/'||datapath||'/'||runname||'/'||runs.state||'-'||runs.status") +(define bigquery + (conc + "SELECT " runsquery "||testname||'/'||item_path||'-'||'-'||tests.state||'-'||tests.status||'-'||runs.id AS outdat FROM runs INNER JOIN tests ON runs.id=tests.run_id WHERE runs.state NOT LIKE 'deleted' AND tests.state NOT LIKE 'deleted' AND testname NOT LIKE 'db_sync' ORDER BY outdat ASC ;")) + +(print "Creating file for legacy db") +(with-output-to-file "legacy-db-dump" + (lambda () + (let ((db (open-database megatest.db))) + (query (for-each-row + (lambda (res) + (print res))) + (sql db bigquery)) + (close-database db)))) + +(define main.db (conc (get-environment-variable "MT_DBDIR") "/main.db")) + +(print "Creating file for current db") +(with-output-to-file "current-db-dump" + (lambda () + (let* ((mdb (open-database main.db)) + (run-ids (query fetch-column (sql mdb (conc "select id," runsquery " AS rq from runs ORDER BY rq ASC;")))) + (dbdir (get-environment-variable "MT_DBDIR"))) + (for-each + (lambda (rid) + (let ((dbfile (conc dbdir "/" rid ".db"))) + (if (file-exists? 
dbfile) + (begin + (exec (sql mdb (conc "ATTACH DATABASE '" dbfile "' AS testsdb;"))) + (query (for-each-row + (lambda (res) + (print res))) + (sql mdb bigquery)) + (exec (sql mdb "DETACH DATABASE testsdb;"))) + (print "ERROR: No file " dbfile " found")))) + run-ids) + (close-database mdb)))) + + ADDED tests/fullrun/tests/db_sync/getdbdir.scm Index: tests/fullrun/tests/db_sync/getdbdir.scm ================================================================== --- /dev/null +++ tests/fullrun/tests/db_sync/getdbdir.scm @@ -0,0 +1,1 @@ +(db:dbfile-path #f) ADDED tests/fullrun/tests/db_sync/showdiff.logpro Index: tests/fullrun/tests/db_sync/showdiff.logpro ================================================================== --- /dev/null +++ tests/fullrun/tests/db_sync/showdiff.logpro @@ -0,0 +1,46 @@ +;; (c) 2006,2007,2008,2009 Matthew Welland matt@kiatoa.com +;; +;; License GPL. + +;; ;; define your hooks +;; (hook:first-error "echo \"Error hook activated: #{escaped errmsg}\"") +;; (hook:first-warning "echo \"Got warning: #{escaped warnmsg}\"") +;; (hook:value "echo \"Value hook activated: expected=#{expected}, measured=#{measured}, tolerance=#{tolerance}, message=#{message}\"") +;; +;; ;; first ensure your run at least started +;; ;; +;; (trigger "Init" #/This is a header/) +;; (trigger "InitEnd" #/^\s*$/) +;; (section "Init" "Init" "InitEnd") +;; +;; (trigger "Body" #/^.*$/) ;; anything starts the body +;; ;; (trigger "EndBody" #/This had better never match/) +;; +;; (section "Body" "Body" "EndBody") +;; +;; (trigger "Blah2" #/^begin Blah2/) +;; (trigger "Blah2End" #/^end Blah2/) +;; (section "Blah2" "Blah2" "Blah2End") +;; +;; (expect:required in "Init" = 1 "Header" #/This is a header/) +;; (expect:required in "LogFileBody" > 0 "Something required but not found" #/This is required but not found/) +;; (expect:value in "LogFileBody" 1.9 0.1 "Output voltage" #/Measured voltage output:\s*([\d\.\+\-e]+)v/) +;; (expect:value in "LogFileBody" 0.5 0.1 "Output current" 
#/Measured output current:\s*([\d\.\+\-e]+)mA/) +;; (expect:value in "LogFileBody" 110e9 2e9 "A big number (first)" #/Freq:\s*([\d\.\+\-e]+)\s+Hz/) +;; (expect:value in "LogFileBody" 110e9 1e9 "A big number (second), hook not called" #/Freq:\s*([\d\.\+\-e]+)Hz/) +;; (expect:value in "LogFileBody" 110e9 1e9 "A big number (never activated)" #/Freq:\s*([\d\.\+\-e]+)zH/) +;; +;; ;; Using match number +;; (expect:value in "LogFileBody" 1.9 0.1 "Time Voltage" #/out: (\d+)\s+(\d+)/ match: 2) +;; +;; ;; Comparison instead of tolerance +;; (expect:value in "LogFileBody" 1.9 > "Time voltage" #/out: (\d+)\s+(\d+)/ match: 2) +;; +;; (expect:ignore in "Blah2" < 99 "FALSE ERROR" #/ERROR/) +;; (expect:ignore in "Body" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +;; (expect:warning in "Body" = 0 "Any warning" #/WARNING/) +;; (expect:error in "Body" = 0 "ERROR BLAH" (list #/ERROR/ #/error/)) ;; but disallow any other errors +;; +;; ;(expect in "Init" < 1 "Junk" #/This is bogus/) + +(expect:error in "LogFileBody" = 0 "Any diff is failure" #/.+/) ADDED tests/fullrun/tests/db_sync/testconfig Index: tests/fullrun/tests/db_sync/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/db_sync/testconfig @@ -0,0 +1,13 @@ +[pre-launch-env-vars] + +MT_DBDIR #{scheme (db:dbfile-path #f)} + +[ezsteps] +calcresults csi -b dbdelta.scm +showdiff diff current-db-dump legacy-db-dump + +[requirements] +waiton #{getenv ALL_TOPLEVEL_TESTS} + +# This is a "toplevel" test, it does not require waitons to be non-FAIL to run +mode toplevel ADDED tests/fullrun/tests/dynamic_waiton/testconfig Index: tests/fullrun/tests/dynamic_waiton/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/dynamic_waiton/testconfig @@ -0,0 +1,21 @@ +[ezsteps] +listfiles ls + +[requirements] +waiton #{scheme (string-intersperse \ + (tests:filter-test-names \ + (hash-table-keys (tests:get-all)) \ 
+ (or (args:get-arg "-runtests") \ + (args:get-arg "-testpatt") "")) " ")} + +[items] + +[test_meta] +author matt +owner bob +description This test runs a single ezstep which is expected to pass \ +but there is an items definition with no items. This should evoke an \ +error. + +tags first,single +reviewed 09/10/2011, by Matt ADDED tests/fullrun/tests/exit_0/main.sh Index: tests/fullrun/tests/exit_0/main.sh ================================================================== --- /dev/null +++ tests/fullrun/tests/exit_0/main.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +# a bunch of steps in 2 second increments +for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17;do + $MT_MEGATEST -step step$i :state start :status running -setlog results$i.html + sleep 2 + $MT_MEGATEST -step step$i :state end :status 0 +done + +exit 0 ADDED tests/fullrun/tests/exit_0/testconfig Index: tests/fullrun/tests/exit_0/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/exit_0/testconfig @@ -0,0 +1,15 @@ +[setup] +runscript main.sh + +[test_meta] +author matt +owner bob +description This test checks that a multi-lineitem test with mix of pass and non-fail rolls up a PASS + +tags first,single +reviewed 09/10/2011, by Matt + +[triggers] +NOT_STARTED/ xterm -e bash -s -- +RUNNING/ xterm -e bash -s -- + ADDED tests/fullrun/tests/exit_1/main.sh Index: tests/fullrun/tests/exit_1/main.sh ================================================================== --- /dev/null +++ tests/fullrun/tests/exit_1/main.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +# a bunch of steps in 2 second increments +for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17;do + $MT_MEGATEST -step step$i :state start :status running -setlog results$i.html + sleep 2 + $MT_MEGATEST -step step$i :state end :status 0 +done + +exit 1 ADDED tests/fullrun/tests/exit_1/testconfig Index: tests/fullrun/tests/exit_1/testconfig ================================================================== --- 
/dev/null +++ tests/fullrun/tests/exit_1/testconfig @@ -0,0 +1,13 @@ +[setup] +runscript main.sh + +[requirements] +priority 9 + +[test_meta] +author matt +owner bob +description This test checks that a multi-lineitem test with mix of pass and non-fail rolls up a PASS + +tags first,single +reviewed 09/10/2011, by Matt ADDED tests/fullrun/tests/ez_exit2_fail/testconfig Index: tests/fullrun/tests/ez_exit2_fail/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/ez_exit2_fail/testconfig @@ -0,0 +1,15 @@ +[setup] + +[ezsteps] +exit2 exit 2 +lookithome ls /home + +[test_meta] +author matt +owner bob +description This test runs two steps; the first exits with\ + code 2 (a fail because not using logpro) and the second\ + is a pass + +tags first,single +reviewed 09/10/2011, by Matt ADDED tests/fullrun/tests/ez_fail/testconfig Index: tests/fullrun/tests/ez_fail/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/ez_fail/testconfig @@ -0,0 +1,19 @@ +[setup] + +[requirements] +priority 10 + +[ezsteps] +lookittmp sleep 5s;ls /tmp +lookithome sleep 2;ls /home +# should fail on next step +lookitnada sleep 3;ls /nada +lookitusr sleep 2;ls /usr + +[test_meta] +author matt +owner bob +description This test runs a single ezstep which is expected to pass, no logpro file. + +tags first,single +reviewed 09/10/2011, by Matt ADDED tests/fullrun/tests/ez_fail_quick/testconfig Index: tests/fullrun/tests/ez_fail_quick/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/ez_fail_quick/testconfig @@ -0,0 +1,18 @@ +[requirements] +priority 10 + +[ezsteps] +# should fail on next step +lookitnada ls /nada + +[triggers] +# run like this: cmd test-id test-rundir trigger +COMPLETED/FAIL xterm;echo + +[test_meta] +author matt +owner bob +description This test runs a single ezstep which fails immediately. 
+ +tags first,single +reviewed 09/10/2011, by Matt ADDED tests/fullrun/tests/ez_pass/testconfig Index: tests/fullrun/tests/ez_pass/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/ez_pass/testconfig @@ -0,0 +1,15 @@ +[setup] + +[ezsteps] +lookittmp sleep 1;ls /tmp +lookithome sleep 1;ls /home +isrunname1 sleep 1;echo $MYRUNNAME1 | grep -v '#f' +isrunname2 sleep 1;echo $MYRUNNAME2 | grep -v '#f' + +[test_meta] +author matt +owner bob +description This test runs a single ezstep which is expected to pass, no logpro file. + +tags first,single +reviewed 09/10/2011, by Matt ADDED tests/fullrun/tests/ez_pass_linked Index: tests/fullrun/tests/ez_pass_linked ================================================================== --- /dev/null +++ tests/fullrun/tests/ez_pass_linked @@ -0,0 +1,1 @@ +../ez_pass_linked/ ADDED tests/fullrun/tests/ezlog_fail/example.logpro Index: tests/fullrun/tests/ezlog_fail/example.logpro ================================================================== --- /dev/null +++ tests/fullrun/tests/ezlog_fail/example.logpro @@ -0,0 +1,44 @@ +;; (c) 2006,2007,2008,2009 Matthew Welland matt@kiatoa.com +;; +;; License GPL. 
+ +;; define your hooks +(hook:first-error "echo \"Error hook activated: #{escaped errmsg}\"") +(hook:first-warning "echo \"Got warning: #{escaped warnmsg}\"") +(hook:value "echo \"Value hook activated: expected=#{expected}, measured=#{measured}, tolerance=#{tolerance}, message=#{message}\"") + +;; first ensure your run at least started +;; +(trigger "Init" #/This is a header/) +(trigger "InitEnd" #/^\s*$/) +(section "Init" "Init" "InitEnd") + +(trigger "Body" #/^.*$/) ;; anything starts the body +;; (trigger "EndBody" #/This had better never match/) + +(section "Body" "Body" "EndBody") + +(trigger "Blah2" #/^begin Blah2/) +(trigger "Blah2End" #/^end Blah2/) +(section "Blah2" "Blah2" "Blah2End") + +(expect:required in "Init" = 1 "Header" #/This is a header/) +(expect:required in "LogFileBody" > 0 "Something required but not found" #/This is required but not found/) +(expect:value in "LogFileBody" 1.9 0.1 "Output voltage" #/Measured voltage output:\s*([\d\.\+\-e]+)v/) +(expect:value in "LogFileBody" 0.5 0.1 "Output current" #/Measured output current:\s*([\d\.\+\-e]+)mA/) +(expect:value in "LogFileBody" 110e9 2e9 "A big number (first)" #/Freq:\s*([\d\.\+\-e]+)\s+Hz/) +(expect:value in "LogFileBody" 110e9 1e9 "A big number (second), hook not called" #/Freq:\s*([\d\.\+\-e]+)Hz/) +(expect:value in "LogFileBody" 110e9 1e9 "A big number (never activated)" #/Freq:\s*([\d\.\+\-e]+)zH/) + +;; Using match number +(expect:value in "LogFileBody" 1.9 0.1 "Time Voltage" #/out: (\d+)\s+(\d+)/ match: 2) + +;; Comparison instead of tolerance +(expect:value in "LogFileBody" 1.9 > "Time voltage" #/out: (\d+)\s+(\d+)/ match: 2) + +(expect:ignore in "Blah2" < 99 "FALSE ERROR" #/ERROR/) +(expect:ignore in "Body" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "Body" = 0 "Any warning" #/WARNING/) +(expect:error in "Body" = 0 "ERROR BLAH" (list #/ERROR/ #/error/)) ;; but disallow any other errors + +;(expect in "Init" < 1 "Junk" #/This is bogus/) ADDED 
tests/fullrun/tests/ezlog_fail/lookithome.logpro Index: tests/fullrun/tests/ezlog_fail/lookithome.logpro ================================================================== --- /dev/null +++ tests/fullrun/tests/ezlog_fail/lookithome.logpro @@ -0,0 +1,10 @@ +;; (c) 2006,2007,2008,2009 Matthew Welland matt@kiatoa.com +;; +;; License GPL. + + +(expect:required in "LogFileBody" > 0 "Must be some files in the dir" #/.*/) + +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/WARNING/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/)) ;; but disallow any other errors ADDED tests/fullrun/tests/ezlog_fail/lookittmp.logpro Index: tests/fullrun/tests/ezlog_fail/lookittmp.logpro ================================================================== --- /dev/null +++ tests/fullrun/tests/ezlog_fail/lookittmp.logpro @@ -0,0 +1,6 @@ +;; (c) 2006,2007,2008,2009 Matthew Welland matt@kiatoa.com +;; +;; License GPL. 
+ +(expect:warning in "LogFileBody" = 0 "Any warning" #/WARNING/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/.*/)) ;; force an error ADDED tests/fullrun/tests/ezlog_fail/testconfig Index: tests/fullrun/tests/ezlog_fail/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/ezlog_fail/testconfig @@ -0,0 +1,28 @@ +[setup] + +[ezsteps] +lookittmp ls /tmp +lookithome ls /home + +# logpro_file input_glob +# matching file(s) will be diff'd with previous run and logpro applied +# if PASS or WARN result from logpro then WAIVER state is set +# +[waivers] +waiver_1 logpro lookittmp.log + +[waiver_rules] + +# This builtin rule is the default if there is no .logpro file +# diff diff %file1% %file2% + +# This builtin rule is applied if a .logpro file exists +# logpro diff %file1% %file2% | logpro %waivername%.logpro %waivername%.html + +[test_meta] +author matt +owner bob +description This test runs two ezstep, the first of which is expected to fail using a simple logpro file. + +tags first,single +reviewed 09/10/2011, by Matt ADDED tests/fullrun/tests/ezlog_fail/waiver_1.logpro Index: tests/fullrun/tests/ezlog_fail/waiver_1.logpro ================================================================== --- /dev/null +++ tests/fullrun/tests/ezlog_fail/waiver_1.logpro @@ -0,0 +1,1 @@ +(expect:warning in "Body" = 0 "Any warning" #/WARNING/) ADDED tests/fullrun/tests/ezlog_fail_then_pass/firststep.logpro Index: tests/fullrun/tests/ezlog_fail_then_pass/firststep.logpro ================================================================== --- /dev/null +++ tests/fullrun/tests/ezlog_fail_then_pass/firststep.logpro @@ -0,0 +1,10 @@ +;; (c) 2006,2007,2008,2009 Matthew Welland matt@kiatoa.com +;; +;; License GPL. 
+ + +(expect:required in "LogFileBody" > 0 "Must be some files in the dir" #/.*/) + +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/WARNING/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/)) ;; but disallow any other errors ADDED tests/fullrun/tests/ezlog_fail_then_pass/main.sh Index: tests/fullrun/tests/ezlog_fail_then_pass/main.sh ================================================================== --- /dev/null +++ tests/fullrun/tests/ezlog_fail_then_pass/main.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +megatest -step yepstep :state start :status n/a +ls /tmp +megatest -step yepstep :state end :status $? + +megatest -load-test-data << EOF +OPER,du, 1.2, 1.2, < , GBytes ,System didn't use too much space +EOF + +# a bunch of steps in 2 second increments +for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17;do + $MT_MEGATEST -step step$i :state start :status running -setlog results$i.html + sleep 2 + $MT_MEGATEST -step step$i :state end :status 0 +done + +megatest -test-status :state COMPLETED :status AUTO ADDED tests/fullrun/tests/ezlog_fail_then_pass/testconfig Index: tests/fullrun/tests/ezlog_fail_then_pass/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/ezlog_fail_then_pass/testconfig @@ -0,0 +1,13 @@ +[setup] + +[ezsteps] +firststep main.sh + +[test_meta] +author matt +owner bob +description This test runs a single ezstep which is logpro clean\ + but fails based on -test-data loaded. + +tags first,single +reviewed 09/10/2011, by Matt ADDED tests/fullrun/tests/ezlog_pass/example.logpro Index: tests/fullrun/tests/ezlog_pass/example.logpro ================================================================== --- /dev/null +++ tests/fullrun/tests/ezlog_pass/example.logpro @@ -0,0 +1,44 @@ +;; (c) 2006,2007,2008,2009 Matthew Welland matt@kiatoa.com +;; +;; License GPL. 
+ +;; define your hooks +(hook:first-error "echo \"Error hook activated: #{escaped errmsg}\"") +(hook:first-warning "echo \"Got warning: #{escaped warnmsg}\"") +(hook:value "echo \"Value hook activated: expected=#{expected}, measured=#{measured}, tolerance=#{tolerance}, message=#{message}\"") + +;; first ensure your run at least started +;; +(trigger "Init" #/This is a header/) +(trigger "InitEnd" #/^\s*$/) +(section "Init" "Init" "InitEnd") + +(trigger "Body" #/^.*$/) ;; anything starts the body +;; (trigger "EndBody" #/This had better never match/) + +(section "Body" "Body" "EndBody") + +(trigger "Blah2" #/^begin Blah2/) +(trigger "Blah2End" #/^end Blah2/) +(section "Blah2" "Blah2" "Blah2End") + +(expect:required in "Init" = 1 "Header" #/This is a header/) +(expect:required in "LogFileBody" > 0 "Something required but not found" #/This is required but not found/) +(expect:value in "LogFileBody" 1.9 0.1 "Output voltage" #/Measured voltage output:\s*([\d\.\+\-e]+)v/) +(expect:value in "LogFileBody" 0.5 0.1 "Output current" #/Measured output current:\s*([\d\.\+\-e]+)mA/) +(expect:value in "LogFileBody" 110e9 2e9 "A big number (first)" #/Freq:\s*([\d\.\+\-e]+)\s+Hz/) +(expect:value in "LogFileBody" 110e9 1e9 "A big number (second), hook not called" #/Freq:\s*([\d\.\+\-e]+)Hz/) +(expect:value in "LogFileBody" 110e9 1e9 "A big number (never activated)" #/Freq:\s*([\d\.\+\-e]+)zH/) + +;; Using match number +(expect:value in "LogFileBody" 1.9 0.1 "Time Voltage" #/out: (\d+)\s+(\d+)/ match: 2) + +;; Comparison instead of tolerance +(expect:value in "LogFileBody" 1.9 > "Time voltage" #/out: (\d+)\s+(\d+)/ match: 2) + +(expect:ignore in "Blah2" < 99 "FALSE ERROR" #/ERROR/) +(expect:ignore in "Body" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "Body" = 0 "Any warning" #/WARNING/) +(expect:error in "Body" = 0 "ERROR BLAH" (list #/ERROR/ #/error/)) ;; but disallow any other errors + +;(expect in "Init" < 1 "Junk" #/This is bogus/) ADDED 
tests/fullrun/tests/ezlog_pass/lookittmp.logpro Index: tests/fullrun/tests/ezlog_pass/lookittmp.logpro ================================================================== --- /dev/null +++ tests/fullrun/tests/ezlog_pass/lookittmp.logpro @@ -0,0 +1,10 @@ +;; (c) 2006,2007,2008,2009 Matthew Welland matt@kiatoa.com +;; +;; License GPL. + + +(expect:required in "LogFileBody" > 0 "Must be some files in the dir" #/.*/) + +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/WARNING/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/)) ;; but disallow any other errors ADDED tests/fullrun/tests/ezlog_pass/testconfig Index: tests/fullrun/tests/ezlog_pass/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/ezlog_pass/testconfig @@ -0,0 +1,13 @@ +[setup] + +[ezsteps] +lookittmp ls /tmp +lookithome ls /home + +[test_meta] +author matt +owner bob +description This test runs a single ezstep which is expected to pass using a simple logpro file. + +tags first,single +reviewed 09/10/2011, by Matt ADDED tests/fullrun/tests/ezlog_warn/lookithome.logpro Index: tests/fullrun/tests/ezlog_warn/lookithome.logpro ================================================================== --- /dev/null +++ tests/fullrun/tests/ezlog_warn/lookithome.logpro @@ -0,0 +1,11 @@ +;; (c) 2006,2007,2008,2009 Matthew Welland matt@kiatoa.com +;; +;; License GPL. 
+ + +;; Force a warn for this test +(expect:required in "LogFileBody" > 0 "Must be some files in the dir" #/.*/) + +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/)) ;; but disallow any other errors ADDED tests/fullrun/tests/ezlog_warn/lookittmp.logpro Index: tests/fullrun/tests/ezlog_warn/lookittmp.logpro ================================================================== --- /dev/null +++ tests/fullrun/tests/ezlog_warn/lookittmp.logpro @@ -0,0 +1,12 @@ +;; (c) 2006,2007,2008,2009 Matthew Welland matt@kiatoa.com +;; +;; License GPL. + + +(expect:warning in "LogFileBody" = 0 "Any warning" #/.*/) +;; Can't have a required since it will mask the warns! Could make the warn non-overlapping with the +;; required I suppose... +;; (expect:required in "LogFileBody" > 0 "Must be some files in the dir" #/.*/) + +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/)) ;; but disallow any other errors ADDED tests/fullrun/tests/ezlog_warn/testconfig Index: tests/fullrun/tests/ezlog_warn/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/ezlog_warn/testconfig @@ -0,0 +1,13 @@ +[setup] + +[ezsteps] +lookittmp ls /tmp +lookithome ls $HOME + +[test_meta] +author matt +owner bob +description This test runs two ezsteps the first of which is expected to fail using a simple logpro file. 
+ +tags first,single +reviewed 09/10/2011, by Matt ADDED tests/fullrun/tests/lineitem_fail/main.sh Index: tests/fullrun/tests/lineitem_fail/main.sh ================================================================== --- /dev/null +++ tests/fullrun/tests/lineitem_fail/main.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +$MT_MEGATEST -load-test-data << EOF +foo,bar, 1.2, 1.9, > +foo,rab, 1.0e9, 10e9, 1e9 +foo,bla, 1.2, 1.9, < +foo,bal, 1.2, 1.2, < , ,Check for overload +foo,alb, 1.2, 1.2, <= , Amps,This is the high power circuit test +foo,abl, 1.2, 1.3, 0.1 +foo,bra, 1.2, pass, silly stuff +faz,bar, 10, 8mA, , ,"this is a comment" +EOF + +# a bunch of steps in 2 second increments +for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17;do + $MT_MEGATEST -step step$i :state start :status running -setlog results$i.html + sleep 2 + $MT_MEGATEST -step step$i :state end :status 0 +done + +# Needed to force rolling up the results and set the test to COMPLETED +$MT_MEGATEST -test-status :state COMPLETED :status AUTO + ADDED tests/fullrun/tests/lineitem_fail/testconfig Index: tests/fullrun/tests/lineitem_fail/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/lineitem_fail/testconfig @@ -0,0 +1,10 @@ +[setup] +runscript main.sh + +[test_meta] +author matt +owner bob +description This test checks that a multi-lineitem test with mix of pass and non-fail rolls up a PASS + +tags first,single +reviewed 09/10/2011, by Matt ADDED tests/fullrun/tests/lineitem_pass/main.sh Index: tests/fullrun/tests/lineitem_pass/main.sh ================================================================== --- /dev/null +++ tests/fullrun/tests/lineitem_pass/main.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +# category variable value expected tol/comp units comment +$MT_MEGATEST -load-test-data << EOF +foo, bar, 1.9, 1.8, > +foo, rab, 1.0e9, 2e9, 1e9 +foo, bla, 1.2, 1.9, < +foo, bal, -1.1, 0, < , , Check for overload +foo, alb, 1.2, 1.2, <= , Amps, This is the high 
power circuit test +foo, abl, 1.2, 1.3, 0.1 +foo, bra, 1.2, pass, silly stuff +faz, bar, 10, 8mA, , ,"this is a comment" +EOF + +# a bunch of steps in 2 second increments +for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17;do + $MT_MEGATEST -step step$i :state start :status running -setlog results$i.html + sleep 2 + $MT_MEGATEST -step step$i :state end :status 0 +done +# Needed to force rolling up the results and set the test to COMPLETED +$MT_MEGATEST -test-status :state COMPLETED :status AUTO ADDED tests/fullrun/tests/lineitem_pass/testconfig Index: tests/fullrun/tests/lineitem_pass/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/lineitem_pass/testconfig @@ -0,0 +1,10 @@ +[setup] +runscript main.sh + +[test_meta] +author matt +owner bob +description This test checks that a multi-lineitem test with mix of pass and non-fail rolls up a PASS + +tags first,single +reviewed 09/10/2011, by Matt ADDED tests/fullrun/tests/logpro_required_fail/testconfig Index: tests/fullrun/tests/logpro_required_fail/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/logpro_required_fail/testconfig @@ -0,0 +1,23 @@ +[setup] + +[ezsteps] +lookittmp ls /tmp + +[test_meta] +author matt +owner bob +description This test runs two ezstep, the first of which is expected to fail using a simple logpro file. + +[logpro] +lookittmp ;; (c) 2006,2007,2008,2009 Matthew Welland matt@kiatoa.com + ;; + ;; License GPL. + ;; + (expect:required in "LogFileBody" > 0 "A file name that should never exist!" 
#/This is a awfully stupid file name that should never be found in the temp dir/) + ;; + ;; (expect:warning in "LogFileBody" = 0 "Any warning" #/WARNING/) + ;; (expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/.*/)) ;; force an error + + +tags logpro +reviewed 09/10/2011, by Matt ADDED tests/fullrun/tests/manual_example/results/results.csv Index: tests/fullrun/tests/manual_example/results/results.csv ================================================================== --- /dev/null +++ tests/fullrun/tests/manual_example/results/results.csv @@ -0,0 +1,1 @@ +category, variable, expected, value, tol, units, comment ADDED tests/fullrun/tests/manual_example/runsetupxterm.sh Index: tests/fullrun/tests/manual_example/runsetupxterm.sh ================================================================== --- /dev/null +++ tests/fullrun/tests/manual_example/runsetupxterm.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +if [[ $TARGETDISPLAY = "" || $TARGETHOST = "" || $TARGETDIR = "" || $TARGETUSER = "" ]]; then + msg="You must set the TARGETDISPLAY, TARGETHOST, TARGETDIR and TARGETUSER variables for manual tests" + echo $msg + megatest -test-status :state COMPLETED :status FAIL -m $msg + exit 1 +else + megatest -step setup :state start :status n/a + xterm -display $TARGETDISPLAY -e ./setupremote.sh + megatest -step setup :state end :status $? 
+fi + ADDED tests/fullrun/tests/manual_example/setupremote.sh Index: tests/fullrun/tests/manual_example/setupremote.sh ================================================================== --- /dev/null +++ tests/fullrun/tests/manual_example/setupremote.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +megatest -step rsyncto :state start :status n/a +echo "First, lets populate your area with necessary files, you may be prompted for your Unix password several times" +rsync -avz $MT_TEST_RUN_DIR/ $TARGETUSER@$TARGETHOST:$TARGETDIR +megatest -step rsyncto :state end :status n/a + +megatest -step runtest :state start :status n/a +remotecmd="cd $TARGETDIR;xterm -display $TARGETDISPLAY" +echo Launching $remotecmd on $TARGETHOST as $TARGETUSER +ssh $TARGETUSER@$TARGETHOST $remotecmd +megatest -step runtest :state end :status $? + +megatest -step gatherdata :state start :status n/a +rsync -avz $TARGETUSER@$TARGETHOST:$TARGETDIR/results/ $MT_TEST_RUN_DIR/results/ +if [[ -e $MT_TEST_RUN_DIR/results/results.csv ]]; then + megatest -load-test-data < $MT_TEST_RUN_DIR/results/results.csv +fi + +if [[ -e $MT_TEST_RUN_DIR/results/final_results.log && $MT_TEST_RUN_DIR/final_results.logpro ]]; then + logpro $MT_TEST_RUN_DIR/final_results.logpro $MT_TEST_RUN_DIR/final_results.html < $MT_TEST_RUN_DIR/results/final_results.log + if [[ $? = 0 ]]; then + finalstatus=PASS + else + finalstatus=FAIL + fi + megatest -test-status :state COMPLETED :status $finalstatus -setlog final_results.html +fi ADDED tests/fullrun/tests/manual_example/testconfig Index: tests/fullrun/tests/manual_example/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/manual_example/testconfig @@ -0,0 +1,13 @@ +[setup] + +[ezsteps] +setup ./runsetupxterm.sh +# launch launchxterm + +[test_meta] +author matt +owner bob +description This test runs a single ezstep which is expected to pass\ + using a simple logpro file. 
+tags first,single +reviewed 09/10/2011, by Matt ADDED tests/fullrun/tests/neverrun/testconfig Index: tests/fullrun/tests/neverrun/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/neverrun/testconfig @@ -0,0 +1,4 @@ +[setup] +runscript idontexist + + ADDED tests/fullrun/tests/no_items/testconfig Index: tests/fullrun/tests/no_items/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/no_items/testconfig @@ -0,0 +1,15 @@ +[ezsteps] +listfiles ls + +[items] +FOO + +[test_meta] +author matt +owner bob +description This test runs a single ezstep which is expected to pass \ +but there is an items definition with no items. This should evoke an \ +error. + +tags first,single +reviewed 09/10/2011, by Matt ADDED tests/fullrun/tests/priority_1/main.sh Index: tests/fullrun/tests/priority_1/main.sh ================================================================== --- /dev/null +++ tests/fullrun/tests/priority_1/main.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +# a bunch of steps in 2 second increments +for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17;do + $MT_MEGATEST -step step$i :state start :status running -setlog results$i.html + sleep 2 + $MT_MEGATEST -step step$i :state end :status 0 +done + +exit 0 ADDED tests/fullrun/tests/priority_1/testconfig Index: tests/fullrun/tests/priority_1/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/priority_1/testconfig @@ -0,0 +1,17 @@ +[setup] +runscript main.sh + +[requirements] +priority 1 + +[test_meta] +jobgroup sqlite3 +author matt +owner bob +description This test checks that a multi-lineitem test with mix of pass and non-fail rolls up a PASS + +tags first,single +reviewed 09/10/2011, by Matt + +[triggers] +COMPLETED/ echo $MT_TEST_NAME > $MT_RUN_AREA_HOME/foo ADDED tests/fullrun/tests/priority_10/main.sh Index: 
tests/fullrun/tests/priority_10/main.sh ================================================================== --- /dev/null +++ tests/fullrun/tests/priority_10/main.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +# a bunch of steps in 2 second increments +for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17;do + $MT_MEGATEST -step step$i :state start :status running -setlog results$i.html + sleep 2 + $MT_MEGATEST -step step$i :state end :status 0 +done + +exit 0 ADDED tests/fullrun/tests/priority_10/testconfig Index: tests/fullrun/tests/priority_10/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/priority_10/testconfig @@ -0,0 +1,13 @@ +[setup] +runscript main.sh + +[requirements] +priority 10 + +[test_meta] +author matt +owner bob +description This test checks that a multi-lineitem test with mix of pass and non-fail rolls up a PASS + +tags first,single +reviewed 09/10/2011, by Matt ADDED tests/fullrun/tests/priority_10_waiton_1/main.sh Index: tests/fullrun/tests/priority_10_waiton_1/main.sh ================================================================== --- /dev/null +++ tests/fullrun/tests/priority_10_waiton_1/main.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +# a bunch of steps in 2 second increments +for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17;do + $MT_MEGATEST -step step$i :state start :status running -setlog results$i.html + sleep 2 + $MT_MEGATEST -step step$i :state end :status 0 +done + +exit 0 ADDED tests/fullrun/tests/priority_10_waiton_1/testconfig Index: tests/fullrun/tests/priority_10_waiton_1/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/priority_10_waiton_1/testconfig @@ -0,0 +1,14 @@ +[setup] +runscript main.sh + +[requirements] +priority 10 +waiton priority_1 + +[test_meta] +author matt +owner bob +description This test checks that a multi-lineitem test with mix of pass and non-fail rolls up a PASS + +tags first,single +reviewed 
09/10/2011, by Matt ADDED tests/fullrun/tests/priority_2/main.sh Index: tests/fullrun/tests/priority_2/main.sh ================================================================== --- /dev/null +++ tests/fullrun/tests/priority_2/main.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +# a bunch of steps in 2 second increments +for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17;do + $MT_MEGATEST -step step$i :state start :status running -setlog results$i.html + sleep 5 + $MT_MEGATEST -step step$i :state end :status 0 +done + +exit 0 ADDED tests/fullrun/tests/priority_2/testconfig Index: tests/fullrun/tests/priority_2/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/priority_2/testconfig @@ -0,0 +1,16 @@ +[setup] +runscript main.sh + +[requirements] +priority 2 +# runtimelim 1d 1h 1m 10s +runtimelim 20s + +[test_meta] +jobgroup sqlite3 +author matt +owner bob +description This test checks that a multi-lineitem test with mix of pass and non-fail rolls up a PASS + +tags first,single +reviewed 09/10/2011, by Matt ADDED tests/fullrun/tests/priority_3/README Index: tests/fullrun/tests/priority_3/README ================================================================== --- /dev/null +++ tests/fullrun/tests/priority_3/README @@ -0,0 +1,3 @@ +This test used to look for envfile.txt but that file should NOT have been there. + +By changing to lookithome.log it is possible that an error is masked. ADDED tests/fullrun/tests/priority_3/main.sh Index: tests/fullrun/tests/priority_3/main.sh ================================================================== --- /dev/null +++ tests/fullrun/tests/priority_3/main.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +# a bunch of steps in 2 second increments +for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17;do + $MT_MEGATEST -step step$i :state start :status running -setlog results$i.html + sleep 2 + echo "
Results$i
Nothing but faux results here!" > results$i.html + $MT_MEGATEST -step step$i :state end :status 0 +done + +# get a previous test +export EZFAILPATH=`$MT_MEGATEST -test-files lookithome.log -target $MT_TARGET :runname $MT_RUNNAME -testpatt ez_fail` +if [[ -e $EZFAILPATH ]];then + echo All good! +else + echo NOT good! + exit 1 +fi + +exit 0 ADDED tests/fullrun/tests/priority_3/testconfig Index: tests/fullrun/tests/priority_3/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/priority_3/testconfig @@ -0,0 +1,15 @@ +[setup] +runscript main.sh + +[requirements] +priority 3 + + +[test_meta] +jobgroup sqlite3 +author matt +owner bob +description This test checks that a multi-lineitem test with mix of pass and non-fail rolls up a PASS + +tags first,single +reviewed 09/10/2011, by Matt ADDED tests/fullrun/tests/priority_4/main.sh Index: tests/fullrun/tests/priority_4/main.sh ================================================================== --- /dev/null +++ tests/fullrun/tests/priority_4/main.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +# a bunch of steps in 2 second increments +for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17;do + $MT_MEGATEST -step step$i :state start :status running -setlog results$i.html + sleep 2 + $MT_MEGATEST -step step$i :state end :status 0 +done + +exit 0 ADDED tests/fullrun/tests/priority_4/testconfig Index: tests/fullrun/tests/priority_4/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/priority_4/testconfig @@ -0,0 +1,14 @@ +[setup] +runscript main.sh + +[requirements] +priority 4 + +[test_meta] +jobgroup sqlite3 +author matt +owner bob +description This test checks that a multi-lineitem test with mix of pass and non-fail rolls up a PASS + +tags first,single +reviewed 09/10/2011, by Matt ADDED tests/fullrun/tests/priority_5/main.sh Index: tests/fullrun/tests/priority_5/main.sh 
================================================================== --- /dev/null +++ tests/fullrun/tests/priority_5/main.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +# a bunch of steps in 2 second increments +for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17;do + $MT_MEGATEST -step step$i :state start :status running -setlog results$i.html + sleep 2 + $MT_MEGATEST -step step$i :state end :status 0 +done + +exit 0 ADDED tests/fullrun/tests/priority_5/testconfig Index: tests/fullrun/tests/priority_5/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/priority_5/testconfig @@ -0,0 +1,16 @@ +[setup] +runscript main.sh + +[requirements] +priority 5 + +[skip] +prevrunning #t + +[test_meta] +author matt +owner bob +description This test checks that a multi-lineitem test with mix of pass and non-fail rolls up a PASS + +tags first,single +reviewed 09/10/2011, by Matt ADDED tests/fullrun/tests/priority_6/main.sh Index: tests/fullrun/tests/priority_6/main.sh ================================================================== --- /dev/null +++ tests/fullrun/tests/priority_6/main.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +# a bunch of steps in 2 second increments +for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17;do + $MT_MEGATEST -step step$i :state start :status running -setlog results$i.html + sleep 2 + $MT_MEGATEST -step step$i :state end :status 0 +done + +exit 0 ADDED tests/fullrun/tests/priority_6/testconfig Index: tests/fullrun/tests/priority_6/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/priority_6/testconfig @@ -0,0 +1,13 @@ +[setup] +runscript main.sh + +[requirements] +priority 6 + +[test_meta] +author matt +owner bob +description This test checks that a multi-lineitem test with mix of pass and non-fail rolls up a PASS + +tags first,single +reviewed 09/10/2011, by Matt ADDED tests/fullrun/tests/priority_7/main.sh Index: 
tests/fullrun/tests/priority_7/main.sh ================================================================== --- /dev/null +++ tests/fullrun/tests/priority_7/main.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +# a bunch of steps in 2 second increments +for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17;do + $MT_MEGATEST -step step$i :state start :status running -setlog results$i.html + sleep 2 + $MT_MEGATEST -step step$i :state end :status 0 +done + +exit 0 ADDED tests/fullrun/tests/priority_7/testconfig Index: tests/fullrun/tests/priority_7/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/priority_7/testconfig @@ -0,0 +1,17 @@ +[setup] +runscript main.sh + +[requirements] +priority 7 + +[skip] +# Run only if this much time since last run of this test +rundelay 10m 5s + +[test_meta] +author matt +owner bob +description This test checks that a multi-lineitem test with mix of pass and non-fail rolls up a PASS + +tags first,single +reviewed 09/10/2011, by Matt ADDED tests/fullrun/tests/priority_8/main.sh Index: tests/fullrun/tests/priority_8/main.sh ================================================================== --- /dev/null +++ tests/fullrun/tests/priority_8/main.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +# a bunch of steps in 2 second increments +for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17;do + echo "start step before $i: `date`" + $MT_MEGATEST -step step$i :state start :status running -setlog results$i.html + echo "start step after $i: `date`" + sleep 2 + echo "end step before $i: `date`" + $MT_MEGATEST -step step$i :state end :status 0 + echo "end step after $i: `date`" +done + +exit 0 ADDED tests/fullrun/tests/priority_8/testconfig Index: tests/fullrun/tests/priority_8/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/priority_8/testconfig @@ -0,0 +1,13 @@ +[setup] +runscript main.sh + +[requirements] +priority 8 + +[test_meta] +author matt 
+owner bob +description This test checks that a multi-lineitem test with mix of pass and non-fail rolls up a PASS + +tags first,single +reviewed 09/10/2011, by Matt ADDED tests/fullrun/tests/priority_9/main.sh Index: tests/fullrun/tests/priority_9/main.sh ================================================================== --- /dev/null +++ tests/fullrun/tests/priority_9/main.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +# a bunch of steps in 2 second increments +for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17;do + $MT_MEGATEST -step step$i :state start :status running -setlog results$i.html + sleep 2 + $MT_MEGATEST -step step$i :state end :status 0 +done + +exit 0 ADDED tests/fullrun/tests/priority_9/testconfig Index: tests/fullrun/tests/priority_9/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/priority_9/testconfig @@ -0,0 +1,13 @@ +[setup] +runscript main.sh + +[requirements] +priority 9 + +[test_meta] +author matt +owner bob +description This test checks that a multi-lineitem test with mix of pass and non-fail rolls up a PASS + +tags first,single +reviewed 09/10/2011, by Matt ADDED tests/fullrun/tests/runfirst/main.sh Index: tests/fullrun/tests/runfirst/main.sh ================================================================== --- /dev/null +++ tests/fullrun/tests/runfirst/main.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +# (export DISPLAY=:0;xterm) + +# megatest -step wasting_time :state start :status n/a -m "This is a test step comment" +# sleep 20 +# megatest -step wasting_time :state end :status $? + +touch ../I_was_here +mkdir -p $MT_RUN_AREA_HOME/tmp/$USER/$sysname/$fsname/$datapath/$MT_RUNNAME +echo 1 2 3 4 5 > $MT_RUN_AREA_HOME/tmp/$USER/$sysname/$fsname/$datapath/$MT_RUNNAME/the_ans + +$MT_MEGATEST -runstep wasting_time -logpro wasting_time.logpro "sleep 8;echo all done eh?" 
-m "This is a test step comment" + +$MT_MEGATEST -load-test-data << EOF +foo,bar,1.2,1.9,> +foo,rab,1.0e9,10e9,1e9 +foo,bla,1.2,1.9,< +foo,bal,1.2,1.2,<,,Check for overload +foo,alb,1.2,1.2,<=,Amps,This is the high power circuit test +foo,abl,1.2,1.3,0.1 +foo,bra,1.2,pass,silly stuff +faz,bar,10,8mA,,,"this is a comment" +EOF + +$MT_MEGATEST -load-test-data << EOF +cat, var, val, exp, comp, units, comment, status, type +ameas,iout,1.2,1.9,>,Amps,Comment,,meas +EOF +loadstatus=$? + +if [[ `basename $PWD` == "mustfail" ]];then + $MT_MEGATEST -test-status :state COMPLETED :status FAIL +else + $MT_MEGATEST -test-status :state COMPLETED :status $loadstatus -m "This is a test level comment" :value 10e6 :expected_value 1.1e6 :tol 100e3 :category nada :variable sillyvar :units mFarks :comment "This is the value/expected comment" +fi + +env > envfile.txt + +# $MT_MEGATEST -test-status :state COMPLETED :status FAIL ADDED tests/fullrun/tests/runfirst/testconfig Index: tests/fullrun/tests/runfirst/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/runfirst/testconfig @@ -0,0 +1,24 @@ +[setup] +runscript main.sh + +[pre-launch-env-vars] +# These are set before the test is launched on the originating +# host. This can be used to control remote launch tools, e.g. to +# to choose the target host, select the launch tool etc. +SPECIAL_ENV_VAR override with everything after the first space. 
+ +[items] +SEASON summer winter fall spring + +[itemstable] +BLOCK a b +TOCK 1 2 + +[test_meta] +author matt +owner bob +description This test must\ + be run before the other tests + +tags first,single +reviewed 1/1/1965 ADDED tests/fullrun/tests/runfirst/wasting_time.logpro Index: tests/fullrun/tests/runfirst/wasting_time.logpro ================================================================== --- /dev/null +++ tests/fullrun/tests/runfirst/wasting_time.logpro @@ -0,0 +1,15 @@ +;; put stuff here + +;; NOTE: This is not legit logpro code!!! + +;; Test for 0=PASS, 1=WARN, >2 = FAIL + +;; (define season (get-environment-variable "SEASON")) +;; +;; (exit +;; (case (string->symbol season) +;; ((summer) 0) +;; ((winter) 1) +;; ((fall) 2) +;; (else 0))) + ADDED tests/fullrun/tests/singletest/main.sh Index: tests/fullrun/tests/singletest/main.sh ================================================================== --- /dev/null +++ tests/fullrun/tests/singletest/main.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +# megatest -step wasting_time :state start :status n/a -m "This is a test step comment" +# sleep 20 +# megatest -step wasting_time :state end :status $? + +$MT_MEGATEST -runstep wasting_time -logpro wasting_time.logpro "sleep 5;echo alldone" -m "This is a test step comment" + +$MT_MEGATEST -test-status :state COMPLETED :status $? -m "This is a test level comment" -set-toplog the_top_log.html :first_err "This is the first error" ADDED tests/fullrun/tests/singletest/testconfig Index: tests/fullrun/tests/singletest/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/singletest/testconfig @@ -0,0 +1,13 @@ +[setup] +runscript main.sh + +[requirements] +diskspace 1M +memory 1G + +[pre-launch-env-vars] +# These are set before the test is launched on the originating +# host. This can be used to control remote launch tools, e.g. to +# to choose the target host, select the launch tool etc. 
+SPECIAL_ENV_VAR override with everything after the first space. + ADDED tests/fullrun/tests/singletest/wasting_time.logpro Index: tests/fullrun/tests/singletest/wasting_time.logpro ================================================================== --- /dev/null +++ tests/fullrun/tests/singletest/wasting_time.logpro @@ -0,0 +1,15 @@ +;; put stuff here + +;; NOTE: This is not legit logpro code!!! + +;; Test for 0=PASS, 1=WARN, >2 = FAIL + +;; (define season (get-environment-variable "SEASON")) +;; +;; (exit +;; (case (string->symbol season) +;; ((summer) 0) +;; ((winter) 1) +;; ((fall) 2) +;; (else 0))) + ADDED tests/fullrun/tests/singletest2/main.sh Index: tests/fullrun/tests/singletest2/main.sh ================================================================== --- /dev/null +++ tests/fullrun/tests/singletest2/main.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +# megatest -step wasting_time :state start :status n/a -m "This is a test step comment" +# sleep 20 +# megatest -step wasting_time :state end :status $? + +$MT_MEGATEST -runstep wasting_time -logpro wasting_time.logpro "sleep 5;echo all done eh?" -m "This is a test step comment" + +$MT_MEGATEST -test-status :state COMPLETED :status $? -m "This is a test level comment" -set-toplog the_top_log.html :first_warn "This is the first warning" ADDED tests/fullrun/tests/singletest2/testconfig Index: tests/fullrun/tests/singletest2/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/singletest2/testconfig @@ -0,0 +1,14 @@ +[setup] +runscript main.sh + +[requirements] +diskspace 1M +memory 1G +waiton singletest + +[pre-launch-env-vars] +# These are set before the test is launched on the originating +# host. This can be used to control remote launch tools, e.g. to +# to choose the target host, select the launch tool etc. +SPECIAL_ENV_VAR override with everything after the first space. 
+ ADDED tests/fullrun/tests/singletest2/wasting_time.logpro Index: tests/fullrun/tests/singletest2/wasting_time.logpro ================================================================== --- /dev/null +++ tests/fullrun/tests/singletest2/wasting_time.logpro @@ -0,0 +1,15 @@ +;; put stuff here + +;; NOTE: This is not legit logpro code!!! + +;; Test for 0=PASS, 1=WARN, >2 = FAIL + +;; (define season (get-environment-variable "SEASON")) +;; +;; (exit +;; (case (string->symbol season) +;; ((summer) 0) +;; ((winter) 1) +;; ((fall) 2) +;; (else 0))) + ADDED tests/fullrun/tests/special/testconfig Index: tests/fullrun/tests/special/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/special/testconfig @@ -0,0 +1,8 @@ +[ezsteps] +# calcresults megatest -list-runs $MT_RUNNAME -target $MT_TARGET + +[requirements] +waiton #{rget TESTSTORUN} + +# This is a "toplevel" test, it does not require waitons to be non-FAIL to run +mode toplevel ADDED tests/fullrun/tests/sqlitespeed/runscript.rb Index: tests/fullrun/tests/sqlitespeed/runscript.rb ================================================================== --- /dev/null +++ tests/fullrun/tests/sqlitespeed/runscript.rb @@ -0,0 +1,38 @@ +#! /usr/bin/env ruby + +require "#{ENV['MT_RUN_AREA_HOME']}/../resources/ruby/librunscript.rb" + +# run_record(stepname, cmd) - will record in db if exit code of script was zero or not +run_and_record('create db',"sqlite3 testing.db << EOF\ncreate table if not exists blah(id INTEGER PRIMARY KEY,name TEXT);\n.q\nEOF","") + +if (! File.exists?("../../runfirst/I_was_here")) + puts "ERROR: This test was started before the prerequisites ran!" 
+ system "megatest -test-status :state INCOMPLETE :status FAIL" + exit 1 +end + +# file_size_checker(stepname, filename, minsize, maxsize) - negative means ignore +# file_size_checker('create db','testing.db',100,-1) + +num_records=rand(5) # 0000 +record_step("add #{num_records}","start","n/a") +status=false +(0..num_records).each do |i| + randstring="abc"; + # "a;lskdfja;sdfj;alsdfj;aslfdj;alsfja;lsfdj;alsfja;lsjfd;lasfjl;asdfja;slfj;alsjf;asljf;alsjf;lasdjf;lasjf;lasjf;alsjf;lashflkashflkerhflkdsvnlasldhlfaldf" + # status=system "sqlite3 testing.db \"insert into blah (name) values ('#{randstring}');\"" + system "megatest -step testing :state wrote_junk :status #{num_records}" + sleep(5) + puts "i=#{i}" +end +if status==0 + status='pass' +else + status='fail' +end + +record_step("add #{num_records}","end",status) + + + + ADDED tests/fullrun/tests/sqlitespeed/testconfig Index: tests/fullrun/tests/sqlitespeed/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/sqlitespeed/testconfig @@ -0,0 +1,13 @@ +[setup] +runscript runscript.rb +tags non important,dumb junk + +[requirements] +waiton runfirst + +[items] +MANYITEMS [system (env > envfile.txt;echo aa ab ac ad ae af ag ah ai)] +# BORKED + +[test_meta] +jobgroup sqlite3 ADDED tests/fullrun/tests/test_mt_vars/altvarnotset.logpro Index: tests/fullrun/tests/test_mt_vars/altvarnotset.logpro ================================================================== --- /dev/null +++ tests/fullrun/tests/test_mt_vars/altvarnotset.logpro @@ -0,0 +1,1 @@ +(expect:error in "LogFileBody" = 0 "a file that should never exist" #/what a dumb filename this is/) ADDED tests/fullrun/tests/test_mt_vars/altvarnotset.sh Index: tests/fullrun/tests/test_mt_vars/altvarnotset.sh ================================================================== --- /dev/null +++ tests/fullrun/tests/test_mt_vars/altvarnotset.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +! 
grep ALT_VAR megatest.sh ADDED tests/fullrun/tests/test_mt_vars/bogousnotset.logpro Index: tests/fullrun/tests/test_mt_vars/bogousnotset.logpro ================================================================== --- /dev/null +++ tests/fullrun/tests/test_mt_vars/bogousnotset.logpro @@ -0,0 +1,1 @@ +(expect:error in "LogFileBody" = 0 "a file that should never exist" #/what a dumb filename this is/) ADDED tests/fullrun/tests/test_mt_vars/bogousnotset.sh Index: tests/fullrun/tests/test_mt_vars/bogousnotset.sh ================================================================== --- /dev/null +++ tests/fullrun/tests/test_mt_vars/bogousnotset.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +! grep BOGOUS megatest.sh ADDED tests/fullrun/tests/test_mt_vars/currentisblah.logpro Index: tests/fullrun/tests/test_mt_vars/currentisblah.logpro ================================================================== --- /dev/null +++ tests/fullrun/tests/test_mt_vars/currentisblah.logpro @@ -0,0 +1,1 @@ +(expect:error in "LogFileBody" = 0 "a file that should never exist" #/what a dumb filename this is/) ADDED tests/fullrun/tests/test_mt_vars/currentisblah.sh Index: tests/fullrun/tests/test_mt_vars/currentisblah.sh ================================================================== --- /dev/null +++ tests/fullrun/tests/test_mt_vars/currentisblah.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +grep -e '^export CURRENT' megatest.sh | grep /tmp/nada ADDED tests/fullrun/tests/test_mt_vars/empty_var.logpro Index: tests/fullrun/tests/test_mt_vars/empty_var.logpro ================================================================== --- /dev/null +++ tests/fullrun/tests/test_mt_vars/empty_var.logpro @@ -0,0 +1,1 @@ +(expect:error in "LogFileBody" = 0 "a file that should never exist" #/what a dumb filename this is/) ADDED tests/fullrun/tests/test_mt_vars/empty_var.sh Index: tests/fullrun/tests/test_mt_vars/empty_var.sh ================================================================== --- /dev/null +++ 
tests/fullrun/tests/test_mt_vars/empty_var.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +if [ x$EMPTY_VAR != "x" ];then + echo BAD EMPTY VAR! + exit 1 +fi ADDED tests/fullrun/tests/test_mt_vars/eval_vars.sh Index: tests/fullrun/tests/test_mt_vars/eval_vars.sh ================================================================== --- /dev/null +++ tests/fullrun/tests/test_mt_vars/eval_vars.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +if env | grep VARWITHDOLLARSIGNS | grep USER;then + exit 1 # fails! +else + exit 0 # good! +fi ADDED tests/fullrun/tests/test_mt_vars/lookithome.logpro Index: tests/fullrun/tests/test_mt_vars/lookithome.logpro ================================================================== --- /dev/null +++ tests/fullrun/tests/test_mt_vars/lookithome.logpro @@ -0,0 +1,1 @@ +(expect:error in "LogFileBody" = 0 "a file that should never exist" #/what a dumb filename this is/) ADDED tests/fullrun/tests/test_mt_vars/lookittmp.logpro Index: tests/fullrun/tests/test_mt_vars/lookittmp.logpro ================================================================== --- /dev/null +++ tests/fullrun/tests/test_mt_vars/lookittmp.logpro @@ -0,0 +1,1 @@ +(expect:error in "LogFileBody" = 0 "a file that should never exist" #/what a dumb filename this is/) ADDED tests/fullrun/tests/test_mt_vars/test-path-file.sh Index: tests/fullrun/tests/test_mt_vars/test-path-file.sh ================================================================== --- /dev/null +++ tests/fullrun/tests/test_mt_vars/test-path-file.sh @@ -0,0 +1,28 @@ +#!/bin/bash + + +# get a previous test +export EZFAILPATH=`$MT_MEGATEST -test-files envfile.txt -target $MT_TARGET :runname $MT_RUNNAME -testpatt runfirst/a%` + +echo "Found |$EZFAILPATH|" + +if [ -e $EZFAILPATH ];then + echo All good! +else + echo NOT good! + exit 1 +fi + +export EZFAILPATH2=`$MT_MEGATEST -test-paths -target $MT_TARGET :runname $MT_RUNNAME -testpatt runfirst/a%` + +echo "Found |$EZFAILPATH2|" + +if [ -e $EZFAILPATH2 ];then + echo All good! 
+else + echo NOT good! + exit 1 +fi + + +exit 0 ADDED tests/fullrun/tests/test_mt_vars/test-path.logpro Index: tests/fullrun/tests/test_mt_vars/test-path.logpro ================================================================== --- /dev/null +++ tests/fullrun/tests/test_mt_vars/test-path.logpro @@ -0,0 +1,1 @@ +(expect:error in "LogFileBody" = 0 "a file that should never exist" #/what a dumb filename this is/) ADDED tests/fullrun/tests/test_mt_vars/testconfig Index: tests/fullrun/tests/test_mt_vars/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/test_mt_vars/testconfig @@ -0,0 +1,40 @@ +[setup] + +[ezsteps] +lookittmp ls /tmp +lookithome ls /home +# $CURRENT should be /tmp/nada +currentisblah currentisblah.sh + +# $BOGOUS should NOT be set +bogousnotset bogousnotset.sh + +# ALT_VAR should NOT be set +altvarnotset altvarnotset.sh + +# EMPTY_VAR should be an empty string +empty_var empty_var.sh + +# VACKYVAR should be set to a path +vackyvar vackyvar.sh + +# test-path and test-file +test-path test-path-file.sh + +# verify that vars with $ signs get expanded +varwithdollar eval_vars.sh + +[requirements] +waiton runfirst +priority 0 + +[items] +NUMNUM [system cat $MT_RUN_AREA_HOME/tmp/$USER/$sysname/$fsname/$datapath/$MT_RUNNAME/$PREDICTABLE] + +[test_meta] +author matt +owner bob +description This test runs a single ezstep which is expected to pass, no logpro file. 
+ +tags first,single +reviewed 09/10/2011, by Matt ADDED tests/fullrun/tests/test_mt_vars/vackyvar.logpro Index: tests/fullrun/tests/test_mt_vars/vackyvar.logpro ================================================================== --- /dev/null +++ tests/fullrun/tests/test_mt_vars/vackyvar.logpro @@ -0,0 +1,1 @@ +(expect:error in "LogFileBody" = 0 "a file that should never exist" #/what a dumb filename this is/) ADDED tests/fullrun/tests/test_mt_vars/vackyvar.sh Index: tests/fullrun/tests/test_mt_vars/vackyvar.sh ================================================================== --- /dev/null +++ tests/fullrun/tests/test_mt_vars/vackyvar.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +grep VACKYVAR megatest.sh | grep fullrun ADDED tests/fullrun/tests/test_mt_vars/varwithdollar.logpro Index: tests/fullrun/tests/test_mt_vars/varwithdollar.logpro ================================================================== --- /dev/null +++ tests/fullrun/tests/test_mt_vars/varwithdollar.logpro @@ -0,0 +1,1 @@ +(expect:error in "LogFileBody" = 0 "a file that should never exist" #/what a dumb filename this is/) ADDED tests/fullrun/tests/testxz/testconfig Index: tests/fullrun/tests/testxz/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/testxz/testconfig @@ -0,0 +1,15 @@ +# Add additional steps here. 
Format is "stepname script" +[ezsteps] +listfiles ls + +# Test requirements are specified here +[requirements] +waiton blocktestxz + +# test_meta is a section for storing additional data on your test +[test_meta] +author mrwellan +owner mrwellan +description This test should never get run due to blocktestxz failing +tags tagone,tagtwo +reviewed never ADDED tests/fullrun/tests/wait_no_items1/testconfig Index: tests/fullrun/tests/wait_no_items1/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/wait_no_items1/testconfig @@ -0,0 +1,17 @@ +[ezsteps] +listfiles ls + +[requirements] +waiton no_items + +[items] + +[test_meta] +author matt +owner bob +description This test runs a single ezstep which is expected to pass \ +but there is an items definition with no items. This should evoke an \ +error. + +tags first,single +reviewed 09/10/2011, by Matt ADDED tests/fullrun/tests/wait_no_items2/testconfig Index: tests/fullrun/tests/wait_no_items2/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/wait_no_items2/testconfig @@ -0,0 +1,17 @@ +[ezsteps] +listfiles ls + +[requirements] +waiton wait_no_items1 + +[items] + +[test_meta] +author matt +owner bob +description This test runs a single ezstep which is expected to pass \ +but there is an items definition with no items. This should evoke an \ +error. + +tags first,single +reviewed 09/10/2011, by Matt ADDED tests/fullrun/tests/wait_no_items3/testconfig Index: tests/fullrun/tests/wait_no_items3/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/wait_no_items3/testconfig @@ -0,0 +1,17 @@ +[ezsteps] +listfiles ls + +[requirements] +waiton wait_no_items2 + +[items] + +[test_meta] +author matt +owner bob +description This test runs a single ezstep which is expected to pass \ +but there is an items definition with no items. 
This should evoke an \ +error. + +tags first,single +reviewed 09/10/2011, by Matt ADDED tests/fullrun/tests/wait_no_items4/testconfig Index: tests/fullrun/tests/wait_no_items4/testconfig ================================================================== --- /dev/null +++ tests/fullrun/tests/wait_no_items4/testconfig @@ -0,0 +1,17 @@ +[ezsteps] +listfiles ls + +[requirements] +waiton wait_no_items3 + +[items] + +[test_meta] +author matt +owner bob +description This test runs a single ezstep which is expected to pass \ +but there is an items definition with no items. This should evoke an \ +error. + +tags first,single +reviewed 09/10/2011, by Matt ADDED tests/installall/config/megatest.config.dat Index: tests/installall/config/megatest.config.dat ================================================================== --- /dev/null +++ tests/installall/config/megatest.config.dat @@ -0,0 +1,1 @@ +../megatest.config ADDED tests/installall/config/runconfigs.config.dat Index: tests/installall/config/runconfigs.config.dat ================================================================== --- /dev/null +++ tests/installall/config/runconfigs.config.dat @@ -0,0 +1,1 @@ +../runconfigs.config ADDED tests/installall/config/sheet-names.cfg Index: tests/installall/config/sheet-names.cfg ================================================================== --- /dev/null +++ tests/installall/config/sheet-names.cfg @@ -0,0 +1,2 @@ +megatest.config +runconfigs.config ADDED tests/installall/config/sxml/_sheets.sxml Index: tests/installall/config/sxml/_sheets.sxml ================================================================== --- /dev/null +++ tests/installall/config/sxml/_sheets.sxml @@ -0,0 +1,51 @@ +((@ (http://www.w3.org/2001/XMLSchema-instance:schemaLocation + "http://www.gnumeric.org/v9.xsd")) + (http://www.gnumeric.org/v10.dtd:Version + (@ (Minor "17") (Major "10") (Full "1.10.17") (Epoch "1"))) + (http://www.gnumeric.org/v10.dtd:Attributes + 
(http://www.gnumeric.org/v10.dtd:Attribute + (http://www.gnumeric.org/v10.dtd:type "4") + (http://www.gnumeric.org/v10.dtd:name + "WorkbookView::show_horizontal_scrollbar") + (http://www.gnumeric.org/v10.dtd:value "TRUE")) + (http://www.gnumeric.org/v10.dtd:Attribute + (http://www.gnumeric.org/v10.dtd:type "4") + (http://www.gnumeric.org/v10.dtd:name + "WorkbookView::show_vertical_scrollbar") + (http://www.gnumeric.org/v10.dtd:value "TRUE")) + (http://www.gnumeric.org/v10.dtd:Attribute + (http://www.gnumeric.org/v10.dtd:type "4") + (http://www.gnumeric.org/v10.dtd:name "WorkbookView::show_notebook_tabs") + (http://www.gnumeric.org/v10.dtd:value "TRUE")) + (http://www.gnumeric.org/v10.dtd:Attribute + (http://www.gnumeric.org/v10.dtd:type "4") + (http://www.gnumeric.org/v10.dtd:name "WorkbookView::do_auto_completion") + (http://www.gnumeric.org/v10.dtd:value "TRUE")) + (http://www.gnumeric.org/v10.dtd:Attribute + (http://www.gnumeric.org/v10.dtd:type "4") + (http://www.gnumeric.org/v10.dtd:name "WorkbookView::is_protected") + (http://www.gnumeric.org/v10.dtd:value "FALSE"))) + (urn:oasis:names:tc:opendocument:xmlns:office:1.0:document-meta + (@ (urn:oasis:names:tc:opendocument:xmlns:office:1.0:version "1.2")) + (urn:oasis:names:tc:opendocument:xmlns:office:1.0:meta + (http://purl.org/dc/elements/1.1/:date "2013-07-21T23:45:07Z") + (urn:oasis:names:tc:opendocument:xmlns:meta:1.0:creation-date + "2013-07-21T23:42:35Z"))) + (http://www.gnumeric.org/v10.dtd:Calculation + (@ (MaxIterations "100") + (ManualRecalc "0") + (IterationTolerance "0.001") + (FloatRadix "2") + (FloatDigits "53") + (EnableIteration "1"))) + (http://www.gnumeric.org/v10.dtd:SheetNameIndex + (http://www.gnumeric.org/v10.dtd:SheetName + (@ (http://www.gnumeric.org/v10.dtd:Rows "65536") + (http://www.gnumeric.org/v10.dtd:Cols "256")) + "megatest.config") + (http://www.gnumeric.org/v10.dtd:SheetName + (@ (http://www.gnumeric.org/v10.dtd:Rows "65536") + (http://www.gnumeric.org/v10.dtd:Cols "256")) + 
"runconfigs.config")) + (http://www.gnumeric.org/v10.dtd:Geometry (@ (Width "1440") (Height "647"))) + (http://www.gnumeric.org/v10.dtd:UIData (@ (SelectedTab "0")))) ADDED tests/installall/config/sxml/_workbook.sxml Index: tests/installall/config/sxml/_workbook.sxml ================================================================== --- /dev/null +++ tests/installall/config/sxml/_workbook.sxml @@ -0,0 +1,1 @@ +(*TOP* (*PI* xml "version=\"1.0\" encoding=\"UTF-8\"")) ADDED tests/installall/config/sxml/megatest.config.sxml Index: tests/installall/config/sxml/megatest.config.sxml ================================================================== --- /dev/null +++ tests/installall/config/sxml/megatest.config.sxml @@ -0,0 +1,108 @@ +(http://www.gnumeric.org/v10.dtd:Sheet + (@ (Visibility "GNM_SHEET_VISIBILITY_VISIBLE") + (OutlineSymbolsRight "1") + (OutlineSymbolsBelow "1") + (HideZero "0") + (HideRowHeader "0") + (HideGrid "0") + (HideColHeader "0") + (GridColor "0:0:0") + (DisplayOutlines "1") + (DisplayFormulas "0")) + (http://www.gnumeric.org/v10.dtd:MaxCol "5") + (http://www.gnumeric.org/v10.dtd:MaxRow "7") + (http://www.gnumeric.org/v10.dtd:Zoom "1") + (http://www.gnumeric.org/v10.dtd:Names + (http://www.gnumeric.org/v10.dtd:Name + (http://www.gnumeric.org/v10.dtd:name "Print_Area") + (http://www.gnumeric.org/v10.dtd:value "#REF!") + (http://www.gnumeric.org/v10.dtd:position "A1")) + (http://www.gnumeric.org/v10.dtd:Name + (http://www.gnumeric.org/v10.dtd:name "Sheet_Title") + (http://www.gnumeric.org/v10.dtd:value "\"megatest.config\"") + (http://www.gnumeric.org/v10.dtd:position "A1"))) + (http://www.gnumeric.org/v10.dtd:PrintInformation + (http://www.gnumeric.org/v10.dtd:Margins + (http://www.gnumeric.org/v10.dtd:top (@ (PrefUnit "mm") (Points "120"))) + (http://www.gnumeric.org/v10.dtd:bottom + (@ (PrefUnit "mm") (Points "120"))) + (http://www.gnumeric.org/v10.dtd:left (@ (PrefUnit "mm") (Points "72"))) + (http://www.gnumeric.org/v10.dtd:right (@ (PrefUnit 
"mm") (Points "72"))) + (http://www.gnumeric.org/v10.dtd:header + (@ (PrefUnit "mm") (Points "72"))) + (http://www.gnumeric.org/v10.dtd:footer + (@ (PrefUnit "mm") (Points "72")))) + (http://www.gnumeric.org/v10.dtd:Scale + (@ (type "percentage") (percentage "100"))) + (http://www.gnumeric.org/v10.dtd:vcenter (@ (value "0"))) + (http://www.gnumeric.org/v10.dtd:hcenter (@ (value "0"))) + (http://www.gnumeric.org/v10.dtd:grid (@ (value "0"))) + (http://www.gnumeric.org/v10.dtd:even_if_only_styles (@ (value "0"))) + (http://www.gnumeric.org/v10.dtd:monochrome (@ (value "0"))) + (http://www.gnumeric.org/v10.dtd:draft (@ (value "0"))) + (http://www.gnumeric.org/v10.dtd:titles (@ (value "0"))) + (http://www.gnumeric.org/v10.dtd:do_not_print (@ (value "0"))) + (http://www.gnumeric.org/v10.dtd:print_range (@ (value "0"))) + (http://www.gnumeric.org/v10.dtd:order "d_then_r") + (http://www.gnumeric.org/v10.dtd:orientation "portrait") + (http://www.gnumeric.org/v10.dtd:Header + (@ (Right "") (Middle "&[TAB]") (Left ""))) + (http://www.gnumeric.org/v10.dtd:Footer + (@ (Right "") (Middle "Page &[PAGE]") (Left ""))) + (http://www.gnumeric.org/v10.dtd:paper "na_letter") + (http://www.gnumeric.org/v10.dtd:comments "in_place") + (http://www.gnumeric.org/v10.dtd:errors "as_displayed")) + (http://www.gnumeric.org/v10.dtd:Styles + (http://www.gnumeric.org/v10.dtd:StyleRegion + (@ (startRow "0") (startCol "0") (endRow "65535") (endCol "255")) + (http://www.gnumeric.org/v10.dtd:Style + (@ (WrapText "0") + (VAlign "2") + (ShrinkToFit "0") + (Shade "0") + (Rotation "0") + (PatternColor "0:0:0") + (Locked "1") + (Indent "0") + (Hidden "0") + (HAlign "1") + (Format "General") + (Fore "0:0:0") + (Back "FFFF:FFFF:FFFF")) + (http://www.gnumeric.org/v10.dtd:Font + (@ (Unit "10") + (Underline "0") + (StrikeThrough "0") + (Script "0") + (Italic "0") + (Bold "0")) + "Sans")))) + (http://www.gnumeric.org/v10.dtd:Cols + (@ (DefaultSizePts "48")) + (http://www.gnumeric.org/v10.dtd:ColInfo + (@ (Unit 
"112.5") (No "0") (HardSize "1"))) + (http://www.gnumeric.org/v10.dtd:ColInfo (@ (Unit "48") (No "1"))) + (http://www.gnumeric.org/v10.dtd:ColInfo + (@ (Unit "63.75") (No "2") (HardSize "1"))) + (http://www.gnumeric.org/v10.dtd:ColInfo (@ (Unit "48") (No "3"))) + (http://www.gnumeric.org/v10.dtd:ColInfo + (@ (Unit "86.25") (No "4") (HardSize "1"))) + (http://www.gnumeric.org/v10.dtd:ColInfo (@ (Unit "48") (No "5")))) + (http://www.gnumeric.org/v10.dtd:Rows + (@ (DefaultSizePts "12.75")) + (http://www.gnumeric.org/v10.dtd:RowInfo + (@ (Unit "12.75") (No "0") (Count "8")))) + (http://www.gnumeric.org/v10.dtd:Selections + (@ (CursorRow "0") (CursorCol "0")) + (http://www.gnumeric.org/v10.dtd:Selection + (@ (startRow "0") (startCol "0") (endRow "0") (endCol "0")))) + (http://www.gnumeric.org/v10.dtd:SheetLayout (@ (TopLeft "A1"))) + (http://www.gnumeric.org/v10.dtd:Solver + (@ (ProgramR "0") + (ProblemType "0") + (NonNeg "1") + (ModelType "0") + (MaxTime "60") + (MaxIter "1000") + (Discr "0") + (AutoScale "0")))) ADDED tests/installall/config/sxml/runconfigs.config.sxml Index: tests/installall/config/sxml/runconfigs.config.sxml ================================================================== --- /dev/null +++ tests/installall/config/sxml/runconfigs.config.sxml @@ -0,0 +1,111 @@ +(http://www.gnumeric.org/v10.dtd:Sheet + (@ (Visibility "GNM_SHEET_VISIBILITY_VISIBLE") + (OutlineSymbolsRight "1") + (OutlineSymbolsBelow "1") + (HideZero "0") + (HideRowHeader "0") + (HideGrid "0") + (HideColHeader "0") + (GridColor "0:0:0") + (DisplayOutlines "1") + (DisplayFormulas "0")) + (http://www.gnumeric.org/v10.dtd:MaxCol "3") + (http://www.gnumeric.org/v10.dtd:MaxRow "7") + (http://www.gnumeric.org/v10.dtd:Zoom "1") + (http://www.gnumeric.org/v10.dtd:Names + (http://www.gnumeric.org/v10.dtd:Name + (http://www.gnumeric.org/v10.dtd:name "Print_Area") + (http://www.gnumeric.org/v10.dtd:value "#REF!") + (http://www.gnumeric.org/v10.dtd:position "A1")) + 
(http://www.gnumeric.org/v10.dtd:Name + (http://www.gnumeric.org/v10.dtd:name "Sheet_Title") + (http://www.gnumeric.org/v10.dtd:value "\"runconfigs.config\"") + (http://www.gnumeric.org/v10.dtd:position "A1"))) + (http://www.gnumeric.org/v10.dtd:PrintInformation + (http://www.gnumeric.org/v10.dtd:Margins + (http://www.gnumeric.org/v10.dtd:top (@ (PrefUnit "mm") (Points "120"))) + (http://www.gnumeric.org/v10.dtd:bottom + (@ (PrefUnit "mm") (Points "120"))) + (http://www.gnumeric.org/v10.dtd:left (@ (PrefUnit "mm") (Points "72"))) + (http://www.gnumeric.org/v10.dtd:right (@ (PrefUnit "mm") (Points "72"))) + (http://www.gnumeric.org/v10.dtd:header + (@ (PrefUnit "mm") (Points "72"))) + (http://www.gnumeric.org/v10.dtd:footer + (@ (PrefUnit "mm") (Points "72")))) + (http://www.gnumeric.org/v10.dtd:Scale + (@ (type "percentage") (percentage "100"))) + (http://www.gnumeric.org/v10.dtd:vcenter (@ (value "0"))) + (http://www.gnumeric.org/v10.dtd:hcenter (@ (value "0"))) + (http://www.gnumeric.org/v10.dtd:grid (@ (value "0"))) + (http://www.gnumeric.org/v10.dtd:even_if_only_styles (@ (value "0"))) + (http://www.gnumeric.org/v10.dtd:monochrome (@ (value "0"))) + (http://www.gnumeric.org/v10.dtd:draft (@ (value "0"))) + (http://www.gnumeric.org/v10.dtd:titles (@ (value "0"))) + (http://www.gnumeric.org/v10.dtd:do_not_print (@ (value "0"))) + (http://www.gnumeric.org/v10.dtd:print_range (@ (value "0"))) + (http://www.gnumeric.org/v10.dtd:order "d_then_r") + (http://www.gnumeric.org/v10.dtd:orientation "portrait") + (http://www.gnumeric.org/v10.dtd:Header + (@ (Right "") (Middle "&[TAB]") (Left ""))) + (http://www.gnumeric.org/v10.dtd:Footer + (@ (Right "") (Middle "Page &[PAGE]") (Left ""))) + (http://www.gnumeric.org/v10.dtd:paper "na_letter") + (http://www.gnumeric.org/v10.dtd:comments "in_place") + (http://www.gnumeric.org/v10.dtd:errors "as_displayed")) + (http://www.gnumeric.org/v10.dtd:Styles + (http://www.gnumeric.org/v10.dtd:StyleRegion + (@ (startRow "0") (startCol 
"0") (endRow "65535") (endCol "255")) + (http://www.gnumeric.org/v10.dtd:Style + (@ (WrapText "0") + (VAlign "2") + (ShrinkToFit "0") + (Shade "0") + (Rotation "0") + (PatternColor "0:0:0") + (Locked "1") + (Indent "0") + (Hidden "0") + (HAlign "1") + (Format "General") + (Fore "0:0:0") + (Back "FFFF:FFFF:FFFF")) + (http://www.gnumeric.org/v10.dtd:Font + (@ (Unit "10") + (Underline "0") + (StrikeThrough "0") + (Script "0") + (Italic "0") + (Bold "0")) + "Sans")))) + (http://www.gnumeric.org/v10.dtd:Cols + (@ (DefaultSizePts "48")) + (http://www.gnumeric.org/v10.dtd:ColInfo + (@ (Unit "108.8") (No "0") (HardSize "1"))) + (http://www.gnumeric.org/v10.dtd:ColInfo + (@ (Unit "97.5") (No "1") (HardSize "1"))) + (http://www.gnumeric.org/v10.dtd:ColInfo + (@ (Unit "100.5") (No "2") (HardSize "1") (Count "2")))) + (http://www.gnumeric.org/v10.dtd:Rows + (@ (DefaultSizePts "12.75")) + (http://www.gnumeric.org/v10.dtd:RowInfo + (@ (Unit "13.5") (No "0") (Count "2"))) + (http://www.gnumeric.org/v10.dtd:RowInfo (@ (Unit "12.75") (No "2"))) + (http://www.gnumeric.org/v10.dtd:RowInfo + (@ (Unit "13.5") (No "3") (Count "2"))) + (http://www.gnumeric.org/v10.dtd:RowInfo (@ (Unit "12.75") (No "5"))) + (http://www.gnumeric.org/v10.dtd:RowInfo (@ (Unit "13.5") (No "6"))) + (http://www.gnumeric.org/v10.dtd:RowInfo (@ (Unit "12.75") (No "7")))) + (http://www.gnumeric.org/v10.dtd:Selections + (@ (CursorRow "7") (CursorCol "3")) + (http://www.gnumeric.org/v10.dtd:Selection + (@ (startRow "7") (startCol "3") (endRow "7") (endCol "3")))) + (http://www.gnumeric.org/v10.dtd:SheetLayout (@ (TopLeft "A1"))) + (http://www.gnumeric.org/v10.dtd:Solver + (@ (ProgramR "0") + (ProblemType "0") + (NonNeg "1") + (ModelType "0") + (MaxTime "60") + (MaxIter "1000") + (Discr "0") + (AutoScale "0")))) ADDED tests/installall/configs/chicken-4.8.0.4.config Index: tests/installall/configs/chicken-4.8.0.4.config ================================================================== --- /dev/null +++ 
tests/installall/configs/chicken-4.8.0.4.config @@ -0,0 +1,1 @@ +CHICKEN_URL http://code.call-cc.org/releases/4.8.0/chicken-4.8.0.4.tar.gz ADDED tests/installall/configs/chicken-4.8.1.config Index: tests/installall/configs/chicken-4.8.1.config ================================================================== --- /dev/null +++ tests/installall/configs/chicken-4.8.1.config @@ -0,0 +1,1 @@ +CHICKEN_URL http://code.call-cc.org/dev-snapshots/2013/01/04/chicken-4.8.1.tar.gz ADDED tests/installall/megatest.config Index: tests/installall/megatest.config ================================================================== --- /dev/null +++ tests/installall/megatest.config @@ -0,0 +1,24 @@ +[fields] +CHICKEN_VERSION TEXT +MEGATEST_VERSION TEXT +IUPMODE TEXT +BUILD_TAG TEXT + +[setup] +max_concurrent_jobs 6 +linktree #{getenv MT_RUN_AREA_HOME}/links +testcopycmd cp --remove-destination -rsv TEST_SRC_PATH/. TEST_TARG_PATH/. >> TEST_TARG_PATH/mt_launch.log 2>> TEST_TARG_PATH/mt_launch.log + +[jobtools] +useshell yes +launcher nbfind + +[env-override] +EXAMPLE_VAR example value + +[server] +port 9080 + +[disks] +disk0 #{getenv MT_RUN_AREA_HOME}/runs + ADDED tests/installall/runconfigs.config Index: tests/installall/runconfigs.config ================================================================== --- /dev/null +++ tests/installall/runconfigs.config @@ -0,0 +1,38 @@ +[.............] 
+# +# [CHICKEN_VERSION/MEGATEST_VERSION/IUPMODE/PLATFORM/BUILD_TAG] +# + +[default] +ALLTESTS see this variable +PREFIX #{getenv MT_RUN_AREA_HOME}/#{getenv BUILD_TAG}/#{getenv MT_RUNNAME} +DOWNLOADS #{getenv MT_RUN_AREA_HOME}/downloads +IUPLIB 26g4 +PLATFORM linux +LOGPRO_VERSION v1.05 +BUILDSQLITE yes +SQLITE3_VERSION 3071401 +ZEROMQ_VERSION 2.2.0 +logpro_VERSION v1.08 +stml_VERSION v0.901 +megatest_VERSION v1.5511 + +[include configs/hicken-#{getenv CHICKEN_VERSION}.config] + +# Currently must have at least one variable in a section +[4.8.0/trunk/bin/std] +IUP_VERSION na + +[4.8.0.4/trunk/src/std] +CHICKEN_URL http://code.call-cc.org/releases/4.8.0/chicken-4.8.0.4.tar.gz +IUP_VERSION na + +[4.8.1/trunk/src/std] +IUP_VERSION na + +[4.8.0/v1.5508/opt] +IUP_VERSION na +PREFIX /opt/chicken/4.8.0 + +[4.8.0/trunk/centos5.7vm] +BUILDSQLITE no ADDED tests/installall/tests/canvas-draw/install.logpro Index: tests/installall/tests/canvas-draw/install.logpro ================================================================== --- /dev/null +++ tests/installall/tests/canvas-draw/install.logpro @@ -0,0 +1,8 @@ +;; You should have at least one expect:required. This ensures that your process ran +(expect:required in "LogFileBody" > 0 "Put description here" #/put pattern here/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! 
+(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/canvas-draw/install.sh Index: tests/installall/tests/canvas-draw/install.sh ================================================================== --- /dev/null +++ tests/installall/tests/canvas-draw/install.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +# Run your step here + +source $PREFIX/buildsetup.sh ADDED tests/installall/tests/canvas-draw/testconfig Index: tests/installall/tests/canvas-draw/testconfig ================================================================== --- /dev/null +++ tests/installall/tests/canvas-draw/testconfig @@ -0,0 +1,18 @@ +# Add additional steps here. Format is "stepname script" +[ezsteps] +install install.sh + +# Test requirements are specified here +[requirements] +waiton iuplib setup + +# Iteration for your tests are controlled by the items section +[items] + +# test_meta is a section for storing additional data on your test +[test_meta] +author matt +owner matt +description Install the canvas-draw egg +tags tagone,tagtwo +reviewed never ADDED tests/installall/tests/chicken/compile.logpro Index: tests/installall/tests/chicken/compile.logpro ================================================================== --- /dev/null +++ tests/installall/tests/chicken/compile.logpro @@ -0,0 +1,10 @@ +;; You should have at least one expect:required. This ensures that your process ran +(expect:required in "LogFileBody" > 0 "Leaving directory ..." #/Leaving directory/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! 
+(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:ignore in "LogFileBody" >= 0 "Ignore HAVE_STRERROR" #/HAVE_STRERROR/) + +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/chicken/compile.sh Index: tests/installall/tests/chicken/compile.sh ================================================================== --- /dev/null +++ tests/installall/tests/chicken/compile.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +# Run your step here + +source $PREFIX/buildsetup.sh + +cd chicken-${CHICKEN_VERSION} +make PLATFORM=${PLATFORM} PREFIX=${PREFIX} ADDED tests/installall/tests/chicken/download.logpro Index: tests/installall/tests/chicken/download.logpro ================================================================== --- /dev/null +++ tests/installall/tests/chicken/download.logpro @@ -0,0 +1,11 @@ +;; You should have at least one expect:required. This ensures that your process ran +(expect:required in "LogFileBody" > 0 "README file must be seen" #/README$/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! + +(expect:ignore in "LogFileBody" >= 0 "Ignore error flagged by finalizer-error-test" #/\w+-error/) + +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/chicken/download.sh Index: tests/installall/tests/chicken/download.sh ================================================================== --- /dev/null +++ tests/installall/tests/chicken/download.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# Run your step here + +source $PREFIX/buildsetup.sh + +if [ ! 
-e ${DOWNLOADS}/chicken-${CHICKEN_VERSION}.tar.gz ]; then + if [ "${CHICKEN_URL}" == "" ]; then + CHICKEN_URL=http://code.call-cc.org/releases/${CHICKEN_VERSION}/chicken-${CHICKEN_VERSION}.tar.gz + fi + echo "Downloading $CHICKEN_URL" + (cd ${DOWNLOADS};wget ${CHICKEN_URL}) +fi + +ls -l ${DOWNLOADS}/chicken-${CHICKEN_VERSION}.tar.gz + +tar xfvz ${DOWNLOADS}/chicken-${CHICKEN_VERSION}.tar.gz + +ls -l chicken-${CHICKEN_VERSION} ADDED tests/installall/tests/chicken/install.logpro Index: tests/installall/tests/chicken/install.logpro ================================================================== --- /dev/null +++ tests/installall/tests/chicken/install.logpro @@ -0,0 +1,11 @@ +;; You should have at least one expect:required. This ensures that your process ran +(expect:required in "LogFileBody" > 0 "Leaving directory" #/Leaving directory/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! + +(expect:ignore in "LogFileBody" >= 0 "Ignore error in some filenames" #/\w+-errors/) + +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/chicken/install.sh Index: tests/installall/tests/chicken/install.sh ================================================================== --- /dev/null +++ tests/installall/tests/chicken/install.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +# Run your step here + +source $PREFIX/buildsetup.sh +# source $PREFIX + +cd chicken-${CHICKEN_VERSION} +make PLATFORM=${PLATFORM} PREFIX=${PREFIX} install + +ls -l ${PREFIX}/bin ADDED tests/installall/tests/chicken/testconfig Index: tests/installall/tests/chicken/testconfig ================================================================== --- /dev/null +++ tests/installall/tests/chicken/testconfig @@ -0,0 
+1,22 @@ +# Add additional steps here. Format is "stepname script" +[ezsteps] +download download.sh +compile compile.sh +install install.sh + +# Test requirements are specified here +[requirements] +waiton setup +# priority 10 + +# Iteration for your tests are controlled by the items section +[items] +# CHICKEN_VERSION 4.8.0 + +# test_meta is a section for storing additional data on your test +[test_meta] +author matt +owner matt +description Download and install chicken scheme +tags tagone,tagtwo +reviewed never ADDED tests/installall/tests/eggs/install.logpro Index: tests/installall/tests/eggs/install.logpro ================================================================== --- /dev/null +++ tests/installall/tests/eggs/install.logpro @@ -0,0 +1,9 @@ +;; You should have at least one expect:required. This ensures that your process ran +(expect:required in "LogFileBody" > 0 "Last thing done is chmod ..." #/chmod /) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! 
+(expect:ignore in "LogFileBody" >= 0 "Ignore someword-errors" #/\w+-error/) +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/eggs/install.sh Index: tests/installall/tests/eggs/install.sh ================================================================== --- /dev/null +++ tests/installall/tests/eggs/install.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +# Run your step here + +source $PREFIX/buildsetup.sh + +$PREFIX/bin/chicken-install $PROX $EGG_NAME + ADDED tests/installall/tests/eggs/testconfig Index: tests/installall/tests/eggs/testconfig ================================================================== --- /dev/null +++ tests/installall/tests/eggs/testconfig @@ -0,0 +1,20 @@ +# Add additional steps here. Format is "stepname script" +[ezsteps] +install install.sh + +# Test requirements are specified here +[requirements] +waiton chicken setup +priority 9 + +# Iteration for your tests are controlled by the items section +[items] +EGG_NAME matchable readline apropos base64 regex-literals format regex-case test coops trace csv dot-locking posix-utils posix-extras directory-utils hostinfo tcp-server rpc csv-xml fmt json md5 ssax sxml-serializer sxml-modifications salmonella sql-de-lite postgresql + +# test_meta is a section for storing additional data on your test +[test_meta] +author matt +owner matt +description Download and install eggs with no significant prerequisites +tags tagone,tagtwo +reviewed never ADDED tests/installall/tests/ffcall/compile.logpro Index: tests/installall/tests/ffcall/compile.logpro ================================================================== --- /dev/null +++ tests/installall/tests/ffcall/compile.logpro @@ -0,0 +1,8 @@ +;; You should have at least one expect:required. 
This ensures that your process ran +(expect:required in "LogFileBody" > 0 "Leaving directory" #/Leaving directory/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/ffcall/compile.sh Index: tests/installall/tests/ffcall/compile.sh ================================================================== --- /dev/null +++ tests/installall/tests/ffcall/compile.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +# Run your step here + +source $PREFIX/buildsetup.sh + +cd ffcall +./configure --prefix=${PREFIX} --enable-shared +make ADDED tests/installall/tests/ffcall/download.logpro Index: tests/installall/tests/ffcall/download.logpro ================================================================== --- /dev/null +++ tests/installall/tests/ffcall/download.logpro @@ -0,0 +1,8 @@ +;; You should have at least one expect:required. This ensures that your process ran +(expect:required in "LogFileBody" > 0 "VERSION" #/ VERSION/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/ffcall/download.sh Index: tests/installall/tests/ffcall/download.sh ================================================================== --- /dev/null +++ tests/installall/tests/ffcall/download.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +# Run your step here + +source $PREFIX/buildsetup.sh + +if ! 
[[ -e ${DOWNLOADS}/ffcall.tar.gz ]] ; then + (cd ${DOWNLOADS};wget http://www.kiatoa.com/matt/iup/ffcall.tar.gz ) +fi + +tar xfvz ${DOWNLOADS}/ffcall.tar.gz + +ls -l ffcall ADDED tests/installall/tests/ffcall/install.logpro Index: tests/installall/tests/ffcall/install.logpro ================================================================== --- /dev/null +++ tests/installall/tests/ffcall/install.logpro @@ -0,0 +1,8 @@ +;; You should have at least one expect:required. This ensures that your process ran +(expect:required in "LogFileBody" > 0 "Leaving directory" #/Leaving directory/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/ffcall/install.sh Index: tests/installall/tests/ffcall/install.sh ================================================================== --- /dev/null +++ tests/installall/tests/ffcall/install.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +# Run your step here + +source $PREFIX/buildsetup.sh + +cd ffcall +make install ADDED tests/installall/tests/ffcall/testconfig Index: tests/installall/tests/ffcall/testconfig ================================================================== --- /dev/null +++ tests/installall/tests/ffcall/testconfig @@ -0,0 +1,20 @@ +# Add additional steps here. 
Format is "stepname script" +[ezsteps] +download download.sh +compile compile.sh +install install.sh + +# Test requirements are specified here +[requirements] +waiton setup + +# Iteration for your tests are controlled by the items section +[items] + +# test_meta is a section for storing additional data on your test +[test_meta] +author matt +owner matt +description Install the ffcall library +tags tagone,tagtwo +reviewed never ADDED tests/installall/tests/iup/install.logpro Index: tests/installall/tests/iup/install.logpro ================================================================== --- /dev/null +++ tests/installall/tests/iup/install.logpro @@ -0,0 +1,9 @@ +;; You should have at least one expect:required. This ensures that your process ran +(expect:required in "LogFileBody" > 0 "chmod is roughly last thing that happens" #/chmod /) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! +(expect:ignore in "LogFileBody" >= 0 "Ignore setup-error-handling" #/\w+-error/) +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/iup/install.sh Index: tests/installall/tests/iup/install.sh ================================================================== --- /dev/null +++ tests/installall/tests/iup/install.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +# Run your step here +source $PREFIX/buildsetup.sh +# source $PREFIX/setup-chicken4x.sh + +export CSCLIBS=`echo $LD_LIBRARY_PATH | sed 's/:/ -L/g'` +CSC_OPTIONS="-I$PREFIX/include -L$CSCLIBS" $PREFIX/bin/chicken-install $PROX -D no-library-checks -feature disable-iup-web iup +# CSC_OPTIONS="-I$PREFIX/include -L$CSCLIBS" $CHICKEN_INSTALL $PROX -D no-library-checks -feature disable-iup-web -deploy -prefix $DEPLOYTARG iup +# 
iup:1.0.2 +CSC_OPTIONS="-I$PREFIX/include -L$CSCLIBS" $PREFIX/bin/chicken-install $PROX -D no-library-checks canvas-draw +# CSC_OPTIONS="-I$PREFIX/include -L$CSCLIBS" $CHICKEN_INSTALL $PROX -D no-library-checks -deploy -prefix $DEPLOYTARG canvas-draw ADDED tests/installall/tests/iup/testconfig Index: tests/installall/tests/iup/testconfig ================================================================== --- /dev/null +++ tests/installall/tests/iup/testconfig @@ -0,0 +1,18 @@ +# Add additional steps here. Format is "stepname script" +[ezsteps] +install install.sh + +# Test requirements are specified here +[requirements] +waiton iup#{getenv IUPMODE}lib tougheggs + +# Iteration for your tests are controlled by the items section +[items] + +# test_meta is a section for storing additional data on your test +[test_meta] +author matt +owner matt +description Install iup egg +tags tagone,tagtwo +reviewed never ADDED tests/installall/tests/iupbinlib/compile.logpro Index: tests/installall/tests/iupbinlib/compile.logpro ================================================================== --- /dev/null +++ tests/installall/tests/iupbinlib/compile.logpro @@ -0,0 +1,8 @@ +;; You should have at least one expect:required. This ensures that your process ran +(expect:required in "LogFileBody" > 0 "Put description here" #/put pattern here/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! 
+(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/iupbinlib/compile.sh Index: tests/installall/tests/iupbinlib/compile.sh ================================================================== --- /dev/null +++ tests/installall/tests/iupbinlib/compile.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +# Run your step here + +source $PREFIX/buildsetup.sh ADDED tests/installall/tests/iupbinlib/download.logpro Index: tests/installall/tests/iupbinlib/download.logpro ================================================================== --- /dev/null +++ tests/installall/tests/iupbinlib/download.logpro @@ -0,0 +1,8 @@ +;; You should have at least one expect:required. This ensures that your process ran +(expect:required in "LogFileBody" > 0 "README file should show up" #/README/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! 
+(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/iupbinlib/download.sh Index: tests/installall/tests/iupbinlib/download.sh ================================================================== --- /dev/null +++ tests/installall/tests/iupbinlib/download.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +# Run your step here +source $PREFIX/buildsetup.sh +# source $PREFIX/setup-chicken4x.sh + +if [[ `uname -a | grep x86_64` == "" ]]; then + export ARCHSIZE='' +else + export ARCHSIZE=64_ +fi + # export files="cd-5.4.1_Linux${IUPLIB}_lib.tar.gz im-3.6.3_Linux${IUPLIB}_lib.tar.gz iup-3.5_Linux${IUPLIB}_lib.tar.gz" +if [[ x$USEOLDIUP == "x" ]];then + export files="cd-5.5.1_Linux${IUPLIB}_${ARCHSIZE}lib.tar.gz im-3.8_Linux${IUPLIB}_${ARCHSIZE}lib.tar.gz iup-3.6_Linux${IUPLIB}_${ARCHSIZE}lib.tar.gz" +else + echo WARNING: Using old IUP libraries + export files="cd-5.4.1_Linux${IUPLIB}_${ARCHSIZE}lib.tar.gz im-3.6.3_Linux${IUPLIB}_${ARCHSIZE}lib.tar.gz iup-3.5_Linux${IUPLIB}_${ARCHSIZE}lib.tar.gz" +fi + +mkdir -p $PREFIX/iuplib +for a in `echo $files` ; do + if ! [[ -e ${DOWNLOADS}/$a ]] ; then + (cd ${DOWNLOADS};wget http://www.kiatoa.com/matt/iup/$a) + fi + echo Untarring $a into $PREFIX/lib + (cd $PREFIX/lib;tar xfvz ${DOWNLOADS}/$a;mv include/* ../include) +done + ADDED tests/installall/tests/iupbinlib/install.logpro Index: tests/installall/tests/iupbinlib/install.logpro ================================================================== --- /dev/null +++ tests/installall/tests/iupbinlib/install.logpro @@ -0,0 +1,8 @@ +;; You should have at least one expect:required. 
This ensures that your process ran +(expect:required in "LogFileBody" > 0 "Put description here" #/put pattern here/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/iupbinlib/install.sh Index: tests/installall/tests/iupbinlib/install.sh ================================================================== --- /dev/null +++ tests/installall/tests/iupbinlib/install.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash + +# Run your step here +source $PREFIX/buildsetup.sh ADDED tests/installall/tests/iupbinlib/testconfig Index: tests/installall/tests/iupbinlib/testconfig ================================================================== --- /dev/null +++ tests/installall/tests/iupbinlib/testconfig @@ -0,0 +1,18 @@ +# Add additional steps here. Format is "stepname script" +[ezsteps] +download download.sh + +# Test requirements are specified here +[requirements] +waiton ffcall setup + +# Iteration for your tests are controlled by the items section +[items] + +# test_meta is a section for storing additional data on your test +[test_meta] +author matt +owner matt +description Install the iup library if it is not already installed +tags tagone,tagtwo +reviewed never ADDED tests/installall/tests/iupbinlib/untar.logpro Index: tests/installall/tests/iupbinlib/untar.logpro ================================================================== --- /dev/null +++ tests/installall/tests/iupbinlib/untar.logpro @@ -0,0 +1,8 @@ +;; You should have at least one expect:required. 
This ensures that your process ran +(expect:required in "LogFileBody" > 0 "Put description here" #/put pattern here/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/iupbinlib/untar.sh Index: tests/installall/tests/iupbinlib/untar.sh ================================================================== --- /dev/null +++ tests/installall/tests/iupbinlib/untar.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash + +# Run your step here +source $PREFIX/buildsetup.sh ADDED tests/installall/tests/iupsrclib/cd.logpro Index: tests/installall/tests/iupsrclib/cd.logpro ================================================================== --- /dev/null +++ tests/installall/tests/iupsrclib/cd.logpro @@ -0,0 +1,3 @@ +(expect:ignore in "LogFileBody" >= 0 "Ignore these binary operator errors for now" #/error: missing binary operator/) + +(load "compile.logpro") ADDED tests/installall/tests/iupsrclib/compile.logpro Index: tests/installall/tests/iupsrclib/compile.logpro ================================================================== --- /dev/null +++ tests/installall/tests/iupsrclib/compile.logpro @@ -0,0 +1,12 @@ +;; You should have at least one expect:required. This ensures that your process ran +(expect:required in "LogFileBody" > 0 "Completed signature" #/(Dynamic Library.*Done|Leaving directory|Nothing to be done)/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! 
+(expect:ignore in "LogFileBody" >= 0 "Ignore files with error in name" #/error.[ch]/) +(expect:ignore in "LogFileBody" >= 0 "Ignore files with errors in name" #/errors.[ch]/) +(expect:ignore in "LogFileBody" >= 0 "Ignore files with warn in name" #/warning.[ch]/) + +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/iupsrclib/compile.sh Index: tests/installall/tests/iupsrclib/compile.sh ================================================================== --- /dev/null +++ tests/installall/tests/iupsrclib/compile.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +# Run your step here + +pkg=$1 + +source $PREFIX/buildsetup.sh + +export LUA_SUFFIX= +export LUA_INC=$MT_TEST_RUN_DIR/lua52/include + +if [[ $pkg == "lua52" ]]; then + (cd $pkg/src;make $PLATFORM) +else + (cd $pkg/src;make) +fi + ADDED tests/installall/tests/iupsrclib/download.logpro Index: tests/installall/tests/iupsrclib/download.logpro ================================================================== --- /dev/null +++ tests/installall/tests/iupsrclib/download.logpro @@ -0,0 +1,13 @@ +;; You should have at least one expect:required. This ensures that your process ran +(expect:required in "LogFileBody" > 0 "README file should show up" #/README/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! 
+ +(expect:ignore in "LogFileBody" >= 0 "Ignore files with error in name" #/error.[ch]/) +(expect:ignore in "LogFileBody" >= 0 "Ignore files with errors in name" #/errors.[ch]/) +(expect:ignore in "LogFileBody" >= 0 "Ignore files with warn in name" #/warning.[ch]/) + +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/iupsrclib/download.sh Index: tests/installall/tests/iupsrclib/download.sh ================================================================== --- /dev/null +++ tests/installall/tests/iupsrclib/download.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +# Run your step here +source $PREFIX/buildsetup.sh + +mkdir -p $PREFIX/iuplib +for a in cd-5.6.1_Sources.tar.gz im-3.8.1_Sources.tar.gz iup-3.8_Sources.tar.gz lua-5.2.1_Sources.tar.gz; do + if ! [[ -e ${DOWNLOADS}/$a ]] ; then + (cd ${DOWNLOADS};wget http://www.kiatoa.com/matt/iup/$a) + fi + tar xfvz ${DOWNLOADS}/$a +done + +find . -type d -exec chmod ug+x {} \; ADDED tests/installall/tests/iupsrclib/im.logpro Index: tests/installall/tests/iupsrclib/im.logpro ================================================================== --- /dev/null +++ tests/installall/tests/iupsrclib/im.logpro @@ -0,0 +1,1 @@ +(load "compile.logpro") ADDED tests/installall/tests/iupsrclib/install.logpro Index: tests/installall/tests/iupsrclib/install.logpro ================================================================== --- /dev/null +++ tests/installall/tests/iupsrclib/install.logpro @@ -0,0 +1,8 @@ +;; You should have at least one expect:required. This ensures that your process ran +(expect:required in "LogFileBody" > 0 "Put description here" #/put pattern here/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! 
+(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/iupsrclib/install.sh Index: tests/installall/tests/iupsrclib/install.sh ================================================================== --- /dev/null +++ tests/installall/tests/iupsrclib/install.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +# Run your step here +source $PREFIX/buildsetup.sh + +# The so files +cp -f im/lib/Linux26g4/*.so $PREFIX/lib +cp -f cd/lib/Linux26g4/*.so $PREFIX/lib +cp -f iup/lib/Linux26g4/*.so $PREFIX/lib + +# The development files +mkdir -p $PREFIX/include/im +cp -fR im/include/*.h $PREFIX/include/im +cp -f im/lib/Linux26g4/*.a $PREFIX/lib + +mkdir -p $PREFIX/include/cd +cp -f cd/include/*.h $PREFIX/include/cd +cp -f cd/lib/Linux26g4/*.a $PREFIX/lib + +mkdir -p $PREFIX/include/iup +cp -f iup/include/*.h $PREFIX/include/iup +cp -f iup/lib/Linux26g4/*.a $PREFIX/lib ADDED tests/installall/tests/iupsrclib/iup.logpro Index: tests/installall/tests/iupsrclib/iup.logpro ================================================================== --- /dev/null +++ tests/installall/tests/iupsrclib/iup.logpro @@ -0,0 +1,3 @@ +(expect:ignore in "LogFileBody" >= 0 "Ignore these binary operator errors for now" #/error: missing binary operator/ expires: "10/10/2013") + +(load "compile.logpro") ADDED tests/installall/tests/iupsrclib/lua.logpro Index: tests/installall/tests/iupsrclib/lua.logpro ================================================================== --- /dev/null +++ tests/installall/tests/iupsrclib/lua.logpro @@ -0,0 +1,1 @@ +(load "compile.logpro") ADDED tests/installall/tests/iupsrclib/testconfig Index: tests/installall/tests/iupsrclib/testconfig ================================================================== --- /dev/null +++
tests/installall/tests/iupsrclib/testconfig @@ -0,0 +1,22 @@ +# Add additional steps here. Format is "stepname script" +[ezsteps] +download download.sh +lua compile.sh lua52 +im compile.sh im +cd compile.sh cd +iup compile.sh iup + +# Test requirements are specified here +[requirements] +waiton ffcall setup + +# Iteration for your tests are controlled by the items section +[items] + +# test_meta is a section for storing additional data on your test +[test_meta] +author matt +owner matt +description Install the iup library if it is not already installed +tags tagone,tagtwo +reviewed never ADDED tests/installall/tests/iupsrclib/untar.logpro Index: tests/installall/tests/iupsrclib/untar.logpro ================================================================== --- /dev/null +++ tests/installall/tests/iupsrclib/untar.logpro @@ -0,0 +1,8 @@ +;; You should have at least one expect:required. This ensures that your process ran +(expect:required in "LogFileBody" > 0 "Put description here" #/put pattern here/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! 
+(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/iupsrclib/untar.sh Index: tests/installall/tests/iupsrclib/untar.sh ================================================================== --- /dev/null +++ tests/installall/tests/iupsrclib/untar.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash + +# Run your step here +source $PREFIX/buildsetup.sh ADDED tests/installall/tests/mmisc/clone.logpro Index: tests/installall/tests/mmisc/clone.logpro ================================================================== --- /dev/null +++ tests/installall/tests/mmisc/clone.logpro @@ -0,0 +1,8 @@ +;; You should have at least one expect:required. This ensures that your process ran +(expect:required in "LogFileBody" > 0 "Output from fossil" #/^repository:\s+/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! 
+(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/i) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/mmisc/clone.sh Index: tests/installall/tests/mmisc/clone.sh ================================================================== --- /dev/null +++ tests/installall/tests/mmisc/clone.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +# Run your step here +source $PREFIX/buildsetup.sh + +fossil clone http://www.kiatoa.com/fossils/$FSLPKG $FSLPKG.fossil + +mkdir src +cd src +fossil open ../$FSLPKG.fossil --nested +fossil co $(eval echo \$${FSLPKG}_VERSION) ADDED tests/installall/tests/mmisc/install.logpro Index: tests/installall/tests/mmisc/install.logpro ================================================================== --- /dev/null +++ tests/installall/tests/mmisc/install.logpro @@ -0,0 +1,9 @@ +;; You should have at least one expect:required. This ensures that your process ran +(expect:required in "LogFileBody" > 0 "Always get a chmod at the end of install" #/chmod.*logpro.setup-info/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here!
+(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in setup-error-handling" #/setup-error-handling/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/i) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/mmisc/install.sh Index: tests/installall/tests/mmisc/install.sh ================================================================== --- /dev/null +++ tests/installall/tests/mmisc/install.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +# Run your step here +source $PREFIX/buildsetup.sh +cd src +if [ $FSLPKG == "logpro" ];then + chicken-install +elif [ $FSLPKG == "stml" ];then + cp install.cfg.template install.cfg + cp requirements.scm.template requirements.scm + make + make install +else + make + make install PREFIX=$PREFIX +fi ADDED tests/installall/tests/mmisc/testconfig Index: tests/installall/tests/mmisc/testconfig ================================================================== --- /dev/null +++ tests/installall/tests/mmisc/testconfig @@ -0,0 +1,21 @@ +# Add additional steps here. Format is "stepname script" +[ezsteps] +clone clone.sh +install install.sh + +# Test requirements are specified here +[requirements] +waiton eggs setup + +# Iteration for your tests are controlled by the items section +[items] +FSLPKG logpro stml megatest + + +# test_meta is a section for storing additional data on your test +[test_meta] +author matt +owner matt +description Install the logpro tool +tags tagone,tagtwo +reviewed never ADDED tests/installall/tests/opensrc/clone.logpro Index: tests/installall/tests/opensrc/clone.logpro ================================================================== --- /dev/null +++ tests/installall/tests/opensrc/clone.logpro @@ -0,0 +1,8 @@ +;; You should have at least one expect:required. 
This ensures that your process ran +(expect:required in "LogFileBody" > 0 "Output from fossil" (list #/^repository:\s+/ #/comment:/)) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/i) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/opensrc/clone.sh Index: tests/installall/tests/opensrc/clone.sh ================================================================== --- /dev/null +++ tests/installall/tests/opensrc/clone.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +# Run your step here +source $PREFIX/buildsetup.sh + +parentdir=$MT_LINKTREE/$MT_TARGET/$MT_RUNNAME + +lockfile $parentdir/clone.lock +if [ ! -e $parentdir/opensrc.fossil ];then + fossil clone http://www.kiatoa.com/fossils/opensrc $parentdir/opensrc.fossil +fi + +if [ ! -e $parentdir/src/dbi ];then + mkdir -p $parentdir/src + (cd $parentdir/src;fossil open $parentdir/opensrc.fossil --nested) +else + (cd $parentdir/src;fossil sync;fossil co trunk;fossil status) +fi +rm -f $parentdir/clone.lock + +ln -sf $parentdir/src $MT_TEST_RUN_DIR/src + ADDED tests/installall/tests/opensrc/install.logpro Index: tests/installall/tests/opensrc/install.logpro ================================================================== --- /dev/null +++ tests/installall/tests/opensrc/install.logpro @@ -0,0 +1,9 @@ +;; You should have at least one expect:required. This ensures that your process ran +(expect:required in "LogFileBody" > 0 "Always get a chmod at the end of install" #/chmod.*.setup-info/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! 
+(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in setup-error-handling" #/setup-error-handling/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/i) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/opensrc/install.sh Index: tests/installall/tests/opensrc/install.sh ================================================================== --- /dev/null +++ tests/installall/tests/opensrc/install.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +# Run your step here +source $PREFIX/buildsetup.sh +cd src/$MODULE_NAME +chicken-install ADDED tests/installall/tests/opensrc/testconfig Index: tests/installall/tests/opensrc/testconfig ================================================================== --- /dev/null +++ tests/installall/tests/opensrc/testconfig @@ -0,0 +1,20 @@ +# Add additional steps here. Format is "stepname script" +[ezsteps] +clone clone.sh +install install.sh + +# Test requirements are specified here +[requirements] +waiton eggs setup sqlite3 + +# Iteration for your tests are controlled by the items section +[items] +MODULE_NAME dbi margs qtree vcd xfig mutils + +# test_meta is a section for storing additional data on your test +[test_meta] +author matt +owner matt +description Install the eggs from the opensrc fossil +tags tagone,tagtwo +reviewed never ADDED tests/installall/tests/setup/setup.logpro Index: tests/installall/tests/setup/setup.logpro ================================================================== --- /dev/null +++ tests/installall/tests/setup/setup.logpro @@ -0,0 +1,10 @@ +;; You should have at least one expect:required. This ensures that your process ran +(expect:required in "LogFileBody" > 0 "ALL DONE" #/ALL DONE$/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! 
+ + +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/setup/setup.sh Index: tests/installall/tests/setup/setup.sh ================================================================== --- /dev/null +++ tests/installall/tests/setup/setup.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +# Run your step here + +cksetupsh=$PREFIX/setup-chicken4x.sh +cksetupcsh=$PREFIX/setup-chicken4x.csh +setupsh=$PREFIX/buildsetup.sh + +# make a cache dir +mkdir -p $DOWNLOADS +mkdir -p $PREFIX + +# File for users to source to run chicken +echo "# Source me to setup to run chicken" > $cksetupsh +echo "export PATH=$PREFIX/bin:\$PATH" >> $cksetupsh +echo "export LD_LIBRARY_PATH=$PREFIX/lib" >> $cksetupsh + +# tcsh version +echo "setenv PATH $PREFIX/bin:\$PATH" > $cksetupcsh +echo "setenv LD_LIBRARY_PATH $PREFIX/lib" >> $cksetupcsh + +# File to source for build process +echo "export PATH=$PREFIX/bin:\$PATH" > $setupsh +echo "export LD_LIBRARY_PATH=$PREFIX/lib" >> $setupsh + +if [[ $proxy == "" ]]; then + echo 'Please set the environment variable "proxy" to host.com:port (e.g. foo.com:1234) to use a proxy' +else + echo "export http_proxy=http://$proxy" >> $setupsh + echo "export PROX=\"-proxy $proxy\"" >> $setupsh +fi + +echo "export PREFIX=$PREFIX" >> $setupsh + +echo ALL DONE ADDED tests/installall/tests/setup/testconfig Index: tests/installall/tests/setup/testconfig ================================================================== --- /dev/null +++ tests/installall/tests/setup/testconfig @@ -0,0 +1,18 @@ +# Add additional steps here.
Format is "stepname script" +[ezsteps] +setup setup.sh + +# Test requirements are specified here +[requirements] +# priority 10 + +# Iteration for your tests are controlled by the items section +[items] + +# test_meta is a section for storing additional data on your test +[test_meta] +author matt +owner matt +description Download and install chicken scheme +tags tagone,tagtwo +reviewed never ADDED tests/installall/tests/sqlite3/compile.logpro Index: tests/installall/tests/sqlite3/compile.logpro ================================================================== --- /dev/null +++ tests/installall/tests/sqlite3/compile.logpro @@ -0,0 +1,9 @@ +;; You should have at least one expect:required. This ensures that your process ran +(expect:required in "LogFileBody" > 0 "Leaving directory" #/(Leaving directory|Nothing to be done for|creating sqlite3)/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! +(expect:ignore in "LogFileBody" >= 0 "Ignore strerror_r" #/strerror_r/i) +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/sqlite3/compile.sh Index: tests/installall/tests/sqlite3/compile.sh ================================================================== --- /dev/null +++ tests/installall/tests/sqlite3/compile.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +# Run your step here + +source $PREFIX/buildsetup.sh + +cd sqlite-autoconf-$SQLITE3_VERSION +./configure --prefix=$PREFIX + +make ADDED tests/installall/tests/sqlite3/download.logpro Index: tests/installall/tests/sqlite3/download.logpro ================================================================== --- /dev/null +++ tests/installall/tests/sqlite3/download.logpro @@ -0,0 +1,8 @@ +;; You should have at least one 
expect:required. This ensures that your process ran +(expect:required in "LogFileBody" > 0 "sqlite-autoconf" #/sqlite-autoconf/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/sqlite3/download.sh Index: tests/installall/tests/sqlite3/download.sh ================================================================== --- /dev/null +++ tests/installall/tests/sqlite3/download.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +# Run your step here + +source $PREFIX/buildsetup.sh + +echo Install sqlite3 +if ! [[ -e ${DOWNLOADS}/sqlite-autoconf-${SQLITE3_VERSION}.tar.gz ]]; then + (cd ${DOWNLOADS};wget http://www.sqlite.org/sqlite-autoconf-${SQLITE3_VERSION}.tar.gz) +fi + +tar xfz ${DOWNLOADS}/sqlite-autoconf-${SQLITE3_VERSION}.tar.gz + +ls -l sqlite-autoconf-${SQLITE3_VERSION}.tar.gz ADDED tests/installall/tests/sqlite3/install.logpro Index: tests/installall/tests/sqlite3/install.logpro ================================================================== --- /dev/null +++ tests/installall/tests/sqlite3/install.logpro @@ -0,0 +1,8 @@ +;; You should have at least one expect:required. This ensures that your process ran +(expect:required in "LogFileBody" > 0 "Leaving directory" #/Leaving directory/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! 
+(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/sqlite3/install.sh Index: tests/installall/tests/sqlite3/install.sh ================================================================== --- /dev/null +++ tests/installall/tests/sqlite3/install.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +# Run your step here + +source $PREFIX/buildsetup.sh + +cd sqlite-autoconf-$SQLITE3_VERSION +make install + ADDED tests/installall/tests/sqlite3/installegg.logpro Index: tests/installall/tests/sqlite3/installegg.logpro ================================================================== --- /dev/null +++ tests/installall/tests/sqlite3/installegg.logpro @@ -0,0 +1,9 @@ +;; You should have at least one expect:required. This ensures that your process ran +(expect:required in "LogFileBody" > 0 "chmod sqlite3" #/chmod.*sqlite3/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! 
+(expect:ignore in "LogFileBody" >= 0 "Ignore setup-error-handling" #/setup-error-handling/) +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/sqlite3/installegg.sh Index: tests/installall/tests/sqlite3/installegg.sh ================================================================== --- /dev/null +++ tests/installall/tests/sqlite3/installegg.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +# Run your step here + +source $PREFIX/buildsetup.sh + +CSC_OPTIONS="-I$PREFIX/include -L$PREFIX/lib" $PREFIX/bin/chicken-install $PROX sqlite3 ADDED tests/installall/tests/sqlite3/testconfig Index: tests/installall/tests/sqlite3/testconfig ================================================================== --- /dev/null +++ tests/installall/tests/sqlite3/testconfig @@ -0,0 +1,24 @@ +# Add additional steps here. Format is "stepname script" +[ezsteps] +download download.sh +compile compile.sh +install install.sh +installegg installegg.sh + +# Test requirements are specified here +[requirements] +# We waiton chicken because this one installs the egg. It would behove us to split this +# into two tests ... +waiton tougheggs +priority 2 + +# Iteration for your tests are controlled by the items section +[items] + +# test_meta is a section for storing additional data on your test +[test_meta] +author matt +owner matt +description Install sqlite3 library for systems where it is not installed +tags tagone,tagtwo +reviewed never ADDED tests/installall/tests/tougheggs/install.logpro Index: tests/installall/tests/tougheggs/install.logpro ================================================================== --- /dev/null +++ tests/installall/tests/tougheggs/install.logpro @@ -0,0 +1,9 @@ +;; You should have at least one expect:required. 
This ensures that your process ran +(expect:required in "LogFileBody" > 0 "Last thing done is chmod ..." #/chmod /) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! +(expect:ignore in "LogFileBody" >= 0 "Ignore someword-errors" #/\w+-error/) +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/tougheggs/install.sh Index: tests/installall/tests/tougheggs/install.sh ================================================================== --- /dev/null +++ tests/installall/tests/tougheggs/install.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +# Run your step here + +source $PREFIX/buildsetup.sh + +lockfile $PREFIX/eggs.lock +$PREFIX/bin/chicken-install $PROX $EGG_NAME +rm -f $PREFIX/eggs.lock ADDED tests/installall/tests/tougheggs/testconfig Index: tests/installall/tests/tougheggs/testconfig ================================================================== --- /dev/null +++ tests/installall/tests/tougheggs/testconfig @@ -0,0 +1,19 @@ +# Add additional steps here. 
Format is "stepname script" +[ezsteps] +install install.sh + +# Test requirements are specified here +[requirements] +waiton eggs + +# Iteration for your tests are controlled by the items section +[items] +EGG_NAME intarweb http-client awful uri-common spiffy-request-vars spiffy apropos spiffy-directory-listing + +# test_meta is a section for storing additional data on your test +[test_meta] +author matt +owner matt +description Download and install eggs with no significant prerequisites +tags tagone,tagtwo +reviewed never ADDED tests/installall/tests/zmq/install.logpro Index: tests/installall/tests/zmq/install.logpro ================================================================== --- /dev/null +++ tests/installall/tests/zmq/install.logpro @@ -0,0 +1,8 @@ +;; You should have at least one expect:required. This ensures that your process ran +(expect:required in "LogFileBody" > 0 "Put description here" #/put pattern here/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/zmq/install.sh Index: tests/installall/tests/zmq/install.sh ================================================================== --- /dev/null +++ tests/installall/tests/zmq/install.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash + +# Run your step here +source $PREFIX/buildsetup.sh ADDED tests/installall/tests/zmq/testconfig Index: tests/installall/tests/zmq/testconfig ================================================================== --- /dev/null +++ tests/installall/tests/zmq/testconfig @@ -0,0 +1,18 @@ +# Add additional steps here. 
Format is "stepname script" +[ezsteps] +install install.sh + +# Test requirements are specified here +[requirements] +waiton zmqlib chicken setup + +# Iteration for your tests are controlled by the items section +[items] + +# test_meta is a section for storing additional data on your test +[test_meta] +author matt +owner matt +description Install the zmq egg +tags tagone,tagtwo +reviewed never ADDED tests/installall/tests/zmqlib/compile.logpro Index: tests/installall/tests/zmqlib/compile.logpro ================================================================== --- /dev/null +++ tests/installall/tests/zmqlib/compile.logpro @@ -0,0 +1,8 @@ +;; You should have at least one expect:required. This ensures that your process ran +(expect:required in "LogFileBody" > 0 "Put description here" #/put pattern here/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/zmqlib/compile.sh Index: tests/installall/tests/zmqlib/compile.sh ================================================================== --- /dev/null +++ tests/installall/tests/zmqlib/compile.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash + +# Run your step here +source $PREFIX/buildsetup.sh ADDED tests/installall/tests/zmqlib/download.logpro Index: tests/installall/tests/zmqlib/download.logpro ================================================================== --- /dev/null +++ tests/installall/tests/zmqlib/download.logpro @@ -0,0 +1,8 @@ +;; You should have at least one expect:required. 
This ensures that your process ran +(expect:required in "LogFileBody" > 0 "Put description here" #/put pattern here/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/zmqlib/download.sh Index: tests/installall/tests/zmqlib/download.sh ================================================================== --- /dev/null +++ tests/installall/tests/zmqlib/download.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash + +# Run your step here +source $PREFIX/buildsetup.sh ADDED tests/installall/tests/zmqlib/install.logpro Index: tests/installall/tests/zmqlib/install.logpro ================================================================== --- /dev/null +++ tests/installall/tests/zmqlib/install.logpro @@ -0,0 +1,8 @@ +;; You should have at least one expect:required. This ensures that your process ran +(expect:required in "LogFileBody" > 0 "Put description here" #/put pattern here/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! 
+(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/zmqlib/install.sh Index: tests/installall/tests/zmqlib/install.sh ================================================================== --- /dev/null +++ tests/installall/tests/zmqlib/install.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +# Run your step here + +source $PREFIX/buildsetup.sh ADDED tests/installall/tests/zmqlib/testconfig Index: tests/installall/tests/zmqlib/testconfig ================================================================== --- /dev/null +++ tests/installall/tests/zmqlib/testconfig @@ -0,0 +1,21 @@ +# Add additional steps here. Format is "stepname script" +[ezsteps] +download download.sh +untar untar.sh +compile compile.sh +install install.sh + +# Test requirements are specified here +[requirements] +waiton setup + +# Iteration for your tests are controlled by the items section +[items] + +# test_meta is a section for storing additional data on your test +[test_meta] +author matt +owner matt +description Install the zmq library if it doesn't already exist +tags tagone,tagtwo +reviewed never ADDED tests/installall/tests/zmqlib/untar.logpro Index: tests/installall/tests/zmqlib/untar.logpro ================================================================== --- /dev/null +++ tests/installall/tests/zmqlib/untar.logpro @@ -0,0 +1,8 @@ +;; You should have at least one expect:required. This ensures that your process ran +(expect:required in "LogFileBody" > 0 "Put description here" #/put pattern here/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! 
+(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/i)) ;; but disallow any other errors ADDED tests/installall/tests/zmqlib/untar.sh Index: tests/installall/tests/zmqlib/untar.sh ================================================================== --- /dev/null +++ tests/installall/tests/zmqlib/untar.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash + +# Run your step here +source $PREFIX/buildsetup.sh ADDED tests/manual.sh Index: tests/manual.sh ================================================================== --- /dev/null +++ tests/manual.sh @@ -0,0 +1,1 @@ +(cd ..;make install) && `realpath ../bin/megatest` -runtests manual_example :sysname ubuntu :fsname afs :datapath none :runname testing -setvars TARGETDISPLAY=:0,TARGETHOST=localhost,TARGETDIR=/tmp/blah,TARGETUSER=matt ADDED tests/mintest/megatest.config Index: tests/mintest/megatest.config ================================================================== --- /dev/null +++ tests/mintest/megatest.config @@ -0,0 +1,17 @@ +[fields] +X TEXT + +[setup] +max_concurrent_jobs 50 +linktree #{getenv MT_RUN_AREA_HOME}/linktree +transport http + +[server] +port 8090 + +[jobtools] +useshell yes +launcher nbfind + +[disks] +disk0 #{getenv PWD}/runs ADDED tests/mintest/runconfigs.config Index: tests/mintest/runconfigs.config ================================================================== --- /dev/null +++ tests/mintest/runconfigs.config @@ -0,0 +1,6 @@ +[default] +ALLTESTS see this variable + +# Your variables here are grouped by targets [SYSTEM/RELEASE] +[a] +ANOTHERVAR only defined if target is "a" ADDED tests/mintest/tests/a/testconfig Index: tests/mintest/tests/a/testconfig ================================================================== --- /dev/null +++ tests/mintest/tests/a/testconfig @@ -0,0 +1,6 @@ +# Add steps here. 
Format is "stepname script" +[ezsteps] +step1 echo SUCCESS + +[requirements] +waiton b ADDED tests/mintest/tests/a1/testconfig Index: tests/mintest/tests/a1/testconfig ================================================================== --- /dev/null +++ tests/mintest/tests/a1/testconfig @@ -0,0 +1,6 @@ +# Add steps here. Format is "stepname script" +[ezsteps] +step1 echo SUCCESS + +[requirements] +waiton b1 ADDED tests/mintest/tests/b/testconfig Index: tests/mintest/tests/b/testconfig ================================================================== --- /dev/null +++ tests/mintest/tests/b/testconfig @@ -0,0 +1,6 @@ +# Add steps here. Format is "stepname script" +[ezsteps] +step1 echo SUCCESS + +[requirements] +waiton c ADDED tests/mintest/tests/b1/testconfig Index: tests/mintest/tests/b1/testconfig ================================================================== --- /dev/null +++ tests/mintest/tests/b1/testconfig @@ -0,0 +1,6 @@ +# Add steps here. Format is "stepname script" +[ezsteps] +step1 echo SUCCESS + +[requirements] +waiton c1 ADDED tests/mintest/tests/c/testconfig Index: tests/mintest/tests/c/testconfig ================================================================== --- /dev/null +++ tests/mintest/tests/c/testconfig @@ -0,0 +1,6 @@ +# Add steps here. Format is "stepname script" +[ezsteps] +step1 echo SUCCESS + +[requirements] +waiton d ADDED tests/mintest/tests/c1/testconfig Index: tests/mintest/tests/c1/testconfig ================================================================== --- /dev/null +++ tests/mintest/tests/c1/testconfig @@ -0,0 +1,6 @@ +# Add steps here. Format is "stepname script" +[ezsteps] +step1 echo SUCCESS + +[requirements] +waiton d1fail ADDED tests/mintest/tests/d/testconfig Index: tests/mintest/tests/d/testconfig ================================================================== --- /dev/null +++ tests/mintest/tests/d/testconfig @@ -0,0 +1,6 @@ +# Add steps here. 
Format is "stepname script" +[ezsteps] +step1 echo SUCCESS + +[requirements] +waiton e ADDED tests/mintest/tests/d1fail/testconfig Index: tests/mintest/tests/d1fail/testconfig ================================================================== --- /dev/null +++ tests/mintest/tests/d1fail/testconfig @@ -0,0 +1,7 @@ +# Add steps here. Format is "stepname script" +[ezsteps] +step1 echo SUCCESS +step2 exit 123 + +[requirements] +waiton e1 ADDED tests/mintest/tests/e/testconfig Index: tests/mintest/tests/e/testconfig ================================================================== --- /dev/null +++ tests/mintest/tests/e/testconfig @@ -0,0 +1,4 @@ +# Add steps here. Format is "stepname script" +[ezsteps] +step1 echo SUCCESS + ADDED tests/mintest/tests/e1/testconfig Index: tests/mintest/tests/e1/testconfig ================================================================== --- /dev/null +++ tests/mintest/tests/e1/testconfig @@ -0,0 +1,4 @@ +# Add steps here. Format is "stepname script" +[ezsteps] +step1 echo SUCCESS + ADDED tests/mintest/tests/f/testconfig Index: tests/mintest/tests/f/testconfig ================================================================== --- /dev/null +++ tests/mintest/tests/f/testconfig @@ -0,0 +1,6 @@ +# Add steps here. Format is "stepname script" +[ezsteps] +step1 echo SUCCESS + +[requirements] +waiton ADDED tests/mintest/tests/g/testconfig Index: tests/mintest/tests/g/testconfig ================================================================== --- /dev/null +++ tests/mintest/tests/g/testconfig @@ -0,0 +1,9 @@ +# Add steps here. Format is "stepname script" +[ezsteps] +step1 echo SUCCESS + +[requirements] +waiton b + +[items] +NADA ADDED tests/mintest/tests/h/testconfig Index: tests/mintest/tests/h/testconfig ================================================================== --- /dev/null +++ tests/mintest/tests/h/testconfig @@ -0,0 +1,6 @@ +# Add steps here. 
Format is "stepname script" +[ezsteps] +step1 echo SUCCESS + +[requirements] +waiton b ADDED tests/mintest/tests/i/testconfig Index: tests/mintest/tests/i/testconfig ================================================================== --- /dev/null +++ tests/mintest/tests/i/testconfig @@ -0,0 +1,6 @@ +# Add steps here. Format is "stepname script" +[ezsteps] +step1 echo SUCCESS + +[requirements] +waiton b ADDED tests/mintest/tests/j/testconfig Index: tests/mintest/tests/j/testconfig ================================================================== --- /dev/null +++ tests/mintest/tests/j/testconfig @@ -0,0 +1,6 @@ +# Add steps here. Format is "stepname script" +[ezsteps] +step1 echo SUCCESS + +[requirements] +waiton b ADDED tests/mintest/tests/k/testconfig Index: tests/mintest/tests/k/testconfig ================================================================== --- /dev/null +++ tests/mintest/tests/k/testconfig @@ -0,0 +1,6 @@ +# Add steps here. Format is "stepname script" +[ezsteps] +step1 echo SUCCESS + +[requirements] +waiton b ADDED tests/mintest/tests/l/testconfig Index: tests/mintest/tests/l/testconfig ================================================================== --- /dev/null +++ tests/mintest/tests/l/testconfig @@ -0,0 +1,6 @@ +# Add steps here. 
Format is "stepname script" +[ezsteps] +step1 echo SUCCESS + +[requirements] +waiton b ADDED tests/ods-test.scm Index: tests/ods-test.scm ================================================================== --- /dev/null +++ tests/ods-test.scm @@ -0,0 +1,13 @@ +(load "ods.scm") + +(ods:list->ods + "testing" + "testing.ods" + '((Sheet1 ("Row 1,A" "Row 1,B") + ("Row 2,A" "Row 2,B")) + (Sheet2 (1 2) + (3 4) + () + ("This is sheet 2")) + (Sheet_3 ("Test" "Item Path" "Category" "Value" "Comment") + ("LVS_esd" "eb8zxffd" "Cells" "n")))) ADDED tests/release/Makefile Index: tests/release/Makefile ================================================================== --- /dev/null +++ tests/release/Makefile @@ -0,0 +1,10 @@ + + +dashboard : compile + dashboard -rows 24 & + +compile : runs + cd ../..;make -j install + +runs : + mkdir -p runs ADDED tests/release/megatest.config Index: tests/release/megatest.config ================================================================== --- /dev/null +++ tests/release/megatest.config @@ -0,0 +1,22 @@ +[fields] +release TEXT +iteration TEXT + +[setup] +linktree #{getenv MT_RUN_AREA_HOME}/links +max_concurrent_jobs 100 +logviewer (%MTCMD%) 2> /dev/null > /dev/null +# htmlviewercmd firefox -new-window +htmlviewercmd arora + +[jobtools] +# launcher #{shell if which bsub > /dev/null;then echo bsub;else echo nbfake;fi} +launcher nbfake +maxload 2.5 + +[server] +required yes + +[disks] +disk0 #{getenv MT_RUN_AREA_HOME}/runs + ADDED tests/release/runconfigs.config Index: tests/release/runconfigs.config ================================================================== --- /dev/null +++ tests/release/runconfigs.config @@ -0,0 +1,9 @@ +[default] +MTRUNNER #{shell readlink -f #{getenv MT_RUN_AREA_HOME}/../../utils/mtrunner} +MTTESTDIR #{shell readlink -f #{getenv MT_RUN_AREA_HOME}/..} +MTPATH #{shell readlink -f #{getenv MT_RUN_AREA_HOME}/../../bin} + +[v1.60/15] + +[include atwork.config] + ADDED 
tests/release/tests/dependencies/simpleresults.logpro Index: tests/release/tests/dependencies/simpleresults.logpro ================================================================== --- /dev/null +++ tests/release/tests/dependencies/simpleresults.logpro @@ -0,0 +1,110 @@ +;; (c) 2006,2007,2008,2009 Matthew Welland matt@kiatoa.com +;; +;; License GPL. + +(define logbody "LogFileBody") + +(define pass-specs '( ;; testname num-expected max-runtime + ("setup" 1 20) + ("test1/layout/ptran" 1 20) + ("test1/schematic/ptran" 1 20) + ("test2/layout/ptran" 1 20) + ("test2/schematic/ptran" 1 20) + )) + +(define fail-specs '( ;; testname num-expected max-runtime + )) + +(define warn-specs '()) + +(define nost-specs '( + )) + +(define (check-one-test estate estatus testname count runtime) + (let* ((rxe (regexp (conc "^\\s+Test: " testname "(\\(.*|\\s+)\\s+State: " estate "\\s+Status: " estatus "\\s+Runtime:\\s+(\\d+)s"))) + (msg1 (conc testname " expecting count of " count)) + (msg2 (conc testname " expecting runtime less than " runtime))) + (expect:required in logbody = count msg1 rxe) + ;;(expect:value in logbody count < msg2 rxe) + )) + +;; Special cases +;; +(expect:ignore in logbody >= 0 "db_sync test might not have run" #/Test: db_sync/) +(expect:ignore in logbody >= 0 "all_toplevel may not yet be done" #/Test: all_toplevel/) +(expect:error in logbody = 0 "tests left in RUNNING state" #/State: RUNNING/) +(expect:required in logbody = 1 "priority_2 is KILLED" #/Test: priority_2\s+State: KILLED\s+Status: KILLED/) +(expect:required in logbody = 1 "priority_5 is either PASS or SKIP" #/Test: priority_5\s+State: COMPLETED\s+Status: (SKIP|PASS)/) +(expect:required in logbody = 1 "priority_7 is either PASS or SKIP" #/Test: priority_7\s+State: COMPLETED\s+Status: (SKIP|PASS)/) +(expect:required in logbody = 1 "testxz has 1 NOT_STARTED test" #/Test: testxz\s+State: NOT_STARTED/) +(expect:required in logbody = 1 "no items" #/Test: no_items\s+State: NOT_STARTED\s+Status: ZERO_ITEMS/) 
+(expect:warning in logbody = 1 "dynamic waiton" #/Test: dynamic_waiton/) +(expect:required in logbody = 29 "blocktestxz has 29 tests" #/Test: blocktestxz/) + +;; General cases +;; +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "PASS" testdat)) + pass-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "FAIL" testdat)) + fail-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "WARN" testdat)) + warn-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "NOT_STARTED" "PREQ_DISCARDED" testdat)) + nost-specs) + +;; Catch all. +;; +(expect:error in logbody = 0 "Tests not accounted for" #/Test: /) + + +;; ;; define your hooks +;; (hook:first-error "echo \"Error hook activated: #{escaped errmsg}\"") +;; (hook:first-warning "echo \"Got warning: #{escaped warnmsg}\"") +;; (hook:value "echo \"Value hook activated: expected=#{expected}, measured=#{measured}, tolerance=#{tolerance}, message=#{message}\"") +;; +;; ;; first ensure your run at least started +;; ;; +;; (trigger "Init" #/This is a header/) +;; (trigger "InitEnd" #/^\s*$/) +;; (section "Init" "Init" "InitEnd") +;; +;; (trigger "Body" #/^.*$/) ;; anything starts the body +;; ;; (trigger "EndBody" #/This had better never match/) +;; +;; (section "Body" "Body" "EndBody") +;; +;; (trigger "Blah2" #/^begin Blah2/) +;; (trigger "Blah2End" #/^end Blah2/) +;; (section "Blah2" "Blah2" "Blah2End") +;; +;; (expect:required in "Init" = 1 "Header" #/This is a header/) +;; (expect:required in "LogFileBody" > 0 "Something required but not found" #/This is required but not found/) +;; (expect:value in "LogFileBody" 1.9 0.1 "Output voltage" #/Measured voltage output:\s*([\d\.\+\-e]+)v/) +;; (expect:value in "LogFileBody" 0.5 0.1 "Output current" #/Measured output current:\s*([\d\.\+\-e]+)mA/) +;; (expect:value in "LogFileBody" 110e9 2e9 "A big number (first)" #/Freq:\s*([\d\.\+\-e]+)\s+Hz/) +;; (expect:value in "LogFileBody" 110e9 1e9 "A big number 
(second), hook not called" #/Freq:\s*([\d\.\+\-e]+)Hz/) +;; (expect:value in "LogFileBody" 110e9 1e9 "A big number (never activated)" #/Freq:\s*([\d\.\+\-e]+)zH/) +;; +;; ;; Using match number +;; (expect:value in "LogFileBody" 1.9 0.1 "Time Voltage" #/out: (\d+)\s+(\d+)/ match: 2) +;; +;; ;; Comparison instead of tolerance +;; (expect:value in "LogFileBody" 1.9 > "Time voltage" #/out: (\d+)\s+(\d+)/ match: 2) +;; +;; (expect:ignore in "Blah2" < 99 "FALSE ERROR" #/ERROR/) +;; (expect:ignore in "Body" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +;; (expect:warning in "Body" = 0 "Any warning" #/WARNING/) +;; (expect:error in "Body" = 0 "ERROR BLAH" (list #/ERROR/ #/error/)) ;; but disallow any other errors +;; +;; ;(expect in "Init" < 1 "Junk" #/This is bogus/) ADDED tests/release/tests/dependencies/testconfig Index: tests/release/tests/dependencies/testconfig ================================================================== --- /dev/null +++ tests/release/tests/dependencies/testconfig @@ -0,0 +1,12 @@ +# test2 from the tests/Makefile + +[var] +tname itemwait + +[ezsteps] + +# Set things up +cleansimple $MTRUNNER $MTTESTDIR/dep-tests $MTPATH megatest -remove-runs -testpatt % -target simple/0 -runname #{get var tname} +simple $MTRUNNER $MTTESTDIR/dep-tests $MTPATH megatest -run -testpatt test2/%/ptran -target simple/0 -runname #{get var tname} +simpleresults $MTRUNNER $MTTESTDIR/dep-tests $MTPATH megatest -list-runs #{get var tname} -target simple/0 + ADDED tests/release/tests/fullrun/results.logpro Index: tests/release/tests/fullrun/results.logpro ================================================================== --- /dev/null +++ tests/release/tests/fullrun/results.logpro @@ -0,0 +1,140 @@ +;; (c) 2006,2007,2008,2009 Matthew Welland matt@kiatoa.com +;; +;; License GPL. 
+ +(define logbody "LogFileBody") + +(define pass-specs '( ;; testname num-expected max-runtime + ("exit_0" 1 20) + ("ezlog_fail_then_pass" 1 20) + ("ezlog_pass" 1 20) + ("ez_pass" 1 20) + ("lineitem_pass" 1 20) + ("priority_1" 1 20) + ("priority_10" 1 20) + ("priority_10_waiton_1" 1 20) + ("priority_3" 1 20) + ("priority_4" 1 20) + ;; ("priority_5" 1 20) + ("priority_6" 1 20) +;; ("priority_7" 1 20) + ("priority_8" 1 20) + ("priority_9" 1 20) + ("runfirst" 7 20) + ("singletest" 1 20) + ("singletest2" 1 20) + ("special" 1 20) + ("sqlitespeed" 10 20) + ("test1" 1 20) + ("test2" 6 20) + ("test_mt_vars" 6 20) + )) + +(define fail-specs '( ;; testname num-expected max-runtime + ("exit_1" 1 20) + ("ez_exit2_fail" 1 20) + ("ez_fail" 1 20) + ("ez_fail_quick" 1 20) + ("ezlog_fail" 1 20) + ("lineitem_fail" 1 20) + ("logpro_required_fail" 1 20) + ("manual_example" 1 20) + ("neverrun" 1 20))) + +(define warn-specs '(("ezlog_warn" 1 20))) + +(define nost-specs '(("wait_no_items1" 1 20) + ("wait_no_items2" 1 20) + ("wait_no_items3" 1 20) + ("wait_no_items4" 1 20) + ;; ("no_items" 1 20) + )) + +(define (check-one-test estate estatus testname count runtime) + (let* ((rxe (regexp (conc "^\\s+Test: " testname "(\\(.*|\\s+)\\s+State: " estate "\\s+Status: " estatus "\\s+Runtime:\\s+(\\d+)s"))) + (msg1 (conc testname " expecting count of " count)) + (msg2 (conc testname " expecting runtime less than " runtime))) + (expect:required in logbody = count msg1 rxe) + ;;(expect:value in logbody count < msg2 rxe) + )) + +;; Special cases +;; +(expect:ignore in logbody >= 0 "db_sync test might not have run" #/Test: db_sync/) +(expect:ignore in logbody >= 0 "all_toplevel may not yet be done" #/Test: all_toplevel/) +(expect:error in logbody = 0 "tests left in RUNNING state" #/State: RUNNING/) +(expect:required in logbody = 1 "priority_2 is KILLED" #/Test: priority_2\s+State: KILLED\s+Status: KILLED/) +(expect:required in logbody = 1 "priority_5 is either PASS or SKIP" #/Test: 
priority_5\s+State: COMPLETED\s+Status: (SKIP|PASS)/) +(expect:required in logbody = 1 "priority_7 is either PASS or SKIP" #/Test: priority_7\s+State: COMPLETED\s+Status: (SKIP|PASS)/) +(expect:required in logbody = 1 "testxz has 1 NOT_STARTED test" #/Test: testxz\s+State: NOT_STARTED/) +(expect:required in logbody = 1 "no items" #/Test: no_items\s+State: NOT_STARTED\s+Status: ZERO_ITEMS/) +(expect:warning in logbody = 1 "dynamic waiton" #/Test: dynamic_waiton/) +(expect:required in logbody = 29 "blocktestxz has 29 tests" #/Test: blocktestxz/) + +;; General cases +;; +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "PASS" testdat)) + pass-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "FAIL" testdat)) + fail-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "WARN" testdat)) + warn-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "NOT_STARTED" "PREQ_DISCARDED" testdat)) + nost-specs) + +;; Catch all. +;; +(expect:error in logbody = 0 "Tests not accounted for" #/Test: /) + + +;; ;; define your hooks +;; (hook:first-error "echo \"Error hook activated: #{escaped errmsg}\"") +;; (hook:first-warning "echo \"Got warning: #{escaped warnmsg}\"") +;; (hook:value "echo \"Value hook activated: expected=#{expected}, measured=#{measured}, tolerance=#{tolerance}, message=#{message}\"") +;; +;; ;; first ensure your run at least started +;; ;; +;; (trigger "Init" #/This is a header/) +;; (trigger "InitEnd" #/^\s*$/) +;; (section "Init" "Init" "InitEnd") +;; +;; (trigger "Body" #/^.*$/) ;; anything starts the body +;; ;; (trigger "EndBody" #/This had better never match/) +;; +;; (section "Body" "Body" "EndBody") +;; +;; (trigger "Blah2" #/^begin Blah2/) +;; (trigger "Blah2End" #/^end Blah2/) +;; (section "Blah2" "Blah2" "Blah2End") +;; +;; (expect:required in "Init" = 1 "Header" #/This is a header/) +;; (expect:required in "LogFileBody" > 0 "Something required but not found" #/This is required 
but not found/) +;; (expect:value in "LogFileBody" 1.9 0.1 "Output voltage" #/Measured voltage output:\s*([\d\.\+\-e]+)v/) +;; (expect:value in "LogFileBody" 0.5 0.1 "Output current" #/Measured output current:\s*([\d\.\+\-e]+)mA/) +;; (expect:value in "LogFileBody" 110e9 2e9 "A big number (first)" #/Freq:\s*([\d\.\+\-e]+)\s+Hz/) +;; (expect:value in "LogFileBody" 110e9 1e9 "A big number (second), hook not called" #/Freq:\s*([\d\.\+\-e]+)Hz/) +;; (expect:value in "LogFileBody" 110e9 1e9 "A big number (never activated)" #/Freq:\s*([\d\.\+\-e]+)zH/) +;; +;; ;; Using match number +;; (expect:value in "LogFileBody" 1.9 0.1 "Time Voltage" #/out: (\d+)\s+(\d+)/ match: 2) +;; +;; ;; Comparison instead of tolerance +;; (expect:value in "LogFileBody" 1.9 > "Time voltage" #/out: (\d+)\s+(\d+)/ match: 2) +;; +;; (expect:ignore in "Blah2" < 99 "FALSE ERROR" #/ERROR/) +;; (expect:ignore in "Body" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +;; (expect:warning in "Body" = 0 "Any warning" #/WARNING/) +;; (expect:error in "Body" = 0 "ERROR BLAH" (list #/ERROR/ #/error/)) ;; but disallow any other errors +;; +;; ;(expect in "Init" < 1 "Junk" #/This is bogus/) ADDED tests/release/tests/fullrun/testconfig Index: tests/release/tests/fullrun/testconfig ================================================================== --- /dev/null +++ tests/release/tests/fullrun/testconfig @@ -0,0 +1,11 @@ +[ezsteps] +cleantop $MTRUNNER $MTTESTDIR/fullrun $MTPATH megatest -remove-runs -target ubuntu/nfs/none -runname release_toplevel -testpatt % +runall $MTRUNNER $MTTESTDIR/fullrun $MTPATH megatest -run -testpatt % -target ubuntu/nfs/none -runname release_toplevel -runwait +runtop $MTRUNNER $MTTESTDIR/fullrun $MTPATH megatest -run -testpatt all_toplevel -target ubuntu/nfs/none -runname release_toplevel -rerun FAIL -preclean -runwait +results $MTRUNNER $MTTESTDIR/fullrun $MTPATH megatest -list-runs release_toplevel -target ubuntu/nfs/none -runname release_toplevel + +[requirements] +# 
waiton #{getenv ALL_TOPLEVEL_TESTS} + +# This is a "toplevel" test, it does not require waitons to be non-FAIL to run +# mode toplevel ADDED tests/release/tests/itemwait/testconfig Index: tests/release/tests/itemwait/testconfig ================================================================== --- /dev/null +++ tests/release/tests/itemwait/testconfig @@ -0,0 +1,24 @@ +# test2 from the tests/Makefile + +[var] +tname itemwait + +[pre-launch-env-vars] +NUMTESTS 20 + +[ezsteps] + +# Set things up +clean $MTRUNNER $MTTESTDIR/fdktestqa/testqa $MTPATH megatest -remove-runs -testpatt % -target %/% -runname #{get var tname}% +runbigrun3 $MTRUNNER $MTTESTDIR/fdktestqa/testqa $MTPATH nbfake megatest -run -testpatt bigrun3 -target a/bigrun3 -runname #{get var tname} +# watchrun watches until it sees at least one RUNNING in bigrun and one PASS in bigrun2 +watchrun sleep 15;watchrun.sh #{get var tname} + +[requirements] +# waiton #{getenv ALL_TOPLEVEL_TESTS} + +# This is a "toplevel" test, it does not require waitons to be non-FAIL to run +# mode toplevel + + +# test2 : fullprep ADDED tests/release/tests/itemwait/watchrun.sh Index: tests/release/tests/itemwait/watchrun.sh ================================================================== --- /dev/null +++ tests/release/tests/itemwait/watchrun.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +runname=$1 + +pass=no +alldone=no +while [[ $alldone == no ]];do + sleep 5 + $MTRUNNER $MTTESTDIR/fdktestqa/testqa $MTPATH megatest -list-runs $runname > list-runs.log + bigrun_running=$(cat list-runs.log | egrep 'bigrun\(.*RUNNING'|wc -l) + bigrun2_pass=$(cat list-runs.log | egrep 'bigrun2.*COMPLETED.*PASS'|wc -l) + echo "bigrun_running=$bigrun_running, bigrun2_pass=$bigrun2_pass" + if [[ $bigrun_running -gt 0 ]] && [[ $bigrun2_pass -gt 0 ]];then + pass=yes + alldone=yes + fi + if [[ $bigrun_running -eq 0 ]];then + echo "bigrun all done and no bigrun2 found with PASS." 
+ alldone=yes + fi +done + +if [[ $pass == yes ]];then + echo PASS + exit 0 +else + echo FAIL + exit 1 +fi ADDED tests/release/tests/rollup/firstres.logpro Index: tests/release/tests/rollup/firstres.logpro ================================================================== --- /dev/null +++ tests/release/tests/rollup/firstres.logpro @@ -0,0 +1,144 @@ +;; (c) 2006,2007,2008,2009 Matthew Welland matt@kiatoa.com +;; +;; License GPL. + +(define logbody "LogFileBody") + +(define pass-specs '( ;; testname num-expected max-runtime + ;; ("exit_0" 1 20) + ;; ("ezlog_fail_then_pass" 1 20) + ;; ("ezlog_pass" 1 20) + ;; ("ez_pass" 1 20) + ;; ("lineitem_pass" 1 20) + ;; ("priority_1" 1 20) + ;; ("priority_10" 1 20) + ;; ("priority_10_waiton_1" 1 20) + ;; ("priority_3" 1 20) + ;; ("priority_4" 1 20) + ;; ;; ("priority_5" 1 20) + ;; ("priority_6" 1 20) +;; ;; ("priority_7" 1 20) + ;; ("priority_8" 1 20) + ;; ("priority_9" 1 20) + ("runfirst" 7 20) + ;; ("singletest" 1 20) + ;; ("singletest2" 1 20) + ;; ("special" 1 20) + ;; ("sqlitespeed" 10 20) + ;; ("test1" 1 20) + ;; ("test2" 6 20) + ;; ("test_mt_vars" 6 20) + )) + +(define fail-specs '( ;; testname num-expected max-runtime + ;; ("exit_1" 1 20) + ;; ("ez_exit2_fail" 1 20) + ;; ("ez_fail" 1 20) + ;; ("ez_fail_quick" 1 20) + ;; ("ezlog_fail" 1 20) + ;; ("lineitem_fail" 1 20) + ;; ("logpro_required_fail" 1 20) + ;; ("manual_example" 1 20) + ;; ("neverrun" 1 20) + )) + +(define warn-specs '( + ;; ("ezlog_warn" 1 20) + )) + +(define nost-specs '( + ;; ("wait_no_items1" 1 20) + ;; ("wait_no_items2" 1 20) + ;; ("wait_no_items3" 1 20) + ;; ("wait_no_items4" 1 20) + ;; ("no_items" 1 20) + )) + +(define (check-one-test estate estatus testname count runtime) + (let* ((rxe (regexp (conc "^\\s+Test: " testname "(\\(.*|\\s+)\\s+State: " estate "\\s+Status: " estatus "\\s+Runtime:\\s+(\\d+)s"))) + (msg1 (conc testname " expecting count of " count)) + (msg2 (conc testname " expecting runtime less than " runtime))) + (expect:required in logbody 
= count msg1 rxe) + ;;(expect:value in logbody count < msg2 rxe) + )) + +;; Special cases +;; +;; (expect:ignore in logbody >= 0 "db_sync test might not have run" #/Test: db_sync/) +;; (expect:ignore in logbody >= 0 "all_toplevel may not yet be done" #/Test: all_toplevel/) +(expect:error in logbody = 0 "tests left in RUNNING state" #/State: RUNNING/) +;; (expect:required in logbody = 1 "priority_2 is KILLED" #/Test: priority_2\s+State: KILLED\s+Status: KILLED/) +;; (expect:required in logbody = 1 "priority_5 is either PASS or SKIP" #/Test: priority_5\s+State: COMPLETED\s+Status: (SKIP|PASS)/) +;; (expect:required in logbody = 1 "priority_7 is either PASS or SKIP" #/Test: priority_7\s+State: COMPLETED\s+Status: (SKIP|PASS)/) +;; (expect:required in logbody = 1 "testxz has 1 NOT_STARTED test" #/Test: testxz\s+State: NOT_STARTED/) +;; (expect:required in logbody = 1 "no items" #/Test: no_items\s+State: NOT_STARTED\s+Status: ZERO_ITEMS/) +;; (expect:warning in logbody = 1 "dynamic waiton" #/Test: dynamic_waiton/) +;; (expect:required in logbody = 29 "blocktestxz has 29 tests" #/Test: blocktestxz/) + +;; General cases +;; +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "PASS" testdat)) + pass-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "FAIL" testdat)) + fail-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "WARN" testdat)) + warn-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "NOT_STARTED" "PREQ_DISCARDED" testdat)) + nost-specs) + +;; Catch all. 
+;; +(expect:error in logbody = 0 "Tests not accounted for" #/Test: /) + + +;; ;; define your hooks +;; (hook:first-error "echo \"Error hook activated: #{escaped errmsg}\"") +;; (hook:first-warning "echo \"Got warning: #{escaped warnmsg}\"") +;; (hook:value "echo \"Value hook activated: expected=#{expected}, measured=#{measured}, tolerance=#{tolerance}, message=#{message}\"") +;; +;; ;; first ensure your run at least started +;; ;; +;; (trigger "Init" #/This is a header/) +;; (trigger "InitEnd" #/^\s*$/) +;; (section "Init" "Init" "InitEnd") +;; +;; (trigger "Body" #/^.*$/) ;; anything starts the body +;; ;; (trigger "EndBody" #/This had better never match/) +;; +;; (section "Body" "Body" "EndBody") +;; +;; (trigger "Blah2" #/^begin Blah2/) +;; (trigger "Blah2End" #/^end Blah2/) +;; (section "Blah2" "Blah2" "Blah2End") +;; +;; (expect:required in "Init" = 1 "Header" #/This is a header/) +;; (expect:required in "LogFileBody" > 0 "Something required but not found" #/This is required but not found/) +;; (expect:value in "LogFileBody" 1.9 0.1 "Output voltage" #/Measured voltage output:\s*([\d\.\+\-e]+)v/) +;; (expect:value in "LogFileBody" 0.5 0.1 "Output current" #/Measured output current:\s*([\d\.\+\-e]+)mA/) +;; (expect:value in "LogFileBody" 110e9 2e9 "A big number (first)" #/Freq:\s*([\d\.\+\-e]+)\s+Hz/) +;; (expect:value in "LogFileBody" 110e9 1e9 "A big number (second), hook not called" #/Freq:\s*([\d\.\+\-e]+)Hz/) +;; (expect:value in "LogFileBody" 110e9 1e9 "A big number (never activated)" #/Freq:\s*([\d\.\+\-e]+)zH/) +;; +;; ;; Using match number +;; (expect:value in "LogFileBody" 1.9 0.1 "Time Voltage" #/out: (\d+)\s+(\d+)/ match: 2) +;; +;; ;; Comparison instead of tolerance +;; (expect:value in "LogFileBody" 1.9 > "Time voltage" #/out: (\d+)\s+(\d+)/ match: 2) +;; +;; (expect:ignore in "Blah2" < 99 "FALSE ERROR" #/ERROR/) +;; (expect:ignore in "Body" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +;; (expect:warning in "Body" = 0 "Any warning" 
#/WARNING/) +;; (expect:error in "Body" = 0 "ERROR BLAH" (list #/ERROR/ #/error/)) ;; but disallow any other errors +;; +;; ;(expect in "Init" < 1 "Junk" #/This is bogus/) ADDED tests/release/tests/rollup/results.logpro Index: tests/release/tests/rollup/results.logpro ================================================================== --- /dev/null +++ tests/release/tests/rollup/results.logpro @@ -0,0 +1,145 @@ +;; (c) 2006,2007,2008,2009 Matthew Welland matt@kiatoa.com +;; +;; License GPL. + +(define logbody "LogFileBody") + +(define pass-specs '( ;; testname num-expected max-runtime + ;; ("exit_0" 1 20) + ;; ("ezlog_fail_then_pass" 1 20) + ;; ("ezlog_pass" 1 20) + ;; ("ez_pass" 1 20) + ;; ("lineitem_pass" 1 20) + ;; ("priority_1" 1 20) + ;; ("priority_10" 1 20) + ;; ("priority_10_waiton_1" 1 20) + ;; ("priority_3" 1 20) + ;; ("priority_4" 1 20) + ;; ;; ("priority_5" 1 20) + ;; ("priority_6" 1 20) +;; ;; ("priority_7" 1 20) + ;; ("priority_8" 1 20) + ;; ("priority_9" 1 20) + ("runfirst" 5 20) + ;; ("singletest" 1 20) + ;; ("singletest2" 1 20) + ;; ("special" 1 20) + ;; ("sqlitespeed" 10 20) + ;; ("test1" 1 20) + ;; ("test2" 6 20) + ;; ("test_mt_vars" 6 20) + )) + +(define fail-specs '( ;; testname num-expected max-runtime + ;; ("exit_1" 1 20) + ;; ("ez_exit2_fail" 1 20) + ;; ("ez_fail" 1 20) + ;; ("ez_fail_quick" 1 20) + ;; ("ezlog_fail" 1 20) + ;; ("lineitem_fail" 1 20) + ;; ("logpro_required_fail" 1 20) + ;; ("manual_example" 1 20) + ;; ("neverrun" 1 20) + )) + +(define warn-specs '( + ;; ("ezlog_warn" 1 20) + )) + +(define nost-specs '( + ;; ("wait_no_items1" 1 20) + ;; ("wait_no_items2" 1 20) + ;; ("wait_no_items3" 1 20) + ;; ("wait_no_items4" 1 20) + ;; ("no_items" 1 20) + )) + +(define (check-one-test estate estatus testname count runtime) + (let* ((rxe (regexp (conc "^\\s+Test: " testname "(\\(.*|\\s+)\\s+State: " estate "\\s+Status: " estatus "\\s+Runtime:\\s+(\\d+)s"))) + (msg1 (conc testname " expecting count of " count)) + (msg2 (conc testname " 
expecting runtime less than " runtime))) + (expect:required in logbody = count msg1 rxe) + ;;(expect:value in logbody count < msg2 rxe) + )) + +;; Special cases +;; +;; (expect:ignore in logbody >= 0 "db_sync test might not have run" #/Test: db_sync/) +;; (expect:ignore in logbody >= 0 "all_toplevel may not yet be done" #/Test: all_toplevel/) +(expect:error in logbody = 0 "tests left in RUNNING state" #/State: RUNNING/) +;; (expect:required in logbody = 1 "priority_2 is KILLED" #/Test: priority_2\s+State: KILLED\s+Status: KILLED/) +;; (expect:required in logbody = 1 "priority_5 is either PASS or SKIP" #/Test: priority_5\s+State: COMPLETED\s+Status: (SKIP|PASS)/) +;; (expect:required in logbody = 1 "priority_7 is either PASS or SKIP" #/Test: priority_7\s+State: COMPLETED\s+Status: (SKIP|PASS)/) +(expect:required in logbody = 1 "Toplevel will be NOT_STARTED" #/Test: runfirst\s+State: (INCOMPLETE|NOT_STARTED)/) +(expect:required in logbody = 1 "runfirst/b/2 will be NOT_STARTED/INCOMPLETE" #/Test: runfirst.b.2.\s+State: NOT_STARTED\s+Status: INCOMPLETE/) +;; (expect:required in logbody = 1 "no items" #/Test: no_items\s+State: NOT_STARTED\s+Status: ZERO_ITEMS/) +;; (expect:warning in logbody = 1 "dynamic waiton" #/Test: dynamic_waiton/) +;; (expect:required in logbody = 29 "blocktestxz has 29 tests" #/Test: blocktestxz/) + +;; General cases +;; +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "PASS" testdat)) + pass-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "FAIL" testdat)) + fail-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "WARN" testdat)) + warn-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "NOT_STARTED" "PREQ_DISCARDED" testdat)) + nost-specs) + +;; Catch all. 
+;; +(expect:error in logbody = 0 "Tests not accounted for" #/Test: /) + + +;; ;; define your hooks +;; (hook:first-error "echo \"Error hook activated: #{escaped errmsg}\"") +;; (hook:first-warning "echo \"Got warning: #{escaped warnmsg}\"") +;; (hook:value "echo \"Value hook activated: expected=#{expected}, measured=#{measured}, tolerance=#{tolerance}, message=#{message}\"") +;; +;; ;; first ensure your run at least started +;; ;; +;; (trigger "Init" #/This is a header/) +;; (trigger "InitEnd" #/^\s*$/) +;; (section "Init" "Init" "InitEnd") +;; +;; (trigger "Body" #/^.*$/) ;; anything starts the body +;; ;; (trigger "EndBody" #/This had better never match/) +;; +;; (section "Body" "Body" "EndBody") +;; +;; (trigger "Blah2" #/^begin Blah2/) +;; (trigger "Blah2End" #/^end Blah2/) +;; (section "Blah2" "Blah2" "Blah2End") +;; +;; (expect:required in "Init" = 1 "Header" #/This is a header/) +;; (expect:required in "LogFileBody" > 0 "Something required but not found" #/This is required but not found/) +;; (expect:value in "LogFileBody" 1.9 0.1 "Output voltage" #/Measured voltage output:\s*([\d\.\+\-e]+)v/) +;; (expect:value in "LogFileBody" 0.5 0.1 "Output current" #/Measured output current:\s*([\d\.\+\-e]+)mA/) +;; (expect:value in "LogFileBody" 110e9 2e9 "A big number (first)" #/Freq:\s*([\d\.\+\-e]+)\s+Hz/) +;; (expect:value in "LogFileBody" 110e9 1e9 "A big number (second), hook not called" #/Freq:\s*([\d\.\+\-e]+)Hz/) +;; (expect:value in "LogFileBody" 110e9 1e9 "A big number (never activated)" #/Freq:\s*([\d\.\+\-e]+)zH/) +;; +;; ;; Using match number +;; (expect:value in "LogFileBody" 1.9 0.1 "Time Voltage" #/out: (\d+)\s+(\d+)/ match: 2) +;; +;; ;; Comparison instead of tolerance +;; (expect:value in "LogFileBody" 1.9 > "Time voltage" #/out: (\d+)\s+(\d+)/ match: 2) +;; +;; (expect:ignore in "Blah2" < 99 "FALSE ERROR" #/ERROR/) +;; (expect:ignore in "Body" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +;; (expect:warning in "Body" = 0 "Any warning" 
#/WARNING/) +;; (expect:error in "Body" = 0 "ERROR BLAH" (list #/ERROR/ #/error/)) ;; but disallow any other errors +;; +;; ;(expect in "Init" < 1 "Junk" #/This is bogus/) ADDED tests/release/tests/rollup/testconfig Index: tests/release/tests/rollup/testconfig ================================================================== --- /dev/null +++ tests/release/tests/rollup/testconfig @@ -0,0 +1,28 @@ +# test2 from the tests/Makefile + +[var] +tname rollup + +[ezsteps] + +# Set things up +clean $MTRUNNER $MTTESTDIR/fullrun $MTPATH megatest -remove-runs -testpatt % -target ubuntu/nfs/none -runname #{get var tname}% +runfirst $MTRUNNER $MTTESTDIR/fullrun $MTPATH megatest -runtests runfirst/% -reqtarg ubuntu/nfs/none -runname #{get var tname} -preclean +firstres $MTRUNNER $MTTESTDIR/fullrun $MTPATH megatest -list-runs #{get var tname} -target ubuntu/nfs/none + +# Set one test item to INCOMPLETE +setstate $MTRUNNER $MTTESTDIR/fullrun $MTPATH megatest -set-state-status INCOMPLETE,FAIL :state COMPLETED :status PASS -testpatt runfirst/b/2 -target ubuntu/nfs/none -runname #{get var tname} + +# Rerun a different test item +rerun $MTRUNNER $MTTESTDIR/fullrun $MTPATH megatest -run -testpatt runfirst/spring -reqtarg ubuntu/nfs/none -runname #{get var tname} -preclean -rerun PASS + +results $MTRUNNER $MTTESTDIR/fullrun $MTPATH megatest -list-runs #{get var tname} -target ubuntu/nfs/none + +[requirements] +# waiton #{getenv ALL_TOPLEVEL_TESTS} + +# This is a "toplevel" test, it does not require waitons to be non-FAIL to run +# mode toplevel + + +# test2 : fullprep ADDED tests/release/tests/test2/results.logpro Index: tests/release/tests/test2/results.logpro ================================================================== --- /dev/null +++ tests/release/tests/test2/results.logpro @@ -0,0 +1,144 @@ +;; (c) 2006,2007,2008,2009 Matthew Welland matt@kiatoa.com +;; +;; License GPL. 
+ +(define logbody "LogFileBody") + +(define pass-specs '( ;; testname num-expected max-runtime + ;; ("exit_0" 1 20) + ;; ("ezlog_fail_then_pass" 1 20) + ;; ("ezlog_pass" 1 20) + ;; ("ez_pass" 1 20) + ;; ("lineitem_pass" 1 20) + ;; ("priority_1" 1 20) + ;; ("priority_10" 1 20) + ;; ("priority_10_waiton_1" 1 20) + ;; ("priority_3" 1 20) + ;; ("priority_4" 1 20) + ;; ;; ("priority_5" 1 20) + ;; ("priority_6" 1 20) +;; ;; ("priority_7" 1 20) + ;; ("priority_8" 1 20) + ;; ("priority_9" 1 20) + ("runfirst" 2 20) + ;; ("singletest" 1 20) + ;; ("singletest2" 1 20) + ;; ("special" 1 20) + ;; ("sqlitespeed" 10 20) + ;; ("test1" 1 20) + ;; ("test2" 6 20) + ;; ("test_mt_vars" 6 20) + )) + +(define fail-specs '( ;; testname num-expected max-runtime + ;; ("exit_1" 1 20) + ;; ("ez_exit2_fail" 1 20) + ;; ("ez_fail" 1 20) + ;; ("ez_fail_quick" 1 20) + ;; ("ezlog_fail" 1 20) + ;; ("lineitem_fail" 1 20) + ;; ("logpro_required_fail" 1 20) + ;; ("manual_example" 1 20) + ;; ("neverrun" 1 20) + )) + +(define warn-specs '( + ;; ("ezlog_warn" 1 20) + )) + +(define nost-specs '( + ;; ("wait_no_items1" 1 20) + ;; ("wait_no_items2" 1 20) + ;; ("wait_no_items3" 1 20) + ;; ("wait_no_items4" 1 20) + ;; ("no_items" 1 20) + )) + +(define (check-one-test estate estatus testname count runtime) + (let* ((rxe (regexp (conc "^\\s+Test: " testname "(\\(.*|\\s+)\\s+State: " estate "\\s+Status: " estatus "\\s+Runtime:\\s+(\\d+)s"))) + (msg1 (conc testname " expecting count of " count)) + (msg2 (conc testname " expecting runtime less than " runtime))) + (expect:required in logbody = count msg1 rxe) + ;;(expect:value in logbody count < msg2 rxe) + )) + +;; Special cases +;; +;; (expect:ignore in logbody >= 0 "db_sync test might not have run" #/Test: db_sync/) +;; (expect:ignore in logbody >= 0 "all_toplevel may not yet be done" #/Test: all_toplevel/) +(expect:error in logbody = 0 "tests left in RUNNING state" #/State: RUNNING/) +;; (expect:required in logbody = 1 "priority_2 is KILLED" #/Test: 
priority_2\s+State: KILLED\s+Status: KILLED/) +;; (expect:required in logbody = 1 "priority_5 is either PASS or SKIP" #/Test: priority_5\s+State: COMPLETED\s+Status: (SKIP|PASS)/) +;; (expect:required in logbody = 1 "priority_7 is either PASS or SKIP" #/Test: priority_7\s+State: COMPLETED\s+Status: (SKIP|PASS)/) +;; (expect:required in logbody = 1 "testxz has 1 NOT_STARTED test" #/Test: testxz\s+State: NOT_STARTED/) +;; (expect:required in logbody = 1 "no items" #/Test: no_items\s+State: NOT_STARTED\s+Status: ZERO_ITEMS/) +;; (expect:warning in logbody = 1 "dynamic waiton" #/Test: dynamic_waiton/) +;; (expect:required in logbody = 29 "blocktestxz has 29 tests" #/Test: blocktestxz/) + +;; General cases +;; +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "PASS" testdat)) + pass-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "FAIL" testdat)) + fail-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "WARN" testdat)) + warn-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "NOT_STARTED" "PREQ_DISCARDED" testdat)) + nost-specs) + +;; Catch all. 
+;; +(expect:error in logbody = 0 "Tests not accounted for" #/Test: /) + + +;; ;; define your hooks +;; (hook:first-error "echo \"Error hook activated: #{escaped errmsg}\"") +;; (hook:first-warning "echo \"Got warning: #{escaped warnmsg}\"") +;; (hook:value "echo \"Value hook activated: expected=#{expected}, measured=#{measured}, tolerance=#{tolerance}, message=#{message}\"") +;; +;; ;; first ensure your run at least started +;; ;; +;; (trigger "Init" #/This is a header/) +;; (trigger "InitEnd" #/^\s*$/) +;; (section "Init" "Init" "InitEnd") +;; +;; (trigger "Body" #/^.*$/) ;; anything starts the body +;; ;; (trigger "EndBody" #/This had better never match/) +;; +;; (section "Body" "Body" "EndBody") +;; +;; (trigger "Blah2" #/^begin Blah2/) +;; (trigger "Blah2End" #/^end Blah2/) +;; (section "Blah2" "Blah2" "Blah2End") +;; +;; (expect:required in "Init" = 1 "Header" #/This is a header/) +;; (expect:required in "LogFileBody" > 0 "Something required but not found" #/This is required but not found/) +;; (expect:value in "LogFileBody" 1.9 0.1 "Output voltage" #/Measured voltage output:\s*([\d\.\+\-e]+)v/) +;; (expect:value in "LogFileBody" 0.5 0.1 "Output current" #/Measured output current:\s*([\d\.\+\-e]+)mA/) +;; (expect:value in "LogFileBody" 110e9 2e9 "A big number (first)" #/Freq:\s*([\d\.\+\-e]+)\s+Hz/) +;; (expect:value in "LogFileBody" 110e9 1e9 "A big number (second), hook not called" #/Freq:\s*([\d\.\+\-e]+)Hz/) +;; (expect:value in "LogFileBody" 110e9 1e9 "A big number (never activated)" #/Freq:\s*([\d\.\+\-e]+)zH/) +;; +;; ;; Using match number +;; (expect:value in "LogFileBody" 1.9 0.1 "Time Voltage" #/out: (\d+)\s+(\d+)/ match: 2) +;; +;; ;; Comparison instead of tolerance +;; (expect:value in "LogFileBody" 1.9 > "Time voltage" #/out: (\d+)\s+(\d+)/ match: 2) +;; +;; (expect:ignore in "Blah2" < 99 "FALSE ERROR" #/ERROR/) +;; (expect:ignore in "Body" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +;; (expect:warning in "Body" = 0 "Any warning" 
#/WARNING/) +;; (expect:error in "Body" = 0 "ERROR BLAH" (list #/ERROR/ #/error/)) ;; but disallow any other errors +;; +;; ;(expect in "Init" < 1 "Junk" #/This is bogus/) ADDED tests/release/tests/test2/results_a.logpro Index: tests/release/tests/test2/results_a.logpro ================================================================== --- /dev/null +++ tests/release/tests/test2/results_a.logpro @@ -0,0 +1,144 @@ +;; (c) 2006,2007,2008,2009 Matthew Welland matt@kiatoa.com +;; +;; License GPL. + +(define logbody "LogFileBody") + +(define pass-specs '( ;; testname num-expected max-runtime + ;; ("exit_0" 1 20) + ;; ("ezlog_fail_then_pass" 1 20) + ;; ("ezlog_pass" 1 20) + ;; ("ez_pass" 1 20) + ;; ("lineitem_pass" 1 20) + ;; ("priority_1" 1 20) + ;; ("priority_10" 1 20) + ;; ("priority_10_waiton_1" 1 20) + ;; ("priority_3" 1 20) + ;; ("priority_4" 1 20) + ;; ;; ("priority_5" 1 20) + ;; ("priority_6" 1 20) +;; ;; ("priority_7" 1 20) + ;; ("priority_8" 1 20) + ;; ("priority_9" 1 20) + ("runfirst" 2 20) + ;; ("singletest" 1 20) + ;; ("singletest2" 1 20) + ;; ("special" 1 20) + ;; ("sqlitespeed" 10 20) + ;; ("test1" 1 20) + ;; ("test2" 6 20) + ;; ("test_mt_vars" 6 20) + )) + +(define fail-specs '( ;; testname num-expected max-runtime + ;; ("exit_1" 1 20) + ;; ("ez_exit2_fail" 1 20) + ;; ("ez_fail" 1 20) + ;; ("ez_fail_quick" 1 20) + ;; ("ezlog_fail" 1 20) + ;; ("lineitem_fail" 1 20) + ;; ("logpro_required_fail" 1 20) + ;; ("manual_example" 1 20) + ;; ("neverrun" 1 20) + )) + +(define warn-specs '( + ;; ("ezlog_warn" 1 20) + )) + +(define nost-specs '( + ;; ("wait_no_items1" 1 20) + ;; ("wait_no_items2" 1 20) + ;; ("wait_no_items3" 1 20) + ;; ("wait_no_items4" 1 20) + ;; ("no_items" 1 20) + )) + +(define (check-one-test estate estatus testname count runtime) + (let* ((rxe (regexp (conc "^\\s+Test: " testname "(\\(.*|\\s+)\\s+State: " estate "\\s+Status: " estatus "\\s+Runtime:\\s+(\\d+)s"))) + (msg1 (conc testname " expecting count of " count)) + (msg2 (conc testname " 
expecting runtime less than " runtime))) + (expect:required in logbody = count msg1 rxe) + ;;(expect:value in logbody count < msg2 rxe) + )) + +;; Special cases +;; +;; (expect:ignore in logbody >= 0 "db_sync test might not have run" #/Test: db_sync/) +;; (expect:ignore in logbody >= 0 "all_toplevel may not yet be done" #/Test: all_toplevel/) +(expect:error in logbody = 0 "tests left in RUNNING state" #/State: RUNNING/) +;; (expect:required in logbody = 1 "priority_2 is KILLED" #/Test: priority_2\s+State: KILLED\s+Status: KILLED/) +;; (expect:required in logbody = 1 "priority_5 is either PASS or SKIP" #/Test: priority_5\s+State: COMPLETED\s+Status: (SKIP|PASS)/) +;; (expect:required in logbody = 1 "priority_7 is either PASS or SKIP" #/Test: priority_7\s+State: COMPLETED\s+Status: (SKIP|PASS)/) +;; (expect:required in logbody = 1 "testxz has 1 NOT_STARTED test" #/Test: testxz\s+State: NOT_STARTED/) +;; (expect:required in logbody = 1 "no items" #/Test: no_items\s+State: NOT_STARTED\s+Status: ZERO_ITEMS/) +;; (expect:warning in logbody = 1 "dynamic waiton" #/Test: dynamic_waiton/) +;; (expect:required in logbody = 29 "blocktestxz has 29 tests" #/Test: blocktestxz/) + +;; General cases +;; +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "PASS" testdat)) + pass-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "FAIL" testdat)) + fail-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "WARN" testdat)) + warn-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "NOT_STARTED" "PREQ_DISCARDED" testdat)) + nost-specs) + +;; Catch all. 
+;; +(expect:error in logbody = 0 "Tests not accounted for" #/Test: /) + + +;; ;; define your hooks +;; (hook:first-error "echo \"Error hook activated: #{escaped errmsg}\"") +;; (hook:first-warning "echo \"Got warning: #{escaped warnmsg}\"") +;; (hook:value "echo \"Value hook activated: expected=#{expected}, measured=#{measured}, tolerance=#{tolerance}, message=#{message}\"") +;; +;; ;; first ensure your run at least started +;; ;; +;; (trigger "Init" #/This is a header/) +;; (trigger "InitEnd" #/^\s*$/) +;; (section "Init" "Init" "InitEnd") +;; +;; (trigger "Body" #/^.*$/) ;; anything starts the body +;; ;; (trigger "EndBody" #/This had better never match/) +;; +;; (section "Body" "Body" "EndBody") +;; +;; (trigger "Blah2" #/^begin Blah2/) +;; (trigger "Blah2End" #/^end Blah2/) +;; (section "Blah2" "Blah2" "Blah2End") +;; +;; (expect:required in "Init" = 1 "Header" #/This is a header/) +;; (expect:required in "LogFileBody" > 0 "Something required but not found" #/This is required but not found/) +;; (expect:value in "LogFileBody" 1.9 0.1 "Output voltage" #/Measured voltage output:\s*([\d\.\+\-e]+)v/) +;; (expect:value in "LogFileBody" 0.5 0.1 "Output current" #/Measured output current:\s*([\d\.\+\-e]+)mA/) +;; (expect:value in "LogFileBody" 110e9 2e9 "A big number (first)" #/Freq:\s*([\d\.\+\-e]+)\s+Hz/) +;; (expect:value in "LogFileBody" 110e9 1e9 "A big number (second), hook not called" #/Freq:\s*([\d\.\+\-e]+)Hz/) +;; (expect:value in "LogFileBody" 110e9 1e9 "A big number (never activated)" #/Freq:\s*([\d\.\+\-e]+)zH/) +;; +;; ;; Using match number +;; (expect:value in "LogFileBody" 1.9 0.1 "Time Voltage" #/out: (\d+)\s+(\d+)/ match: 2) +;; +;; ;; Comparison instead of tolerance +;; (expect:value in "LogFileBody" 1.9 > "Time voltage" #/out: (\d+)\s+(\d+)/ match: 2) +;; +;; (expect:ignore in "Blah2" < 99 "FALSE ERROR" #/ERROR/) +;; (expect:ignore in "Body" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +;; (expect:warning in "Body" = 0 "Any warning" 
#/WARNING/) +;; (expect:error in "Body" = 0 "ERROR BLAH" (list #/ERROR/ #/error/)) ;; but disallow any other errors +;; +;; ;(expect in "Init" < 1 "Junk" #/This is bogus/) ADDED tests/release/tests/test2/results_b.logpro Index: tests/release/tests/test2/results_b.logpro ================================================================== --- /dev/null +++ tests/release/tests/test2/results_b.logpro @@ -0,0 +1,144 @@ +;; (c) 2006,2007,2008,2009 Matthew Welland matt@kiatoa.com +;; +;; License GPL. + +(define logbody "LogFileBody") + +(define pass-specs '( ;; testname num-expected max-runtime + ;; ("exit_0" 1 20) + ;; ("ezlog_fail_then_pass" 1 20) + ;; ("ezlog_pass" 1 20) + ;; ("ez_pass" 1 20) + ;; ("lineitem_pass" 1 20) + ;; ("priority_1" 1 20) + ;; ("priority_10" 1 20) + ;; ("priority_10_waiton_1" 1 20) + ;; ("priority_3" 1 20) + ;; ("priority_4" 1 20) + ;; ;; ("priority_5" 1 20) + ;; ("priority_6" 1 20) +;; ;; ("priority_7" 1 20) + ;; ("priority_8" 1 20) + ;; ("priority_9" 1 20) + ("runfirst" 2 20) + ;; ("singletest" 1 20) + ;; ("singletest2" 1 20) + ;; ("special" 1 20) + ;; ("sqlitespeed" 10 20) + ;; ("test1" 1 20) + ;; ("test2" 6 20) + ;; ("test_mt_vars" 6 20) + )) + +(define fail-specs '( ;; testname num-expected max-runtime + ;; ("exit_1" 1 20) + ;; ("ez_exit2_fail" 1 20) + ;; ("ez_fail" 1 20) + ;; ("ez_fail_quick" 1 20) + ;; ("ezlog_fail" 1 20) + ;; ("lineitem_fail" 1 20) + ;; ("logpro_required_fail" 1 20) + ;; ("manual_example" 1 20) + ;; ("neverrun" 1 20) + )) + +(define warn-specs '( + ;; ("ezlog_warn" 1 20) + )) + +(define nost-specs '( + ;; ("wait_no_items1" 1 20) + ;; ("wait_no_items2" 1 20) + ;; ("wait_no_items3" 1 20) + ;; ("wait_no_items4" 1 20) + ;; ("no_items" 1 20) + )) + +(define (check-one-test estate estatus testname count runtime) + (let* ((rxe (regexp (conc "^\\s+Test: " testname "(\\(.*|\\s+)\\s+State: " estate "\\s+Status: " estatus "\\s+Runtime:\\s+(\\d+)s"))) + (msg1 (conc testname " expecting count of " count)) + (msg2 (conc testname " 
expecting runtime less than " runtime))) + (expect:required in logbody = count msg1 rxe) + ;;(expect:value in logbody count < msg2 rxe) + )) + +;; Special cases +;; +;; (expect:ignore in logbody >= 0 "db_sync test might not have run" #/Test: db_sync/) +;; (expect:ignore in logbody >= 0 "all_toplevel may not yet be done" #/Test: all_toplevel/) +(expect:error in logbody = 0 "tests left in RUNNING state" #/State: RUNNING/) +;; (expect:required in logbody = 1 "priority_2 is KILLED" #/Test: priority_2\s+State: KILLED\s+Status: KILLED/) +;; (expect:required in logbody = 1 "priority_5 is either PASS or SKIP" #/Test: priority_5\s+State: COMPLETED\s+Status: (SKIP|PASS)/) +;; (expect:required in logbody = 1 "priority_7 is either PASS or SKIP" #/Test: priority_7\s+State: COMPLETED\s+Status: (SKIP|PASS)/) +;; (expect:required in logbody = 1 "testxz has 1 NOT_STARTED test" #/Test: testxz\s+State: NOT_STARTED/) +;; (expect:required in logbody = 1 "no items" #/Test: no_items\s+State: NOT_STARTED\s+Status: ZERO_ITEMS/) +;; (expect:warning in logbody = 1 "dynamic waiton" #/Test: dynamic_waiton/) +;; (expect:required in logbody = 29 "blocktestxz has 29 tests" #/Test: blocktestxz/) + +;; General cases +;; +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "PASS" testdat)) + pass-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "FAIL" testdat)) + fail-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "WARN" testdat)) + warn-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "NOT_STARTED" "PREQ_DISCARDED" testdat)) + nost-specs) + +;; Catch all. 
+;; +(expect:error in logbody = 0 "Tests not accounted for" #/Test: /) + + +;; ;; define your hooks +;; (hook:first-error "echo \"Error hook activated: #{escaped errmsg}\"") +;; (hook:first-warning "echo \"Got warning: #{escaped warnmsg}\"") +;; (hook:value "echo \"Value hook activated: expected=#{expected}, measured=#{measured}, tolerance=#{tolerance}, message=#{message}\"") +;; +;; ;; first ensure your run at least started +;; ;; +;; (trigger "Init" #/This is a header/) +;; (trigger "InitEnd" #/^\s*$/) +;; (section "Init" "Init" "InitEnd") +;; +;; (trigger "Body" #/^.*$/) ;; anything starts the body +;; ;; (trigger "EndBody" #/This had better never match/) +;; +;; (section "Body" "Body" "EndBody") +;; +;; (trigger "Blah2" #/^begin Blah2/) +;; (trigger "Blah2End" #/^end Blah2/) +;; (section "Blah2" "Blah2" "Blah2End") +;; +;; (expect:required in "Init" = 1 "Header" #/This is a header/) +;; (expect:required in "LogFileBody" > 0 "Something required but not found" #/This is required but not found/) +;; (expect:value in "LogFileBody" 1.9 0.1 "Output voltage" #/Measured voltage output:\s*([\d\.\+\-e]+)v/) +;; (expect:value in "LogFileBody" 0.5 0.1 "Output current" #/Measured output current:\s*([\d\.\+\-e]+)mA/) +;; (expect:value in "LogFileBody" 110e9 2e9 "A big number (first)" #/Freq:\s*([\d\.\+\-e]+)\s+Hz/) +;; (expect:value in "LogFileBody" 110e9 1e9 "A big number (second), hook not called" #/Freq:\s*([\d\.\+\-e]+)Hz/) +;; (expect:value in "LogFileBody" 110e9 1e9 "A big number (never activated)" #/Freq:\s*([\d\.\+\-e]+)zH/) +;; +;; ;; Using match number +;; (expect:value in "LogFileBody" 1.9 0.1 "Time Voltage" #/out: (\d+)\s+(\d+)/ match: 2) +;; +;; ;; Comparison instead of tolerance +;; (expect:value in "LogFileBody" 1.9 > "Time voltage" #/out: (\d+)\s+(\d+)/ match: 2) +;; +;; (expect:ignore in "Blah2" < 99 "FALSE ERROR" #/ERROR/) +;; (expect:ignore in "Body" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +;; (expect:warning in "Body" = 0 "Any warning" 
#/WARNING/) +;; (expect:error in "Body" = 0 "ERROR BLAH" (list #/ERROR/ #/error/)) ;; but disallow any other errors +;; +;; ;(expect in "Init" < 1 "Junk" #/This is bogus/) ADDED tests/release/tests/test2/testconfig Index: tests/release/tests/test2/testconfig ================================================================== --- /dev/null +++ tests/release/tests/test2/testconfig @@ -0,0 +1,27 @@ +# test2 from the tests/Makefile + +[var] +tname test2 +mtpath #{shell readlink -f ../../bin} + +[ezsteps] +clean $MTRUNNER $MTTESTDIR/fullrun $MTPATH megatest -remove-runs -testpatt % -target ubuntu/nfs/none -runname #{get var tname}% +part1 $MTRUNNER $MTTESTDIR/fullrun $MTPATH megatest -run -testpatt ez_pass,runfirst/a/% -reqtarg ubuntu/nfs/none -runname #{get var tname} -preclean +part2 $MTRUNNER $MTTESTDIR/fullrun $MTPATH megatest -run -testpatt %/,%/ai -reqtarg ubuntu/nfs/none -runname #{get var tname}_a -preclean +part3 $MTRUNNER $MTTESTDIR/fullrun $MTPATH megatest -runtests %/,%/ai -reqtarg ubuntu/nfs/none -runname #{get var tname}_b -preclean +part4 $MTRUNNER $MTTESTDIR/fullrun $MTPATH megatest -run -testpatt runfirst/%,%/ai -reqtarg ubuntu/nfs/none -runname #{get var tname}_a -preclean +part5 $MTRUNNER $MTTESTDIR/fullrun $MTPATH megatest -run -testpatt %/,%/winter -reqtarg ubuntu/nfs/none -runname #{get var tname}_a -preclean +part6 $MTRUNNER $MTTESTDIR/fullrun $MTPATH megatest -set-state-status COMPLETED,FORCED :state COMPLETED :status PASS -testpatt ez_p%s,runfirst/ -target ubuntu/nfs/none -runname #{get var tname} + +results $MTRUNNER $MTTESTDIR/fullrun $MTPATH megatest -list-runs #{get var tname} -target ubuntu/nfs/none +results_a $MTRUNNER $MTTESTDIR/fullrun $MTPATH megatest -list-runs #{get var tname}_a -target ubuntu/nfs/none +results_b $MTRUNNER $MTTESTDIR/fullrun $MTPATH megatest -list-runs #{get var tname}_b -target ubuntu/nfs/none + +[requirements] +# waiton #{getenv ALL_TOPLEVEL_TESTS} + +# This is a "toplevel" test, it does not require waitons to be 
non-FAIL to run +# mode toplevel + + +# test2 : fullprep ADDED tests/release/tests/testpatt/cleanres.logpro Index: tests/release/tests/testpatt/cleanres.logpro ================================================================== --- /dev/null +++ tests/release/tests/testpatt/cleanres.logpro @@ -0,0 +1,144 @@ +;; (c) 2006,2007,2008,2009 Matthew Welland matt@kiatoa.com +;; +;; License GPL. + +(define logbody "LogFileBody") + +(define pass-specs '( ;; testname num-expected max-runtime + ;; ("exit_0" 1 20) + ;; ("ezlog_fail_then_pass" 1 20) + ;; ("ezlog_pass" 1 20) + ;; ("ez_pass" 1 20) + ;; ("lineitem_pass" 1 20) + ;; ("priority_1" 1 20) + ;; ("priority_10" 1 20) + ;; ("priority_10_waiton_1" 1 20) + ;; ("priority_3" 1 20) + ;; ("priority_4" 1 20) + ;; ;; ("priority_5" 1 20) + ;; ("priority_6" 1 20) +;; ;; ("priority_7" 1 20) + ;; ("priority_8" 1 20) + ;; ("priority_9" 1 20) + ;; ("runfirst" 2 20) + ;; ("singletest" 1 20) + ;; ("singletest2" 1 20) + ;; ("special" 1 20) + ;; ("sqlitespeed" 10 20) + ;; ("test1" 1 20) + ;; ("test2" 6 20) + ;; ("test_mt_vars" 6 20) + )) + +(define fail-specs '( ;; testname num-expected max-runtime + ;; ("exit_1" 1 20) + ;; ("ez_exit2_fail" 1 20) + ;; ("ez_fail" 1 20) + ;; ("ez_fail_quick" 1 20) + ;; ("ezlog_fail" 1 20) + ;; ("lineitem_fail" 1 20) + ;; ("logpro_required_fail" 1 20) + ;; ("manual_example" 1 20) + ;; ("neverrun" 1 20) + )) + +(define warn-specs '( + ;; ("ezlog_warn" 1 20) + )) + +(define nost-specs '( + ;; ("wait_no_items1" 1 20) + ;; ("wait_no_items2" 1 20) + ;; ("wait_no_items3" 1 20) + ;; ("wait_no_items4" 1 20) + ;; ("no_items" 1 20) + )) + +(define (check-one-test estate estatus testname count runtime) + (let* ((rxe (regexp (conc "^\\s+Test: " testname "(\\(.*|\\s+)\\s+State: " estate "\\s+Status: " estatus "\\s+Runtime:\\s+(\\d+)s"))) + (msg1 (conc testname " expecting count of " count)) + (msg2 (conc testname " expecting runtime less than " runtime))) + (expect:required in logbody = count msg1 rxe) + ;;(expect:value in 
logbody count < msg2 rxe) + )) + +;; Special cases +;; +;; (expect:ignore in logbody >= 0 "db_sync test might not have run" #/Test: db_sync/) +;; (expect:ignore in logbody >= 0 "all_toplevel may not yet be done" #/Test: all_toplevel/) +(expect:error in logbody = 0 "tests left in RUNNING state" #/State: RUNNING/) +;; (expect:required in logbody = 1 "priority_2 is KILLED" #/Test: priority_2\s+State: KILLED\s+Status: KILLED/) +;; (expect:required in logbody = 1 "priority_5 is either PASS or SKIP" #/Test: priority_5\s+State: COMPLETED\s+Status: (SKIP|PASS)/) +;; (expect:required in logbody = 1 "priority_7 is either PASS or SKIP" #/Test: priority_7\s+State: COMPLETED\s+Status: (SKIP|PASS)/) +;; (expect:required in logbody = 1 "testxz has 1 NOT_STARTED test" #/Test: testxz\s+State: NOT_STARTED/) +;; (expect:required in logbody = 1 "no items" #/Test: no_items\s+State: NOT_STARTED\s+Status: ZERO_ITEMS/) +;; (expect:warning in logbody = 1 "dynamic waiton" #/Test: dynamic_waiton/) +;; (expect:required in logbody = 29 "blocktestxz has 29 tests" #/Test: blocktestxz/) + +;; General cases +;; +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "PASS" testdat)) + pass-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "FAIL" testdat)) + fail-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "WARN" testdat)) + warn-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "NOT_STARTED" "PREQ_DISCARDED" testdat)) + nost-specs) + +;; Catch all. 
+;; +(expect:error in logbody = 0 "Tests not accounted for" #/Test: /) + + +;; ;; define your hooks +;; (hook:first-error "echo \"Error hook activated: #{escaped errmsg}\"") +;; (hook:first-warning "echo \"Got warning: #{escaped warnmsg}\"") +;; (hook:value "echo \"Value hook activated: expected=#{expected}, measured=#{measured}, tolerance=#{tolerance}, message=#{message}\"") +;; +;; ;; first ensure your run at least started +;; ;; +;; (trigger "Init" #/This is a header/) +;; (trigger "InitEnd" #/^\s*$/) +;; (section "Init" "Init" "InitEnd") +;; +;; (trigger "Body" #/^.*$/) ;; anything starts the body +;; ;; (trigger "EndBody" #/This had better never match/) +;; +;; (section "Body" "Body" "EndBody") +;; +;; (trigger "Blah2" #/^begin Blah2/) +;; (trigger "Blah2End" #/^end Blah2/) +;; (section "Blah2" "Blah2" "Blah2End") +;; +;; (expect:required in "Init" = 1 "Header" #/This is a header/) +;; (expect:required in "LogFileBody" > 0 "Something required but not found" #/This is required but not found/) +;; (expect:value in "LogFileBody" 1.9 0.1 "Output voltage" #/Measured voltage output:\s*([\d\.\+\-e]+)v/) +;; (expect:value in "LogFileBody" 0.5 0.1 "Output current" #/Measured output current:\s*([\d\.\+\-e]+)mA/) +;; (expect:value in "LogFileBody" 110e9 2e9 "A big number (first)" #/Freq:\s*([\d\.\+\-e]+)\s+Hz/) +;; (expect:value in "LogFileBody" 110e9 1e9 "A big number (second), hook not called" #/Freq:\s*([\d\.\+\-e]+)Hz/) +;; (expect:value in "LogFileBody" 110e9 1e9 "A big number (never activated)" #/Freq:\s*([\d\.\+\-e]+)zH/) +;; +;; ;; Using match number +;; (expect:value in "LogFileBody" 1.9 0.1 "Time Voltage" #/out: (\d+)\s+(\d+)/ match: 2) +;; +;; ;; Comparison instead of tolerance +;; (expect:value in "LogFileBody" 1.9 > "Time voltage" #/out: (\d+)\s+(\d+)/ match: 2) +;; +;; (expect:ignore in "Blah2" < 99 "FALSE ERROR" #/ERROR/) +;; (expect:ignore in "Body" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +;; (expect:warning in "Body" = 0 "Any warning" 
#/WARNING/) +;; (expect:error in "Body" = 0 "ERROR BLAH" (list #/ERROR/ #/error/)) ;; but disallow any other errors +;; +;; ;(expect in "Init" < 1 "Junk" #/This is bogus/) ADDED tests/release/tests/testpatt/results.logpro Index: tests/release/tests/testpatt/results.logpro ================================================================== --- /dev/null +++ tests/release/tests/testpatt/results.logpro @@ -0,0 +1,144 @@ +;; (c) 2006,2007,2008,2009 Matthew Welland matt@kiatoa.com +;; +;; License GPL. + +(define logbody "LogFileBody") + +(define pass-specs '( ;; testname num-expected max-runtime + ;; ("exit_0" 1 20) + ;; ("ezlog_fail_then_pass" 1 20) + ;; ("ezlog_pass" 1 20) + ;; ("ez_pass" 1 20) + ;; ("lineitem_pass" 1 20) + ;; ("priority_1" 1 20) + ;; ("priority_10" 1 20) + ;; ("priority_10_waiton_1" 1 20) + ;; ("priority_3" 1 20) + ;; ("priority_4" 1 20) + ;; ;; ("priority_5" 1 20) + ;; ("priority_6" 1 20) +;; ;; ("priority_7" 1 20) + ;; ("priority_8" 1 20) + ;; ("priority_9" 1 20) + ("runfirst" 2 20) + ;; ("singletest" 1 20) + ;; ("singletest2" 1 20) + ;; ("special" 1 20) + ;; ("sqlitespeed" 10 20) + ;; ("test1" 1 20) + ;; ("test2" 6 20) + ;; ("test_mt_vars" 6 20) + )) + +(define fail-specs '( ;; testname num-expected max-runtime + ;; ("exit_1" 1 20) + ;; ("ez_exit2_fail" 1 20) + ;; ("ez_fail" 1 20) + ;; ("ez_fail_quick" 1 20) + ;; ("ezlog_fail" 1 20) + ;; ("lineitem_fail" 1 20) + ;; ("logpro_required_fail" 1 20) + ;; ("manual_example" 1 20) + ;; ("neverrun" 1 20) + )) + +(define warn-specs '( + ;; ("ezlog_warn" 1 20) + )) + +(define nost-specs '( + ;; ("wait_no_items1" 1 20) + ;; ("wait_no_items2" 1 20) + ;; ("wait_no_items3" 1 20) + ;; ("wait_no_items4" 1 20) + ;; ("no_items" 1 20) + )) + +(define (check-one-test estate estatus testname count runtime) + (let* ((rxe (regexp (conc "^\\s+Test: " testname "(\\(.*|\\s+)\\s+State: " estate "\\s+Status: " estatus "\\s+Runtime:\\s+(\\d+)s"))) + (msg1 (conc testname " expecting count of " count)) + (msg2 (conc testname " 
expecting runtime less than " runtime))) + (expect:required in logbody = count msg1 rxe) + ;;(expect:value in logbody count < msg2 rxe) + )) + +;; Special cases +;; +;; (expect:ignore in logbody >= 0 "db_sync test might not have run" #/Test: db_sync/) +;; (expect:ignore in logbody >= 0 "all_toplevel may not yet be done" #/Test: all_toplevel/) +(expect:error in logbody = 0 "tests left in RUNNING state" #/State: RUNNING/) +;; (expect:required in logbody = 1 "priority_2 is KILLED" #/Test: priority_2\s+State: KILLED\s+Status: KILLED/) +;; (expect:required in logbody = 1 "priority_5 is either PASS or SKIP" #/Test: priority_5\s+State: COMPLETED\s+Status: (SKIP|PASS)/) +;; (expect:required in logbody = 1 "priority_7 is either PASS or SKIP" #/Test: priority_7\s+State: COMPLETED\s+Status: (SKIP|PASS)/) +;; (expect:required in logbody = 1 "testxz has 1 NOT_STARTED test" #/Test: testxz\s+State: NOT_STARTED/) +;; (expect:required in logbody = 1 "no items" #/Test: no_items\s+State: NOT_STARTED\s+Status: ZERO_ITEMS/) +;; (expect:warning in logbody = 1 "dynamic waiton" #/Test: dynamic_waiton/) +;; (expect:required in logbody = 29 "blocktestxz has 29 tests" #/Test: blocktestxz/) + +;; General cases +;; +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "PASS" testdat)) + pass-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "FAIL" testdat)) + fail-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "WARN" testdat)) + warn-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "NOT_STARTED" "PREQ_DISCARDED" testdat)) + nost-specs) + +;; Catch all. 
+;; +(expect:error in logbody = 0 "Tests not accounted for" #/Test: /) + + +;; ;; define your hooks +;; (hook:first-error "echo \"Error hook activated: #{escaped errmsg}\"") +;; (hook:first-warning "echo \"Got warning: #{escaped warnmsg}\"") +;; (hook:value "echo \"Value hook activated: expected=#{expected}, measured=#{measured}, tolerance=#{tolerance}, message=#{message}\"") +;; +;; ;; first ensure your run at least started +;; ;; +;; (trigger "Init" #/This is a header/) +;; (trigger "InitEnd" #/^\s*$/) +;; (section "Init" "Init" "InitEnd") +;; +;; (trigger "Body" #/^.*$/) ;; anything starts the body +;; ;; (trigger "EndBody" #/This had better never match/) +;; +;; (section "Body" "Body" "EndBody") +;; +;; (trigger "Blah2" #/^begin Blah2/) +;; (trigger "Blah2End" #/^end Blah2/) +;; (section "Blah2" "Blah2" "Blah2End") +;; +;; (expect:required in "Init" = 1 "Header" #/This is a header/) +;; (expect:required in "LogFileBody" > 0 "Something required but not found" #/This is required but not found/) +;; (expect:value in "LogFileBody" 1.9 0.1 "Output voltage" #/Measured voltage output:\s*([\d\.\+\-e]+)v/) +;; (expect:value in "LogFileBody" 0.5 0.1 "Output current" #/Measured output current:\s*([\d\.\+\-e]+)mA/) +;; (expect:value in "LogFileBody" 110e9 2e9 "A big number (first)" #/Freq:\s*([\d\.\+\-e]+)\s+Hz/) +;; (expect:value in "LogFileBody" 110e9 1e9 "A big number (second), hook not called" #/Freq:\s*([\d\.\+\-e]+)Hz/) +;; (expect:value in "LogFileBody" 110e9 1e9 "A big number (never activated)" #/Freq:\s*([\d\.\+\-e]+)zH/) +;; +;; ;; Using match number +;; (expect:value in "LogFileBody" 1.9 0.1 "Time Voltage" #/out: (\d+)\s+(\d+)/ match: 2) +;; +;; ;; Comparison instead of tolerance +;; (expect:value in "LogFileBody" 1.9 > "Time voltage" #/out: (\d+)\s+(\d+)/ match: 2) +;; +;; (expect:ignore in "Blah2" < 99 "FALSE ERROR" #/ERROR/) +;; (expect:ignore in "Body" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +;; (expect:warning in "Body" = 0 "Any warning" 
#/WARNING/) +;; (expect:error in "Body" = 0 "ERROR BLAH" (list #/ERROR/ #/error/)) ;; but disallow any other errors +;; +;; ;(expect in "Init" < 1 "Junk" #/This is bogus/) ADDED tests/release/tests/testpatt/testconfig Index: tests/release/tests/testpatt/testconfig ================================================================== --- /dev/null +++ tests/release/tests/testpatt/testconfig @@ -0,0 +1,12 @@ +[ezsteps] +clean $MTRUNNER $MTTESTDIR/fullrun $MTPATH megatest -remove-runs -testpatt % -target ubuntu/nfs/none -runname release_testpatt +cleanres $MTRUNNER $MTTESTDIR/fullrun $MTPATH megatest -list-runs release_testpatt -target ubuntu/nfs/none + +runitems $MTRUNNER $MTTESTDIR/fullrun $MTPATH megatest -run -testpatt runfirst/%2 -target ubuntu/nfs/none -runname release_testpatt +results $MTRUNNER $MTTESTDIR/fullrun $MTPATH megatest -list-runs release_testpatt -target ubuntu/nfs/none + +[requirements] +# waiton #{getenv ALL_TOPLEVEL_TESTS} + +# This is a "toplevel" test, it does not require waitons to be non-FAIL to run +# mode toplevel ADDED tests/release/tests/testpatt_envvar/results.logpro Index: tests/release/tests/testpatt_envvar/results.logpro ================================================================== --- /dev/null +++ tests/release/tests/testpatt_envvar/results.logpro @@ -0,0 +1,141 @@ +;; (c) 2006,2007,2008,2009 Matthew Welland matt@kiatoa.com +;; +;; License GPL. 
+ +(define logbody "LogFileBody") + +(define pass-specs '( ;; testname num-expected max-runtime + ("exit_0" 1 20) + ("ezlog_fail_then_pass" 1 20) + ("ezlog_pass" 1 20) + ("ez_pass" 1 20) + ("lineitem_pass" 1 20) + ("priority_1" 1 20) + ("priority_10" 1 20) + ("priority_10_waiton_1" 1 20) + ("priority_3" 1 20) + ("priority_4" 1 20) + ;; ("priority_5" 1 20) + ("priority_6" 1 20) +;; ("priority_7" 1 20) + ("priority_8" 1 20) + ("priority_9" 1 20) + ("runfirst" 7 20) + ("singletest" 1 20) + ("singletest2" 1 20) + ("special" 1 20) + ("sqlitespeed" 10 20) + ("test1" 1 20) + ("test2" 6 20) + ("test_mt_vars" 6 20) + )) + +(define fail-specs '( ;; testname num-expected max-runtime + ("exit_1" 1 20) + ("ez_exit2_fail" 1 20) + ("ez_fail" 1 20) + ("ez_fail_quick" 1 20) + ("ezlog_fail" 1 20) + ("lineitem_fail" 1 20) + ("logpro_required_fail" 1 20) + ("manual_example" 1 20) + ("neverrun" 1 20))) + +(define warn-specs '(("ezlog_warn" 1 20))) + +(define nost-specs '(("wait_no_items1" 1 20) + ("wait_no_items2" 1 20) + ("wait_no_items3" 1 20) + ("wait_no_items4" 1 20) + ;; ("no_items" 1 20) + )) + +(define (check-one-test estate estatus testname count runtime) + (let* ((rxe (regexp (conc "^\\s+Test: " testname "(\\(.*|\\s+)\\s+State: " estate "\\s+Status: " estatus "\\s+Runtime:\\s+(\\d+)s"))) + (msg1 (conc testname " expecting count of " count)) + (msg2 (conc testname " expecting runtime less than " runtime))) + (expect:required in logbody = count msg1 rxe) + ;;(expect:value in logbody count < msg2 rxe) + )) + +;; Special cases +;; +(expect:error in logbody > 0 "blocktestxz not to run" #/Test: blocktestxz/) +(expect:ignore in logbody >= 0 "db_sync test might not have run" #/Test: db_sync/) +(expect:ignore in logbody >= 0 "all_toplevel may not yet be done" #/Test: all_toplevel/) +(expect:error in logbody = 0 "tests left in RUNNING state" #/State: RUNNING/) +(expect:required in logbody = 1 "priority_2 is KILLED" #/Test: priority_2\s+State: KILLED\s+Status: KILLED/) +(expect:required 
in logbody = 1 "priority_5 is either PASS or SKIP" #/Test: priority_5\s+State: COMPLETED\s+Status: (SKIP|PASS)/) +(expect:required in logbody = 1 "priority_7 is either PASS or SKIP" #/Test: priority_7\s+State: COMPLETED\s+Status: (SKIP|PASS)/) +(expect:required in logbody = 1 "testxz has 1 NOT_STARTED test" #/Test: testxz\s+State: NOT_STARTED/) +(expect:required in logbody = 1 "no items" #/Test: no_items\s+State: NOT_STARTED\s+Status: ZERO_ITEMS/) +(expect:warning in logbody = 1 "dynamic waiton" #/Test: dynamic_waiton/) +(expect:required in logbody = 29 "blocktestxz has 29 tests" #/Test: blocktestxz/) + +;; General cases +;; +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "PASS" testdat)) + pass-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "FAIL" testdat)) + fail-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "WARN" testdat)) + warn-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "NOT_STARTED" "PREQ_DISCARDED" testdat)) + nost-specs) + +;; Catch all. 
+;; +(expect:error in logbody = 0 "Tests not accounted for" #/Test: /) + + +;; ;; define your hooks +;; (hook:first-error "echo \"Error hook activated: #{escaped errmsg}\"") +;; (hook:first-warning "echo \"Got warning: #{escaped warnmsg}\"") +;; (hook:value "echo \"Value hook activated: expected=#{expected}, measured=#{measured}, tolerance=#{tolerance}, message=#{message}\"") +;; +;; ;; first ensure your run at least started +;; ;; +;; (trigger "Init" #/This is a header/) +;; (trigger "InitEnd" #/^\s*$/) +;; (section "Init" "Init" "InitEnd") +;; +;; (trigger "Body" #/^.*$/) ;; anything starts the body +;; ;; (trigger "EndBody" #/This had better never match/) +;; +;; (section "Body" "Body" "EndBody") +;; +;; (trigger "Blah2" #/^begin Blah2/) +;; (trigger "Blah2End" #/^end Blah2/) +;; (section "Blah2" "Blah2" "Blah2End") +;; +;; (expect:required in "Init" = 1 "Header" #/This is a header/) +;; (expect:required in "LogFileBody" > 0 "Something required but not found" #/This is required but not found/) +;; (expect:value in "LogFileBody" 1.9 0.1 "Output voltage" #/Measured voltage output:\s*([\d\.\+\-e]+)v/) +;; (expect:value in "LogFileBody" 0.5 0.1 "Output current" #/Measured output current:\s*([\d\.\+\-e]+)mA/) +;; (expect:value in "LogFileBody" 110e9 2e9 "A big number (first)" #/Freq:\s*([\d\.\+\-e]+)\s+Hz/) +;; (expect:value in "LogFileBody" 110e9 1e9 "A big number (second), hook not called" #/Freq:\s*([\d\.\+\-e]+)Hz/) +;; (expect:value in "LogFileBody" 110e9 1e9 "A big number (never activated)" #/Freq:\s*([\d\.\+\-e]+)zH/) +;; +;; ;; Using match number +;; (expect:value in "LogFileBody" 1.9 0.1 "Time Voltage" #/out: (\d+)\s+(\d+)/ match: 2) +;; +;; ;; Comparison instead of tolerance +;; (expect:value in "LogFileBody" 1.9 > "Time voltage" #/out: (\d+)\s+(\d+)/ match: 2) +;; +;; (expect:ignore in "Blah2" < 99 "FALSE ERROR" #/ERROR/) +;; (expect:ignore in "Body" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +;; (expect:warning in "Body" = 0 "Any warning" 
#/WARNING/) +;; (expect:error in "Body" = 0 "ERROR BLAH" (list #/ERROR/ #/error/)) ;; but disallow any other errors +;; +;; ;(expect in "Init" < 1 "Junk" #/This is bogus/) ADDED tests/release/tests/testpatt_envvar/testconfig Index: tests/release/tests/testpatt_envvar/testconfig ================================================================== --- /dev/null +++ tests/release/tests/testpatt_envvar/testconfig @@ -0,0 +1,14 @@ +[var] +targ -target ubuntu/nfs/all_toplevel +tp -testpatt % + +[ezsteps] +cleantop $MTRUNNER $MTTESTDIR/fullrun $MTPATH megatest -remove-runs #{get var tp} #{get var targ} -runname release_toplevel +runall $MTRUNNER $MTTESTDIR/fullrun $MTPATH megatest -run #{get var tp} #{get var targ} -runname release_toplevel -runwait +results $MTRUNNER $MTTESTDIR/fullrun $MTPATH megatest -list-runs release_toplevel #{get var targ} -runname release_toplevel + +[requirements] +# waiton #{getenv ALL_TOPLEVEL_TESTS} + +# This is a "toplevel" test, it does not require waitons to be non-FAIL to run +# mode toplevel ADDED tests/release/tests/toprun/results.logpro Index: tests/release/tests/toprun/results.logpro ================================================================== --- /dev/null +++ tests/release/tests/toprun/results.logpro @@ -0,0 +1,140 @@ +;; (c) 2006,2007,2008,2009 Matthew Welland matt@kiatoa.com +;; +;; License GPL. 
+ +(define logbody "LogFileBody") + +(define pass-specs '( ;; testname num-expected max-runtime + ("exit_0" 1 20) + ("ezlog_fail_then_pass" 1 20) + ("ezlog_pass" 1 20) + ("ez_pass" 1 20) + ("lineitem_pass" 1 20) + ("priority_1" 1 20) + ("priority_10" 1 20) + ("priority_10_waiton_1" 1 20) + ("priority_3" 1 20) + ("priority_4" 1 20) + ;; ("priority_5" 1 20) + ("priority_6" 1 20) +;; ("priority_7" 1 20) + ("priority_8" 1 20) + ("priority_9" 1 20) + ("runfirst" 7 20) + ("singletest" 1 20) + ("singletest2" 1 20) + ("special" 1 20) + ("sqlitespeed" 10 20) + ("test1" 1 20) + ("test2" 6 20) + ("test_mt_vars" 6 20) + )) + +(define fail-specs '( ;; testname num-expected max-runtime + ("exit_1" 1 20) + ("ez_exit2_fail" 1 20) + ("ez_fail" 1 20) + ("ez_fail_quick" 1 20) + ("ezlog_fail" 1 20) + ("lineitem_fail" 1 20) + ("logpro_required_fail" 1 20) + ("manual_example" 1 20) + ("neverrun" 1 20))) + +(define warn-specs '(("ezlog_warn" 1 20))) + +(define nost-specs '(("wait_no_items1" 1 20) + ("wait_no_items2" 1 20) + ("wait_no_items3" 1 20) + ("wait_no_items4" 1 20) + ;; ("no_items" 1 20) + )) + +(define (check-one-test estate estatus testname count runtime) + (let* ((rxe (regexp (conc "^\\s+Test: " testname "(\\(.*|\\s+)\\s+State: " estate "\\s+Status: " estatus "\\s+Runtime:\\s+(\\d+)s"))) + (msg1 (conc testname " expecting count of " count)) + (msg2 (conc testname " expecting runtime less than " runtime))) + (expect:required in logbody = count msg1 rxe) + ;;(expect:value in logbody count < msg2 rxe) + )) + +;; Special cases +;; +(expect:ignore in logbody >= 0 "db_sync test might not have run" #/Test: db_sync/) +(expect:ignore in logbody >= 0 "all_toplevel may not yet be done" #/Test: all_toplevel/) +(expect:error in logbody = 0 "tests left in RUNNING state" #/State: RUNNING/) +(expect:required in logbody = 1 "priority_2 is KILLED" #/Test: priority_2\s+State: KILLED\s+Status: KILLED/) +(expect:required in logbody = 1 "priority_5 is either PASS or SKIP" #/Test: 
priority_5\s+State: COMPLETED\s+Status: (SKIP|PASS)/) +(expect:required in logbody = 1 "priority_7 is either PASS or SKIP" #/Test: priority_7\s+State: COMPLETED\s+Status: (SKIP|PASS)/) +(expect:required in logbody = 1 "testxz has 1 NOT_STARTED test" #/Test: testxz\s+State: NOT_STARTED/) +(expect:required in logbody = 1 "no items" #/Test: no_items\s+State: NOT_STARTED\s+Status: ZERO_ITEMS/) +(expect:warning in logbody = 1 "dynamic waiton" #/Test: dynamic_waiton/) +(expect:required in logbody = 29 "blocktestxz has 29 tests" #/Test: blocktestxz/) + +;; General cases +;; +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "PASS" testdat)) + pass-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "FAIL" testdat)) + fail-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "COMPLETED" "WARN" testdat)) + warn-specs) + +(for-each + (lambda (testdat) + (apply check-one-test "NOT_STARTED" "PREQ_DISCARDED" testdat)) + nost-specs) + +;; Catch all. +;; +(expect:error in logbody = 0 "Tests not accounted for" #/Test: /) + + +;; ;; define your hooks +;; (hook:first-error "echo \"Error hook activated: #{escaped errmsg}\"") +;; (hook:first-warning "echo \"Got warning: #{escaped warnmsg}\"") +;; (hook:value "echo \"Value hook activated: expected=#{expected}, measured=#{measured}, tolerance=#{tolerance}, message=#{message}\"") +;; +;; ;; first ensure your run at least started +;; ;; +;; (trigger "Init" #/This is a header/) +;; (trigger "InitEnd" #/^\s*$/) +;; (section "Init" "Init" "InitEnd") +;; +;; (trigger "Body" #/^.*$/) ;; anything starts the body +;; ;; (trigger "EndBody" #/This had better never match/) +;; +;; (section "Body" "Body" "EndBody") +;; +;; (trigger "Blah2" #/^begin Blah2/) +;; (trigger "Blah2End" #/^end Blah2/) +;; (section "Blah2" "Blah2" "Blah2End") +;; +;; (expect:required in "Init" = 1 "Header" #/This is a header/) +;; (expect:required in "LogFileBody" > 0 "Something required but not found" #/This is required 
but not found/) +;; (expect:value in "LogFileBody" 1.9 0.1 "Output voltage" #/Measured voltage output:\s*([\d\.\+\-e]+)v/) +;; (expect:value in "LogFileBody" 0.5 0.1 "Output current" #/Measured output current:\s*([\d\.\+\-e]+)mA/) +;; (expect:value in "LogFileBody" 110e9 2e9 "A big number (first)" #/Freq:\s*([\d\.\+\-e]+)\s+Hz/) +;; (expect:value in "LogFileBody" 110e9 1e9 "A big number (second), hook not called" #/Freq:\s*([\d\.\+\-e]+)Hz/) +;; (expect:value in "LogFileBody" 110e9 1e9 "A big number (never activated)" #/Freq:\s*([\d\.\+\-e]+)zH/) +;; +;; ;; Using match number +;; (expect:value in "LogFileBody" 1.9 0.1 "Time Voltage" #/out: (\d+)\s+(\d+)/ match: 2) +;; +;; ;; Comparison instead of tolerance +;; (expect:value in "LogFileBody" 1.9 > "Time voltage" #/out: (\d+)\s+(\d+)/ match: 2) +;; +;; (expect:ignore in "Blah2" < 99 "FALSE ERROR" #/ERROR/) +;; (expect:ignore in "Body" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +;; (expect:warning in "Body" = 0 "Any warning" #/WARNING/) +;; (expect:error in "Body" = 0 "ERROR BLAH" (list #/ERROR/ #/error/)) ;; but disallow any other errors +;; +;; ;(expect in "Init" < 1 "Junk" #/This is bogus/) ADDED tests/release/tests/toprun/testconfig Index: tests/release/tests/toprun/testconfig ================================================================== --- /dev/null +++ tests/release/tests/toprun/testconfig @@ -0,0 +1,15 @@ +[misc] +rname release_toprun +rdir $MTTESTDIR/fullrun + +[ezsteps] +cleantop $MTRUNNER #{get misc rdir} $MTPATH megatest -remove-runs -testpatt % -target ubuntu/nfs/none -runname #{get misc rname} -testpatt % +runall $MTRUNNER #{get misc rdir} $MTPATH megatest -run -testpatt % -target ubuntu/nfs/none -runname #{get misc rname} -runwait +runtop $MTRUNNER #{get misc rdir} $MTPATH megatest -runtests all_toplevel -target ubuntu/nfs/none -runname #{get misc rname} -runwait +results $MTRUNNER #{get misc rdir} $MTPATH megatest -list-runs #{get misc rname} -target ubuntu/nfs/none + 
+[requirements] +# waiton #{getenv ALL_TOPLEVEL_TESTS} + +# This is a "toplevel" test, it does not require waitons to be non-FAIL to run +# mode toplevel ADDED tests/resources/ruby/librunscript.rb Index: tests/resources/ruby/librunscript.rb ================================================================== --- /dev/null +++ tests/resources/ruby/librunscript.rb @@ -0,0 +1,37 @@ +# This is the library of stuff for megatest + +def run_and_record(stepname, cmd, checks) + system "megatest -step #{stepname} :state start :status n/a" + system cmd + exitcode=$? + if exitcode==0 + exitcode='pass' + else + exitcode='fail' + end + system "megatest -step #{stepname} :state end :status #{exitcode}" +end + +def record_step(stepname,state,status) + system "megatest -step #{stepname} :state #{state} :status #{status}" +end + +def test_status(state,status) + system "megatest -test-status :state #{state} :status #{status}" +end + + +# WARNING: This example is deprecated. Don't use the -test-status command +# unless you know for sure what you are doing. 
+def file_size_checker(stepname,filename,minsize,maxsize) + fsize=File.size(filename) + if fsize > maxsize or fsize < minsize + system "megatest -test-status :state COMPLETED :status fail" + else + system "megatest -test-status :state COMPLETED :status pass" + end +end + + +def wait_for_step(testname,stepname) +end ADDED tests/rununittest.sh Index: tests/rununittest.sh ================================================================== --- /dev/null +++ tests/rununittest.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +# Usage: rununittest.sh testname debuglevel +# + +# put megatest on path from correct location +mtbindir=$(readlink -f ../bin) + +export PATH="${mtbindir}:$PATH" + +# Clean setup +# +dbdir=$(cd simplerun;megatest -show-config -section setup -var linktree)/.db +rm -f simplerun/megatest.db simplerun/monitor.db simplerun/db/monitor.db $dbdir/*.db +rm -rf simplelinks/ simpleruns/ simplerun/db/ $dbdir +mkdir -p simplelinks simpleruns +(cd simplerun;cp ../../*_records.scm .;perl -pi.bak -e 's/define-inline/define/' *_records.scm) + +# Run the test $1 is the unit test to run +cd simplerun;echo '(load "../tests.scm")' | ../../bin/megatest -repl -debug $2 $1 ADDED tests/simplerun/megatest.config Index: tests/simplerun/megatest.config ================================================================== --- /dev/null +++ tests/simplerun/megatest.config @@ -0,0 +1,32 @@ +[fields] +SYSTEM TEXT +RELEASE TEXT + +[setup] +# Adjust max_concurrent_jobs to limit how much you load your machines +max_concurrent_jobs 50 + +# Uncomment this to make the in-mem db into a disk based db (slower but good for debug) +# be aware that some unit tests will fail with this due to persistent data +# +# tmpdb /tmp + +# This is your link path, you can move it but it is generally better to keep it stable +linktree #{getenv MT_RUN_AREA_HOME}/../simplelinks + +# Valid values for state and status for steps, NB// It is not recommended you use this +[validvalues] +state start end completed + +# Job tools are 
more advanced ways to control how your jobs are launched +[jobtools] +useshell yes +launcher nbfind + +# You can override environment variables for all your tests here +[env-override] +EXAMPLE_VAR example value + +# As you run more tests you may need to add additional disks, the names are arbitrary but must be unique +[disks] +disk0 #{getenv MT_RUN_AREA_HOME}/../simpleruns ADDED tests/simplerun/runconfigs.config Index: tests/simplerun/runconfigs.config ================================================================== --- /dev/null +++ tests/simplerun/runconfigs.config @@ -0,0 +1,6 @@ +[default] +ALLTESTS see this variable + +# Your variables here are grouped by targets [SYSTEM/RELEASE] +[SYSTEM_val/RELEASE_val] +ANOTHERVAR only defined if target is SYSTEM_val/RELEASE_val ADDED tests/simplerun/test.config Index: tests/simplerun/test.config ================================================================== --- /dev/null +++ tests/simplerun/test.config @@ -0,0 +1,31 @@ +[section1] +1 ./blah + +[section2] + +# A comment + +[disks] +1 ./ + +[validvalues] +state start end aborted +status pass fail n/a + +[include a file that doesn't exist] + + +blah nada + +# now inlcude a file tha tdoes exist +[include megatest.config] + +[metadata] +description This is a multiline + description. The leading whitespace is discarded + irrespective of amount of indenting. + This line is indented more. + + +author matt +lastreview never ADDED tests/simplerun/tests/test1/step1.logpro Index: tests/simplerun/tests/test1/step1.logpro ================================================================== --- /dev/null +++ tests/simplerun/tests/test1/step1.logpro @@ -0,0 +1,8 @@ +;; You should have at least one expect:required. This ensures that your process ran +;; (expect:required in "LogFileBody" > 0 "Put description here" #/put pattern here/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! 
+(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/)) ;; but disallow any other errors ADDED tests/simplerun/tests/test1/step1.sh Index: tests/simplerun/tests/test1/step1.sh ================================================================== --- /dev/null +++ tests/simplerun/tests/test1/step1.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +# Run your step here +echo Got here! + ADDED tests/simplerun/tests/test1/step2.logpro Index: tests/simplerun/tests/test1/step2.logpro ================================================================== --- /dev/null +++ tests/simplerun/tests/test1/step2.logpro @@ -0,0 +1,8 @@ +;; You should have at least one expect:required. This ensures that your process ran +;; (expect:required in "LogFileBody" > 0 "Put description here" #/put pattern here/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/)) ;; but disallow any other errors ADDED tests/simplerun/tests/test1/step2.sh Index: tests/simplerun/tests/test1/step2.sh ================================================================== --- /dev/null +++ tests/simplerun/tests/test1/step2.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +# Run your step here +echo Got here eh! + + ADDED tests/simplerun/tests/test1/testconfig Index: tests/simplerun/tests/test1/testconfig ================================================================== --- /dev/null +++ tests/simplerun/tests/test1/testconfig @@ -0,0 +1,21 @@ +# Add additional steps here. 
Format is "stepname script" +[ezsteps] +step1 step1.sh +step2 step2.sh + +# Test requirements are specified here +[requirements] +# waiton setup +priority 0 + +# Iteration for your tests are controlled by the items section +[items] +# PARTOFDAY morning noon afternoon evening night + +# test_meta is a section for storing additional data on your test +[test_meta] +author matt +owner matt +description An example test +tags tagone,tagtwo +reviewed never ADDED tests/simplerun/tests/test2/step1.logpro Index: tests/simplerun/tests/test2/step1.logpro ================================================================== --- /dev/null +++ tests/simplerun/tests/test2/step1.logpro @@ -0,0 +1,8 @@ +;; You should have at least one expect:required. This ensures that your process ran +;; (expect:required in "LogFileBody" > 0 "Put description here" #/put pattern here/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/)) ;; but disallow any other errors ADDED tests/simplerun/tests/test2/step1.sh Index: tests/simplerun/tests/test2/step1.sh ================================================================== --- /dev/null +++ tests/simplerun/tests/test2/step1.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +# Run your step here ADDED tests/simplerun/tests/test2/step2.logpro Index: tests/simplerun/tests/test2/step2.logpro ================================================================== --- /dev/null +++ tests/simplerun/tests/test2/step2.logpro @@ -0,0 +1,8 @@ +;; You should have at least one expect:required. 
This ensures that your process ran +;; (expect:required in "LogFileBody" > 0 "Put description here" #/put pattern here/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/)) ;; but disallow any other errors ADDED tests/simplerun/tests/test2/step2.sh Index: tests/simplerun/tests/test2/step2.sh ================================================================== --- /dev/null +++ tests/simplerun/tests/test2/step2.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +# Run your step here ADDED tests/simplerun/tests/test2/testconfig Index: tests/simplerun/tests/test2/testconfig ================================================================== --- /dev/null +++ tests/simplerun/tests/test2/testconfig @@ -0,0 +1,21 @@ +# Add additional steps here. Format is "stepname script" +[ezsteps] +step1 step1.sh +step2 step2.sh + +# Test requirements are specified here +[requirements] +waiton test1 +priority 0 + +# Iteration for your tests are controlled by the items section +[items] +LANDTYPE desert plains forest jungle beach + +# test_meta is a section for storing additional data on your test +[test_meta] +author matt +owner matt +description Yet another example test +tags tagone,tagtwo +reviewed never ADDED tests/speedtest/megatest.config Index: tests/speedtest/megatest.config ================================================================== --- /dev/null +++ tests/speedtest/megatest.config @@ -0,0 +1,48 @@ +[fields] +sysname TEXT +fsname TEXT +datapath TEXT + +[setup] +transport #{scheme (if (getenv "USEHTTP") "http" "fs")} + +max_concurrent_jobs 50 + +# It is possible (but not recommended) to override the rsync command used +# to populate the test directories. 
For test development the following +# example can be useful +# +testcopycmd cp --remove-destination -rsv TEST_SRC_PATH/. TEST_TARG_PATH/. >> TEST_TARG_PATH/mt_launch.log 2>> TEST_TARG_PATH/mt_launch.log + +# FULL or 2, NORMAL or 1, OFF or 0 +synchronous OFF + +# override the logview command +# +logviewer (%MTCMD%) 2> /dev/null > /dev/null + +# override the html viewer launch command +# +# htmlviewercmd firefox -new-window +htmlviewercmd konqueror + +[jobtools] +launcher nbfake + +[server] + +# If the server can't be started on this port it will try the next port until +# it succeeds +port 8080 + +# This server will keep running this number of hours after last access. +# Three minutes is 0.05 hours +timeout 0.025 + +## disks are: +## name host:/path/to/area +## -or- +## name /path/to/area +[disks] +disk0 #{getenv MT_RUN_AREA_HOME}/tmp_run + ADDED tests/speedtest/runconfigs.config Index: tests/speedtest/runconfigs.config ================================================================== --- /dev/null +++ tests/speedtest/runconfigs.config @@ -0,0 +1,3 @@ +[default] +SOMEVAR This should show up in SOMEVAR3 + ADDED tests/speedtest/tests/speedtest/main.sh Index: tests/speedtest/tests/speedtest/main.sh ================================================================== --- /dev/null +++ tests/speedtest/tests/speedtest/main.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +# a bunch of steps in 2 second increments +for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17;do + $MT_MEGATEST -step step$i :state start :status running -setlog results$i.html + sleep $TEST_DELAY + $MT_MEGATEST -step step$i :state end :status 0 +done + +exit 0 ADDED tests/speedtest/tests/speedtest/testconfig Index: tests/speedtest/tests/speedtest/testconfig ================================================================== --- /dev/null +++ tests/speedtest/tests/speedtest/testconfig @@ -0,0 +1,18 @@ +[setup] +runscript main.sh + +[requirements] +priority 1 + +[items] +SETLOG 0 1 +TEST_DELAY 0 1 2 3 4 5 6 7 8 9 10 
+ITERATIONS 0 1 2 3 4 5 6 7 8 9 10 + +[test_meta] +author matt +owner bob +description This test checks that a multi-lineitem test with mix of pass and non-fail rolls up a PASS + +tags first,single +reviewed 09/10/2011, by Matt ADDED tests/stats.txt Index: tests/stats.txt ================================================================== --- /dev/null +++ tests/stats.txt @@ -0,0 +1,77 @@ +DB Stats: a1236d6bf92ec5cb8955f490761b21b0d3eea9d3 +======== +Cmd Count TotTime Avg +get-count-tests-running-for-run-id 1035 237.0 0.23 +get-count-tests-running-in-jobgroup 884 119.0 0.13 +get-count-tests-running 884 169.0 0.19 +get-prereqs-not-met 884 732.0 0.83 +get-test-info-by-id 673 122.0 0.18 +get-keys 476 1.0 0.00 +get-test-id 356 42.0 0.12 +testmeta-get-record 203 24.0 0.12 +roll-up-pass-fail-counts 159 39.0 0.25 +register-test 140 30.0 0.21 +test-set-rundir-shortdir 128 98.0 0.77 +test-set-status-state 94 45.0 0.48 +find-and-mark-incomplete 32 0.0 0.00 +state-status-msg 25 4.0 0.16 +delete-tests-in-state 12 4.0 0.33 +get-tests-for-run-mindata 8 0.0 0.00 +get-all-run-ids 5 2.0 0.40 +get-run-info 4 0.0 0.00 +register-run 4 5.0 1.25 +set-tests-state-status 4 15.0 3.75 +get-tests-for-run 4 15.0 3.75 + +# After converting first three functions above to sqlite3:first-result +DB Stats +======== +Cmd Count TotTime Avg +get-count-tests-running-for-run-id 1138 179.0 0.16 +get-count-tests-running-in-jobgroup 987 91.0 0.09 +get-count-tests-running 987 171.0 0.17 +get-prereqs-not-met 987 892.0 0.90 +get-test-info-by-id 672 95.0 0.14 +get-keys 476 0.0 0.00 +get-test-id 355 41.0 0.12 +testmeta-get-record 203 15.0 0.07 +roll-up-pass-fail-counts 159 30.0 0.19 +register-test 140 22.0 0.16 +test-set-rundir-shortdir 128 855.0 6.68 +test-set-status-state 94 20.0 0.21 +find-and-mark-incomplete 36 1.0 0.03 +state-status-msg 24 5.0 0.21 +delete-tests-in-state 12 2.0 0.17 +get-tests-for-run-mindata 9 0.0 0.00 +get-all-run-ids 5 1.0 0.20 +register-run 4 1.0 0.25 +get-tests-for-run 4 11.0 2.75 
+get-run-info 4 0.0 0.00 +set-tests-state-status 4 17.0 4.25 + +DB Stats another run, converted one or two non-relevant functions to sqlite3:first-result +======== +Cmd Count TotTime Avg +get-count-tests-running-for-run-id 987 157.0 0.16 +get-count-tests-running-in-jobgroup 836 79.0 0.09 +get-count-tests-running 836 121.0 0.14 +get-prereqs-not-met 836 513.0 0.61 +get-test-info-by-id 673 85.0 0.13 +get-keys 476 0.0 0.00 +get-test-id 356 32.0 0.09 +testmeta-get-record 203 19.0 0.09 +roll-up-pass-fail-counts 159 27.0 0.17 +register-test 140 23.0 0.16 +test-set-rundir-shortdir 128 35.0 0.27 +test-set-status-state 94 20.0 0.21 +find-and-mark-incomplete 40 0.0 0.00 +state-status-msg 25 5.0 0.20 +delete-tests-in-state 12 1.0 0.08 +get-tests-for-run-mindata 10 0.0 0.00 +get-all-run-ids 5 0.0 0.00 +set-tests-state-status 4 15.0 3.75 +register-run 4 2.0 0.50 +get-run-info 4 1.0 0.25 +get-tests-for-run 4 12.0 3.00 + + ADDED tests/supportfiles/ruby/librunscript.rb Index: tests/supportfiles/ruby/librunscript.rb ================================================================== --- /dev/null +++ tests/supportfiles/ruby/librunscript.rb @@ -0,0 +1,37 @@ +# This is the library of stuff for megatest + +def run_and_record(stepname, cmd, checks) + system "megatest -step #{stepname} :state start :status n/a" + system cmd + exitcode=$? + if exitcode==0 + exitcode='pass' + else + exitcode='fail' + end + system "megatest -step #{stepname} :state end :status #{exitcode}" +end + +def record_step(stepname,state,status) + system "megatest -step #{stepname} :state #{state} :status #{status}" +end + +def test_status(state,status) + system "megatest -test-status :state #{state} :status #{status}" +end + + +# WARNING: This example is deprecated. Don't use the -test-status command +# unless you know for sure what you are doing. 
+def file_size_checker(stepname,filename,minsize,maxsize) + fsize=File.size(filename) + if fsize > maxsize or fsize < minsize + system "megatest -test-status :state COMPLETED :status fail" + else + system "megatest -test-status :state COMPLETED :status pass" + end +end + + +def wait_for_step(testname,stepname) +end ADDED tests/test7.logpro Index: tests/test7.logpro ================================================================== --- /dev/null +++ tests/test7.logpro @@ -0,0 +1,8 @@ +;; You should have at least one expect:required. This ensures that your process ran +(expect:required in "LogFileBody" > 0 "All tests launched" #/INFO:.*All tests launched/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! +(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/ERROR/ #/error/)) ;; but disallow any other errors ADDED tests/tests.scm Index: tests/tests.scm ================================================================== --- /dev/null +++ tests/tests.scm @@ -0,0 +1,40 @@ +;; Copyright 2006-2012, Matthew Welland. +;; +;; This program is made available under the GNU GPL version 2.0 or +;; greater. See the accompanying file COPYING for details. +;; +;; This program is distributed WITHOUT ANY WARRANTY; without even the +;; implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +;; PURPOSE. 
+ +;; strftime('%m/%d/%Y %H:%M:%S','now','localtime') + +(require-extension test) +(require-extension regex) +(require-extension srfi-18) +(import srfi-18) +;; (require-extension zmq) +;; (import zmq) + +(define test-work-dir (current-directory)) + +;; read in all the _record files +(let ((files (glob "*_records.scm"))) + (for-each + (lambda (file) + (print "Loading " file) + (load file)) + files)) + +(let* ((unit-test-name (list-ref (argv) 4)) + (fname (conc "../unittests/" unit-test-name ".scm"))) + (if (file-exists? fname) + (load fname) + (print "ERROR: Unit test " unit-test-name " not found in unittests directory"))) + + + (list "abc" "abc/%" "ab%/c%" "~abc/c%" "abc/~c%" "a,b/c,%/d" "%/,%/a" "%/,%/a" "%/,%/a" "%" "%" "%/" "%/" "%abc%") + (list "abc" "abc" "abcd" "abc" "abc" "a" "abc" "def" "ghi" "a" "a" "a" "a" "abc") + (list "" "" "cde" "cde" "cde" "" "" "a" "b" "" "b" "" "b" "abc") + (list #t #t #t #f #f #t #t #t #f #t #t #t #f #t)) + ADDED tests/unit.logpro Index: tests/unit.logpro ================================================================== --- /dev/null +++ tests/unit.logpro @@ -0,0 +1,8 @@ +;; You should have at least one expect:required. This ensures that your process ran +(expect:required in "LogFileBody" > 0 "At least one PASS" #/\[.{0,4}PASS.{0,4}\]/) + +;; You may need ignores to suppress false error or warning hits from the later expects +;; NOTE: Order is important here! 
+(expect:ignore in "LogFileBody" < 99 "Ignore the word error in comments" #/^\/\/.*error/) +(expect:warning in "LogFileBody" = 0 "Any warning" #/warn/) +(expect:error in "LogFileBody" = 0 "Any error" (list #/error/i #/\[.{0,4}FAIL.{0,4}\]/)) ;; but disallow any other errors ADDED tests/unittests/basicserver.scm Index: tests/unittests/basicserver.scm ================================================================== --- /dev/null +++ tests/unittests/basicserver.scm @@ -0,0 +1,269 @@ +;;====================================================================== +;; S E R V E R +;;====================================================================== + +;; Run like this: +;; +;; ./rununittest.sh server 1;(cd simplerun;megatest -stop-server 0) + +(delete-file* "logs/1.log") +(define run-id 1) + +(test "setup for run" #t (begin (launch:setup-for-run) + (string? (getenv "MT_RUN_AREA_HOME")))) + +;; NON Server tests go here + +(test #f #f (db:dbdat-get-path *db*)) +(test #f #f (db:get-run-name-from-id *db* run-id)) +;; (test #f '("SYSTEM" "RELEASE") (rmt:get-keys)) + +;; (exit) + +;; Server tests go here +(for-each + (lambda (run-id) + (test #f #f (tasks:server-running-or-starting? (db:delay-if-busy (tasks:open-db)) run-id)) + (server:kind-run run-id) + (test "did server start within 20 seconds?" + #t + (let loop ((remtries 20) + (running (tasks:server-running-or-starting? (db:delay-if-busy + (tasks:open-db)) + run-id))) + (if running + (> running 0) + (if (> remtries 0) + (begin + (thread-sleep! 1) + (loop (- remtries 1) + (tasks:server-running-or-starting? (db:delay-if-busy + (tasks:open-db)) + run-id))))))) + + (test "did server become available" #t + (let loop ((remtries 10) + (res (tasks:get-server (db:delay-if-busy (tasks:open-db)) run-id))) + (if res + (vector? res) + (begin + (if (> remtries 0) + (begin + (thread-sleep! 
1.1) + (loop (- remtries 1)(tasks:get-server (db:delay-if-busy (tasks:open-db)) run-id))) + res))))) + ) + (list 0 1)) + +(define user (current-user-name)) +(define runname "mytestrun") +(define keys (rmt:get-keys)) +(define runinfo #f) +(define keyvals '(("SYSTEM" "abc")("RELEASE" "def"))) +(define header (list "SYSTEM" "RELEASE" "id" "runname" "state" "status" "owner" "event_time")) + +;; Setup +;; +(test #f #f (not (client:setup run-id))) +(test #f #f (not (hash-table-ref/default *runremote* run-id #f))) + +;; Login +;; +(test #f'(#t "successful login") + (rmt:login-no-auto-client-setup (hash-table-ref/default *runremote* run-id #f) run-id)) +(test #f '(#t "successful login") + (rmt:login run-id)) + +;; Keys +;; +(test #f '("SYSTEM" "RELEASE") (rmt:get-keys)) + +;; No data in db +;; +(test #f '() (rmt:get-all-run-ids)) +(test #f #f (rmt:get-run-name-from-id run-id)) +(test #f + (vector + header + (vector #f #f #f #f)) + (rmt:get-run-info run-id)) + +;; Insert data into db +;; +(test #f 1 (rmt:register-run keyvals runname "new" "n/a" user)) +;; (test #f #f (rmt:get-runs-by-patt keys runname)) +(test #f #t (rmt:general-call 'register-test run-id run-id "test-one" "")) +(define test-one-id #f) +(test #f 30001 (let ((test-id (rmt:get-test-id run-id "test-one" ""))) + (set! test-one-id test-id) + test-id)) +(define test-one-rec #f) +(test #f "test-one" (let ((test-rec (rmt:get-test-info-by-id run-id test-one-id))) + (set! test-one-rec test-rec) + (vector-ref test-rec 2))) + +;; With data in db +;; +(print "Using runame=" runname) +(test #f '(1) (rmt:get-all-run-ids)) +(test #f runname (rmt:get-run-name-from-id run-id)) +(test #f + runname + (let ((run-info (rmt:get-run-info run-id))) + (db:get-value-by-header (db:get-rows run-info) + (db:get-header run-info) + "runname"))) + +(for-each (lambda (run-id) +;; test killing server +;; +(tasks:kill-server-run-id run-id) + +(test #f #f (tasks:server-running-or-starting? 
(db:delay-if-busy (tasks:open-db)) run-id)) +) +(list 0 1)) + +;; Tests to assess reading/writing while servers are starting/stopping +(define start-time (current-seconds)) +(let loop ((test-state 'start)) + (let* ((server-dats (tasks:get-server-records (db:delay-if-busy (tasks:open-db)) run-id)) + (first-dat (if (not (null? server-dats)) + (car server-dats) + #f))) + (map (lambda (dat) + (apply print (intersperse (vector->list dat) ", "))) + server-dats) + (test #f test-one-rec (rmt:get-test-info-by-id run-id test-one-id)) + (thread-sleep! 1) + (case test-state + ((start) + (print "Trying to start server") + (server:kind-run run-id) + (loop 'server-started)) + ((server-started) + (case (if first-dat (vector-ref first-dat 0) 'blah) + ((running) + (print "Server appears to be running. Now ask it to shutdown") + (rmt:kill-server run-id) + (loop 'server-shutdown)) + ((shutting-down) + (loop test-state)) + (else (print "Don't know what to do if get here")))) + ((server-shutdown) + (loop test-state))))) + +;;====================================================================== +;; END OF TESTS +;;====================================================================== + + +;; (test #f #f (client:setup run-id)) + +;; (set! *transport-type* 'http) +;; +;; (test "setup for run" #t (begin (launch:setup-for-run) +;; (string? (getenv "MT_RUN_AREA_HOME")))) +;; +;; (test "server-register, get-best-server" #t (let ((res #f)) +;; (open-run-close tasks:server-register tasks:open-db 1 "bob" 1234 100 'live 'http) +;; (set! res (open-run-close tasks:get-best-server tasks:open-db)) +;; (number? (vector-ref res 3)))) +;; +;; (test "de-register server" #f (let ((res #f)) +;; (open-run-close tasks:server-deregister tasks:open-db "bob" port: 1234) +;; (vector? 
(open-run-close tasks:get-best-server tasks:open-db)))) +;; +;; (define server-pid #f) +;; +;; ;; Not sure how the following should work, replacing it with system of megatest -server +;; ;; (test "launch server" #t (let ((pid (process-fork (lambda () +;; ;; ;; (daemon:ize) +;; ;; (server:launch 'http))))) +;; ;; (set! server-pid pid) +;; ;; (number? pid))) +;; (system "../../bin/megatest -server - -debug 22 > server.log 2> server.log &") +;; +;; (let loop ((n 10)) +;; (thread-sleep! 1) ;; need to wait for server to start. +;; (let ((res (open-run-close tasks:get-best-server tasks:open-db))) +;; (print "tasks:get-best-server returned " res) +;; (if (and (not res) +;; (> n 0)) +;; (loop (- n 1))))) +;; +;; (test "get-best-server" #t (begin +;; (client:launch) +;; (let ((dat (open-run-close tasks:get-best-server tasks:open-db))) +;; (vector? dat)))) +;; +;; (define *keys* (keys:config-get-fields *configdat*)) +;; (define *keyvals* (keys:target->keyval *keys* "a/b/c")) +;; +;; (test #f #t (string? (car *runremote*))) +;; (test #f '(#t "successful login") (rmt:login)) ;; *runremote* *toppath* *my-client-signature*))) +;; +;; (test #f #f (rmt:get-test-info-by-id 99)) ;; get non-existant test +;; +;; ;; RUNS +;; (test #f 1 (rmt:register-run *keyvals* "firstrun" "new" "n/a" (current-user-name))) +;; (test "get run info" "firstrun" (let ((rinfo (rmt:get-run-info 1))) +;; (vector-ref (vector-ref rinfo 1) 3))) +;; (test "get runname from id" "firstrun" (rmt:get-run-name-from-id 1)) +;; +;; ;; TESTS +;; (test "get tests (no data)" '() (rmt:get-tests-for-run 1 "%" '() '() #f #f #f #f #f #f)) +;; (test "register test" #t (rmt:general-call 'register-test 1 "test1" "")) +;; (test "get tests (some data)" 1 (length (rmt:get-tests-for-run 1 "%" '() '() #f #f #f #f #f #f))) +;; (test "get test id" 1 (rmt:get-test-id 1 "test1" "")) +;; (test "sync back" #t (> (rmt:sync-inmem->db) 0)) +;; (test "get test id from main" 1 (db:get-test-id *db* 1 "test1" "")) +;; (test "get keys" #t (list? 
(rmt:get-keys))) +;; (test "set comment" #t (begin (rmt:general-call 'set-test-comment "this is a comment" 1) #t)) +;; (test "get comment" "this is a comment" (let ((trec (rmt:get-test-info-by-id 1))) +;; (db:test-get-comment trec))) +;; +;; ;; MORE RUNS +;; (test "get runs" #t (let* ((runs (rmt:get-runs "%" #f #f '())) +;; (header (vector-ref runs 0)) +;; (data (vector-ref runs 1))) +;; (and (list? header) +;; (list? data) +;; (vector? (car data))))) +;; +;; (test "get local testinfo" "test1" (vector-ref (db:get-testinfo-state-status *db* 1) 2)) +;; (test "get testinfo" "test1" (vector-ref (rmt:get-testinfo-state-status 1) 2)) +;; +;; ;;====================================================================== +;; ;; D B +;; ;;====================================================================== +;; +;; (test "pass fail counts" #t (rmt:general-call 'pass-fail-counts 10 9 1)) +;; (test "get pass fail counts" 19 (let ((dat (rmt:get-test-info-by-id 1))) +;; (+ (db:test-get-pass_count dat) +;; (db:test-get-fail_count dat)))) +;; +;; (define testregistry (make-hash-table)) +;; (for-each +;; (lambda (tname) +;; (for-each +;; (lambda (itempath) +;; (let ((tkey (conc tname "/" itempath)) +;; (rpass (random 10)) +;; (rfail (random 10))) +;; (hash-table-set! 
testregistry tkey (list tname itempath)) +;; (rmt:general-call 'register-test 1 tname itempath) +;; (let* ((tid (rmt:get-test-id 1 tname itempath)) +;; (tdat (rmt:get-test-info-by-id tid))) +;; (rmt:general-call 'pass-fail-counts rpass rfail (db:test-get-id tdat)) +;; (let* ((resdat (rmt:get-test-info-by-id tid))) +;; (test "set/get pass fail counts" (list rpass rfail) +;; (list (db:test-get-pass_count resdat) +;; (db:test-get-fail_count resdat))))))) +;; (list "" "a" "b" "c" "d" "e" "f" "g" "h" "i" "j"))) +;; (list "test1" "test2" "test3" "test4" "test5")) +;; +;; +;; (test #f '(#t "exit process started") (rmt:kill-server)) ;; *toppath* *my-client-signature* #f))) +;; + +(exit) ADDED tests/unittests/configfiles.scm Index: tests/unittests/configfiles.scm ================================================================== --- /dev/null +++ tests/unittests/configfiles.scm @@ -0,0 +1,52 @@ +;;====================================================================== +;; C O N F I G F I L E S +;;====================================================================== + +(define conffile #f) +(test "Read a config" #t (hash-table? (read-config "test.config" #f #f))) +(test "Read a config that doesn't exist" #t (hash-table? (read-config "nada.config" #f #f))) + +(set! conffile (read-config "test.config" #f #f)) +(test "Get available diskspace" #t (number? (get-df "./"))) +(test "Get best dir" #t (let ((bestdir (get-best-disk conffile))) + (or (equal? "./" bestdir) + (equal? "/tmp" bestdir)))) +(test "Multiline variable" 4 (length (string-split (config-lookup conffile "metadata" "description") "\n"))) + +;; db +(define row (vector "a" "b" "c" "blah")) +(define header (list "col1" "col2" "col3" "col4")) +(test "Get row by header" "blah" (db:get-value-by-header row header "col4")) + +;; (define *toppath* "tests") +(define *db* #f) +(test "open-db" #t (begin + (set! 
*db* (open-db)) + (if *db* #t #f))) + +;; quit wasting time, I'm changing *db* to db +(define db *db*) + +(test "get cpu load" #t (number? (get-cpu-load))) +(test "get uname" #t (string? (get-uname))) + +(test "get validvalues as list" (list "start" "end" "completed") + (string-split (config-lookup *configdat* "validvalues" "state"))) + +(for-each (lambda (item) + (test (conc "get valid items (" item ")") + item (items:check-valid-items "state" item))) + (list "start" "end" "completed")) + +(for-each (lambda (item) + (test (conc "get valid items (" item ")") + item (items:check-valid-items "status" item))) + (list "pass" "fail" "n/a")) + +(test #f #f (items:check-valid-items "state" "blahfool")) + +(test "write env files" "nada.csh" (begin + (save-environment-as-files "nada") + (and (file-exists? "nada.sh") + (file-exists? "nada.csh")))) + ADDED tests/unittests/dbrdbstruct.scm Index: tests/unittests/dbrdbstruct.scm ================================================================== --- /dev/null +++ tests/unittests/dbrdbstruct.scm @@ -0,0 +1,33 @@ +;;====================================================================== +;; S E R V E R +;;====================================================================== + +;; Run like this: +;; +;; (cd ..;make && make install) && ./rununittest.sh server 1;(cd simplerun;megatest -stop-server 0) + +(test #f #t (vector? (make-dbr:dbstruct "/tmp"))) + +(define dbstruct (make-dbr:dbstruct "/tmp")) + +(test #f #t (begin (dbr:dbstruct-set-main! dbstruct "blah") #t)) +(test #f "blah" (dbr:dbstruct-get-main dbstruct)) +(for-each + (lambda (run-id) + (test #f #t (vector? (dbr:dbstruct-get-rundb-rec dbstruct run-id)))) + (list 1 2 3 4 5 6 7 8 9 #f)) + +(test #f 0 (dbr:dbstruct-field-name->num 'rundb)) +(test #f 1 (dbr:dbstruct-field-name->num 'inmem)) +(test #f 2 (dbr:dbstruct-field-name->num 'mtime)) + +(test #f #f (dbr:dbstruct-get-runvec-val dbstruct 1 'rundb)) +(test #f #t (begin (dbr:dbstruct-set-runvec-val! 
dbstruct 1 'rundb "rundb") #t)) +(test #f "rundb" (dbr:dbstruct-get-runvec-val dbstruct 1 'rundb)) + +(for-each + (lambda (k) + (test #f #t (begin (dbr:dbstruct-set-runvec-val! dbstruct 1 k (conc k)) #t)) + (test #f (conc k) (dbr:dbstruct-get-runvec-val dbstruct 1 k))) + '(rundb inmem mtime rtime stime inuse)) + ADDED tests/unittests/inmemdb.scm Index: tests/unittests/inmemdb.scm ================================================================== --- /dev/null +++ tests/unittests/inmemdb.scm @@ -0,0 +1,44 @@ +;;====================================================================== +;; S E R V E R +;;====================================================================== + +;; Run like this: +;; +;; (cd ..;make && make install) && ./rununittest.sh server 1;(cd simplerun;megatest -stop-server 0) + +(set! *transport-type* 'http) + +(system "cp ../fullrun/megatest.db megatest.db") + +(test "open inmem db" 1 (begin (open-in-mem-db) 1)) + +(test "setup for run" #t (begin (setup-for-run) + (string? (getenv "MT_RUN_AREA_HOME")))) + +(system "megatest -server - -debug 0 &") + +(thread-sleep! 3) ;; need to wait for server to start. Yes, a better way is needed. + +(define *keys* (keys:config-get-fields *configdat*)) +(define *keyvals* (keys:target->keyval *keys* "a/b/c")) + +(test #f #t (string? 
(car *runremote*))) +(test #f '(#t "successful login") (rmt:login)) ;; *runremote* *toppath* *my-client-signature*))) + +(define inmem (db:open-inmem-db)) + +(define (inmem-test t b) + (test "inmem sync to" t (db:sync-to *db* inmem)) + (test "inmem sync back" b (db:sync-to inmem *db*))) + +(inmem-test 0 0) + +(inmem-test 1 1) + +;;====================================================================== +;; D B +;;====================================================================== + +(test #f '(#t "exit process started") (rmt:kill-server)) ;; *toppath* *my-client-signature* #f))) + + ADDED tests/unittests/misc.scm Index: tests/unittests/misc.scm ================================================================== --- /dev/null +++ tests/unittests/misc.scm @@ -0,0 +1,48 @@ +;;====================================================================== +;; P R O C E S S E S +;;====================================================================== + +(test "cmd-run-with-stderr->list" '("No such file or directory") + (let ((reslst (cmd-run-with-stderr->list "ls" "/tmp/ihadbetternotexist"))) + (string-search (regexp "No such file or directory")(car reslst)))) + +;;====================================================================== +;; T E S T M A T C H I N G +;;====================================================================== + +;; tests:glob-like-match +(test #f '("abc") (tests:glob-like-match "abc" "abc")) +(for-each + (lambda (patt str expected) + (test (conc patt " " str "=>" expected) expected (tests:glob-like-match patt str))) + (list "abc" "~abc" "~abc" "a*c" "a%c") + (list "abc" "abcd" "abc" "ABC" "ABC") + (list '("abc") #t #f #f '("ABC")) + ) + +;; tests:match +(test #f #t (tests:match "abc/def" "abc" "def")) +(for-each + (lambda (patterns testname itempath expected) + (test (conc patterns " " testname "/" itempath "=>" expected) + expected + (tests:match patterns testname itempath))) + (list "abc" "abc/%" "ab%/c%" "~abc/c%" "abc/~c%" "a,b/c,%/d" "%/,%/a" 
"%/,%/a" "%/,%/a" "%" "%" "%/" "%/") + (list "abc" "abc" "abcd" "abc" "abc" "a" "abc" "def" "ghi" "a" "a" "a" "a") + (list "" "" "cde" "cde" "cde" "" "" "a" "b" "" "b" "" "b") + (list #t #t #t #f #f #t #t #t #f #t #t #t #f)) + +;; db:patt->like +(test #f "testname LIKE 't%'" (db:patt->like "testname" "t%" comparator: " AND ")) +(test #f "testname LIKE 't%' AND testname LIKE '%t'" (db:patt->like "testname" "t%,%t" comparator: " AND ")) +(test #f "item_path GLOB ''" (db:patt->like "item_path" "")) + +;; test:match->sqlqry +(test #f "(testname GLOB 'a' AND item_path GLOB 'b') OR (testname LIKE 'a%' AND item_path LIKE '%') OR (testname GLOB '' AND item_path LIKE 'b%')" + (tests:match->sqlqry "a/b,a%,/b%")) +(test #f "(testname GLOB 'a' AND item_path GLOB 'b') OR (testname LIKE 'a%' AND item_path LIKE '%') OR (testname LIKE '%' AND item_path LIKE 'b%')" + (tests:match->sqlqry "a/b,a%,%/b%")) + + +(exit) + ADDED tests/unittests/runs.scm Index: tests/unittests/runs.scm ================================================================== --- /dev/null +++ tests/unittests/runs.scm @@ -0,0 +1,330 @@ +(define keys (rmt:get-keys)) + +(test "get all legal tests" (list "test1" "test2") (sort (hash-table-keys (tests:get-all)) string<=?)) + +(test "register-run" #t (number? 
+ (rmt:register-run + '(("SYSTEM" "key1")("RELEASE" "key2")) + "myrun" + "new" + "n/a" + "bob"))) + +(test #f #t (rmt:register-test 1 "nada" "")) +(test #f 30001 (rmt:get-test-id 1 "nada" "")) +(test #f "NOT_STARTED" (vector-ref (rmt:get-test-info-by-id 1 30001) 3)) ;; "nada" "") 3)) + +(test #f "FOO LIKE 'abc%def'" (db:patt->like "FOO" "abc%def")) +(test #f "key2" (vector-ref (car (vector-ref (mt:get-runs-by-patt '("SYSTEM" "RELEASE") "%" "key1/key2") 1)) 1)) + +(test #f "SYSTEM,RELEASE,id,runname,state,status,owner,event_time" (car (runs:get-std-run-fields keys '("id" "runname" "state" "status" "owner" "event_time")))) +(test #f #t (runs:operate-on 'print "%" "%" "%")) + +;;(test "update-test-info" #t (test-update-meta-info *db* 1 "nada" +(setenv "BLAHFOO" "1234") +(unsetenv "NADAFOO") +(test "env temp overrides" "xyz" (let ((prevvals (alist->env-vars '(("BLAHFOO" 4321)("NADAFOO" xyz)))) + (result (get-environment-variable "NADAFOO"))) + (alist->env-vars prevvals) + result)) + +(test "env restored" "1234" (get-environment-variable "BLAHFOO")) + + +(test "Items assoc" "Elephant" (cadar (cadr (item-assoc->item-list '(("ANIMAL" "Elephant Lion")("SEASON" "Spring Fall")))))) +(set! *verbosity* 6) +(test "Items assoc" '()(item-assoc->item-list '(("a" "a b c d")("b" "c d e")("c" "")("d")))) +(set! *verbosity* -1) +(test "Items assoc empty items" '() (item-assoc->item-list '(("A")))) +(set! *verbosity* 1) +(test "Items table" "SEASON" (caadar (item-table->item-list '(("ANIMAL" "Elephant Lion")("SEASON" "Spring Winter"))))) +(test "Items table empty items I" '() (item-table->item-list '(("A")))) +(test "Items table empty items II" '() (item-table->item-list '(("A" "")))) + +;; Test out the steps code + +(define test-id #f) + +;; force keepgoing +; (hash-table-set! args:arg-hash "-keepgoing" #t) +(hash-table-set! args:arg-hash "-itempatt" "%") +(hash-table-set! args:arg-hash "-testpatt" "%") +(hash-table-set! 
args:arg-hash "-target" "ubuntu/r1.2") ;; SYSTEM/RELEASE +(hash-table-set! args:arg-hash "-runname" "testrun") +(test "Setup for a run" #t (begin (launch:setup-for-run) #t)) + +(define *tdb* #f) +(define keyvals #f) +(test "target->keyval" #t (let ((kv (keys:target->keyval keys (args:get-arg "-target")))) + (print "keyvals=" kv ", keys=" keys) + (set! keyvals kv)(list? keyvals))) + +(define testdbpath (conc "/tmp/" (getenv "USER") "/megatest_testing")) +(system (conc "rm -f " testdbpath "/testdat.db;mkdir -p " testdbpath)) + +(print "Using " testdbpath " for test db") +(test #f #t (let ((db (open-test-db testdbpath))) + (set! *tdb* db) + (sqlite3#database? db))) +(sqlite3#finalize! *tdb*) + +;; (test "Remove the rollup run" #t (begin (remove-runs) #t)) +(define tconfig #f) +(test "get a testconfig" #t (let ((tconf (tests:get-testconfig "test1" (tests:get-all) 'return-procs ))) + (set! tconfig tconf) + (hash-table? tconf))) + +(test "set-megatest-env-vars" + "ubuntu" + (begin + (runs:set-megatest-env-vars 1 inkeys: keys) + (get-environment-variable "SYSTEM"))) +(test "setup-env-defaults" + "see this variable" + (begin + (setup-env-defaults "runconfigs.config" 1 *already-seen-runconfig-info* keyvals environ-patt: "pre-launch-env-vars") + (get-environment-variable "ALLTESTS"))) + +(test #f "ubuntu" (car (keys:target-set-args keys (args:get-arg "-target") args:arg-hash))) + +(define rinfo #f) +(test "get-run-info" #f (vector? (vector-ref (let ((rinf (rmt:get-run-info 1))) + (set! rinfo rinf) + rinf) 0))) +;; (test "get-key-vals" "key1" (car (db:get-key-vals *dbstruct* 1))) +(test "tests:sort-by" '() (tests:sort-by-priority-and-waiton (make-hash-table))) + +(test "update-test_meta" "test1" (begin + (runs:update-test_meta "test1" tconfig) + (let ((dat (rmt:testmeta-get-record "test1"))) + (vector-ref dat 1)))) + +(define test-path "tests/test1") +(define disk-path #f) +(test "get-best-disk" #t (string? (file-exists? (let ((d (get-best-disk *configdat*))) + (set! 
disk-path d) + d)))) +(test "create-work-area" #t (symbolic-link? (car (create-work-area 1 rinfo keyvals 1 test-path disk-path "test1" '())))) +(test #f "" (item-list->path '())) + +;;====================================================================== +;; Create a test with multiple items and verify that rollup logic works +;;====================================================================== + +(rmt:register-test 1 "rollup" "") ;; toplevel test +(for-each + (lambda (itempath) + (rmt:register-test 1 "rollup" itempath) + (let ((test-id (rmt:get-test-id 1 "rollup" itempath)) + (comment (conc "This is a comment for itempath " itempath))) + ;; (rmt:test-set-state-status-by-id run-id test-id "COMPLETED" "PASS" comment) + (tests:test-set-status! 1 test-id "COMPLETED" "PASS" comment #f))) ;; #!key (work-area #f)) + '("item/1" "item/2" "item/3" "item/4" "item/5")) + +(test #f #t (number? (rmt:get-test-id 1 "rollup" "item/4"))) + +(define (get-state-status run-id testname itempath) + (let ((tdat (rmt:get-test-info-by-id 1 (rmt:get-test-id run-id testname itempath)))) + (list (db:test-get-state tdat) + (db:test-get-status tdat)))) + +(test "Rollup PASS" '("COMPLETED" "PASS") (get-state-status 1 "rollup" "")) +(let ((test-id (rmt:get-test-id 1 "rollup" "item/4")) + (top-id (rmt:get-test-id 1 "rollup" ""))) + (for-each + (lambda (state status rup-state rup-status) + ;; reset to COMPLETED/PASS + (tests:test-set-status! 1 test-id "COMPLETED" "PASS" #f #f) + (test "Top reset to COMPLETED/PASS" '("COMPLETED" "PASS")(get-state-status 1 "rollup" "")) + (tests:test-set-status! 
1 test-id state status #f #f) + (test (conc "Item set to " state "/" status) + (list state status) + (get-state-status 1 "rollup" "item/4")) + (test (conc "Rollup of " state "/" status) + (list rup-state rup-status) + (get-state-status 1 "rollup" ""))) + '("COMPLETED" "COMPLETED" "INCOMPLETE" "INCOMPLETE" "RUNNING" "RUNNING" "COMPLETED" "COMPLETED") + '("ABORT" "FAIL" "PASS" "FAIL" "PASS" "FAIL" "BLAH" "AUTO") + '("COMPLETED" "COMPLETED" "COMPLETED" "COMPLETED" "RUNNING" "RUNNING" "COMPLETED" "COMPLETED") + '("ABORT" "FAIL" "FAIL" "FAIL" "PASS" "FAIL" "ABORT" "AUTO"))) + + +(test "launch-test" #t + (string? + (file-exists? + ;; (launch-test test-id run-id run-info keyvals runname test-conf test-name test-path itemdat params) + (launch-test 30001 1 rinfo keyvals "run1" tconfig "test1" test-path '() (make-hash-table))))) + + + + +(exit 1) + + + + +;; (test "Run a test" #t (general-run-call +;; "-runtests" +;; "run a test" +;; (lambda (target runname keys keyvallst) +;; (let ((test-patts "test%")) +;; ;; (runs:run-tests target runname test-patts user (make-hash-table)) +;; ;; (run:test run-id run-info key-vals runname test-record flags parent-test) +;; ;; (set! *verbosity* 22) ;; (list 0 1 2)) +;; (run:test 1 ;; run-id +;; #f ;; run-info is yet only a dream +;; keyvallst ;; (keys:target->keyval keys target) +;; "run1" ;; runname +;; (vector ;; test_records.scm tests:testqueue +;; "test1" ;; testname +;; tconfig ;; testconfig +;; (make-hash-table) ;; flags +;; #f ;; parent test +;; (tests:get-all) ;; test registry +;; 0 ;; priority +;; #f ;; items +;; #f ;; itemsdat +;; "" ;; itempath +;; ) +;; args:arg-hash ;; flags (e.g. -itemspatt) +;; #f) +;; ;; (set! 
*verbosity* 0) +;; )))) +;; +;; +;; +;; +;; +;; (test "server stop" #f (let ((hostname (car *runremote*)) +;; (port (cadr *runremote*))) +;; (tasks:kill-server #t hostname port server-pid 'http) +;; (open-run-close tasks:get-best-server tasks:open-db))) + +;; (test "cache is coherent" #t (let ((cached-info (db:get-test-info-cached-by-id db 2)) +;; (non-cached (db:get-test-info-not-cached-by-id db 2))) +;; (print "\nCached: " cached-info) +;; (print "Noncached: " non-cached) +;; (equal? cached-info non-cached))) + +(change-directory test-work-dir) +(test #f #t (> (length (mt:get-tests-for-run 1 "test1" '() '())) 0)) +(test "Add a step" #t + (begin + (rmt:teststep-set-status! 1 30002 "step1" "start" 0 "This is a comment" "mylogfile.html") + (sleep 2) + (rmt:teststep-set-status! 1 30002 "step1" "end" "pass" "This is a different comment" "finallogfile.html") + (set! test-id (db:test-get-id (car (mt:get-tests-for-run 1 "test1" '() '())))) + (number? test-id))) + +(test "Get rundir" #t (let ((rundir (cdb:remote-run db:test-get-rundir-from-test-id #f test-id))) + (print "Rundir " rundir) + (system (conc "mkdir -p " rundir)) + (string? rundir))) +(test #f #t (sqlite3#database? (open-test-db "./"))) +(test "Create a test db" "../simpleruns/key1/key2/myrun/test1/testdat.db" + (let ((tdb (open-run-close db:open-test-db-by-test-id db test-id))) + (if tdb (sqlite3#finalize! tdb)) + (file-exists? 
"../simpleruns/key1/key2/myrun/test1/testdat.db"))) + +(test "Get steps for test" #t (let ((steps (cdb:remote-run db:get-steps-for-test #f test-id))) + (print steps) + (> (length steps) 0))) +(test "Get nice table for steps" "2.0s" + (begin + (vector-ref (hash-table-ref (open-run-close db:get-steps-table #f test-id) "step1") 4))) + +;; (exit) + +(test #f "myrun" (cdb:remote-run db:get-run-name-from-id #f 1)) + +(test #f #f (cdb:remote-run db:roll-up-pass-fail-counts #f 1 "nada" "" "PASS")) + +;;====================================================================== +;; R E M O T E C A L L S +;;====================================================================== + +(define start-wait (current-seconds)) +(print "Starting intensive cache and rpc test") +(for-each (lambda (params) + (print "Intensive: params=" params) + (cdb:tests-register-test *runremote* 1 (conc "test" (random 20)) "") + (apply cdb:test-set-status-state *runremote* test-id params) + (cdb:pass-fail-counts *runremote* test-id (random 100) (random 100)) + (cdb:test-rollup-test_data-pass-fail *runremote* test-id) + (cdb:roll-up-pass-fail-counts *runremote* 1 "test1" "" (cadr params)) + (thread-sleep! 0.01)) ;; cache ordering granularity is at the second level. 
Should really be at the ms level + '(("COMPLETED" "PASS" #f) + ("NOT_STARTED" "FAIL" "Just testing") + ("NOT_STARTED" "FAIL" "Just testing") + ("NOT_STARTED" "FAIL" "Just testing") + ("COMPLETED" "PASS" #f) + ("NOT_STARTED" "FAIL" "Just testing") + ("KILLED" "UNKNOWN" "More testing") + ("NOT_STARTED" "FAIL" "Just testing") + ("NOT_STARTED" "FAIL" "Just testing") + ("COMPLETED" "PASS" #f) + ("NOT_STARTED" "FAIL" "Just testing") + ("NOT_STARTED" "FAIL" "Just testing") + ("KILLED" "UNKNOWN" "More testing") + ("NOT_STARTED" "FAIL" "Just testing") + ("COMPLETED" "PASS" #f) + ("NOT_STARTED" "FAIL" "Just testing") + ("KILLED" "UNKNOWN" "More testing") + ("NOT_STARTED" "FAIL" "Just testing") + ("NOT_STARTED" "FAIL" "Just testing") + ("COMPLETED" "PASS" #f) + ("NOT_STARTED" "FAIL" "Just testing") + ("NOT_STARTED" "FAIL" "Just testing") + ("KILLED" "UNKNOWN" "More testing") + ("NOT_STARTED" "FAIL" "Just testing") + ("COMPLETED" "PASS" #f) + ("NOT_STARTED" "FAIL" "Just testing") + ("NOT_STARTED" "FAIL" "Just testing") + ("KILLED" "UNKNOWN" "More testing") + ("NOT_STARTED" "FAIL" "Just testing") + ("NOT_STARTED" "FAIL" "Just testing") + ("COMPLETED" "PASS" #f) + ("NOT_STARTED" "FAIL" "Just testing") + ("KILLED" "UNKNOWN" "More testing") + ("NOT_STARTED" "FAIL" "Just testing") + ("NOT_STARTED" "FAIL" "Just testing") + ("COMPLETED" "PASS" #f) + ("NOT_STARTED" "FAIL" "Just testing") + ("KILLED" "UNKNOWN" "More testing") + ("KILLED" "UNKNOWN" "More testing") + )) + +;; now set all tests to completed +(cdb:flush-queue *runremote*) +(let ((tests (cdb:remote-run db:get-tests-for-run #f 1 "%" '() '()))) + (print "Setting " (length tests) " to COMPLETED/PASS") + (for-each + (lambda (test) + (cdb:test-set-status-state *runremote* (db:test-get-id test) "COMPLETED" "PASS" "Forced pass")) + tests)) + +;; (process-wait server-pid) +;; (test "Server wait time" #t (let ((run-delta (- (current-seconds) start-wait))) +;; (print "Server ran for " run-delta " seconds") +;; (> run-delta 20))) + 
+(test "Rollup the run(s)" #t (begin + (runs:rollup-run keys (keys->alist keys "na") "rollup" "matt") + #t)) + +(hash-table-set! args:arg-hash ":runname" "%") + +(test "Remove the rollup run" #t (begin (operate-on 'remove-runs))) + +(print "Waiting for server to be done, should be about 20 seconds") +(test "server stop" #f (let ((hostname (car *runremote*)) + (port (cadr *runremote*))) + (tasks:kill-server #t hostname port server-pid 'http) + (open-run-close tasks:get-best-server tasks:open-db))) + +;; (cdb:kill-server *runremote*) + +;; (thread-join! th1 th2 th3) + +;; ADD ME!!!! (db:get-prereqs-not-met *db* 1 '("runfirst") "" mode: 'normal) +;; ADD ME!!!! (rdb:get-tests-for-run *db* 1 "runfirst" #f '() '()) ADDED tests/unittests/server.scm Index: tests/unittests/server.scm ================================================================== --- /dev/null +++ tests/unittests/server.scm @@ -0,0 +1,193 @@ +;;====================================================================== +;; S E R V E R +;;====================================================================== + +;; Run like this: +;; +;; (cd ..;make && make install) && ./rununittest.sh server 1;(cd simplerun;megatest -stop-server 0) + +(delete-file* "logs/1.log") +(define run-id 1) + +(test "setup for run" #t (begin (launch:setup-for-run) + (string? (getenv "MT_RUN_AREA_HOME")))) + +;; Insert data into db +;; +(define user (current-user-name)) +(define runname "mytestrun") +(define keys (rmt:get-keys)) +(define runinfo #f) +(define keyvals '(("SYSTEM" "abc")("RELEASE" "def"))) +(define header (list "SYSTEM" "RELEASE" "id" "runname" "state" "status" "owner" "event_time")) + +(test #f 1 (rmt:register-run keyvals runname "new" "n/a" user)) +;; (test #f #f (rmt:get-runs-by-patt keys runname)) +(test #f #t (rmt:general-call 'register-test run-id run-id "test-one" "")) +(define test-one-id #f) +(test #f 30001 (let ((test-id (rmt:get-test-id run-id "test-one" ""))) + (set! 
test-one-id test-id) + test-id)) +(define test-one-rec #f) +(test #f "test-one" (let ((test-rec (rmt:get-test-info-by-id run-id test-one-id))) + (set! test-one-rec test-rec) + (vector-ref test-rec 2))) + +(use trace) +(import trace) +;; (trace +;; rmt:send-receive +;; rmt:open-qry-close-locally +;; ) + +;; Tests to assess reading/writing while servers are starting/stopping +(define start-time (current-seconds)) +(let loop ((test-state 'start)) + (let* ((server-dats (tasks:get-server-records (db:delay-if-busy (tasks:open-db)) run-id)) + (first-dat (if (not (null? server-dats)) + (car server-dats) + #f)) + (server-state (or (and first-dat (string->symbol (vector-ref first-dat 8))) 'no-dat))) + (if first-dat + (map (lambda (dat) + (apply print (intersperse (vector->list dat) ", "))) + server-dats) + (print "No server")) + (test #f test-one-rec (rmt:get-test-info-by-id run-id test-one-id)) + (thread-sleep! 1) + (case test-state + ((start) + (print "Trying to start server") + (server:kind-run run-id) + (loop 'server-started)) + ((server-started) + (case server-state + ((running) + (print "Server appears to be running. Now ask it to shutdown") + (rmt:kill-server run-id) + ;; (trace rmt:open-qry-close-locally rmt:send-receive) + (loop 'shutdown-started)) + ((available) + (loop test-state)) + ((shutting-down) + (loop test-state)) + ((no-dat) + (loop test-state)) + (else (print "Don't know what to do if get here")))) + ((shutdown-started) + (case server-state + ((no-dat) + (print "Server appears to have shutdown, ending this test")) + (else + (loop test-state))))))) + +(exit) + +;; (set! *transport-type* 'http) +;; +;; (test "setup for run" #t (begin (setup-for-run) +;; (string? (getenv "MT_RUN_AREA_HOME")))) +;; +;; (test "server-register, get-best-server" #t (let ((res #f)) +;; (open-run-close tasks:server-register tasks:open-db 1 "bob" 1234 100 'live 'http) +;; (set! res (open-run-close tasks:get-best-server tasks:open-db)) +;; (number? 
(vector-ref res 3)))) +;; +;; (test "de-register server" #f (let ((res #f)) +;; (open-run-close tasks:server-deregister tasks:open-db "bob" port: 1234) +;; (vector? (open-run-close tasks:get-best-server tasks:open-db)))) +;; +;; (define server-pid #f) +;; +;; ;; Not sure how the following should work, replacing it with system of megatest -server +;; ;; (test "launch server" #t (let ((pid (process-fork (lambda () +;; ;; ;; (daemon:ize) +;; ;; (server:launch 'http))))) +;; ;; (set! server-pid pid) +;; ;; (number? pid))) +;; (system "../../bin/megatest -server - -debug 22 > server.log 2> server.log &") +;; +;; (let loop ((n 10)) +;; (thread-sleep! 1) ;; need to wait for server to start. +;; (let ((res (open-run-close tasks:get-best-server tasks:open-db))) +;; (print "tasks:get-best-server returned " res) +;; (if (and (not res) +;; (> n 0)) +;; (loop (- n 1))))) +;; +;; (test "get-best-server" #t (begin +;; (client:launch) +;; (let ((dat (open-run-close tasks:get-best-server tasks:open-db))) +;; (vector? dat)))) +;; +;; (define *keys* (keys:config-get-fields *configdat*)) +;; (define *keyvals* (keys:target->keyval *keys* "a/b/c")) +;; +;; (test #f #t (string? 
(car *runremote*))) +;; (test #f '(#t "successful login") (rmt:login)) ;; *runremote* *toppath* *my-client-signature*))) +;; +;; (test #f #f (rmt:get-test-info-by-id 1 99)) ;; get non-existant test +;; +;; ;; RUNS +;; (test #f 1 (rmt:register-run *keyvals* "firstrun" "new" "n/a" (current-user-name))) +;; (test "get run info" "firstrun" (let ((rinfo (rmt:get-run-info 1))) +;; (vector-ref (vector-ref rinfo 1) 3))) +;; (test "get runname from id" "firstrun" (rmt:get-run-name-from-id 1)) +;; +;; ;; TESTS +;; (test "get tests (no data)" '() (rmt:get-tests-for-run 1 "%" '() '() #f #f #f #f #f #f)) +;; (test "register test" #t (rmt:general-call 'register-test 1 1 "test1" "")) +;; (test "get tests (some data)" 1 (length (rmt:get-tests-for-run 1 "%" '() '() #f #f #f #f #f #f))) +;; (test "get test id" 1 (rmt:get-test-id 1 "test1" "")) +;; +;; (test "sync back" #t (> (rmt:sync-inmem->db) 0)) +;; (test "get test id from main" 1 (db:get-test-id *db* 1 "test1" "")) +;; +;; (test "get keys" #t (list? (rmt:get-keys))) +;; (test "set comment" #t (begin (rmt:general-call 'set-test-comment 1 "this is a comment" 1) #t)) +;; (test "get comment" "this is a comment" (let ((trec (rmt:get-test-info-by-id 1 1))) +;; (db:test-get-comment trec))) +;; +;; ;; MORE RUNS +;; (test "get runs" #t (let* ((runs (rmt:get-runs "%" #f #f '())) +;; (header (vector-ref runs 0)) +;; (data (vector-ref runs 1))) +;; (and (list? header) +;; (list? data) +;; (vector? 
(car data))))) +;; +;; (test "get local testinfo" "test1" (vector-ref (db:get-testinfo-state-status *db* 1 1) 2)) +;; (test "get testinfo" "test1" (vector-ref (rmt:get-testinfo-state-status 1 1) 2)) +;; +;; ;;====================================================================== +;; ;; D B +;; ;;====================================================================== +;; +;; (test "pass fail counts" #t (rmt:general-call 'pass-fail-counts 10 9 1)) +;; (test "get pass fail counts" 19 (let ((dat (rmt:get-test-info-by-id 1))) +;; (+ (db:test-get-pass_count dat) +;; (db:test-get-fail_count dat)))) +;; +;; (define testregistry (make-hash-table)) +;; (for-each +;; (lambda (tname) +;; (for-each +;; (lambda (itempath) +;; (let ((tkey (conc tname "/" itempath)) +;; (rpass (random 10)) +;; (rfail (random 10))) +;; (hash-table-set! testregistry tkey (list tname itempath)) +;; (rmt:general-call 'register-test 1 tname itempath) +;; (let* ((tid (rmt:get-test-id 1 tname itempath)) +;; (tdat (rmt:get-test-info-by-id tid))) +;; (rmt:general-call 'pass-fail-counts rpass rfail (db:test-get-id tdat)) +;; (let* ((resdat (rmt:get-test-info-by-id tid))) +;; (test "set/get pass fail counts" (list rpass rfail) +;; (list (db:test-get-pass_count resdat) +;; (db:test-get-fail_count resdat))))))) +;; (list "" "a" "b" "c" "d" "e" "f" "g" "h" "i" "j"))) +;; (list "test1" "test2" "test3" "test4" "test5")) +;; +;; +;; (test #f '(#t "exit process started") (rmt:kill-server)) ;; *toppath* *my-client-signature* #f))) +;; ADDED tests/unittests/tests.scm Index: tests/unittests/tests.scm ================================================================== --- /dev/null +++ tests/unittests/tests.scm @@ -0,0 +1,13 @@ +;;====================================================================== +;; itemwait, itemmatch + +(db:compare-itempaths ref-item-path item-path itemmap) + +;; prereqs-not-met + +(rmt:get-prereqs-not-met run-id waitons item-path mode: testmode itemmap: itemmap)) + + (fails (runs:calc-fails 
prereqs-not-met)) + (prereq-fails (runs:calc-prereq-fail prereqs-not-met)) + (non-completed (runs:calc-not-completed prereqs-not-met)) + (runnables (runs:calc-runnable prereqs-not-met))) ADDED tests/vectors-vs-records.scm Index: tests/vectors-vs-records.scm ================================================================== --- /dev/null +++ tests/vectors-vs-records.scm @@ -0,0 +1,37 @@ +(use srfi-9) + +(define numtodo (string->number (caddr (argv)))) + +;; using vectors +(define testvalvec (vector 0 1 2 3 4 5)) +(define-inline (testing:get-first vec )(vector-ref vec 0)) +(define-inline (testing:get-count vec )(vector-ref vec 5)) +(define-inline (testing:set-first! vec val)(vector-set! vec 0 val)) +(define-inline (testing:set-count! vec val)(vector-set! vec 5 val)) + +(if (equal? (cadr (argv)) "vectors") + (begin + (print "Testing " numtodo " vectors") + (let loop ((i 0)) + (testing:set-count! testvalvec i) + (testing:set-first! testvalvec (testing:get-count testvalvec)) + (if (< i numtodo) + (loop (+ i 1)))))) + +;; using records +(define-record-type testing + (make-testing zeroeth first second third fourth count) + testing? + (count get:count set:count) + (first get:first set:first)) + +(define testvalrec (make-testing 0 1 2 3 4 5)) + +(if (equal? (cadr (argv)) "records") + (begin + (print "Testing " numtodo " records") + (let loop ((i 0)) + (set:count testvalrec i) + (set:first testvalrec (get:count testvalrec)) + (if (< i numtodo) + (loop (+ i 1)))))) ADDED tests/watch-monitor.sh Index: tests/watch-monitor.sh ================================================================== --- /dev/null +++ tests/watch-monitor.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ -e fullrun/db/monitor.db ];then +sqlite3 fullrun/db/monitor.db << EOF +.header on +.mode column +select * from servers order by start_time desc; +.q +EOF +fi