3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766(*****************************************************************************)(* *)(* Open Source License *)(* Copyright (c) 2020-2023 Nomadic Labs <contact@nomadic-labs.com> *)(* Copyright (c) 2020 Metastate AG <hello@metastate.dev> *)(* *)(* Permission is hereby granted, free of charge, to any person obtaining a *)(* copy of this software and associated documentation files (the "Software"),*)(* to deal in the Software without restriction, including without limitation *)(* the rights to use, copy, modify, merge, publish, distribute, sublicense, *)(* and/or sell copies of the Software, and to permit persons to whom the *)(* Software is furnished to do so, subject to the following conditions: *)(* *)(* The above copyright notice and this permission notice shall be included *)(* in all copies or substantial portions of the Software. *)(* *)(* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR*)(* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *)(* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *)(* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*)(* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING *)(* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *)(* DEALINGS IN THE SOFTWARE. *)(* *)(*****************************************************************************)openBaselet()=Clap.description"Run or manage test suite.\n\n\
Running without any argument runs the full test suite. To see the list of \
tests instead of running it, run with the argument `--list`. See below \
for more details on how to control test execution and select tests."moduleOptions=structtypetemporary_file_mode=Delete|Delete_if_successful|Keeplettemporary_file_mode=Clap.flag_enum~description:"Whether to delete temporary files and directories that were created.\n\n\
--keep-temp: Keep temporary files and directories after tests.\n\n\
--delete-temp: Delete temporary files and directories after tests.\n\n\
--delete-temp-if-success: Delete temporary files and directories, \
except if the test failed. If a test succeeds after it however, it \
causes all temporary files to be deleted, even those of the failed \
tests; so you should probably avoid using this argument in \
conjunction with --keep-going or --job-count."[(["keep-temp"],[],Keep);(["delete-temp"],[],Delete);(["delete-temp-if-success"],[],Delete_if_successful);]Deleteletkeep_going=Clap.flag~set_long:"keep-going"~set_short:'k'~description:"If a test fails, continue with the remaining tests instead of \
stopping. Aborting manually with Ctrl+C still stops everything."falseletglobal_timeout=Clap.optional_float~long:"global-timeout"~placeholder:"SECONDS"~description:"Fail if the set of tests takes more than SECONDS to run."()lettest_timeout=Clap.optional_float~long:"test-timeout"~placeholder:"SECONDS"~description:"Fail if a test takes, on its own, more than SECONDS to run."()letcleanup_timeout=Clap.default_float~long:"cleanup-timeout"~placeholder:"SECONDS"~description:"Send SIGKILL to processes that are still running SECONDS after they \
were asked to stop. This affects processes stopped with \
Process.terminate, and workers when running with --job-count."60.letwarn_after_timeout=Clap.default_float~long:"warn-after-timeout"~placeholder:"SECONDS"~description:"If a test is still running SECONDS after it started, warn. This \
warning repeats every SECONDS as long as the test is running. If \
SECONDS is negative or null, disable this warning instead. This \
option only has an effect when using --job-count (-j) with a value of \
2 or more."60.letretry=Clap.default_int~long:"retry"~placeholder:"COUNT"~description:"Retry each failing test up to COUNT times. If one retry is \
successful, the test is considered successful."0letreset_regressions=Clap.flag~set_long:"reset-regressions"~description:"Remove regression test outputs if they exist, and regenerate them."falsetypeon_unknown_regression_files_mode=Warn|Ignore|Fail|Deleteleton_unknown_regression_files_mode_type=Clap.enum"mode for --on-unknown-regression-files"[("warn",Warn);("ignore",Ignore);("fail",Fail);("delete",Delete)]leton_unknown_regression_files_mode=Clap.defaulton_unknown_regression_files_mode_type~long:"on-unknown-regression-files"~placeholder:"MODE"~description:"How to handle regression test outputs that are not declared by any \
test. MODE can be:\n\
- warn: emit a warning for unknown output files;\n\
- ignore: ignore unknown output files;\n\
- fail: terminate execution with exit code 1 and without running any \
further action when unknown output files are found;\n\
- delete: delete unknown output files.\n\n\
To check which files would be deleted, run with this option set to \
'warn', which is the default."Warntypeloop_mode=Infinite|Countofintletloop=Clap.flag~set_long:"loop"~description:"Restart from the beginning once all tests are done. All tests are \
repeated until one of them fails or if you interrupt with Ctrl+C. \
This is useful to reproduce non-deterministic failures. When used in \
conjunction with --keep-going, tests are repeated even if they fail, \
until you interrupt them with Ctrl+C."falseletloop_count=Clap.optional_int~long:"loop-count"~placeholder:"COUNT"~description:"Stop after all tests have been run COUNT times. Implies --loop. A \
value of 0 means tests are not run."()letloop_mode=matchloop_countwith|None->ifloopthenInfiniteelseCount1|Somecount->Countcountletresume_file=Clap.optional_string~long:"resume-file"~placeholder:"FILE"~description:"Record test results to FILE for use with --resume. When using \
--resume, test results that existed in FILE are kept, contrary to \
--record."()letresume=Clap.flag~set_long:"resume"~set_short:'r'~description:"Resume from a previous run. This reads the resume file located at \
--resume-file to resume from it.\n\n\
If --resume-file is not specified, --resume implies --resume-file \
tezt-resume.json. If the resume file does not exist, act as if it was \
empty.\n\n\
Before running a test, it is checked whether this test was already \
successfully ran according to the resume file. If it was, the test is \
skipped.\n\n\
When using --loop or --loop-count, the test is skipped as many times \
as it was successful according to the resume file."falseletjob_count=Clap.default_int~long:"job-count"~short:'j'~placeholder:"COUNT"~description:"Run COUNT tests in parallel, in separate processes. With \
--suggest-jobs, set the number of target jobs for --suggest-jobs \
instead.\n\n\
If environment variable TEZT_JOB_COUNT is a positive integer, default \
value is the value of TEZT_JOB_COUNT. Else, default value is 1."(matchSys.getenv_opt"TEZT_JOB_COUNT"with|None->1|Somes->(matchint_of_string_optswith|None->1|Somen->ifn>0thennelse1))lettest_arg_type=Clap.typ~name:"test argument"~dummy:("","")~parse:(funstring->letlen=String.lengthstringinletrecfind_equali=ifi>=lenthenNoneelseifstring.[i]='='thenSomeielsefind_equal(i+1)inSome(matchfind_equal0with|None->(string,"true")|Somei->(String.substring0i,String.substring(i+1)(len-i-1))))~show:(fun(parameter,value)->parameter^"="^value)lettest_args_list=Clap.listtest_arg_type~long:"test-arg"~short:'a'~placeholder:"<PARAMETER>=<VALUE>"~description:"Pass a generic argument to tests. Tests can get this argument with \
Cli.get. --test-arg <PARAMETER> is a short-hand for: --test-arg \
<PARAMETER>=true"()lettest_args=List.fold_left(funacc(k,v)->String_map.addkvacc)String_map.emptytest_args_listletseed=Clap.optional_int~long:"seed"~placeholder:"SEED"~description:"Force tests declared with ~seed:Random to initialize the \
pseudo-random number generator with this seed."()endmoduleLogs=structletsection=Clap.section"LOGS"letcolor=Clap.flag~section~set_long:"color"~unset_long:"no-color"~description:"Whether to use colors in output. Default value depends on whether \
stdout is a terminal and on the value of the TERM environment \
variable."(Unix.isattyUnix.stdout&&Sys.getenv_opt"TERM"<>Some"dumb")lettimestamp=Clap.flag~section~set_long:"log-timestamp"~unset_long:"no-log-timestamp"~description:"Whether to print a timestamp."trueletprefix=Clap.flag~section~set_long:"log-prefix"~unset_long:"no-log-prefix"~description:"Whether to print log prefixes, i.e. the '~prefix' argument of log \
functions."truetypelevel=Quiet|Error|Warn|Report|Info|Debugletlevel_type=Clap.enum"log level"[("quiet",Quiet);("error",Error);("warn",Warn);("report",Report);("info",Info);("debug",Debug);]letlevel_1=Clap.defaultlevel_type~section~long:"log-level"~placeholder:"LEVEL"~description:"Set log level. Possible LEVELs are: quiet, error, warn, report, info, \
debug."Reportletlevel_2=Clap.flag_enum~section~description:"Set log level. Overrides --log-level.\n\n\
--verbose, -v: Same as --log-level debug.\n\
--quiet, -q: Same as --log-level quiet.\n\
--info, -i: Same as --log-level info."[(["verbose"],['v'],SomeDebug);(["quiet"],['q'],SomeQuiet);(["info"],['i'],SomeInfo);]Noneletlevel=matchlevel_2withSomex->x|None->level_1letfile=Clap.optional_string~section~long:"log-file"~placeholder:"FILE"~description:"Also log to FILE. Note that --log-level does not apply: FILE contains \
logs in verbose mode. In the presence of --job-count, the main \
process will log test results to FILE while each worker writes test \
logs to a separate file BASENAME-WORKER_ID[.EXT]. BASENAME is the \
basename of FILE, WORKER_ID is the zero-indexed id of the worker and \
.EXT is the extension of FILE if present."()letbuffer_size=Clap.default_int~section~long:"log-buffer-size"~placeholder:"COUNT"~description:"Before logging an error on stdout, also log the last COUNT messages \
that have been ignored because of the log level since the last \
message that was not ignored."50letworker_id=Clap.flag~section~set_long:"log-worker-id"~description:"Decorate logs with worker IDs when --job-count is more than 1."falseletcommands=Clap.flag~section~set_long:"commands"~set_short:'c'~description:"Output commands which are run, in a way that is easily copy-pasted \
for manual reproducibility."falseendmoduleReports=structletsection=Clap.section"REPORTS"lettime=Clap.flag~section~set_long:"time"~description:"Print a summary of the total time taken by each test. Ignored if a \
test failed. Includes the time read from records: to display a \
record, you can use --from-record <FILE> --list --time."falseletrecord=Clap.optional_string~section~long:"record"~placeholder:"FILE"~description:"Record test results to FILE. This file can then be used with \
--from-record. If you use --loop or --loop-count, times are averaged \
for each test."()letfrom_records=Clap.list_string~section~long:"from-record"~placeholder:"FILE"~description:"Use data recorded with --record. If specified multiple times, use the \
union of those records.\n\n\
If <FILE> is a directory, this is equivalent to specifying \
--from-record for all files in this directory that have the .json \
extension.\n\n\
When using --time, test durations include tests found in record \
files.\n\n\
When using --record, the new record which is output does NOT include \
the input records.\n\n\
When using --junit, reports do NOT include input records."()letjunit=Clap.optional_string~section~long:"junit"~placeholder:"FILE"~description:"Store test results in FILE using JUnit XML format. Time information \
for each test is the sum of all runs of this test for the current \
session. Test result (success or failure) is the result for the last \
run of the test."()endmoduleCommands=structletsection=Clap.section"COMMANDS"typecommand=Run|List|List_tsv|Suggest_jobs|Versionletcommand=Clap.flag_enum~section~description:"Do not run tests. Instead:\n\n\
--list, -l: List tests.\n\n\
Pass --time to also display results and timings (in seconds) from a \
previous execution given through --from-record, in the format TIME \
(COUNT). TIME is the average time of successful executions. COUNT is \
SCOUNT/(SCOUNT+FCOUNT) where SCOUNT (resp. FCOUNT) is the number of \
successful (resp. failed) tests in the record. If there is only one \
successful test, then (COUNT) is omitted. Tests lacking a past record \
of successful executions are noted '-'. A final row is added \
containing the total of the averages of successful test executions, \
and the total number of selected tests.\n\n\
--list-tsv: List tests as tab-separated values in the format FILE \
TITLE TAGS.\n\n\
Pass --time to also display results and timings (in nanoseconds) from \
a previous execution given through --from-record. Then each line is \
appended with STIME SCOUNT FTIME FCOUNT. STIME (resp. FTIME) is the \
total running time in nanoseconds of successful (resp. failed) \
previous runs. SCOUNT (resp. FCOUNT) is the count of successful \
(resp. failed) previous runs.\n\n\
--suggest-jobs: Read test results records specified with \
--from-record and suggest a partition of the tests that would result \
in --job-count sets of roughly the same total duration. Output each \
job as a list of flags that can be passed to Tezt, followed by a \
shell comment that denotes the expected duration of the job.\n\n\
A similar result can be obtained with --list --job, except that the \
last job suggested by --suggest-jobs uses --not-test to express \"all \
tests that are not already in other jobs\", meaning that the last job \
acts as a catch-all for unknown tests.\n\n\
--version: Print the version number of Tezt and exit."[(["list"],['l'],List);(["list-tsv"],[],List_tsv);(["suggest-jobs"],[],Suggest_jobs);(["version"],[],Version);]RunendmoduleSelecting_tests=structletsection=Clap.section"SELECTING TESTS"~description:("Tests are registered with a filename, a title, and a list of tags. \
You can specify multiple tags, negated tags, titles, title patterns \
and filenames on the command line. Only tests which match all the \
following conditions will be run:\n\
- the test must have all tags and none of the negated tags;\n\
- the test must have one of the specified titles;\n\
- the test must have a title matching one of the specified patterns;\n\
- the test must be implemented in one of the specified files.\n\n\
The tags of a test are given by the ~tags argument of Test.register. \
To negate a tag, prefix it with a slash: /\n\n\
The title of a test is given by the ~title argument of \
Test.register. It is what is printed after [SUCCESS] (or [FAILURE] \
or [ABORTED]) in the reports. Use --title (respectively --not-title) \
to select (respectively unselect) a test by its title on the \
command-line. You can also select (respectively unselect) tests for \
which 'filename: title' matches one or several Perl regular \
expressions using --match (respectively --not-match).\n\n\
The file in which a test is implemented is specified by the \
~__FILE__ argument of Test.register. In other words, it is the path \
of the file in which the test is defined. Use --file (respectively \
--not-file) to select (respectively unselect) a test by its path (or \
a suffix thereof) on the command-line.\n\n\
For instance:\n\n"^Sys.argv.(0)^" node bake /rpc --file bootstrap.ml --file sync.ml\n\n\
will run all tests defined in either bootstrap.ml or sync.ml, which \
have at least tags 'node' and 'bake', but which do not have the \
'rpc' tag.\n\n\
You can also specify more complex predicates using the Test \
Selection Language (TSL). See section TEST SELECTION LANGUAGE (TSL) \
below.")letfiles_to_run=Clap.list_string~section~long:"file"~short:'f'~placeholder:"FILE"~description:"Only run tests implemented in source files ending with FILE."()letfiles_not_to_run=Clap.list_string~section~long:"not-file"~placeholder:"FILE"~description:"Only run tests not implemented in source files ending with FILE."()letrex_type=Clap.typ~name:"Perl regular expression"~dummy:(rex"")~parse:(funs->trySome(rexs)withRe.Perl.Parse_error->None)~show:show_rexletpatterns_to_run=Clap.listrex_type~section~long:"match"~short:'m'~placeholder:"PERL_REGEXP"~description:"Only run tests for which 'FILE: TITLE' matches PERL_REGEXP (case \
insensitive), where FILE is the source file of the test and TITLE its \
title."()letpatterns_not_to_run=Clap.listrex_type~section~long:"not-match"~placeholder:"PERL_REGEXP"~description:"Only run tests for which 'FILE: TITLE' does not match PERL_REGEXP \
(case insensitive), where FILE is the source file of the test and \
TITLE its title."()lettests_to_run=Clap.list_string~section~long:"title"~long_synonyms:["test"]~short:'t'~description:"Only run tests which are exactly entitled TITLE."()lettests_not_to_run=Clap.list_string~section~long:"not-title"~long_synonyms:["not-test"]~description:"Only run tests which are not exactly entitled TITLE."()letjob_type=Clap.typ~name:"--job"~dummy:(1,1)~parse:(funvalue->matchString.split_on_char'/'valuewith|[index;count]->(match(int_of_string_optindex,int_of_string_optcount)with|Someindex,Somecountwhenindex>=1&&count>=1&&index<=count->Some(index,count)|_->None)|_->None)~show:(fun(index,count)->string_of_intindex^"/"^string_of_intcount)letjob=Clap.optionaljob_type~section~long:"job"~placeholder:"<INDEX>/<COUNT>"~description:"Split the set of selected tests into COUNT subsets of roughly the \
same total duration. Execute only one of these subsets, specified by \
INDEX.\n\n\
COUNT must be at least 1 and INDEX must be between 1 and COUNT.\n\n\
Use --from-record to feed duration data from past runs. Tests for \
which no time data is available are given a default duration of 1 \
second.\n\n\
You can use --list to see what tests are in a subset without actually \
running the tests.\n\n\
A typical use is to run tests in parallel on different machines. For \
instance, have one machine run with --job 1/3, one with --job 2/3 and \
one with --job 3/3. Be sure to provide exactly the same records with \
--from-record, in the same order, and to select exactly the same set \
of tests (same tags, same --file and same --test) for all machines, \
otherwise some tests may not be run at all."()letskip=Clap.default_int~section~long:"skip"~placeholder:"COUNT"~description:"Skip the first COUNT tests. This filter is applied after --job and \
before --only."0letonly=Clap.optional_int~section~long:"only"~placeholder:"COUNT"~description:"Only run the first COUNT tests. This filter is applied after --job \
and --skip."()lettsl_expression_type=Clap.typ~name:"TSL expression"~dummy:TSL_AST.True~parse:TSL.parse~show:TSL.showlettsl_expression_lazy=lazy(Clap.listtsl_expression_type~section~placeholder:"TSL_EXPRESSION"~description:"Only run tests that satisfy the predicate denoted by \
TSL_EXPRESSION."()|>TSL.conjunction)lettsl_expression()=Lazy.forcetsl_expression_lazytypeon_empty_test_list=Ignore|Warn|Failleton_empty_test_list_type=Clap.enum"mode for --on-empty-test-list"[("ignore",Ignore);("warn",Warn);("fail",Fail)]leton_empty_test_list=Clap.defaulton_empty_test_list_type~section~long:"on-empty-test-list"~placeholder:"MODE"~description:"How to behave if the list of selected tests is empty. MODE can be:\n\
- ignore: exit with code 0;\n\
- warn: output 'No test found for filters: ...', exit with code 0;\n\
- fail: output 'No test found for filters: ...', exit with code 3."Failendlet_=Clap.section"TEST SELECTION LANGUAGE (TSL)"~description:("For more advanced needs, you can select tests using Test Selection \
Language (TSL) expressions. For instance:\n\n"^Sys.argv.(0)^" 'file = bootstrap.ml || bake && /rpc'\n\n\
runs all tests defined in bootstrap.ml, as well as tests (possibly \
from other files) with tag 'bake' but without tag 'rpc'. This example \
cannot be expressed without TSL.\n\n\
Passing multiple predicates is equivalent to passing their conjunction \
as a single predicate. For instance, passing the two command-line \
arguments 'a || b' and 'c || d' is equivalent to passing the single \
argument '(a || b) && (c || d)'.\n\n\
\ TSL allows the following expressions, where STRING denotes a \
string and EXPR denotes a TSL expression:\n\
- 'true': always true;\n\
- 'false': always false;\n\
- 'EXPR && EXPR': conjunction;\n\
- 'EXPR || EXPR': disjunction;\n\
- 'not EXPR': negation;\n\
- 'STRING': test has tag STRING;\n\
- '/STRING': same as 'not STRING' (STRING must not be quoted);\n\
- 'file = STRING': test file is STRING;\n\
- 'file <> STRING': same as 'not (file = STRING)';\n\
- 'title = STRING': test title is STRING;\n\
- 'title <> STRING': same as 'not (title = STRING)';\n\
- 'file =~ STRING': test file matches the regular expression STRING;\n\
- 'file =~! STRING': same as 'not (file =~ STRING)';\n\
- 'title =~ STRING': test title matches the regular expression STRING;\n\
- 'title =~! STRING': same as 'not (title =~ STRING)';\n\
- '(EXPR)': same as EXPR.\n\n\
'not' has higher precedence than '&&' which has higher precedence than \
'||'.\n\n\
TSL strings need to be quoted using double quotes '\"' unless they \
only contain characters 'a-zA-Z0-9_-./' and do not start with a slash \
'/'. Double quotes '\"' and backslashes '\\' need to be escaped using \
backslashes '\\'.\n\n\
Note that 'file = STRING' is not equivalent to '--file STRING'. \
Indeed, '--file a.ml' selects 'x/a.ml', while 'file = a.ml' does not.\n\n\
Examples of TSL expressions:\n\n\
'/tag && title = test_title'\n\
'not (some_tag || \"some_other_tag\") && title <> \"test title\"'\n\
'file =~ \"some *regular *expression\"'\n")letget_optparseparameter=matchString_map.find_optparameterOptions.test_argswith|Somevalue->(matchparsevaluewith|None->failwith(sf"invalid value for -a %s: %s"parametervalue)|Somevalue->Somevalue)|None->Noneletget?defaultparseparameter=matchget_optparseparameterwith|Somev->v|None->(matchdefaultwith|None->failwith(sf"missing test argument %s, please specify it with: -a %s=<VALUE>"parameterparameter)|Somedefault->default)letget_bool?defaultparameter=get?defaultbool_of_string_optparameterletget_int?defaultparameter=get?defaultint_of_string_optparameterletget_float?defaultparameter=get?defaultfloat_of_string_optparameterletget_string?defaultparameter=get?defaultOption.someparameterletget_bool_optparameter=get_optbool_of_string_optparameterletget_int_optparameter=get_optint_of_string_optparameterletget_float_optparameter=get_optfloat_of_string_optparameterletget_string_optparameter=get_optOption.someparameter