1 % (c) 2009-2024 Lehrstuhl fuer Softwaretechnik und Programmiersprachen,
2 % Heinrich Heine Universitaet Duesseldorf
% This software is licensed under EPL 1.0 (http://www.eclipse.org/org/documents/epl-v10.html)
4
5 %:- use_module('../extensions/profiler/profiler.pl').
6
7 :- module(test_runner, [
8 test_repl/0,
9 run_silently/0,
10 run_safely/0,
11 run_last_test/0, last/0,
12 run_tests_by_id/1, ids/1, run_id/1,
13 run_tests_all/0, run_tests_all_except/1,
14 run_tests_by_category/1, run_tests_by_category/2, category/1,
15 run_tests_by_first_category/1,
16 makefile_by_category/2, copy/1,
17 generate_makefile_all_tests/0,
18 generate_makefile_for_coverage/0,
19 full_flush/0,
20 jenkins_sanity_check/0,
21 sanity_check/1,
22 v/0, vv/0,
23 cache/0, % enable caching
24 all_categories/1,
25 all_ids/1,
26 ids_in_category/2,
27 set_prob_examples_location/1,
halt_tests/0 % calls halt with the proper exit code; normally test predicates call halt1 directly, so this is mainly useful in junit mode
29 ]).
30
31 :- if(predicate_property(expects_dialect(_), _)).
32 :- expects_dialect(sicstus4).
33 :- endif.
34
35 % Some errors, such as syntax errors, are only printed through the message mechanism and not thrown as exceptions.
36 % These errors also don't terminate SICStus or affect the exit code,
37 % which makes it easy for them to go unnoticed in CI runs.
38 % As a workaround, add a portray_message hook to detect error messages
39 % and halt the test runner at an appropriate time.
40
41 :- dynamic(prolog_error_occurred/0).
42
43 :- multifile(user:portray_message/2).
44 user:portray_message(error, _) :-
45 \+ prolog_error_occurred,
46 assertz(prolog_error_occurred),
47 fail. % to let SICStus portray the error
48
49 check_no_prolog_startup_error :-
50 prolog_error_occurred,
51 !,
52 format('*** Error(s) from Prolog while starting test runner! Aborting. ***~n', []),
53 halt(1).
54 check_no_prolog_startup_error.
55 :- initialization(check_no_prolog_startup_error).
56
57 :- use_module('../src/pathes', []). % set up library search paths
58
% Uncomment to ensure that plspec is loaded first:
60 % :- use_module('../extensions/plspec/plspec/plspec_core').
61
62 :- use_module(probsrc(prob_cli)). % we call go_cli
63
64 %:- set_prolog_flag(compiling,debugcode).
65 %:- set_prolog_flag(source_info,on).
66 %:- set_prolog_flag(profiling,on).
67
68 /*
:- use_module('../extensions/profiler/profiler.pl'). % (adds the profiler itself)
:- use_module('../extensions/profiler/profiler_te.pl'). % (adds the term expander)
%:- use_module('../extensions/profiler/profiler_gui.pl'). % (adds the profiler GUI)
72 :- enable_profiling_all(b_expression_sharing).
73 :- enable_profiling(bsyntaxtree:transform_bexpr_with_acc/5).
74 :- enable_profiling(bsyntaxtree:transform_bexpr/3).
75 :- enable_profiling(b_ast_cleanup:recompute_used_ids_info/2).
76 :- enable_profiling(bsyntaxtree:sub_expression_contains_wd_condition/1).
77
78 for starting from Spider:
79 set_prob_examples_location('../../prob_examples'), run_silently, test_repl.
80 or use test_runner_cov.pl
81 */
82
83
84 :- use_module(library(system)).
85 :- use_module(library(lists)).
86 :- use_module(library(sets), [intersect/2]).
87 :- use_module(library(file_systems)).
88
89 :- use_module(probsrc(junit_tests),[set_junit_dir/1,create_and_print_junit_result/4]).
90 :- use_module(testcases).
91 :- use_module(probsrc(system_call)).
92 :- use_module(probsrc(tools),[safe_number_codes/2,get_options/5]).
93 :- use_module(probsrc(tools_commands),[edit_file/1]).
94 :- use_module(probsrc(tools_printing), [start_terminal_colour/2, reset_terminal_colour/1, format_with_colour/4, format_with_colour_nl/4]).
95 :- use_module(probsrc(error_manager),[get_total_number_of_errors/1, get_all_errors/1]).
96 :- use_module(probsrc(debug),[formatsilent/3, debug_mode/1]).
97 :- use_module(probsrc(performance_messages),[toggle_perfmessages/0]).
98
99 :- use_module(probsrc(module_information)).
100 :- module_info(group,testing).
101 :- module_info(description,'This module runs the tests stored in testcases.pl.').
102
103 :- set_prolog_flag(double_quotes, codes).
104
105
106 :- use_module(probsrc(tools), [ajoin_with_sep/3]).
107 generate_makefile(RegenerateGoal,Filename,TargetCommand) :-
108 tell(Filename),
109 all_first_categories(AllTestCategories),
110 ajoin_with_sep(AllTestCategories, ' \\\n\t', JoinedCategories),
111 write('# This Makefile is generated automatically. DO NOT EDIT.\n'),
112 write('# If you have added/removed a test category, regenerate using this command:\n'),
113 format('# ./prolog.sh --file tests/test_runner.pl --goal "~q, halt ; halt(1)."\n\n', [RegenerateGoal]),
114 format('categories = \\\n\t~w\n\n', [JoinedCategories]),
115 write('.PHONY: test_all_source\n'),
116 write('test_all_source: $(categories)\n\n'),
117 write('.PHONY: $(categories)\n'),
118 format('$(categories):\n\t~w\n', [TargetCommand]),
119 told.
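
% For orientation, the generated Makefile has roughly this shape (illustrative
% sketch only; the actual category names and the TargetCommand vary):
%
%   categories = \
%       cat1 \
%       cat2
%
%   .PHONY: test_all_source
%   test_all_source: $(categories)
%
%   .PHONY: $(categories)
%   $(categories):
%       <TargetCommand; make expands $@ to the name of the category being built>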
120
121 %write_subtargets([G]) :- write(G).
122 %write_subtargets([G|Gs]) :-
123 % write(G), write(' '),
124 % write_subtargets(Gs).
125
126 generate_makefile_all_tests :-
% run only tests whose first category matches, to avoid running tests multiple times;
% this also avoids concurrency issues when a test is run multiple times in parallel with diff checking of output
129 generate_makefile(generate_makefile_all_tests, 'tests/Makefile_All_Tests',
130 % Makefile_All_Tests is (indirectly) included from the top-level Makefile,
131 % so the working directory is the repository root
132 % and the test_runner.pl path must include the tests/ directory prefix.
133 './prolog.sh --file tests/test_runner.pl --goal "run_silently, run_tests_by_first_category($@), halt_tests ; halt(1)."').
134
135 :- use_module(probsrc(tools_platform), [host_platform/1]).
136 % TODO Do we want to install SWI-Prolog on all CI runners?
137 group_cannot_be_checked_on_ci_server(setlog).
138 group_cannot_be_checked_on_ci_server(smt_solver_integration) :-
139 host_platform(windows). % TODO Install Z3 on GitLab CI Windows runner
140
141 generate_makefile_for_coverage :-
142 generate_makefile(generate_makefile_for_coverage, 'Makefile_Coverage_Groups', './prolog.sh --file tests/prob_cov_runner.pl --goal "coverage_group_data($@), halt ; halt(1)."').
143
144 all_first_categories(X) :- % only categories that appear as first category
145 findall(Group1,
146 cli_testcase(_Id,[Group1|_],_Conf,_Args,_Desc),
147 ListOfGroups),
148 remove_dups(ListOfGroups,X).
149 all_categories(X) :-
150 findall(Groups,
151 cli_testcase(_Id,Groups,_Conf,_Args,_Desc),
152 List),
153 append(List,ListOfGroups),
154 remove_dups(ListOfGroups,X).
155
156 all_ids(X) :-
157 findall(Id,
158 cli_testcase(Id,_Groups,_Conf,_Args,_Desc),
159 List),
160 remove_dups(List,X).
161
162 ids_in_category(Category,IDs) :-
163 findall(Id,
164 (cli_testcase(Id,Groups,_Conf,_Args,_Desc),member(Category,Groups)),
165 List),
166 remove_dups(List,IDs).
167
168 prob_junit_dir(JUnitDir) :- environ('PROB_JUNIT_DIR', JUnitDir).
169
170 prob_junit_args(JUnitArgs) :-
171 prob_junit_dir(JUnitDir), !,
172 JUnitArgs = ['-junit', JUnitDir].
173 prob_junit_args([]).
174
175 halt1_allowed :- \+ repl_mode.
176
177 tests_multiply_timeout(Var) :-
178 environ('TESTS_MULTIPLY_TIMEOUT', Var2), !, tools:arg_is_number(Var2,Var).
179 tests_multiply_timeout(Var) :- multiply_timeout(Var).
180
181 :- dynamic multiply_timeout/1.
182
% move up one directory if needed, e.g. when starting from Spider while in the src subdirectory
184 set_cur_dir :- (directory_exists('./src') -> true /* we seem to be in the right directory */
185 ; directory_exists('./cia') -> current_directory(_Old,'./..')
186 ; print('Please start tests from ProB directory or ProB/src directory'),nl,
187 print('Current directory: '),
188 current_directory(CD), print(CD), nl).
189
190 reset_test_runner(SessionInfo) :-
191 reset_nr_of_tests, set_cur_dir,
192 retractall(test_skipped(_)), retractall(test_failed(_)), retractall(last_test_failed(_)),
193 retractall(test_diff_failed(_,_,_)),
194 statistics(walltime,[WStart,_]),
195 bb_put(reset_test_runner_wtime,WStart),
196 bb_put(reset_test_runner_info,SessionInfo).
197
198
199 ids(Nr) :- show_command(Nr).
200 show_command(Nr) :- cli_testcase(Nr,_,_,L,Desc),
201 print('# '), print(Desc),nl, l_pr([probcli|L]).
202 l_pr([]) :- nl.
203 l_pr([H|T]) :- print(' '),print(H), l_pr(T).
204
205 :- volatile sanity_check_failed/0.
206 :- dynamic sanity_check_failed/0.
207 jenkins_sanity_check :-
208 sanity_check(true),
209 (sanity_check_failed -> halt1 ; halt(0)).
210 sanity_check(_Jenkins) :-
211 cli_testcase(Id,Categories,_,Args,_Comm1),
212 \+ member('-strict',Args),
213 format_warning('*** test does not use -strict option: ',[]), print_test(Id,Categories),nl,
214 assertz(sanity_check_failed), fail.
215 sanity_check(Jenkins) :-
216 Jenkins == false,
217 cli_testcase_diff_check_output(Id,_F1,_F2),
218 cli_testcase(Id,Categories,_I1,_A1,_Comm1),
219 (Categories = [_]
220 -> true
221 ; format_warning('*** multiple categories for test with output file; can generate race conditions when run in parallel: ',[]),
222 print_test(Id,Categories),
nl),
224 assertz(sanity_check_failed), fail.
225 sanity_check(_Jenkins) :-
226 cli_testcase_diff_check_output(Id,_F1,_F2),
227 \+ (cli_testcase(Id,_Categories,_I1,_A1,_Comm1)),
228 format_warning_nl('*** no cli_testcase, but diff output defined for id: ~w',[Id]),
229 assertz(sanity_check_failed), fail.
230 sanity_check(_Jenkins) :-
231 cli_testcase(Id,C1,I1,A1,Comm1),
232 cli_testcase(Id,C2,I2,A2,Comm2),
233 [C1,I1,A1,Comm1] \= [C2,I2,A2,Comm2],
234 format_warning_nl('*** multiple cli_testcases for id: ~w',[Id]),
235 assertz(sanity_check_failed), fail.
236 sanity_check(_Jenkins).
237
238
239 format_error_nl(F,Args) :- format_with_colour_nl(user_output,[red,bold],F,Args).
240 format_warning(F,Args) :- format_with_colour(user_output,[red],F,Args).
241 format_warning_nl(F,Args) :- format_with_colour_nl(user_output,[red],F,Args).
242 format_progress_nl(F,Args) :- format_with_colour_nl(user_output,[blue],F,Args).
243
244 print_test(Id,Comment) :- print(Id),print(' : '), print(Comment).
245
246 % run the latest test (with highest id)
247 last :- run_last_test.
248 run_last_test :- get_last_test_id(Id),
249 run_tests_by_id(Id,_,no_skipping).
250
251 get_last_test_id(Id) :- set_last_test_id, last_test_id(Id).
252
253 set_last_test_id :-
254 retractall(last_test_id(_)), assertz(last_test_id(-1)),
255 cli_testcase(Id,C1,I1,A1,Comm1),
256 (cli_testcase(Id,C2,I2,A2,Comm2),
257 (C1,I1,A1,Comm1) \= (C2,I2,A2,Comm2)
258 -> format_warning_nl('~n*** multiple entries for test id: ~w',[Id])
259 ; true),
260 update_last(Id),
261 fail.
262 set_last_test_id.
263
264 :- volatile last_test_id/1.
265 :- dynamic last_test_id/1.
266 update_last(Id) :- number(Id),!, % debug_print(9,Id), debug_print(9,' '),
267 retract(last_test_id(L)),
268 (Id>L -> Max = Id ; Max = L),
269 assertz(last_test_id(Max)).
270 update_last(Id) :- format_warning_nl('~n*** test id not a number: ~w',[Id]).
271
272 repeat_id(ID,Nr) :- repeat_id_aux(ID,0,Nr,success,10000000,0,0).
273 repeat_id_aux(ID,Nr,TotNr,Status1,Min,Max,Tot) :- Nr<TotNr, !, N1 is Nr+1,
274 statistics(walltime,[Start,_]),
275 run_tests_by_id(ID,Status,no_skipping),
276 (Status=success -> Status2=Status1, Col=blue ; Status2=Status, Col=red),
277 statistics(walltime,[Stop,_]),
278 WT is Stop - Start,
279 NewMin is min(Min,WT), NewMax is max(Max,WT), NewTot is Tot+WT,
280 Average is NewTot / N1,
format_with_colour_nl(user_output,[Col],'Test ~w repeated ~w/~w times~n Walltime Minimum = ~w ms, Maximum = ~w ms, Average = ~w ms~n Status=~w',[ID,N1,TotNr,NewMin,NewMax,Average,Status]),
282 repeat_id_aux(ID,N1,TotNr,Status2,NewMin,NewMax,NewTot).
283 repeat_id_aux(_,_,_TotNr,_Status,_Min,_Max,_).
284
285 % run a test with a specific id
286 run_id(X) :- run_tests_by_id(X,_,no_skipping).
287 run_tests_by_id(X) :- run_tests_by_id(X,_,allow_skipping).
288 run_tests_by_id(X,Status,AllowSkipping) :- reset_test_runner(id(X)),
289 phrase(tests_by_id(X), Testcases),
290 run_list(Testcases, AllowSkipping),
291 check_failed(Status).
292 tests_by_id([]) --> [].
293 tests_by_id(From-To) --> !,
294 ({From>To} -> []
295 ; tests_by_id(From), {F1 is From+1},
296 tests_by_id(F1-To)).
297 tests_by_id([Id | Ids]) --> !,
298 tests_by_id(Id),
299 tests_by_id(Ids).
300 tests_by_id(Id) -->
301 {cli_testcase(Id,TestCategories,TestInfos,Arguments,Comment)}, !,
302 [testcase(Id,TestCategories,TestInfos,Arguments,Comment)].
303 tests_by_id(Id) -->
304 {\+(cli_testcase(Id,_Category,_Infos,_Arguments,_Comment))}, !,
305 {format_warning_nl('*** No testcase with ID ~w found', [Id])}.
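
% Test IDs can be given as a single number, a From-To range, or a list mixing both.
% Illustrative example: phrase(tests_by_id([3,10-12]), Testcases) collects the
% testcase/5 terms for tests 3, 10, 11 and 12, warning about any unknown ID.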
306
307 :- use_module(library(random),[random_select/3]).
308 run_random_tests(Nr) :-
309 findall(testcase(Id,TestCategories,TestInfos,Arguments,Comment),
310 cli_testcase(Id,TestCategories,TestInfos,Arguments,Comment),AllTests),
311 select_random_tests(Nr,AllTests,Tests),
312 reset_test_runner(random(Nr)),
313 run_list(Tests), check_failed(_).
314 select_random_tests(N,_,[]) :- N<1,!.
315 select_random_tests(_,[],[]) :- !.
316 select_random_tests(N,All,[X|Tests]) :-
317 random_select(X,All,Rest),
318 N1 is N-1,
319 select_random_tests(N1,Rest,Tests).
320
321
322 run_tests_all :- run_tests_all_except([]).
323
324 :- use_module(library(ordsets)).
325 run_tests_all_except(Categories) :- reset_test_runner(all_except(Categories)),
326 findall(Test, test_not_in_categories(Categories, Test), Tests),
327 run_list(Tests),
328 check_failed(_).
329
330 test_not_in_categories(Categories, Test) :-
331 sort(Categories, SC),
332 test_not_in_categories_aux(SC, Test).
333
334 :- use_module(library(ordsets), [ord_intersect/2]).
335 test_not_in_categories_aux(SC, testcase(Id,TestCategories,Infos,Arguments,Comment)) :-
336 cli_testcase(Id,TestCategories,Infos,Arguments,Comment),
337 sort(TestCategories,TC),
338 \+ ord_intersect(TC,SC).
339
340
341 :- dynamic silent_running/0, no_strict_running/0.
342 run_silently :- silent_running, !.
343 run_silently :- add_additional_arguments(['-silent']), assertz(silent_running).
344 run_no_strict :- (no_strict_running -> true ; assertz(no_strict_running)). % useful to see all errors of a test
345
346 % perform additional checks (e.g., used_ids info in AST) and throw errors when something is wrong
347 run_safely :- add_additional_preference('PROB_SAFE_MODE','TRUE').
348
349 % run all tests with a specific category
350 category(X) :- run_tests_by_category(X).
351 run_tests_by_category(X) :- run_tests_by_category(X,all).
352 run_tests_by_first_category(X) :- % used as entry in Jenkins when running all tests in parallel by category
353 run_tests_by_category(X,first).
354 run_tests_by_category(X,FirstOnly) :-
355 reset_test_runner(category(X,FirstOnly)),
356 (is_list(X) -> run_tests_by_category_aux(X,FirstOnly) ; run_tests_by_category_aux([X],FirstOnly)),
357 check_failed(_).
358 run_tests_by_category_aux(Categories,FirstOnly) :-
359 get_tests_for_categories(Categories,FirstOnly,List),
360 ( List=[] -> format_warning_nl('*** No testcase with categories ~w found', [Categories])
361 ; run_list(List)).
362
363 get_tests_for_categories(Categories,List) :-
364 get_tests_for_categories(Categories,all,List).
365 get_tests_for_categories(Categories,FirstOnly,List) :-
366 findall(testcase(Id, TestCategories, Infos, Arguments, Comment),
367 (cli_testcase(Id, TestCategories, Infos, Arguments, Comment),
368 (FirstOnly=first -> TestCategories=[C1|_], member(C1,Categories)
369 ; intersect(Categories, TestCategories)) ),
370 List).
371
372 run_tests_using_command(Command) :- reset_test_runner(cmd(Command)),
373 findall(testcase(Id, TestCategories, Infos, Arguments, Comment),
374 (cli_testcase(Id, TestCategories, Infos, Arguments, Comment),
375 member(Command, Arguments)),
376 List),
377 ( List=[] -> format_warning_nl('*** No testcase using command ~w found', [Command])
378 ; run_list(List), check_failed(_)).
379
380 run_tests_using_preference(Pref,Val) :- reset_test_runner(using_pref(Pref,Val)),
381 findall(testcase(Id, TestCategories, Infos, Arguments, Comment),
382 (cli_testcase(Id, TestCategories, Infos, Arguments, Comment),
383 append(_,['-p',CmdPref,Val|_],Arguments),
384 match_preference(Pref,CmdPref) ),
385 List),
386 ( List=[] -> format_warning_nl('*** No testcase using preference ~w with value ~w found', [Pref,Val])
387 ; run_list(List), check_failed(_)).
388
389 :- use_module(probsrc(preferences),[eclipse_preference/2]).
390 match_preference(A,A).
391 match_preference(A,B) :- eclipse_preference(A,B) ; eclipse_preference(B,A).
392
393 % Generate file list (for copying) of a category :
394 show_files(Cat) :-
395 cli_testcase(Id, TestCategories, _Infos, Arguments, _Comment),
396 member(Cat, TestCategories), % print(Arguments),nl,
397 ( file_in_arguments(File,Arguments)
398 ;
399 get_testcase_diff_check_output(Id,_GeneratedFile,File)),
400 format('~w ',[File]),
401 fail.
402 show_files(_) :- nl.
403
404 file_in_arguments(F,['-p',_,_|T]) :- !, file_in_arguments(F,T).
405 file_in_arguments(F,[C,_|T]) :- binary_command(C),!, file_in_arguments(F,T).
file_in_arguments(F,[H|T]) :- ( is_file(H), H=F ; file_in_arguments(F,T) ).
407 binary_command(eval). binary_command(evalt). binary_command(evalf).
408 binary_command('-mc').
409 is_file(F) :- atom(F), atom_codes(F,Codes), member(47,Codes),!.
410
411
412 % Generate a Makefile for all listed Categories
413 % example: makefile_by_category('AlstomMakefile',[animate,history,sptxt,rel_fnc])
414 % the Makefile can be run to perform the listed tests on a compiled version of probcli
415 makefile_by_category(File, Categories) :-
416 (is_list(Categories) -> Cats = Categories ; Cats = [Categories]),
417 my_open(File, Stream),
418 format(Stream, 'PROBCLI=probcli~n',[]),
419 format(Stream, 'test_all:', []),
420 makefile_write_categories(Stream, Cats),
421 makefile_by_category_aux(Stream, Cats),
422 my_close(Stream).
423 makefile_by_category_aux(_Stream, []).
424 makefile_by_category_aux(Stream, [Cat | Cats]) :-
425 makefile_by_category_single(Stream, Cat),
426 makefile_by_category_aux(Stream, Cats).
427 makefile_by_category_single(Stream, Cat) :-
428 findall(testcase(Id, TestCategories, Infos, Arguments, Comment),
429 (cli_testcase(Id, TestCategories, Infos, Arguments, Comment),
430 member(Cat, TestCategories)),
431 List),
432 (List=[] -> format_warning_nl('*** No testcase with category ~w found', [Cat])
433 ; format(Stream, '~n~w:~n', [Cat]), makefile_write_calls(Stream, List)).
434
435 my_open(user_output,S) :- !, S=user_output.
436 my_open(File,S) :- open(File,write,S).
437 my_close(user_output) :- !.
438 my_close(S) :- close(S).
439
440 makefile_write_categories(Stream, []) :-
441 format(Stream, '~n', []).
442 makefile_write_categories(Stream, [Cat | Cats]) :-
443 format(Stream, ' ~a', [Cat]),
444 makefile_write_categories(Stream, Cats).
445
446 makefile_write_calls(_Stream, []).
447 makefile_write_calls(Stream, [testcase(Id, _TestCategories, _Infos, Arguments, Comment) | Tests]) :-
448 (Comment='' -> true ; format(Stream,'\techo \"Test ~w : ~w\"~n',[Id,Comment])),
449 format(Stream, '\t$(PROBCLI)', []),
450 makefile_write_arguments(Stream, Arguments),
451 makefile_write_diff(Stream, Id),
452 makefile_write_calls(Stream, Tests).
453
454 makefile_write_arguments(Stream, []) :-
455 format(Stream, '~n', []).
456 makefile_write_arguments(Stream, [Arg | Args]) :-
457 format(Stream, ' ~w', [Arg]),
458 (quote_next_arg(Arg) -> makefile_write_arguments_quoted(Stream,Args)
459 ; makefile_write_arguments(Stream, Args)).
460
quote_next_arg('-goal'). % the next argument will contain spaces etc.; surround it in "" for the shell
462 quote_next_arg('--check_goal').
463 quote_next_arg('-check_goal').
464 quote_next_arg('-cbc_deadlock_pred').
465 quote_next_arg('-eval').
466 quote_next_arg('-evalt').
467 quote_next_arg('-evalf').
468 quote_next_arg('-cbc_sequence_with_target'). % actually quotes next two !
469 quote_next_arg('-cbc_sequence_with_target_all'). % ditto <- TO DO
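
% Illustrative example: the argument list ['-evalt','x > 0 & x < 10'] is written
% to the Makefile as: -evalt "x > 0 & x < 10" (only the single argument following
% a recognised option is quoted).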
470
471
472 makefile_write_arguments_quoted(Stream, []) :-
473 format(Stream, '~n', []).
474 makefile_write_arguments_quoted(Stream, [Arg | Args]) :-
475 format(Stream, ' \"~w\"', [Arg]),
476 makefile_write_arguments(Stream, Args).
477
478 makefile_write_diff(Stream, ID) :-
479 get_testcase_diff_check_output(ID, File1, File2),
480 format(Stream, '\tdiff -b ~w ~w~n', [File1, File2]),
481 fail.
482 makefile_write_diff(_Stream, _ID).
483
484 % -------------------------
485
486 copy(Cat) :- (Cat=[_|_] -> C=Cat ; C=[Cat]),
487 generate_copy_commands(C,'testarchive/').
488
489 :- use_module(probsrc(b_trace_checking),[get_default_trace_file/2]).
490 generate_copy_commands(Categories,Dest) :-
491 cli_testcase(ID, TestCategories, _Infos, Arguments, _Comment),
492 non_empty_inter(Categories,TestCategories), %print(ID),nl,
493 Arguments=[MainFile|_], generate_copy_command(MainFile,Dest), % print(MainFile),nl,
494 additional_testcase_file(ID,MainFile,Arguments,ExtraFile),
495 generate_copy_command(ExtraFile,Dest),
496 fail.
497 generate_copy_commands(_,_).
498
499 additional_testcase_file(ID,_,_,EFile) :- extra_testcase_file(ID,EFile).
500 additional_testcase_file(_ID,File,Arguments,TFile) :- member('-t',Arguments),
501 get_default_trace_file(File,TFile).
502 additional_testcase_file(ID,_,_,RefFile2) :- get_testcase_diff_check_output(ID,_File1,RefFile2).
503
504 non_empty_inter(A,B) :- member(X,A), member(X,B),!.
505
506 :- use_module(probsrc(tools_strings),[string_concatenate/3]).
507 :- use_module(probsrc(tools),[get_parent_directory/2]).
508 generate_copy_command(File,Dest) :-
509 safe_file_exists(File),
510 get_parent_directory(File,Dir),!,
511 string_concatenate(Dest,Dir,DestDir),
512 string_concatenate(Dest,File,DestFile),
513 format(user_output,'\tmkdir -p ~w~n',[DestDir]),
514 get_command_path(mkdir,MkCmdPath),
515 system_call(MkCmdPath, ['-p',DestDir],_Text1,_JExit1),
516 format(user_output,'\tcp ~w ~w~n',[File,DestFile]),
517 get_command_path(cp,CpCmdPath),
518 system_call(CpCmdPath, [File,DestFile],_Text2,_JExit2).
519 generate_copy_command(_,_).
520
521 % -------------------------
522
523 :- dynamic test_failed/1, last_test_failed/1, test_diff_failed/3, test_skipped/1.
524 halt_tests :-
525 prolog_error_occurred,
526 !,
527 halt1.
528 halt_tests :- test_failed(_),!,
529 halt1.
530 halt_tests :-
531 nl,print('TEST RUN SUCCESSFUL'),nl,
532 halt. % regular halt
533 halt1 :-
534 format_error_nl('TEST RUN FAILED',[]),
535 halt(1).
536
537 check_failed(failure) :- test_failed(X),!,print_failed_tests,
538 print('Use the following command to run individual tests: '),nl,
539 print(' ./prolog.sh --file tests/test_runner.pl --goal "run_id('),print(X),print(')."'),nl,
540 (halt1_allowed -> halt1 ; print('halt(1) :: dontstop mode'),nl).
541 check_failed(success) :- number_of_tests_run(Nr),
542 start_terminal_colour([green,bold],user_output),
543 findall(Y,test_skipped(Y),Skips), length(Skips,NrSkips),
544 bb_get(reset_test_runner_info,Info),
545 (Nr=1,NrSkips=0,Info\=category(_,_) -> format(user_output,'Test successful.~n',[])
546 ; NrSkips>0 -> format(user_output,'All ~w tests successful, ~w skipped (for ~w).~n',[Nr,NrSkips,Info])
547 ; format(user_output,'All ~w tests successful (for ~w).~n',[Nr,Info])),
548 reset_terminal_colour(user_output).
549 print_failed_tests :- number_of_tests_run(Nr),
550 findall(Y,test_failed(Y),Fails), length(Fails,NrFails),
551 findall(Y,test_skipped(Y),Skips), length(Skips,NrSkips),
552 start_terminal_colour([red,bold],user_error),
553 bb_get(reset_test_runner_info,Info),
554 format(user_error,'*** Tests run: ~w, skipped: ~w, failed: ~w (for ~w) ***~n*** Failed tests:~n',[Nr,NrSkips,NrFails,Info]),
555 test_failed(X), lookup_test_description(X,Desc),
556 format(user_error,'~w ~w~n',[X,Desc]),
557 fail.
558 print_failed_tests :- nl(user_error),
559 (user_interrupt_signal_received -> format(user_error,'Tests were interrupted by CTRL-C (user_interrupt)~n',[])
560 ; true),
561 reset_terminal_colour(user_error).
562
563 lookup_test_description(Id,Desc) :- cli_testcase(Id,_,_,_,Desc).
564
565 :- dynamic user_interrupt_signal_received/0.
566 run_list(List) :- run_list(List,allow_skipping).
567 run_list(List,AllowSkipping) :-
568 init_test_runner,
569 retractall(user_interrupt_signal_received),
570 length(List,Len),
571 run_single_testcase_list_aux(List,Len,AllowSkipping).
572 run_single_testcase_list_aux([],_,_) :- !.
573 run_single_testcase_list_aux(List,_Len,allow_skipping) :- user_interrupt_signal_received,!,
574 length(List,RestLen),
575 format('Skipping ~w remaining tests~n',[RestLen]),
576 maplist(assert_test_skipped,List).
577 run_single_testcase_list_aux([TC|Tail],Len,AllowSkipping) :-
578 print_progress_stats(Len),
579 run_single_testcase(TC,AllowSkipping),
580 run_single_testcase_list_aux(Tail,Len,AllowSkipping).
581 assert_test_skipped(Id) :- assertz(test_skipped(Id)).
582
583 print_progress_stats(All) :- number_of_tests_run(Nr), Nr>0,!,
584 findall(Y,test_failed(Y),Fails), length(Fails,NrFails),
585 (last_test_failed(Failed) -> ajoin(['(e.g. ', Failed, ')'],FailInfo) ; FailInfo = ''),
586 findall(Y,test_skipped(Y),Skips), length(Skips,NrSkips),
587 bb_get(reset_test_runner_wtime,WStart),
588 statistics(walltime,[WNow,_]), Delta is round((WNow - WStart) / 1000),
589 bb_get(reset_test_runner_info,Info),
590 format_progress_nl('Progress: ~w/~w tests run, ~w skipped, ~w failed ~w (running since ~w sec for ~w)',[Nr,All,NrSkips,NrFails,FailInfo,Delta,Info]).
591 print_progress_stats(_).
592
593 cache :- add_additional_arguments(['-cache','/Users/leuschel/svn_root/NewProB/examples/cache/']).
594 v :- add_additional_arguments(['-v']). % verbose
595 vv :- add_additional_arguments(['-vv']). % very_verbose
596
597 :- volatile additional_arguments/1.
598 :- dynamic additional_arguments/1.
599 % add additional cli arguments when running tests:
600 add_additional_arguments(List) :-
601 (retract(additional_arguments(Old)) -> true ; Old=[]),
602 append(Old,List,New),
603 assertz(additional_arguments(New)).
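
% Illustrative example: add_additional_arguments(['-v']) makes every subsequent
% probcli test invocation verbose (this is what v/0 above does); arguments
% accumulate until reset_additional_arguments/0 below is called.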
604
605 reset_additional_arguments :- retractall(additional_arguments(_)).
606
607 % auto complete a preference name and print error if no match found
608 get_preference_name(Prefix,Name) :- auto_complete_preference(Prefix,Name),!.
609 get_preference_name(Prefix,_) :-
610 format_warning_nl('No matching preference found for: ~w',[Prefix]),fail.
611
612 :- use_module(probsrc(preferences),[eclipse_preference/2]).
613 auto_complete_preference(Prefix,Name) :-
614 atom_codes(Prefix,PC),
615 eclipse_preference(EP,Name),
616 (atom_codes(EP,EPC) ; atom_codes(Name,EPC)),
617 append(PC,_,EPC).
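
% Illustrative example: get_preference_name('CLP',Name) binds Name to the internal
% name of a preference whose eclipse or internal name starts with 'CLP' (e.g. the
% CLPFD preference used elsewhere in this file), and fails with a warning otherwise.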
618
619 % add additional preference when running tests:
620 add_additional_preference(PREF,PREFVAL) :-
621 (retract(additional_arguments(Old0)) -> remove_matching_pref(Old0,PREF,Old) ; Old=[]),
622 New = ['-p',PREF,PREFVAL|Old],
623 format('New additional arguments: ~w~n',[New]),
624 assertz(additional_arguments(New)).
625
626 % remove all preferences conflicting with PREF
627 remove_matching_pref([],_PREF,[]).
628 remove_matching_pref(['-p',P,OLD|T],PREF,Res) :- !,
629 (P=PREF -> Res=T ; Res = ['-p',P,OLD|RT], remove_matching_pref(T,PREF,RT)).
630 remove_matching_pref([H|T],PREF,[H|RT]) :- remove_matching_pref(T,PREF,RT).
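
% Illustrative example:
%   remove_matching_pref(['-p','CLPFD','FALSE','-strict'],'CLPFD',R)
% yields R = ['-strict'], so that an overriding '-p CLPFD <value>' can be prepended.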
631
% remove all preferences conflicting with another list of prefs
633 remove_matching_prefs([],P,P).
634 remove_matching_prefs(['-cache',_File|T],InPrefs,Res) :- !,
635 remove_matching_prefs(T,InPrefs,Res).
636 remove_matching_prefs(['-p',PREF,_|T],InPrefs,Res) :-
637 remove_matching_pref(InPrefs,PREF,In2),
638 remove_matching_prefs(T,In2,Res).
639 remove_matching_prefs([_|T],InPrefs,Res) :-
640 remove_matching_prefs(T,InPrefs,Res).
641
642 :- use_module(probsrc(pathes_lib), [unavailable_extension/2]).
643 unavailable_extension_for_test(Id,TestCategories,Ext,Reason) :-
644 test_requires_extension(Id,TestCategories,Ext),
645 unavailable_extension(Ext,Reason).
646
647 :- dynamic skip_all_tests/0.
648
649 skip_test(_, _, _, ReasonMsg, ReasonTerm) :-
650 skip_all_tests,
651 !,
652 ReasonMsg = 'skipping all tests',
653 ReasonTerm = skip_all_tests.
654 skip_test(_Id, _TestCategories, TestConfigurationInfos, ReasonMsg, ReasonTerm) :-
655 member(skip,TestConfigurationInfos),
656 !,
657 ReasonMsg = 'test marked as skipped',
658 ReasonTerm = skip.
659 skip_test(_Id, _TestCategories, TestConfigurationInfos, ReasonMsg, ReasonTerm) :-
660 member(conditional_skip(Callable), TestConfigurationInfos),
661 % Evaluate the condition in the context of the testcases module,
662 % where it was defined, so that imports are visible to the condition.
663 % We can't use a meta_predicate declaration here,
664 % because the goal is nested inside a term.
665 call(testcases:Callable),
666 !,
667 ReasonMsg = 'skip condition is true',
668 ReasonTerm = conditional_skip(Callable).
669 skip_test(_Id, TestCategories, _TestConfigurationInfos, ReasonMsg, ReasonTerm) :-
670 member(private, TestCategories),
671 \+ absolute_file_name(prob_examples(examples), _, [access(exist), file_type(directory), file_errors(fail)]),
672 !,
673 ReasonMsg = 'test requires non-public examples which are not available',
674 ReasonTerm = private.
675 skip_test(_Id, TestCategories, _TestConfigurationInfos, ReasonMsg, ReasonTerm) :-
676 member(private_source_not_available, TestCategories),
677 \+ directory_exists('../private_examples'),
678 !,
679 ReasonMsg = 'test requires private examples which are not available',
680 ReasonTerm = private_source_not_available.
681 skip_test(_Id, _TestCategories, TestConfigurationInfos, ReasonMsg, ReasonTerm) :-
682 current_prolog_flag(dialect, swi),
683 % TODO Handle swi_expected_failure differently.
684 % Ideally, tests that are expected to fail on SWI should not be skipped,
685 % but instead should be run normally with any failures ignored.
686 % This would allow detecting when tests unexpectedly succeed
% (i.e. they have been fixed but not yet unmarked as expected failures).
688 member(swi_expected_failure, TestConfigurationInfos),
689 !,
690 ReasonMsg = 'test expected to fail on SWI-Prolog',
691 ReasonTerm = swi_expected_failure.
692 skip_test(_Id, TestCategories, _TestConfigurationInfos, ReasonMsg, ReasonTerm) :-
group_cannot_be_checked_on_ci_server(Category),
694 member(Category, TestCategories),
695 !,
696 ReasonMsg = 'category cannot be checked on CI',
697 ReasonTerm = Category.
698 skip_test(Id, TestCategories, _TestConfigurationInfos, ReasonMsg, ReasonTerm) :-
699 unavailable_extension_for_test(Id,TestCategories,Ext,Reason),
700 !,
701 ajoin(['test requires an unavailable extension (',Reason,')'],ReasonMsg),
702 ReasonTerm = Ext.
703
704
705 % RUNNING SINGLE TESTCASE:
706 % ------------------------
707
708 :- dynamic previous_test_run/1.
709 previous_test_run(none).
710
711 :- use_module(library(timeout), [time_out/3]).
712
713 run_single_testcase(testcase(Id,TestCategories,Infos,Arguments,Comment),AllowSkipping) :-
714 skip_test(Id,TestCategories,Infos,ReasonMsg,ReasonTerm),
715 (AllowSkipping = allow_skipping -> true
716 ; % Skipping disabled - print message, then continue to next clause to run the test anyway.
717 format_progress_nl('Running test that would normally be skipped - ~w: ~q', [ReasonMsg,ReasonTerm]),
718 fail),
719 !,
720 full_flush,
721 format_with_colour_nl(user_output,[blue,bold],'Skipping test ~w (~w) because ~w: ~q', [Id,Comment,ReasonMsg,ReasonTerm]),
722 print_junit_skip(Arguments),
723 assertz(test_skipped(Id)),
724 full_flush.
725 run_single_testcase(testcase(Id,_TestCategories,_Infos,Arguments,Comment),AllowSkipping) :-
726 test_requires_previous_test(Id,ReqPreviousId),
727 previous_test_run(PreviousId),
728 PreviousId \= ReqPreviousId,!,
729 format_warning_nl('~n*** Test id ~w requires test ~w to be run just before, but previous test is ~w',
730 [Id,ReqPreviousId,PreviousId]),
731 (AllowSkipping = allow_skipping -> true
732 ; format_progress_nl('Running test that would normally be skipped', []),fail),
733 !,
734 full_flush,
735 format_with_colour_nl(user_output,[blue,bold],'Skipping test ~w (~w)', [Id,Comment]),
736 print_junit_skip(Arguments),
737 assertz(test_skipped(Id)),
738 full_flush.
739 run_single_testcase(testcase(Id,_TestCategories,_Infos,Arguments,Comment),_) :-
740 full_flush,
741 format_progress_nl('Running test ~w ~n ~w',[Id,Comment]),
742 retractall(previous_test_run(_)), assert(previous_test_run(Id)),
743 (debug_mode(off) -> true ; print('testcase: probcli '), print_args(Arguments),nl),
744 clear_diff_output(Id),
745 clear_logxml_output(Id),
746 prob_junit_args(JUnitArgs),
747 maplist(patch_prob_examples_loc,Arguments,Arguments0), % update path to prob_examples if necessary
748 append(Arguments0, JUnitArgs, Arguments1),
749 (additional_arguments(ExtraArgs)
-> remove_matching_prefs(ExtraArgs,Arguments1,RemArguments1), % remove conflicting arguments that are now overridden
751 append(ExtraArgs,RemArguments1,Arguments2)
752 ; Arguments1=Arguments2),
(tests_multiply_timeout(Factor) -> modify_timeout(Factor,Arguments2,Arguments3) ; Arguments3 = Arguments2),
754 (no_strict_running, select('-strict',Arguments3,Arguments4) -> true ; Arguments4=Arguments3),
755 (silent_running -> true ; print('executing: probcli '), print_args(Arguments4),nl),
756 full_flush,
757 test_started(Id),
758 time_out(catch(prob_cli:run_probcli(Arguments4,[test_runner]), Exception, true),
759 2147483646,TO_Result), !,
760 test_finished(Id,Walltime),
(TO_Result \= success % time_out occurred, possibly due to throw(time_out)
-> Exception = TO_Result % set Exception to time_out
763 ; true),
764 (test_exception_is_success(Exception)
765 -> check_diff_output(Id),
766 check_logxml(Id)
767 ; format_warning_nl('Exception in test ~w: ~w',[Id,Exception]),
768 handle_test_exception(Exception),
769 print_test_failed(Id,Exception)
770 ),
771 (prolog_error_occurred ->
772 print_test_failed(Id,'Error(s) from Prolog during test! See console log for details. Aborting test run.'),
773 halt1
774 ; true),
775 !,
776 (test_failed(Id) % asserted by check_diff_output/check_logxml/print_test_failed
777 -> true % messages already printed above
778 ; format_with_colour_nl(user_output,[green],'Test ~w completed successfully (in ~w ms)~n',[Id,Walltime])
779 ),
780 safe_reset_cli(Id),
781 full_flush.
782
783 test_exception_is_success(Exception) :- var(Exception), !. % No exception was actually thrown
784 test_exception_is_success(Exception) :- Exception == halt(0). % can be thrown by typechecker_test
785
786 handle_test_exception(Exception) :-
787 ( Exception = error(_,_) -> Type = 'Error'
788 ; Exception = enumeration_warning(_,_,_,_,_) -> Type = 'Enumeration exception'
789 ; Exception = solver_and_provers_too_weak -> Type = 'Solver exception'
790 ; fail
791 ),
792 !,
793 (halt1_allowed
794 -> format_warning_nl('~w during test execution: ~w',[Type,Exception]),
795 halt1
796 ; true).
797 handle_test_exception(halt(1)) :- !.
798 handle_test_exception(parse_errors(_)) :- !.
799 handle_test_exception(time_out) :- !.
800 handle_test_exception(user_interrupt_signal) :- !,
801 assertz(user_interrupt_signal_received),
802 format_warning_nl('CTRL-C received, aborting tests',[]).
803 handle_test_exception(Exception) :-
804 format_warning_nl('Exception not caught in test_runner: ~w',[Exception]),
805 halt1.
806
807 safe_reset_cli(Id) :-
808 catch(prob_cli:reset_cli, % when bundles/extensions are missing we can get exceptions here
809 Exception,
810 (halt1_allowed
811 -> format_warning_nl('Exception while resetting probcli: ~w',[Exception]),
812 halt1
813 ; print_test_failed(Id,Exception)
814 )).
815
816 :- dynamic test_took_aux/2.
817 :- dynamic last_testcase_run/1, number_of_tests_run/1.
818 number_of_tests_run(0).
819 reset_nr_of_tests :- retractall(number_of_tests_run(_)), assertz(number_of_tests_run(0)).
820
821 :- use_module(library(system),[now/1, datime/2]).
822 :- dynamic performance_session_running/1, performance_session_stats/4.
823
824 performance_session_start :-
825 now(When),
826 datime(When,datime(Year,Month,Day,Hour,Min,Sec)),
827 format('~nStarting Codespeed Performance Monitoring session ~w:~w:~w:~w:~w:~w~n',[Year,Month,Day,Hour,Min,Sec]),
828 retractall(performance_session_running(_)),
829 assertz(performance_session_running(When)).
830
831 :- use_module(probsrc(parsercall),[get_parser_version/1]).
832 :- use_module(probsrc(version), [version_str/1, revision/1, lastchangeddate/1, format_prob_version/1]).
833 performance_session_end(FilePrefix) :-
834 performance_session_running(When),
835 datime(When,datime(Year,Month,Day,Hour,Min,Sec)),
836 %tools:ajoin([FilePrefix,':',Year,Month,Day,Hour,Min,Sec],FileName),
837 format('~nFinishing Codespeed session ~w:~w:~w:~w:~w:~w~n -> File : ~w~n',[Year,Month,Day,Hour,Min,Sec,FilePrefix]),
838 open(FilePrefix,append,S),
839 format(S,'~n/* Codespeed session ~w:~w:~w:~w:~w:~w */~n',[Year,Month,Day,Hour,Min,Sec]),
840 version_str(Vers), portray_clause(S, session_prob_version(When,Vers)),
841 revision(Rev), portray_clause(S, session_prob_revision(When,Rev)),
842 lastchangeddate(DD), portray_clause(S, session_prob_lastchangeddate(When,DD)),
843 get_parser_version(PV), portray_clause(S, session_prob_parser_version(When,PV)),
844 current_prolog_flag(version, PrologVString),
845 portray_clause(S, session_prolog_version_string(When,PrologVString)),
846 current_prolog_flag(version_data, PrologVData),
847 portray_clause(S, session_prolog_version_data(When,PrologVData)),
848 current_prolog_flag(host_type, HostType),
849 portray_clause(S, session_prolog_host_type(When,HostType)),
850 write_perf_data(When,S).
851
852 write_perf_data(When,S) :- additional_arguments(New),
853 portray_clause(S, stored_additional_arguments(When,New)),
854 fail.
855 write_perf_data(When,S) :- performance_session_stats(When,Id,Time,WTime),
856 portray_clause(S, stored_performance_test_stats(When,Id,Time,WTime)),
857 fail.
858 write_perf_data(_When,S) :- nl(S), nl(S), close(S).
859
860
861
862 test_started(Id) :-
863 retractall(last_testcase_run(_)), assertz(last_testcase_run(Id)),
864 retractall(test_took_aux(_,_)),
865 statistics(runtime,[Start,_]),
866 statistics(walltime,[WStart,_]),
867 bb_put(test_started,Start),
868 bb_put(test_started_wtime,WStart),
869 bb_put(test_target_coverage,0). % a special value which can be increased in ProB's source code
870 % useful to measure the number of tests that cover a new feature
871
872 :- public inc_test_target_coverage/0.
873 % call this from prob_prolog if some code point you wish to test is covered
874 % using the stats repl command you can then see how many tests have covered this code point
875 inc_test_target_coverage :-
876 bb_get(test_target_coverage,X),
877 X1 is X+1,
878 bb_put(test_target_coverage,X1).
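
% Illustrative usage (hypothetical code point): insert the line
%   test_runner:inc_test_target_coverage,
% at the source location of interest; the COV column of print_current_stats
% then shows how often each test reached that point.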
879
880 :- dynamic test_stats/6.
881 test_finished(Id,WTime) :-
882 statistics(runtime,[End,_]),
883 statistics(walltime,[WEnd,_]),
884 bb_get(test_started,Start),
885 bb_get(test_started_wtime,WStart),
886 bb_get(test_target_coverage,Covered),
887 Time is End - Start, WTime is WEnd- WStart,
888 retractall(test_took_aux(_,_)),
889 assertz(test_took_aux(Time,WTime)),
890 (retract(number_of_tests_run(Nr)) -> N1 is Nr+1 ; N1=1),
891 assertz(number_of_tests_run(N1)),
892 (retract(test_stats(Id,PrevTime,PrevWTime,_,_,_))
893 -> assertz(test_stats(Id,Time,WTime,PrevTime,PrevWTime,Covered))
894 ; assertz(test_stats(Id,Time,WTime,-1,-1,Covered))
895 ),
896 (performance_session_running(When)
897 -> assertz(performance_session_stats(When,Id,Time,WTime))
898 ; true).
899
900 print_delta_stats :- print('Comparing walltimes with previous test run: '),nl,
901 findall(delta(DeltaPerc,DeltaWTime,Id),test_delta_stat(Id,DeltaPerc,DeltaWTime),L),
902 (L=[] -> print('No previous run information available'),nl
; print(' ID | % (delta absolute) | walltime (runtime)'),nl,
904 sort(L,SL),
905 maplist(print_delta,SL)).
906 test_delta_stat(Id,DeltaPerc,DeltaWTime) :-
907 test_stats(Id,_RTime,WTime,_PrevRTime,PrevWTime,_),
908 PrevWTime>0,
909 DeltaWTime is WTime - PrevWTime,
910 DeltaPerc is (100*DeltaWTime) / PrevWTime.
911 print_delta(delta(DeltaPerc,DeltaWTime,Id)) :-
912 test_stats(Id,RTime,WTime,_PrevRTime,PrevWTime,_Cov),
913 format(' ~w | ~2f % (~w ms) | ~w ms (~w ms runtime) [~w walltime ms previously]~n',
914 [Id,DeltaPerc,DeltaWTime,WTime,RTime,PrevWTime]).
915
916 :- use_module(probsrc(tools),[print_memory_used_wo_gc/0]).
917 print_current_stats :- print_current_stats(user_output,' | ').
918 print_current_stats(Stream,Sep) :-
919 print_memory_used_wo_gc,nl,
920 bb_put(test_target_coverage_nr,0),
921 bb_put(test_target_coverage_count,0),
922 bb_put(test_counter_nr,0),
923 format(Stream,' ID~wOK~wWALLTIME (ms)~wRUNTIME (ms)~wCOV~wDESCRIPTION~n',[Sep,Sep,Sep,Sep,Sep]),
924 test_stats(Id,RTime,WTime,_PrevRTime,_PrevWTime,NrTargetCoverage),
925 cli_testcase(Id,_Cat,_,_Args,Desc),
926 (test_failed(Id) -> OK = '*FAILED*' ; OK = ' OK '),
927 format(Stream,' ~w~w~w~w~w~w~w~w~w~w~w~n',
928 [Id,Sep,OK,Sep,WTime,Sep,RTime,Sep,NrTargetCoverage,Sep,Desc]),
929 bb_get(test_counter_nr,Nr), Nr1 is Nr+1, bb_put(test_counter_nr,Nr1),
930 (NrTargetCoverage>0
931 -> bb_get(test_target_coverage_nr,TN), TN1 is TN+1, bb_put(test_target_coverage_nr,TN1),
932 bb_get(test_target_coverage_count,NC), NC1 is NC+NrTargetCoverage,
933 bb_put(test_target_coverage_count,NC1)
934 ),
935 fail.
936 print_current_stats(_,_) :-
937 bb_get(test_counter_nr,NrT),
938 format('Number of tests: ~w~n',[NrT]),
939 (bb_get(test_target_coverage_nr,Nr), Nr>0 ->
940 bb_get(test_target_coverage_count,NrC),
941 Perc is (100.0 * Nr) / NrT,
942 format('Number of tests reaching test source code target: ~w (~2f %, total hits: ~w)~n',[Nr, Perc, NrC])
943 ; true).
944
945 test_took(Time,WTime) :- test_took_aux(Time,WTime), !.
946 test_took(0,0) :-
947 format_warning_nl('test_took/2 called before test_finished/2, this should not happen!',[]).
948
949 print_junit_skip(Arguments) :-
950 prob_junit_dir(Dir)
951 -> set_junit_dir(Dir),
952 create_and_print_junit_result(['Integration Tests'],Arguments,0,skip)
953 ; true.
954
% if the test expects a time_out error, the timeout is left unchanged;
% otherwise, the timeout is increased to allow coverage analysis / junit / etc. to finish
957 :- use_module(probsrc(tools_meta), [no_time_out_value/1]).
958 multiply_and_truncate_timeout(OrigTimeout, Factor, NewTimeout) :-
959 no_time_out_value(NoTimeout),
960 % Make sure that the new timeout never exceeds the special "no timeout" value,
961 % otherwise ProB prints lots of warnings about the timeout being too high.
962 NewTimeout is min(round(Factor * OrigTimeout), NoTimeout). % Factor could be float
963
964 modify_timeout(_,OldOptions,New) :-
965 append(_,['-expcterr','time_out'|_],OldOptions), !, New=OldOptions. % we expect a time_out
966 modify_timeout(Factor,[],['-p','TIME_OUT',NVal]) :-
967 % timeout was not set at all - set it to Factor*Default
968 % Note: there is a potential problem when the time_out is set inside the machine and not in the test !! TO DO: fix
969 preferences:preference_default_value(time_out,DEFAULT),
970 multiply_and_truncate_timeout(DEFAULT, Factor, NVal).
971 modify_timeout(Factor,[GTC,OLD|T],[GTC,NewT|MT]) :- is_global_time_out_cmd(GTC),!,
972 tools:arg_is_number(OLD,OLDT),
973 multiply_and_truncate_timeout(Factor, OLDT, NewT),
974 modify_timeout(Factor,T,MT).
975 modify_timeout(Factor,['-p','TIME_OUT',OLD|T],['-p','TIME_OUT',NewT|T]) :-
976 tools:arg_is_number(OLD,OLDT), !,
multiply_and_truncate_timeout(Factor, OLDT, NewT). % TODO: we currently assume any global time-out argument appears before -p TIME_OUT in the argument list
modify_timeout(Factor,[H|T],[H|MT]) :- modify_timeout(Factor,T,MT).
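
% Illustrative example (assuming no '-expcterr time_out' is present):
%   modify_timeout(2,['-p','TIME_OUT','500','-strict'],R)
% yields R = ['-p','TIME_OUT',1000,'-strict'] (note: the new value is a number).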
979
980 is_global_time_out_cmd('-global_time_out').
981 is_global_time_out_cmd('--global_time_out').
982 is_global_time_out_cmd('-time_out'). % old version
983
984 full_flush :- flush_output(user_output), flush_output(user_error).
985
986 print_args([]).
987 print_args([H|T]) :- print(H), print(' '), print_args(T).
988
989 print_test_failed(Id,Msg) :-
990 print_test_failed(Id,Msg,'').
991 print_test_failed(Id,Msg1,Msg2) :-
992 (Msg1 = user_interrupt_signal -> assertz(user_interrupt_signal_received) ; true),
993 test_took(RunTime,WallTime),
994 cli_testcase(Id,Categories,_Infos,Arguments,Comment),
995 Categories = [FirstCat|_], !,
996 ErrorMessage = ['Test with Id',Id,'failed.','Test Categories: ',Categories,'Test Arguments: ',Arguments,'Test Comment: ',Comment,'Error Messages in Test Runner: ',Msg1,Msg2,'Content of Error Manager: '],
997 (get_all_errors(AllErrors) -> true ; AllErrors = []),
998 append(ErrorMessage,AllErrors,FullErrorMessage),
999 create_and_print_junit_result(['Integration Tests',FirstCat],Id,RunTime,error(FullErrorMessage)),
1000 assert_test_failed(Id),
1001 start_terminal_colour(red,user_error),
1002 format(user_error,'*** Test ~w FAILED (after ~w ms runtime, ~w ms walltime): ~w~w~n',
1003 [Id,RunTime,WallTime,Msg1,Msg2]),
1004 write('***'),print_memory_used_wo_gc,nl,
1005 reset_terminal_colour(user_error).
1006
1007 assert_test_failed(Id) :-
1008 assertz(test_failed(Id)),
1009 retractall(last_test_failed(_)),
1010 assertz(last_test_failed(Id)).
1011
1012 diff_failed(Id,F1,F2,DiffOutputText,CmpOutputText) :-
1013 test_took(Time,_),
1014 atom_codes(DiffOutputAtom,DiffOutputText),
1015 atom_codes(CmpOutputAtom,CmpOutputText),
1016 ErrMsg = ['Diff for test with Id\n',Id,'\nfailed:\n','Output file\n',F1,'\ndoes not correspond to stored version\n',F2,
1017 '\nOutput of Diff:\n',DiffOutputAtom,
1018 '\nOutput of Cmp:\n',CmpOutputAtom],
1019 create_and_print_junit_result(['Diff Checking'],Id,Time,error(ErrMsg)),
1020 assert_test_failed(Id),
1021 assertz(test_diff_failed(Id,F1,F2)),
1022 start_terminal_colour(red,user_error),
1023 format(user_error,'*** Test ~w FAILED: Diff failed:~nOutput file ~w~ndoes not correspond to stored version~n~w~n', [Id,F1,F2]),
1024 format(user_error,'Diff:~n~s~n',[DiffOutputText]),
1025 format(user_error,'Cmp:~n~s~n',[CmpOutputText]),
1026 reset_terminal_colour(user_error).
1027
1028 :- use_module(probsrc(tools_commands),[diff_files_with_editor/2]).
1029 diff_in_editor :- findall(I,test_diff_failed(I,_,_),LI), sort(LI,SI),
1030 format('Opening failed diff files in editor: ~w~n',[SI]),
1031 test_diff_failed(Id,F1,F2),
1032 format('Test ~w~n~w ~w~n',[Id,F1,F2]),
1033 diff_files_with_editor(F1,F2),
1034 fail.
1035 diff_in_editor.
1036
1037
1038 clear_diff_output(Id) :- % clear all files that should be generated
1039 get_testcase_diff_check_output(Id,GeneratedFile,_StoredReferenceFile),
1040 safe_file_exists(GeneratedFile),
1041 (get_testcase_do_not_delete(Id,GeneratedFile) -> formatsilent(user_output,'% Keeping: ~w~n',[GeneratedFile])
1042 ; formatsilent(user_output,'% Deleting: ~w~n',[GeneratedFile]),
1043 delete_file(GeneratedFile)
1044 ),fail.
1045 clear_diff_output(_).
1046
1047 check_diff_output(Id) :-
1048 findall(diff(Id,GeneratedFile,StoredReferenceFile),
1049 get_testcase_diff_check_output(Id,GeneratedFile,StoredReferenceFile),
1050 ListOfDiffsToCheck),
1051 maplist(check_diff_output2, ListOfDiffsToCheck).
1052
1053 check_diff_output2(diff(Id,GeneratedFile,StoredReferenceFile)) :-
1054 \+ safe_file_exists(GeneratedFile) -> print_test_failed(Id,'Output file does not exist:',GeneratedFile) ;
1055 \+ safe_file_exists(StoredReferenceFile) -> print_test_failed(Id,'Stored file does not exist:',StoredReferenceFile) ;
1056 diff(Id,GeneratedFile,StoredReferenceFile).
1057
1058 diff(Id,F1,F2) :-
1059 formatsilent(user_output,'% Checking: diff / cmp ~w ~w~n',[F1,F2]),
1060 get_command_path(diff,DiffPath),
1061 get_command_path(cmp,CmpPath),
1062 (system_call(DiffPath,['-b',F1,F2],DiffOutputText,_ErrTextDiff,ExitDiff) % use -q for quiet
1063 -> true
1064 ; DiffOutputText = "*** CALLING DIFF FAILED !", ExitDiff = fail
1065 ),
1066 formatsilent(user_output,'% Checking: cmp ~w ~w~n',[F1,F2]),
1067 (system_call(CmpPath,['-b',F1,F2],CmpOutputText,_ErrTextCmp,_ExitCmp)
1068 -> true
1069 ; CmpOutputText = "*** CALLING CMP FAILED !"
1070 ),
1071 (ExitDiff = exit(0)%, ExitCmp = exit(0)
1072 -> true
1073 ; diff_failed(Id,F1,F2,DiffOutputText,CmpOutputText)).
1074
1075
1076 % ----------------
1077 :- volatile logxml_file/1.
1078 % logxml file used automatically when started by test_runner
1079 :- dynamic logxml_file/1.
1080 clear_logxml_output(_Id) :- logxml_file(GeneratedFile),
1081 safe_file_exists(GeneratedFile),
1082 delete_file(GeneratedFile),
1083 fail.
1084 clear_logxml_output(_).
1085
1086 set_logxml_file(File) :- retractall(logxml_file(_)), assertz(logxml_file(File)),
1087 format('Adding -logxml ~w.~n',[File]),
1088 add_additional_arguments(['-logxml',File]).
1089
1090 :- use_module(probsrc(logger),[read_xml_log_file/2]).
1091 check_logxml(Id) :-
1092 logxml_file(File),!,
1093 xmllint(Id,File),
1094 check_logxml(Id,File).
1095 check_logxml(_).
1096
1097 xmllint(Id,File) :-
1098 (call_xmllint(File,ErrText,ExitLint)
1099 -> (ExitLint = exit(0) -> true
1100 ; print_test_failed(Id,'xmllint reported errors for file:',File),
1101 formatsilent(user_error,'% xmllint result~n~s~n',[ErrText])
1102 )
1103 ; print_test_failed(Id,'Calling xmllint failed for file:',File)
1104 ).
1105
1106 :- dynamic use_logxml_schema/0.
1107 set_use_logxml_schema :- (use_logxml_schema -> true ; assertz(use_logxml_schema)).
1108 call_xmllint(File,ErrText,ExitLint) :- use_logxml_schema,
1109 absolute_file_name(prob_home('doc/logxml_xsd.xml'),SchemaFile),
1110 file_exists(SchemaFile),!,
1111 get_command_path(xmllint,LintPath),
1112 format('Calling ~w with schema ~w for logxml file ~w~n',[LintPath,SchemaFile,File]),
1113 system_call(LintPath,['--schema', SchemaFile, File, '-noout'],_OutputText,ErrText,ExitLint).
1114 call_xmllint(File,ErrText,ExitLint) :-
1115 get_command_path(xmllint,LintPath),
1116 format('Calling ~w without schema for logxml file ~w~n',[LintPath,File]),
1117 system_call(LintPath,[File],_OutputText,ErrText,ExitLint).
1118
1119 check_logxml(Id,File) :-
1120 catch(read_xml_log_file(File,Infos), E,
1121 print_test_failed(Id,'Exception trying to read logxml file:',E)),
1122 !,
1123 format_progress_nl('Read logxml file ~w, contains ~w.',[File,Infos]),
check_error_count(Id,Infos).
1125 check_logxml(Id,File) :-
1126 print_test_failed(Id,'Logxml file could not be read:',File).
1127
check_error_count(Id,Infos) :-
1129 testcase_expects_errors(Id),!,
1130 (member(expected_errors/E,Infos),E>0 -> true ; print_test_failed(Id,'Logxml file does not report errors:',Infos)).
check_error_count(Id,Infos) :-
1132 (member(errors/0,Infos),member(expected_errors/0,Infos) -> true
1133 ; print_test_failed(Id,'Logxml file reports errors:',Infos)).
1134
1135 testcase_expects_errors(Id) :-
1136 cli_testcase(Id,_Cat,_Infos,Arguments,_Comment),
1137 member(A,Arguments), expect_error_arg(A).
1138
1139 expect_error_arg('-expcterr').
1140 expect_error_arg('-expecterr').
1141 expect_error_arg('-expect').
1142 expect_error_arg('-expcterrpos').
1143
1144 % ------------------
1145
1146 :- volatile repl_mode/0.
1147 :- dynamic repl_mode/0.
1148
1149 :- use_module(test_paths, [get_prob_examples_override/1, set_prob_examples_override/1]).
1150
1151 set_prob_examples_location(Dir) :-
1152 (atom(Dir) -> DirAtom = Dir ; atom_codes(DirAtom, Dir)),
format('Setting location of prob_examples directory to: ~w~n',[DirAtom]),
1154 set_prob_examples_override(DirAtom).
1155
1156 % Prefixes of arguments that contain file paths,
1157 % which may need to be patched to an alternate prob_examples location.
1158 path_arg_prefix("#file ").
1159 path_arg_prefix(":cdclt #file ").
1160 path_arg_prefix(":cdclt-file ").
1161 path_arg_prefix(":cdclt-free #file ").
1162 path_arg_prefix(":cdclt-double-check #file ").
1163 path_arg_prefix(":cdclt-free-double-check #file ").
1164 path_arg_prefix(":prob-file ").
1165 path_arg_prefix(":z3 #file ").
1166 path_arg_prefix(":z3-double-check #file ").
1167 path_arg_prefix(":z3-free-double-check #file ").
1168 path_arg_prefix(":z3-file ").
1169 path_arg_prefix(":z3-free #file ").
1170 path_arg_prefix(":z3-free-file ").
1171 path_arg_prefix("").
1172
1173 % update path to prob_examples if necessary:
1174 patch_prob_examples_loc_0(NewLoc,Arg,PatchedArg) :-
1175 path_arg_prefix(Prefix),
1176 append(Prefix, ArgTail, Arg),
1177 append("../prob_examples", PathTail, ArgTail),
1178 !,
1179 append(NewLoc, PathTail, PatchedArgTail),
1180 append(Prefix, PatchedArgTail, PatchedArg).
1181
1182 patch_prob_examples_loc(Arg,PatchedArg) :-
1183 get_prob_examples_override(NewLocAtom),
1184 atom_codes(NewLocAtom, NewLoc),
1185 atom(Arg),
1186 atom_codes(Arg,ArgC),
1187 patch_prob_examples_loc_0(NewLoc,ArgC,PatchedArgC),
1188 !,
1189 atom_codes(PatchedArg,PatchedArgC),
1190 format('Patched ~w to ~w~n',[Arg,PatchedArg]).
1191 patch_prob_examples_loc(A,A).
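
% Illustrative example: with the override set to '/data/prob_examples' (a
% hypothetical location), the argument '../prob_examples/public_examples/B/Foo.mch'
% is patched to '/data/prob_examples/public_examples/B/Foo.mch'; the non-empty
% path_arg_prefix/1 prefixes likewise allow patching paths embedded in REPL-style
% arguments such as ':z3-file ...'.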
1192
1193 get_testcase_do_not_delete(Id,PF) :-
1194 cli_testcase_do_not_delete(Id,F),
1195 patch_prob_examples_loc(F,PF).
1196
1197 get_testcase_diff_check_output(Id,PF1,PF2) :-
1198 cli_testcase_diff_check_output(Id,F1,F2),
1199 patch_prob_examples_loc(F1,PF1),
1200 patch_prob_examples_loc(F2,PF2).
1201
1202 :- use_module(extension('counter/counter'),[counter_init/0]).
1203 :- use_module(probsrc(prob_startup), [startup_prob/0]).
1204 init_test_runner :- startup_prob,counter_init.
1205 :- use_module(library(lists),[maplist/2]).
1206 % a minimal shell to execute tests:
1207 test_repl :-
1208 init_test_runner,
1209 format_prob_version(user_output),nl,
1210 assertz(repl_mode),
1211 current_prolog_flag(argv,ArgV), treat_argv(ArgV),
1212 test_repl_loop,
1213 retractall(repl_mode).
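
% Illustrative session: start via
%   ./prolog.sh --file tests/test_runner.pl --goal "test_repl."
% and then enter commands such as  1042.  (run one test id), 10-12. (a range),
% r. (25 random tests) or q. (quit); see test_eval1/1 below for all commands.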
1214
1215
1216 treat_argv(['-prob-examples',Dir|T]) :- !, set_prob_examples_location(Dir),
1217 treat_argv(T).
1218 treat_argv(Args) :- maplist(eval_argv,Args).
1219
1220 % execute tests provided on the command-line:
1221 eval_argv(Cmd) :- format('ARGV ==> ~w~n',[Cmd]),
1222 atom_codes(Cmd,C), safe_number_codes(Nr,C), !, test_eval(Nr).
1223 eval_argv(Cmd) :- test_eval(Cmd),!.
1224
1225 test_repl_loop :- safe_read(T), test_eval(T), !, test_repl_loop.
1226 test_repl_loop.
1227
1228 safe_read(T) :-
1229 catch(
1230 (prompt(OldPrompt, 'TEST ==> '), call_cleanup(read(T), prompt(_, OldPrompt))),
1231 error(syntax_error(E),_),
(format_warning_nl('*** Syntax error: ~w~n*** Type a Prolog term followed by a dot (.) and press enter.',[E]),
1233 safe_read(T))).
1234
1235 :- meta_predicate wall(0).
1236 wall(Call) :-
1237 statistics(walltime,[Start,_]),
1238 call(Call),
1239 statistics(walltime,[Stop,_]), WT is Stop-Start,
1240 format('Walltime: ~w ms~n',[WT]),
1241 print_memory_used_wo_gc,nl.
1242
1243 % -------------------------
1244
1245 :- use_module(library(file_systems)).
1246 :- use_module(probsrc(tools),[get_options/5]).
1247 % true for test_files
1248 test_file(Id,File,AbsFileName) :- cli_testcase(Id,_Cat,_Infos,Arguments,_Comment),
1249 get_options(Arguments,prob_cli:recognised_cli_option,_Options,Files,fail),
1250 member(File,Files),
1251 is_existing_file(File),
1252 absolute_file_name(File,AbsFileName).
1253
1254 is_existing_file(X) :- \+ number(X), atom(X),
atom_codes(X,Codes), [BS] = "/", memberchk(BS,Codes),
1256 file_exists(X).
1257
1258 % obtain a list of all files used in tests
1259 all_files(Files) :- findall(F,test_file(_,_,F),A), sort(A,Files).
1260
1261 % a test file that can be loaded:
1262 valid_test_file(Id,File,AbsFileName,XT) :-
1263 test_file(Id,File,AbsFileName),
1264 cli_testcase(Id,_Cat,_Infos,Arguments,_Comment),
1265 \+ append(_,['-expcterr', load_main_file |_],Arguments),
1266 tools:get_filename_extension(File,XT).
1267 all_valid_files(Files,Mode) :-
1268 findall(F, (valid_test_file(_,_,F,XT),
1269 relevant_extension(XT,Mode)),
1270 A),
1271 sort(A,Files).
1272
1273 % traverse a directory and indicate which specification files are used in tests and which ones not
1274 traverse :- traverse('../prob_examples/public_examples/').
1275 traverse(SD) :- all_files(Files), absolute_file_name(SD,StartDir),
1276 format('Examining files in ~w~n + means file is used in some test~n~n',[StartDir]),
1277 traverse(StartDir,Files).
1278
1279 traverse(Dir,AllFiles) :- file_member_of_directory(Dir,_,FullFile),
1280 tools:get_filename_extension(FullFile,XT),
1281 (member(FullFile,AllFiles) -> format(' + ~w~n',[FullFile])
1282 ; relevant_extension(XT,_) -> format('--- ~w~n',[FullFile])),
1283 fail.
1284 traverse(Dir,AllFiles) :- directory_member_of_directory(Dir,_,SubDir),
1285 %format('~nSTART ~w~n',[SubDir]),
1286 traverse(SubDir,AllFiles),
1287 %format('~n END ~w~n',[SubDir]),
1288 fail.
1289 traverse(_,_).
1290
1291 relevant_extension('mch',b).
1292 relevant_extension('ref',b).
1293 relevant_extension('imp',b).
1294 relevant_extension('tla',tla).
1295 relevant_extension('fuzz',z).
1296 relevant_extension('tex',z).
1297 relevant_extension('csp',csp).
1298 relevant_extension('cspm',csp).
1299 relevant_extension('eventb',eventb).
1300
1301 % --------------------------
1302
1303 test_eval(quit) :- !,fail.
1304 test_eval(q) :- !,fail.
1305 test_eval(end_of_file) :- !,fail. % Ctrl-D
1306 test_eval(Cmd) :- test_eval1(Cmd),!.
1307 test_eval(Cmd) :-
1308 format_warning_nl('Error executing command: ~w',[Cmd]).
1309
1310 test_eval1(N) :- number(N),!, wall(run_id(N)).
1311 test_eval1(last) :- !, wall(run_last_test).
1312 test_eval1(N-M) :- number(N), number(M),!, wall(run_tests_by_id(N-M)).
1313 test_eval1('..'(N,M)) :- !, test_eval1(N-M).
1314 test_eval1(repeat(ID,M)) :- !, repeat_id(ID,M).
1315 test_eval1(r) :- !, run_random_tests(25).
1316 test_eval1(v) :- !,v.
1317 test_eval1(verbose) :- !,v.
1318 test_eval1(all_files) :- !, all_files(Files), length(Files,Len),
1319 format('~nFiles = ~n~w~n # Files = ~w~n',[Files,Len]).
1320 test_eval1(valid_files(Mode)) :- !, all_valid_files(Files,Mode), length(Files,Len),
1321 format('~nValid ~w Files = ~n~w~n # Files = ~w~n',[Mode,Files,Len]).
1322 test_eval1(files) :- !, traverse.
1323 test_eval1(files(Dir)) :- !, traverse(Dir).
1324 test_eval1(ex(Dir)) :- !, set_prob_examples_location(Dir).
1325 test_eval1(cache) :- !,print('Enabling cache'),nl,
1326 cache.
1327 test_eval1(debug) :- !,print('Enabling Prolog debugging mode (use -v or -vv for ProB debugging info)'),nl,
1328 debug,
1329 retractall(multiply_timeout(_)),
1330 assertz(multiply_timeout(10)).
1331 test_eval1(factor(X)) :- !,
1332 retractall(multiply_timeout(_)),
1333 format('Setting timeout factor to ~w~n',[X]),
1334 assertz(multiply_timeout(X)).
1335 test_eval1(timeout(X)) :- !,
   format('Adding -timeout ~w for model checking and disproving.~nUse factor(X) to set the TIME_OUT factor.~n',[X]),
1337 add_additional_arguments(['-timeout',X]).
1338 test_eval1(coverage) :- !,
1339 format('Adding -coverage to all commands.~n',[]),
1340 add_additional_arguments(['-coverage']).
1341 test_eval1(opreuse) :- !,
1342 format('Adding operation reuse to all commands.~n',[]),
1343 add_additional_arguments(['-p', 'OPERATION_REUSE',true]).
1344 test_eval1(opc) :- !,
1345 format('Adding operation reuse and compression to all commands.~n',[]),
1346 add_additional_arguments(['-p', 'COMPRESSION', 'TRUE', '-p', 'OPERATION_REUSE',true]).
1347 test_eval1(opcf) :- !,
   format('Adding full operation reuse and compression to all commands.~n',[]),
1349 add_additional_arguments(['-p', 'COMPRESSION', 'TRUE', '-p', 'OPERATION_REUSE',full]).
1350 test_eval1(reset) :- !, reset_additional_arguments.
1351 test_eval1(logxml) :- !, File = './test_runner_logxml.xml',
1352 set_logxml_file(File).
1353 test_eval1(xsd) :- !, File = './test_runner_logxml.xml',
1354 set_logxml_file(File), set_use_logxml_schema.
1355 test_eval1(debug_off) :- !,print('Disabling Prolog debugging mode'),nl,
1356 nodebug,
1357 retractall(multiply_timeout(_)).
1358 test_eval1(fast) :- !,print('Enabling jvm_parser_fastrw'),nl,
1359 % TODO: only works at the very beginning; we need to be able to switch parser?
1360 add_additional_preference('jvm_parser_fastrw','true'),
1361 add_additional_preference('jvm_parser_force_parsing','true').
1362 test_eval1(force) :- !,print('Setting jvm_parser_force_parsing'),nl,
1363 add_additional_preference('jvm_parser_force_parsing','true').
1364 test_eval1(clpfd) :- !,print('Enabling CLPFD'),nl,
1365 add_additional_preference('CLPFD','TRUE').
1366 test_eval1(clpfd_off) :- !,print('Disabling CLPFD'),nl,
1367 add_additional_preference('CLPFD','FALSE').
1368 test_eval1(smt) :- !,print('Enabling SMT'),nl,
1369 add_additional_preference('SMT','TRUE').
1370 test_eval1(smt_off) :- !,print('Disabling SMT'),nl,
1371 add_additional_preference('SMT','FALSE').
1372 test_eval1(chr) :- !,print('Enabling CHR'),nl,
1373 add_additional_preference('CHR','TRUE').
1374 test_eval1(chr_off) :- !,print('Disabling CHR'),nl,
1375 add_additional_preference('CHR','FALSE').
1376 test_eval1(cse_off) :- !,print('Disabling CSE'),nl,
1377 add_additional_preference('CSE','FALSE').
1378 test_eval1(cse) :- !,print('Enabling CSE'),nl,
1379 add_additional_preference('CSE','TRUE').
1380 test_eval1(cse_subst) :- !,print('Enabling CSE_SUBST'),nl,
1381 add_additional_preference('CSE','TRUE'),
1382 add_additional_preference('CSE_SUBST','TRUE').
1383 test_eval1(trace_info) :- !,print('Enabling TRACE_INFO'),nl,
1384 add_additional_preference('TRACE_INFO','TRUE').
1385 %
1386 test_eval1(p(PREF)) :- !,
1387 get_preference_name(PREF,PName),
1388 print('Enabling Preference '),print(PName),nl,
1389 add_additional_preference(PName,'true').
1390 test_eval1(p(PREF,VAL)) :- !,
1391 get_preference_name(PREF,PName),
1392 print('Setting Preference '),print(PName),nl,
1393 add_additional_preference(PName,VAL).
1394 test_eval1(random) :- !,print('Enabling RANDOMISE_ENUMERATION_ORDER'),nl,
1395 add_additional_preference('RANDOMISE_ENUMERATION_ORDER','TRUE').
test_eval1(random_off) :- !,print('Disabling RANDOMISE_ENUMERATION_ORDER'),nl,
1397 add_additional_preference('RANDOMISE_ENUMERATION_ORDER','FALSE').
1398 test_eval1(safe) :- !,print('Setting PROB_SAFE_MODE'),nl,
1399 run_safely.
1400 test_eval1(sanity_check) :- !, sanity_check(false).
1401 test_eval1(sc) :- !, sanity_check(false).
1402 test_eval1(trace) :- !, print('Enabling TRACE_UPON_ERROR'),nl,
1403 add_additional_preference('TRACE_UPON_ERROR','TRUE').
1404 test_eval1(trace_off) :- !, print('Disabling TRACE_UPON_ERROR'),nl,
1405 add_additional_preference('TRACE_UPON_ERROR','FALSE').
1406 test_eval1(raise) :- !,print('Enabling STRICT_RAISE_ENUM_WARNINGS'),nl,
1407 add_additional_preference('STRICT_RAISE_ENUM_WARNINGS','TRUE').
1408 test_eval1(nopt) :- !,print('Disabling OPTIMIZE_AST'),nl,
1409 add_additional_preference('OPTIMIZE_AST','FALSE').
1410 test_eval1(vv) :- !,vv.
1411 test_eval1(version(X)) :- !, print_version(X). %cpp, java, full, lib
1412 test_eval1(version) :- !, print_version(full_verbose).
1413 test_eval1(silent) :- !, run_silently.
1414 test_eval1(nostrict) :- !, run_no_strict.
1415 test_eval1(no_strict) :- !, run_no_strict.
1416 test_eval1(strict) :- !, retractall(no_strict_running).
1417 test_eval1(skip) :- !, (retract(skip_all_tests) -> X=false ; assertz(skip_all_tests),X=true),
1418 format('Marking all tests as skipped: ~w~n',[X]).
1419 test_eval1(x) :- !,halt.
1420 test_eval1(reload) :- !,use_module(probsrc(test_runner)), use_module(probsrc(testcases)).
1421 :- if(predicate_property(make, _)).
1422 test_eval1(make) :- !, make.
1423 :- else.
1424 test_eval1(make) :- !,
1425 print('make/0 is only supported on SWI-Prolog - reloading just the testcases instead.'),nl,
1426 test_eval1(reload).
1427 :- endif.
1428 test_eval1(edit) :- last_testcase_run(Id),
1429 cli_testcase(Id,_,_Infos,Arguments,_Comment),
1430 member(File,Arguments), safe_file_exists(File),!,
1431 edit_file(File).
1432 test_eval1(e) :- !, test_eval1(edit).
1433 test_eval1(diff) :- !, diff_in_editor.
1434 test_eval1(halt) :- !,halt.
1435 test_eval1(info) :- !, get_total_number_of_errors(X), format('~nTotal number of errors: ~w~n~n',[X]).
1436 test_eval1(cat) :- !, print('Categories: '),
1437 findall(Cat,
1438 (cli_testcase(_, TestCategories, _, _, _), member(Cat, TestCategories)), List),
1439 sort(List,SL), print(SL),nl,
1440 format('Type cat(Cat) or name of category to run it.~n',[]),
1441 format('Note: priv is an alias for private_source_not_available.~n',[]).
1442 test_eval1(all) :- !, wall(run_tests_all).
1443 test_eval1(cata) :- !, category_analysis.
1444 test_eval1(cat(Category)) :- !,
1445 wall(run_tests_by_category(Category,all)).
1446 test_eval1(priv) :- !, test_eval1(cat(private_source_not_available)).
1447 test_eval1(first(Category)) :- !,
1448 wall(run_tests_by_first_category(Category)).
1449 test_eval1(list(Category)) :- !,
1450 get_tests_for_categories([Category],TList),
1451 findall(Id,member(testcase(Id, _, _, _, _),TList),List),
1452 format('Tests for category: ~w~n',[List]).
1453 test_eval1(make(Categories)) :- !,
1454 wall(makefile_by_category(user_output,Categories)).
1455 test_eval1(make(File,Categories)) :- !,
1456 wall(makefile_by_category(File,Categories)).
1457 test_eval1(files(Category)) :- !, show_files(Category).
1458 test_eval1(uses(Command)) :- !, wall(run_tests_using_command(Command)).
1459 test_eval1(uses(Pref,Val)) :- !, wall(run_tests_using_preference(Pref,Val)).
1460 test_eval1(p) :- !, test_eval1(profile).
1461 test_eval1(ps) :- !, test_eval1(print_profile).
1462 test_eval1(pc) :- !, test_eval1(print_coverage).
1463 test_eval1(delta) :- !, print_delta_stats.
1464 test_eval1(stats) :- !, print_current_stats.
1465 test_eval1(statscsv) :- !, print_current_stats(user_output,',').
1466 test_eval1(start) :- !, performance_session_start.
1467 test_eval1(stop) :- !, performance_session_end('log/test_runner_performance_log.pl').
1468 test_eval1(codespeed) :- !,
1469 performance_session_start,
1470 test_eval1(cat(codespeed)),
1471 performance_session_end('log/test_runner_performance_codespeed_log.pl').
1472 test_eval1(prob_profile) :- !,
1473 cli_print_statistics(prob_profile),
1474 cli_print_statistics(disprover_profile).
1475 test_eval1(profile) :- !, print('PROFILING : '), %spy([avl:avl_size/2]),
1476 (current_prolog_flag(profiling,on)
1477 -> set_prolog_flag(profiling,off), print('OFF') ;
1478 set_prolog_flag(profiling,on), print('ON')),
   nl,print('Use ps to print profile info or pc to print coverage info'),nl.
1480 test_eval1(profile_stats) :- !, test_eval1(print_profile).
1481 test_eval1(print_profile) :- !, nl,print('PROFILE INFORMATION:'), nl,
1482 catch(print_profile,
1483 error(existence_error(_,_),_),
1484 print('CAN ONLY BE USED WHEN RUNNING PROB FROM SOURCE')),
1485 nl, debug:timer_statistics.
1486 test_eval1(print_coverage) :- !, nl,print('COVERAGE INFORMATION:'), nl,
1487 (current_prolog_flag(source_info,on) -> true ; format_warning_nl('Only useful when current_prolog_flag(source_info,on)!',[])),
1488 catch(print_coverage,
1489 error(existence_error(_,_),_),
1490 print('CAN ONLY BE USED WHEN RUNNING PROB FROM SOURCE')),
1491 nl.
1492 test_eval1(profile_reset) :- !, nl,print('RESETTING PROFILE INFORMATION'), nl,
1493 catch(profile_reset,
1494 error(existence_error(_,_),_),
1495 print('CAN ONLY BE USED WHEN RUNNING PROB FROM SOURCE')),
1496 nl.
1497 test_eval1(perf) :- !, toggle_perfmessages.
1498 test_eval1(Category) :- valid_category(Category),!,
1499 wall(run_tests_by_category(Category,all)).
1500 test_eval1(codespeed(Id)) :- !, codespeed_id(Id).
1501 test_eval1(cs(Id)) :- !, codespeed_id(Id).
1502 test_eval1('-'(ProBCLICommand)) :- atom(ProBCLICommand),
1503 atom_concat('-',ProBCLICommand,Atom),
1504 (prob_cli:recognised_option(Atom,Call) -> true % from probcli.pl
1505 ; format('Unknown probcli command: -~w~n',[ProBCLICommand]),fail),
1506 !,
1507 format('Executing probcli command ~w~n',[Call]),
1508 call(prob_cli:Call).
1509 test_eval1([H|T]) :- number(H),!, wall(run_tests_by_id([H|T],_,no_skipping)).
1510 test_eval1([Category|T]) :- valid_category(Category),!,
1511 wall(run_tests_by_category([Category|T],all)).
1512 test_eval1(C) :-
1513 (C=help -> true ; format('*** Unknown command ~w~n',[C])),
1514 print(' Commands: Nr, Nr-Nr, last, q, x, v, vv, uses(Cmd), uses(Pref,Val),'),nl,
1515 print(' repeat(id,nr), timeout(ms),factor(f), e,edit,diff, r (for 25 random tests),'),nl,
1516 print(' cat, cat(Cats),make(Cats),make(File,Cats),files(Cat),'),nl,
1517 print(' profile, profile_stats, (to turn Prolog profiling on and print info)'),nl,
1518 print(' debug,debug_off, (to turn Prolog debug mode on or off)'),nl,
print('    reload, sc,'),nl,
1520 print(' -CMD, (for probcli commands like -profile)'),nl,
1521 print(' * for setting preferences:'),nl,
1522 print(' p(PREF), p(PREF,VAL),'),nl,
1523 print(' clpfd,clpfd_off, smt,smt_off, chr,chr_off, cse,cse_subst,cse_off,'),nl,
1524 print(' random,random_off, trace_info, nopt,'),nl,
1525 print(' cache, perf, (turn ProB caching or performance messages on)'),nl,
1526 print(' trace,trace_off, (set TRACE_UPON_ERROR preference)'),nl,
1527 print(' * statistics:'),nl,
1528 print(' delta, stats, info.'),nl.
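% Illustrative REPL session (test numbers and category name are hypothetical):
%   TEST ==> 42.          % run test 42 and print its walltime
%   TEST ==> 100-110.     % run tests 100 to 110
%   TEST ==> cat(laws).   % run every test of category laws
%   TEST ==> clpfd_off.   % run subsequent tests with -p CLPFD FALSE
%   TEST ==> q.           % leave the test REPL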
1529
1530 safe_file_exists(F) :- atom(F), file_exists(F).
1531
1532 valid_category(Cat) :-
1533 cli_testcase(_Id, TestCategories, _Infos, _Arguments, _Comment),
1534 member(Cat, TestCategories).
1535
1536
1537 % ---------------------------
1538
1539 % run test using binary
1540 % binaries are registered in a file probcli_bak/codespeed_versions.pl
1541 % results for test ID are written to probcli_bak/codespeed/res_ID.csv
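% An entry in codespeed_versions.pl has the form
% probcli_binary(V1,V2,V3,Flag,Path,Hash,sicstus(S1,S2,S3),date(Y,M,D)),
% e.g. (all values purely illustrative):
%   probcli_binary(1,11,1,final,'versions/probcli-1.11.1',abc123,sicstus(4,7,1),date(2022,6,1)).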
1542
1543 :- use_module(probsrc(tools), [ajoin/2,ajoin_with_sep/3]).
codespeed_id(Ids) :- codespeed_id(Ids,[min_date(date(2020,1,1))]). % by default only use binaries from 2020 onwards
1545 codespeed_id([],_) :- !.
1546 codespeed_id([H|T],Options) :- !, codespeed_id(H,Options),!, codespeed_id(T,Options).
1547 codespeed_id(Cat,Options) :- atom(Cat),!,
1548 get_tests_for_categories([Cat],List),
1549 codespeed_id(List,Options).
1550 codespeed_id(testcase(Id, _, _, _, _),Options) :- !, codespeed_id(Id,Options).
1551 codespeed_id(Id,Options) :-
1552 use_module('probcli_bak/codespeed_versions.pl'), % contains probcli_binary
1553 cli_testcase(Id,_TestCategories,_TestInfos,Arguments,Comment),
1554 format('Benchmarking test ~w using probcli binaries ~w~nTest Description: ~w~n',[Id,Options,Comment]),
1555 maplist(convert_arg_to_atom,Arguments,Args2),
1556 findall(probcli_binary(V1,V2,V3,F,Path,Sics,Date),
1557 get_probcli_binary_info(Options,V1,V2,V3,F,Path,_Hash,Sics,Date),Binaries),
1558 Repeats=3,
1559 maplist(bench_probcli_binary_testcase(Id,Args2,Repeats),Binaries,Walltimes,Oks),
1560
1561 ajoin(['codespeed/res_',Id,'.csv'],FName),
1562 absolute_file_name(FName,AF,[relative_to(probcli_bak)]),
1563 format('Writing codespeed results for ~w to ~w~n',[Id,AF]),
1564 open(AF,write,Stream),
1565 format(Stream,'\"Test ~w\"~n',[Id]),
1566 format(Stream,'\"~w\"~n',[Comment]),
1567 ajoin_with_sep(Arguments,' ',ArgsStr),
1568 format(Stream,'Command:,\"probcli ~w\"~n~n',[ArgsStr]),
1569 format(Stream,'\"~w.~w.~w-~w\",~w,~w,~w,~w,~w,~w,\"~w\"~n',[v1,v2,v3,f,sics,date,avg,min,max,ok,walltimes]),
1570 maplist(print_codespeed_results(Stream,Repeats),Binaries,Walltimes,Oks),
1571 close(Stream),
1572 maplist(print_codespeed_results(user_output,Repeats),Binaries,Walltimes,Oks).
1573
1574 get_probcli_binary_info(Options,V1,V2,V3,F,Path,Hash,Sics,Date) :-
1575 probcli_binary(V1,V2,V3,F,Path,Hash,Sics,Date), % from codespeed_versions.pl
1576 (( member(min_date(D2),Options), D2 @> Date
1577 ; member(max_date(D3),Options), D3 @< Date
1578 ; member(min_version(V12,V22,V32),Options), v(V12,V22,V32) @> v(V1,V2,V3)
1579 )
1580 -> format('Excluding version ~w.~w.~w-~w ~w~n',[V1,V2,V3,F,Options]),
1581 fail
1582 ; % format('Including version ~w.~w.~w-~w ~w~n',[V1,V2,V3,F,Options]),
1583 true).
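% Example (test id illustrative): benchmark test 1234 using only binaries
% from 2021 onwards with version at least 1.11.0:
%   ?- codespeed_id(1234,[min_date(date(2021,1,1)),min_version(1,11,0)]).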
1584
1585
1586 %:- use_module(library(statistics),[min_max/3]).
1587 print_codespeed_results(Stream,Repeats,probcli_binary(V1,V2,V3,F,_Path,sicstus(S1,S2,S3),date(Y,M,D)),Walltimes,Ok) :-
1588 sumlist(Walltimes,Sum),
1589 Average is Sum / Repeats,
1590 min_member(Min,Walltimes),
1591 max_member(Max,Walltimes),
1592 ajoin([S1,'.',S2,'.',S3],Sics),
1593 ajoin([Y,'/',M,'/',D],Date),
1594 format(Stream,'\"~w.~w.~w-~w\",~w,~w,~w,~w,~w,~w,\"~w\"~n',[V1,V2,V3,F,Sics,Date,Average,Min,Max,Ok,Walltimes]).
1595
1596 %bench_probcli_binary_testcase(Id,_Args,_Repeats,probcli_binary(V1,V2,V3,F,Path,_,_),Walltimes,Ok) :-
1597 % format('~nDry run test ~w using version ~w.~w.~w-~w (~w)~n',[Id,V1,V2,V3,F,Path]),!, Ok=skipped, Walltimes=[0].
1598 bench_probcli_binary_testcase(Id,Args,Repeats,probcli_binary(V1,V2,V3,F,Path,_,_),Walltimes,Ok) :-
1599 format('~nRunning test ~w using version ~w.~w.~w-~w (~w)~n',[Id,V1,V2,V3,F,Path]),
1600 run_probcli_binary_testcase(Id,Path,Args,_WT,Ok), % run once for parser
1601 rep_bench(Repeats,Id,Path,Args,Walltimes).
1602
1603 rep_bench(0,_,_,_,[]).
1604 rep_bench(Nr,Id,Path,Args,[WT1|WTR]) :- Nr>0,
1605 run_probcli_binary_testcase(Id,Path,Args,WT1,_),
1606 N1 is Nr-1,
1607 rep_bench(N1,Id,Path,Args,WTR).
1608
1609 :- use_module(probsrc(system_call), [system_call/5]).
1610 run_probcli_binary_testcase(Id,Path,Arguments,WT,Ok) :-
1611 statistics(walltime,[Start,_]),
1612 % absolute_file_name('probcli_bak/',BakPath),
1613 % atom_concat(BakPath,Path,Cmd),
1614 absolute_file_name(Path,Cmd,[relative_to(probcli_bak)]),
1615 format(' Test ~w :: ~w ~w~n',[Id,Cmd,Arguments]),
1616 system_call(Cmd,Arguments,_OutputText,ErrText,Exit),
1617 statistics(walltime,[Stop,_]), WT is Stop-Start,
1618 format(' Walltime: ~w ms; ~w~n',[WT,Exit]),
1619 (Exit=exit(0), ErrText = [] -> Ok=true
1620 ; format_error_nl('STD-ERROR (~w):~n~s',[Exit,ErrText]), Ok=false
1621 ).
1622
1623 % convert for system_call/process_create which do not accept numbers:
1624 convert_arg_to_atom(Nr,Atom) :- number(Nr), number_codes(Nr,C),!,atom_codes(Atom,C).
1625 convert_arg_to_atom(A,A).
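% e.g. convert_arg_to_atom(500,A) yields A = '500', while atoms such as
% '-timeout' are passed through unchanged.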
1626
1627
1628 % --------------------
1629
1630 category_analysis :-
1631 format('Analysing test categories:~n',[]),
1632 findall(Cat-Id,(cli_testcase(Id,Categories,_,_Args,_Comm1),member(Cat,Categories)),L),
1633 sort(L,SL),
1634 keyclumped(SL,Groups), % Groups = [ Cat1 - [TestNr1, ...], Cat2 - [...], ...]
1635 maplist(print_cat_group,Groups).
1636
% count files (total and distinct) used by the tests of a category;
% Id may be left unbound to range over all tests of the category
nr_of_files(Cat,Id,Nr,DistinctNr) :-
1638 findall(File,(cli_testcase(Id,Categories,_,Args,_Comm1),
1639 member(Cat,Categories),
1640 file_in_arguments(File,Args)),Files),
1641 length(Files,Nr),
1642 sort(Files,SFile),
1643 length(SFile,DistinctNr).
1644
1645 print_cat_group(Cat-Tests) :-
1646 Tests = [First|T],
1647 nr_of_files(Cat,_,NrFiles,Distinct),
1648 (T=[]
1649 -> format(' ~w : 1 test : [~w] : ~w files, ~w distinct~n',[Cat,First,NrFiles,Distinct])
1650 ; length(Tests,Len),
1651 last(T,Last),
1652 format(' ~w : ~w tests : [~w .. ~w] : ~w files, ~w distinct~n',[Cat,Len,First,Last,NrFiles,Distinct])
1653 ).
1654