-- collection of top-level input routines for logic scenario files. starts at verifier_invoker
package prynter; -- auxiliary routines for capturing and transforming 'print' statements
var verif_start_time; -- start time for verification
var step_start_time; -- start time for verification of single proof step
var step_time_list,step_kind_list; -- list of times and kinds for verification of steps in proof
var best_time_so_far; -- optimized time, if automated context optimization is used
var retained_line := OM; -- start line of report; retained until disposition known
var step_kind; -- one-character step-kind indicator for reporting
var statement_being_tested; -- to report verification errors
var number_of_statement; -- to report verification errors
var number_of_statement_theorem; -- to report verification errors
var name_of_statement_theorem; -- to report verification errors
var prior_num := 0; -- to track success omissions and modify green intensity
var retain_thm_name := ""; -- retained for error printing
var retain_thm_num := ""; -- retained for error printing
var retain_line_num := 0; -- retained for error printing
var ncharacters_so_far := 0;					-- num characters written to the secondary error file during the second output phase
var was_open_div := OM; -- flag: is there a prior error division?
var divlist := []; -- list of all error divisions
var err_ohandle; -- handle for detailed error output during second output phase.
var was_syntax_error := false; -- flag: was there a syntax error?
var cycles_inferencing := 0; -- cycles spent in base-level inferencing
var dump_theorems_handle; -- handle for writing theorems to theorem_summary file
var dump_defs_handle; -- handle for writing definitions to definition_summary file
var dump_theories_handle; -- handle for writing theories to theory_summary file
var dump_theorems_handle2; -- handle for writing theorems to theorem_summary2 file
var dump_defs_handle2; -- handle for writing definitions to definition_summary2 file
var dump_theories_handle2; -- handle for writing theories to theory_summary2 file
var dump_theorems_flag; -- flag: should theorems be written to theorem_summary file?
var theorem_count := 0; -- count of theorems during dump
var first_button_call := OM; -- call of first button in formatted error table
var ntheorem_w_proofs := 0; -- count of theorems with proofs plus those defined by top-level 'APPLY' statements
var tpa_to_just_tp_num := []; -- vector, mapping of ntheorem_plus_top_apply number into ntheorem_w_proofs number
var extra_message; -- supplementary part of error message
procedure printy(tup); -- prints a tuple by converting/concatenating to string and then printing the result
	procedure set_output_phase(n);			-- set the output phase, controlling the target file to which output will be written
procedure unicode_unpahrse(tree); -- ******** unparse in one of two modes ********
end prynter;
package body prynter; -- auxiliary routines for capturing and transforming 'print' statements
use logic_parser_globals,sort_pak,string_utility_pak,logic_syntax_analysis_pak; -- to get user_prefix
	var ohandle;							-- handle for success output during second output phase.
var ferr_ohandle; -- handle for formatted error output during second output phase.
var prynter_phase := 1; -- during printer phase 1 we write to standard output; subsequently to user_prefix + "Outfile_" + n
	procedure set_output_phase(n);			-- set the output phase, controlling the target file to which output will be written
-- this allows the prynter_phase to be set back to 1, but not back to any other value
if prynter_phase = n then return; end if; -- since the phase is not actually changing
if prynter_phase > 1 then
if prynter_phase /= 2 then printa(ohandle,"<P>*** Completed output phase ",prynter_phase,"<P>"); end if;
close(ohandle); close(ferr_ohandle);
else
print("<P>*** Completed output phase ",prynter_phase,"<P>");
end if;
if n > 1 then
ohandle := open(user_prefix + "Outfile_" + n,"TEXT-OUT");
err_ohandle := open(user_prefix + "Err","TEXT-OUT");
ferr_ohandle := open(user_prefix + "Ferr","TEXT-OUT");
end if;
prynter_phase := n;
end set_output_phase;
-- This organizes printing and output into two 'phases'. During the first phase, printing of proofs is direct;
	-- during the second phase, messages received are separated into a report stream and an error stream, and formatted
-- into html tables.
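	-- Usage sketch (hypothetical call sequence): after set_output_phase(2), printy output is sorted as
	-- described below: 'starting' and 'OKtime' report lines go to user_prefix + "Outfile_2", while error
	-- material goes to the user_prefix + "Err" and "Ferr" files; set_output_phase(1) reverts to direct printing.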
procedure printy(tup); -- prints a tuple by converting/concatenating to string and then printing the result
-- During phase 2 this sorts the printed output into 'success' and 'error' streams,
-- formatting them both and directing them to different files.
-- 'error' stream formatting works as follows: Lines starting with '++++++++' are suppressed,
-- but the theorem name and line number are extracted from them and retained.
-- Then, when a line containing '****** Error verifying step:' is encountered,
-- we extract the step number, and (from the next two lines) the offending statement and the
-- 'remark' describing the offense. Subsequent material from the same error record is
-- written to a secondary error file, and we keep track of the starting and ending characters
-- of the section of this file that corresponds to each verification error.
-- The fields describing the error line itself are assembled into an HTML table row string of the form
-- <TR><TD BGCOLOR = "#ff8888" rowspan = 2><Input type ="radio" name ="pick" onclick ="butt_click('usr',1,100);">
-- </TD><TD align = center rowspan = 2>182</TD><TD rowspan = 2>Theorem 116</TD>
-- <TD align = center rowspan = 2>14</TD><TD align = center rowspan = 2>F</TD>
-- <TD BGCOLOR = "#ffdddd">(m,n - m) --> T189 ==> #(m + (n - m)) = m •PLUS (n - m)</TD>
-- </TR><TR><TD>Explanatory remark</TD></TR>
-- The .php server code associated with error display prefixes these generated lines with header lines having the form
-- <HTML><HEAD><Title>EtnaNova Preliminary - Verification Failures Report</Title></HEAD> <BODY><B>
-- <FORM name = "etna_nova_errform" method = "POST" enctype = "multipart/form-data">
-- <table border="1" cellpadding = 5>
-- <TR><TD></TD><TD BGCOLOR = '#ff8888'>Thm. No.</TD><TD BGCOLOR = '#ff8888'>Thm. Name</TD>
-- <TD BGCOLOR = '#ff8888'>Line No.</TD><TD BGCOLOR = '#ff8888'>F/A</TD><TD align = center BGCOLOR = '#ff8888'>Line</TD></TR>"
-- and then echoes all the preceding lines to the client, followed by a concluding block of the form
-- </Table></FORM></B>
-- <script>
-- function butt_click(usr,st,nd) {
-- dfee = document.forms['etna_nova_errform'];
-- dfee.action = 'etna_nova_err_dets.php?usr,st,nd';
-- dfee.submit();}
-- </script>
-- </BODY></HTML>
--print(tup); return;
		if prynter_phase = 1 then		-- during the first phase output is simply directed to the standard output stream
print("<BR>" +/[str(x): x in tup]); -- the components of the tuple are simply converted into strings and concatenated
		else			-- during subsequent phases, output is directed to a designated file
-- lines relating to success and to timing are written to the second phase output file;
-- lines related to error are written to the error file. These files are written
-- in tabular formats for display
if (t1 := tup(1)) /= OM and #str(t1) > 39 and t1(1..40) = "\n+++++++++++ starting verifications for:" then -- we have a 'holdback' line
-- A line in this format is written at the start of verification for each theorem that gives the
-- theorem name and number separated by a '#' sign.
tup(1) := "\n\n++++++++" + t1(41..); -- abbreviate the prefix
t1 := ("" +/tup)(3..); -- bypass the eol's
span(t1,"+ "); name_and_no := break(t1," "); -- get theorem name and number
[retain_thm_name,retain_thm_num] := breakup(name_and_no,"#");
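				-- illustrative example (values hypothetical, format as in the convert_to_row sample below): a holdback
				-- line ending "... for: T0#1 ..." gives name_and_no = "T0#1", so that retain_thm_name = "T0"
				-- and retain_thm_num = "1"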
if retained_line /= OM then printa(ohandle,retained_line); end if; -- print any previous 'starting' line
retained_line := "" +/[str(x): x in tup]; -- retain the new 'starting' line
else -- otherwise simply print the line, along with any previously retained line
-- but if this line starts with " OKtime: " or " NOKtime: " then process the line into HTML table form
if (t1 := tup(1)) = " OKtime: " or t1 = " NOKtime: " then
-- Lines of this form are issued at the start of each proof, giving the number of steps of the proof,
-- the proof time, detailed timing and step kind information for all proof steps, and indicating
-- success or failure of each step.
printa(ohandle,convert_to_row(if retained_line = OM then "" else retained_line end if +/[str(x): x in tup]));
retained_line := OM; -- and in this case the retained line has been used up
else -- we have an error header or detail line
-- error lines are written to the 'Err_' file, but the formatted error lines are written to the
-- 'Ferr_' file. We keep track of the 'Err_' file starting position of each item,
-- so that this portion can be displayed when a radio button in the error summary table is selected.
nt1 := #t1; -- get length of t1
-- step verification failure always issues a message starting with "\n****** Error verifying step:"
-- and continuing with a step number and indication of whether the step failed or was abandoned
if nt1 >= 29 and t1(1..29) = "\n****** Error verifying step:" then -- we have a new error indication
				[l1,offending_statement,comment] := breakup(("" +/[str(x): x in tup])(30..),"\n");
-- bypass the '\n****** Error verifying step:'
span(l1,"\t "); stepno := break(l1,"\t "); -- get the step number
-- now determine if we have an 'Abandoned' case
fa_flag := if #comment >= 10 and comment(1..10) = "\nAbandoned" then "A" else "F" end if;
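					-- illustrative example (hypothetical): an error record beginning "\n****** Error verifying step: 14 ..."
					-- gives stepno = "14"; fa_flag becomes "A" for an abandoned step and "F" for an ordinary failure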
					-- The information provided by the error tuple is used to set up the lines of the error table, giving the
					-- theorem number and name, the line number of the error, a failure/abandoned indication, and a summary
					-- comment describing the failure.
-- Each such line is prefixed by a radio button, which, if clicked, brings up more detailed
-- information concerning the error.
-- Compose the formatted lines for the error summary;
-- enclose the information items in HTML tags
rbutton_part := "<TR><TD BGCOLOR = '#ff8888' rowspan = 2><Input type ='radio' name ='pick' " +
if first_button_call /= OM then "" else " checked " end if + -- set first error button on
"onclick ='butt_click(\"S" + stepno + "." + retain_thm_name + "\");'>";
-- keep first button-click command
first_button_call ?:= "butt_click(\"S" + stepno + "." + retain_thm_name + "\");";
thmno_part := "</TD><TD align = center rowspan = 2>" + retain_thm_num?"?" + "</TD>";
thmname_part := "<TD rowspan = 2>" + retain_thm_name?"?" + "</TD>";
stepno_part := "<TD align = center rowspan = 2>" + stepno?"?" + "</TD>";
fa_part := "<TD align = center rowspan = 2>" + fa_flag + "</TD>";
-- badline_part := "<TD BGCOLOR = '#ffdddd'>" + unicode_unpahrse(parse_expr(offending_statement + ";")) + "</TD>";
badline_part := "<TD BGCOLOR = '#ffdddd'>" + offending_statement + "</TD>";
comment_part := "</TR><TR><TD>" + comment + "</TD></TR>";
-- assemble the pieces
formatted_line := rbutton_part + thmno_part + thmname_part + stepno_part + fa_part + badline_part + comment_part;
printa(ferr_ohandle,formatted_line);
					-- end the prior textarea and division in the 'Err' file. Note that EtnaNova_main.php must set up
					-- an initial dummy textarea and div, and must close the last textarea and div set up by this SETL code
if was_open_div /= OM then
printa(err_ohandle,"</div>"); -- and set up a new division with Id = Sstepno.thmname
-- this is exactly the id referenced in the corresponding error radiobutton set up above
end if;
divlist with:= (dvid := "S" + stepno + "." + retain_thm_name); -- retain name of new division
printa(err_ohandle,"<div id ='" + dvid + "'>"); -- set up new division
was_open_div := true; -- which will need to be closed
--printa(err_ohandle,"<textarea cols = 100 rows = 300>"); -- and up a new textarea in this division (dont use textarea)
end if;
-- write all the error information, including the header lines just processed, to the 'Err' file
printa(err_ohandle,"<BR>",stg_written := if retained_line = OM then "" else retained_line end if + " " +/[str(x): x in tup]);
end if;
end if;
end if;
end printy;
	procedure convert_to_row(stg);		-- convert normal success output string to HTML row format
span(stg," \t\n"); -- format of stg is like ++++++++ T0#1 -- 1554 OKtime: 9[0, 1, 5, 1]["P", "S", "D"]; the tuple being step times (possibly NOKtime:)
pref := match(stg,"++++++++"); -- check start of string
if pref = "" then return ""; end if; -- process only rows in expected format, suppress others
span(stg," \t"); -- advance to the 'T' field
thmsig := break(stg," \t"); -- break out the 'T' field
span(stg," \t-0123456789"); -- span past irrelevant time field
ok_nok := match(stg,"OKtime: "); -- advance to the times field
ok_nok := match(stg,"NOKtime: "); -- advance to the times field
stepkinds := rbreak(stg,"["); stepkinds := unstr("[" + stepkinds); rmatch(stg,"[");
--print("<P>stepkinds: ",stepkinds);
thmtime := break(stg,"["); -- break out the total theorem time
-- for theorems verified very rapidly, suppress time portion of report
[name,num] := breakup(thmsig,"#"); -- get the theorem name and number
if unstr(thmtime)?0 < 20 then -- theorem proof is short; don't give step details
res := "<TR><TD BGCOLOR = " + if ok_nok = "NOKtime: " then "'#ff8888'" elseif prior_num = unstr(num?"-1000") - 1 then "'#88ff88'" else "'#00dd00'" end if +
" align = 'center'>" + join([num,name],"</TD><TD align = 'center'>") + "</TD></TR>";
prior_num := unstr(num?"1000");
return res; -- done with this case
end if;
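		-- for the sample line shown above ("++++++++ T0#1 -- 1554 OKtime: 9[0, 1, 5, 1]..."), thmtime is 9,
		-- so this short-proof branch produces the two-cell row (green, assuming prior_num = 0):
		-- <TR><TD BGCOLOR = '#88ff88' align = 'center'>1</TD><TD align = 'center'>T0</TD></TR>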
-- otherwise the theorem time is long enough for the step times to be reported in some detail
		nsteps := #(time_list := unstr(stg)?"[]");		-- convert the step time list to a numerical tuple
time_list := merge_sort([[-t,j]: t = time_list(j) | t > 6]); -- sort it in descending time order
time_list := time_list(1..#time_list min 10); -- take at most 10 of the longest step times
time_list := merge_sort([[j,-t]: [t,j] in time_list]); -- rearrange into original order
time_list := ["<TD align = 'center'" + if t > 500 then " BGCOLOR = '#aaaaff' " elseif t > 100 then " BGCOLOR = '#ffaaaa' " elseif t > 50 then " BGCOLOR = '#dddddd' " else "" end if + ">" + stepkinds(j) + str(j) + ": " + t: [j,t] in time_list]; -- convert to string
res := "<TR><TD BGCOLOR = " + if ok_nok = "NOKtime: " then "'#ff8888'" elseif prior_num = unstr(num) - 1 then "'#88ff88'" else "'#00dd00'" end if + " align = 'center'>" +
join(([num,"<TD align = 'center'>" + name] with ("<TD align = 'center'>#" + str(nsteps) + ": t" + thmtime)) + time_list,"</TD>") + "</TD></TR>";
prior_num := unstr(num);
return res;
end convert_to_row;
procedure unicode_unpahrse(tree); -- ******** unparse in one of two modes ********
return if running_on_server then unicode_unparse(tree) else unparse(tree) end if;
end unicode_unpahrse;
end prynter;
-- *******************************************
-- ******* Overall Survey of this file *******
-- *******************************************
--
--The collection of routines which follow falls into the following principal sections and subsections:
--
--(A) Top level parsing procedures
--
-- [A.1] Main program for scenario decomposition into numbered sections
-- division of proof lines into 'hint' and 'body' portions
-- [A.2] Collection and numbering of theory sections
-- [A.3] Enforcement of special Ref syntactic/semantic rules
-- [A.4] Interfaces to the native SETL parser
--
--(B) Master control of in-proof inferencing and related utilities
--
-- [B.0] Initial read-in of digested proof and theory related files
-- [B.1] Master entry for checking inferences in a single proof
-- [B.2] conjunction building for inference checking
-- [B.3] Definition checking
-- Checking of recursive definitions
-- [B.4] Removal of internal labels from proof lines
-- [B.5] Interface to the MLSS routines
-- [B.6] Routine to disable/enable checking of particular classes of inference
--
--(C) Library of routines which handle details of individual inference classes
--
-- [C.1] Discharge inference checking
-- [C.2] Statement citation inference checking
-- [C.3] Theorem citation inference checking
-- [C.4] Equality inference checking
-- [C.5] Algebra inference checking
-- [C.6] Simplification inference checking
-- [C.7] Use_def inference checking
-- [C.8] Suppose_not inference checking
-- [C.9] Monotonicity inference checking
-- [C.10] Loc_def inference checking
-- [C.11] Assumption inference checking for theorems within theories
-- [C.12] THEORY application inference checking (outside proofs)
-- APPLY conclusion checking (for THEORY application outside proofs)
-- [C.12a] THEORY application inference checking (within proofs)
-- Syntax checking for THEORY application
-- Analysis of APPLY hints within proofs
-- Finding the variables to be substituted for APPLY _thryvars
-- [C.13] Skolemization inference checking (outside proofs)
-- [C.13a] Skolemization inference checking (within proofs)
--
--(D) Interfacing to external provers
--
-- [D.1] Interface to 'Otter' prover
--
--(E) Miscellaneous utilities
--
-- [E.1] Formula feature search utilities
-- [E.2] Utilities for statistical analysis of the proof scenarios
-- [E.3] Code for automated optimization of proof scenarios
--
--(F) Test Program collection for Ref top level routines
-- ******************************************************************
-- *************** Detailed Index, showing procedures ***************
-- ******************************************************************
--(A) ***** Top level parsing procedures *****
--
-- procedure parse_scenario(file_name,first_proof,last_proof); -- parse a specified Defs_w_proofs file
-- procedure parse_Defs_w_proofs(lines_tup); -- parse the Defs_w_proofs file
-- [A.1] Main program for scenario decomposition into numbered sections
-- procedure parse_file(lines_tup); -- extracts the sequence of definitions, theorems, proofs, and theories from a scenario file
-- division of proof lines into 'hint' and 'body' portions
-- procedure digest_proof_lines(proof_sect_num); -- finds location of ==> in string, if any; position of last character is returned
-- procedure theorem_text(sect_no); -- gets stripped text of theorem
-- procedure definition_text(sect_no); -- gets stripped text of definition
-- [A.2] Collection and numbering of theory sections
-- procedure take_section(now_in,current_section); -- collects section
-- [A.3] Enforcement of special Ref syntactic/semantic rules
--		 	procedure tree_check(node);		-- checks that there are no compound functions, enumerated tuples of length > 2, or unrestricted iterators in setformers
-- procedure get_theorem_name(stg); -- extracts theorem name from string
-- procedure check_theorem_map(); -- check the syntax of all the theorems written to the theorem_map_file
-- [A.4] Interfaces to the native SETL parser
-- procedure parze_expr(stg); -- preliminary printing/diagnosing parse
-- procedure pahrse_expr(stg); -- parse with check of syntactic restrictions
-- procedure collect_fun_and_pred_arity(node,dno); -- collect the arity of functions and predicates (main entry)
-- procedure collect_fun_and_pred_arity_in(node,bound_vars); -- collect the arity of functions and predicates (workhorse)
--(B) ***** Master control of in-proof inferencing and related utilities *****
--
-- [B.0] Initial read-in of digested proof and theory related files
--
--		 	procedure read_proof_data();		-- ensure that the list of digested_proofs, etc., has been read in
-- procedure init_proofs_and_theories();
-- procedure check_proofs(list_of_proofs); -- check ELEM and discharge inferences in given range
-- [B.1] Master entry for checking inferences in a single proof
-- procedure check_a_proof(proofno); -- read a given proof and check all its inferences
-- [B.2] conjunction building for inference checking
-- procedure form_elem_conj(hint,statement_stack); -- build conjunction to use in ELEM-type deductions
--		 	procedure build_conj(citation_restriction,statement_stack,final_stat);		-- context-stack to conjunction conversion for inferencing in general
-- [B.3] Definition checking
-- procedure check_definitions(start_point,end_point); -- check the definition citation inferences in the indicated range
-- Checking of recursive definitions
-- procedure recursive_definition_OK(symbdef,arglist,right_part); -- check a recursive definition for syntactic validity
-- procedure recursive_definition_OK_in(node,var_bindings); -- check a recursive definition for syntactic validity (inner workhorse)
-- [B.4] Removal of internal labels from proof lines
-- procedure drop_labels(stg); -- finds location of Statnnn: in string, if any. These labels are dropped, and positions of first characters are returned
-- [B.5] Interface to the MLSS routines
-- procedure test_conj(conj); -- test a conjunct for satisfiability
-- procedure show_conj();
-- procedure conjoin_last_neg(stat_tup); -- invert the last element of a collection of clauses and rewrite using 'ands'
-- procedure conjoin_last_neg_nosemi(stat_tup);
--		 	procedure conjoin_last(stat_tup);		-- rewrite a collection of clauses using 'ands'
-- procedure collect_conjuncts(tree); -- extract all top-level terms from a conjunction
--		 	procedure collect_equalities(tree);		-- extract all top-level equalities from a conjunction
-- [B.6] Routine to disable/enable checking of particular classes of inference
-- procedure disable_inferences(stg); -- disables and re-enables the various kinds of inferences
--(C) ***** Library of routines which handle details of individual inference classes *****
--
-- [C.1] Discharge inference checking
-- procedure check_discharge(statement_stack,prior_suppose_m1,stat_in_discharge,discharge_stat_no,hint);
-- [C.2] Statement citation inference checking
-- procedure check_a_citation_inf(count,statement_cited,statement_stack,hbk,citation_restriction,piece);
-- [C.3] Theorem citation inference checking
-- procedure check_a_tsubst_inf(count,theorem_id,statement_stack,hbk,piece,statno); -- check a single tsubst inference
-- [C.4] Equality inference checking
-- procedure check_an_equals_inf(conj,stat,statement_stack,hint,stat_no); -- handle a single equality inference
-- procedure extract_equalities(conj); -- extract and arrange all top-level equalities and equivalences from a conjunction
-- [C.5] Algebra inference checking
-- procedure check_an_algebra_inf(conj,stat); -- handle a single algebraic inference
-- procedure alg_node_bottoms(tree,plus_op); -- return list of algebraic node base elements for given node
-- procedure alg_node_bottoms_in(tree,plus_op); -- recursive workhorse
-- [C.6] Simplification inference checking
-- procedure check_a_simplf_inf(statement_stack,stat,stat_no,hint,restr); -- handle a single set-theoretic standardization inference
-- [C.7] Use_def inference checking
-- procedure check_a_use_def(statement_stack,stat,theorem_id,hint,j); -- check a single Use_def inference
-- procedure get_symbol_def(symb,thry); -- get the definition of a symbol, in 'thry' or any of its ancestors
-- [C.8] Suppose_not inference checking
-- procedure check_suppose_not(supnot_trip,pno,th_id); -- check the suppose_not inferences in the indicated range
-- [C.9] Monotonicity inference checking
-- procedure check_a_monot_inf(count,statement_stack,hbk,theorem_id); -- check a single Set_monot inference
-- [C.10] Loc_def inference checking
-- procedure check_a_loc_def(statement_stack,theorem_id); -- check a single Loc_def inference
-- [C.11] Assumption inference checking for theorems within theories
-- procedure check_an_assump_inf(conj,stat,theorem_id); -- handle a single 'Assumption' inference
-- [C.12] THEORY application inference checking (outside proofs)
-- procedure check_an_apply_inf(next_thm_name,theory_name,apply_params,apply_outputs);
-- procedure check_apply_syntax(text_of_apply); -- special processing for global definitions by "APPLY"
-- APPLY conclusion checking (for THEORY application outside proofs)
-- procedure test_conclusion_follows(next_thm_name,desired_concl); -- test desired conclusion of a top-level APPLY inference
-- [C.12a] THEORY application inference checking (within proofs)
-- procedure check_an_apply_inf_inproof(thm_name,stat_stack,theory_name,apply_params,apply_outputs);
-- Syntax checking for THEORY application
-- procedure test_apply_syntax(theory_name,apply_params,apply_outputs);
-- Analysis of APPLY hints within proofs
-- procedure decompose_apply_hint(hint); -- decomposes the 'hint' portion of an APPLY statement
--				Finding the variables to be substituted for APPLY _thryvars
-- procedure get_apply_output_params(apply_outputs,hint); -- decompose and validate apply_outputs, returning them as a tuple of pairs
-- [C.13] Skolemization inference checking (outside proofs)
-- procedure check_a_skolem_inf(next_thm_name,theorem_to_derive,apply_params,apply_outputs);
-- procedure check_skolem_conclusion_tree(desired_conclusion,apply_params,apply_outputs);
-- procedure build_Skolem_hypothesis(desired_conclusion,tree,split_apply_outputs);
-- [C.13a] Skolemization inference checking (within proofs)
-- procedure check_a_Skolem_inf_inproof(stat_stack,theory_name,apply_params,apply_outputs);
-- procedure conclusion_follows(theory_name,apply_outputs,apply_params_parsed,conclusion_wanted);
-- procedure build_required_hypothesis(hypotheses_list,apply_params_parsed);
-- procedure def_as_eq(symb,args_and_def); -- rebuild a definition as an equality or equivalence
-- procedure tree_is_boolean(tree); -- test a tree to see if its value is boolean
--(D) ***** Interfacing to external provers *****
--
-- [D.1] Interface to 'Otter' prover
--
-- procedure check_an_external_theory(th,assumed_fcns,assumps_and_thms); -- check declaration of an external THEORY
--		 	procedure otter_clean(line);		-- removes otter comments and formula_to_use lines
-- procedure check_an_otter_theory(th,assumed_fcns,assumps,thms); -- check declaration of an otter THEORY
-- procedure suppress_thryvar(stg); -- suppresses all instances of '_THRYVAR' in string
-- procedure otter_to_ref(otter_item,otfile_name); -- converts an otter item to SETL syntax; returns unparsed tree or OM
--(E) ***** Miscellaneous utilities *****
-- [E.1] Formula feature search utilities
-- procedure find_free_vars_and_fcns(node); -- find the free variables and function symbols in a tree (main entry)
-- procedure find_free_vars_and_fcns_in(node,bound_vars); -- find the free variables in a tree (recursive workhorse)
-- procedure find_quantifier_bindings(node); -- find the variable bindings at the top of an iterator tree
-- procedure list_of_vars_defined(theory_in,kind_hint_stat_tup); -- find the ordered list of variables defined in a proof
-- procedure trim_front(stg); -- removes leading whitespace
-- procedure front_label(stg); -- finds prefixed Statnnn: in string, if any and returns it
-- procedure loc_break(stg); -- finds location of ==> in string, if any; returns pos. of last character
-- procedure split_at_bare_commas(stg); -- splits a string at commas not included in brackets
-- procedure trim(line); -- trim off whitespace
-- procedure paren_check(stg); -- preliminary parenthesis-check
-- procedure find_defined_symbols(); -- extracts the sequence of definitions, theorems, and theories from a scenario file
-- procedure strip_quants(tree,nquants); -- strip a specified number of quantifiers from a formula
-- procedure strip_quants_in(tree,nquants); -- strip a specified number of quantifiers from a formula
-- procedure setup_vars(il,offs); -- 'offs' flags nature of variable returned (universal or existential)
-- procedure conjoin_iters(list_of_iters,quant_body); -- conjoin list of iterators to formula body
-- procedure ordered_free_vars(node); -- find the free variables in a tree, in order of occurrence (main entry)
-- procedure ordered_free_vars_in(node,bound_vars); -- find the free variables in a tree (recursive workhorse)
-- procedure remove_arguments(node,fcn_list); -- remove arguments from list of functions
-- procedure remove_arguments_in(node,fcn_list,bound_vars); -- remove arguments from list of functions (recursive workhorse)
-- procedure symbol_occurences(tree,symbol_list);
-- procedure symbol_occurences_in(node,symbol_list,bound_vars);
-- procedure free_vars_and_fcns(thm); -- get all the free variables and functions of a theorem
-- procedure fully_quantified(theory_nm,thm); -- construct the fully quantified form of a theorem in a theory
-- procedure fully_quantified_external(theory_nm,thm);
-- procedure freevars_of_theorem(theory_nm,thm); -- find the quantifiable free variables of a theorem, given theory
-- procedure not_all_alph(stg);
-- [E.2] Utilities for statistical analysis of the proof scenarios
-- procedure get_hints(proofno1,proofno2); -- examine the hints which occur in a given range of proofs and report statistics
-- procedure view_theorem_citations(proofno1,proofno2); -- count the number of theorem citations in a given range, and print them
-- procedure inspect_proofs(tup_of_numbers); -- inspect a specified list of proofs, from digested_proof_file
--(F) ***** Test Program collection for Ref top level routines *****
-- procedure do_tests3(); -- do tests for this package
-- procedure test_check_a_skolem_inf; -- test of check_a_skolem_inf function
-- procedure test_check_an_apply_inf; -- test of check_an_apply_inf function
--
-- *******************************************
-- *************** Main Package **************
-- *******************************************
package verifier_top_level; -- top level routines of logic verifier
var statement_tuple_being_searched,known_criticals; -- full tuple of statements being searched during automated context optimization
	var save_optimizing_now,optimizing_now := false;		-- flag indicating optimization; suppresses error diagnosis
	var extra_conj;							-- supplementary clause, to be added during search
var show_details := false;
var detail_limit := 300000; -- cutoff for statement citation examination
var error_count := 0; -- accumulated error count for a single proof, to be reported
var total_err_count := 0,total_fully_verified_proofs := 0; -- total error and correct proof count for reporting
var disable_elem := false, disable_tsubst := false, disable_algebra := false, disable_simplf := false;
var disable_discharge := false,disable_citation := false,disable_monot := false,disable_apply := false;
var disable_loc_def := false,disable_equal := false,disable_use_def := false,disable_assump := false;
-- flags for disabling various kinds of inference during testing
-- globals for the THEORY mechanism
var parent_of_theory := {},last_theory_entered := "Set_theory",theors_defs_of_theory := {};
var theory_of := {}; -- maps each definition and theorem into its theory
var theory_of_section_no := {}; -- maps each section into its theory
var show_error_now := true; -- flag for error print in test_conj
var theorem_list,theorem_name_to_number; -- list of theorem names in order, and inverse
var defs_of_theory := {},defsymbs_of_theory := {},defconsts_of_theory := {};
var def_in_theory := {}; -- maps theory name into set of symbols defined in theory
var theorem_templates := OM; -- for automatic analysis of theorem citations
var topsym_to_tname := {}; -- maps top symbol of theorem conclusion conjunct to name of theorem
	var def_tuple;							-- the tuple of pairs written to definition_tup_file
var use_proof_by_structure := false; -- switch: should proof_by_structure be used in the current proof?
const lowcase_form := {["#","ast_nelt"],["DOMAIN","ast_domain"],["RANGE","ast_range"],["POW","ast_pow"],["AST_ENUM_TUP","ast_enum_tup"],["[]","ast_enum_tup"]};
var assumps_and_consts_of_theory := { -- Set_theory is initial endowment
["Set_theory",[["arb(x)","s_inf"],["(FORALL x in OM | ((x = 0) and (arb(x) = 0)) or ((arb(x) in x) & (arb(x) * x = 0)))",
"s_inf /= 0 and (FORALL x in s_inf | {x} in s_inf)"],["arb(x)","s_inf"]]]};
-- **********************************************************************************************
-- ********** the MASTER SCENARIO PARSING ENTRY; does syntax checking **********
-- ********** This produces all the files used subsequently for verification checking: **********
-- ********** namely the suppose_not_file, digested_proof_file,theorem_map_file **********
-- ******** See comment at the start of 'parse_file' for details of scenario file syntax ********
-- **********************************************************************************************
procedure verifier_invoker(uid); -- master invocation routine
-- procedure parse_scenario(file_name,first_proof,last_proof); -- parse a specified Defs_w_proofs file
-- procedure parse_Defs_w_proofs(lines_tup); -- parse a raw scenario, producing all the files used subsequently
procedure disable_inferences(stg); -- disables and re-enables the various kinds of inferences
-- include a '*' in the list to disable by default
procedure find_defined_symbols(); -- extracts the sequence of definitions, theorems, and theories from a scenario file
procedure collect_fun_and_pred_arity(node,dno); -- collect the arity of functions and predicates (main entry)
procedure test_conj(conj); -- test a conjunct for satisfiability
procedure check_proofs(list_of_proofs); -- check all inferences in given range
procedure check_suppose_not(supnot_trip,pno,th_id); -- check the indicated suppose_not inference (initial statements of proof)
procedure check_definitions(start_point,end_point); -- check the definitions in the indicated range
procedure check_an_equals_inf(conj,stat,statement_stack,hint,stat_no); -- handle a single equality inference
-- *********** lower-level routines ***********
procedure strip_quants(tree,nquants); -- strip a specified number of quantifiers from a formula
-- *********** auxiliaries for having a glimpse at the digested_proof_file and related items ***********
	procedure inspect_proofs(tup_of_numbers);		-- inspect a specified list of proofs, from digested_proof_file
procedure get_hints(proofno1,proofno2); -- read the hints which occur in a given proof
procedure view_theorem_citations(proofno1,proofno2); -- count the number of theorem citations in a given range, and print them
-- *********** procedures for APPLY inference checking ***********
procedure check_a_skolem_inf(next_thm_name,theorem_to_derive,apply_params,apply_outputs);
-- checks a skolem inference
procedure check_an_apply_inf(next_thm_name,theory_name,apply_params,apply_outputs);
-- checks a non-skolem APPLY inference
procedure ordered_free_vars(node); -- find the free variables in a tree, in order of occurrence (main entry)
-- *********** procedures for ALGEBRA inference checking ***********
procedure check_an_algebra_inf(conj,stat); -- handle a single algebraic inference
procedure alg_node_bottoms(tree,plus_op); -- return list of algebraic node base elements for given node
-- *********** procedures for external prover interface ***********
procedure otter_to_ref(otter_item,otfile_name); -- converts an otter item to SETL syntax
-- ********** Miscellaneous utilities. **********
procedure read_range(stg); -- convert to list of proofs to be printed; force range indicator to legal form and return it
-- procedure first_last(stg); -- reduce dotted ranges to first and last elements, and read
-- *********** test procedures ***********
procedure miscellaneous_tests; -- repository for miscellaneous top-level logic verifier tests under development
end verifier_top_level;
package body verifier_top_level; -- top level routines of logic verifier
	-- *********** auxiliaries for having a glimpse at the digested_proof_file and related items ***********
-- procedure inspect_proofs(tup_of_numbers); -- inspect a specified list of proofs, from digested_proof_file
-- ********** Decomposition of scenario lines into hints, labels, formula, etc. **********
-- procedure digest_proof_lines(proof_sect_num); -- finds location of ==> in string, if any; position of last character is returned
-- procedure drop_labels(stg); -- finds location of Statnnn: in string, if any. These labels are dropped, and positions of first characters are returned
-- procedure front_label(stg); -- finds prefixed Statnnn: in string, if any and returns it; otherwise returns an empty string
-- procedure loc_break(stg); -- finds location of ==> in string, if any; position of last character is returned
-- procedure theorem_text(sect_no); -- gets stripped text of theorem
-- procedure definition_text(sect_no); -- gets stripped text of definition
-- procedure take_section(now_in,current_section); -- collects section
-- procedure split_at_bare_commas(stg); -- splits a string at commas not included in brackets
-- procedure get_hints(proofno1,proofno2); -- read the hints which occur in a given proof
-- procedure view_theorem_citations(proofno1,proofno2); -- count the number of theorem citations in a given range, and print them
-- ********** Collect symbols used and their properties. **********
-- procedure collect_fun_and_pred_arity(node,dno); -- collect the arity of functions and predicates (main entry)
-- procedure collect_fun_and_pred_arity_in(node,bound_vars); -- collect the arity of functions and predicates (workhorse)
-- procedure find_defined_symbols(); -- extracts the sequence of definitions, theorems, and theories from a scenario file
-- procedure parse_file(lines_tup); -- extracts the sequence of definitions, theorems, proofs, and theories from a scenario file
-- procedure parze_expr(stg); -- preliminary printing/diagnosing parse
-- procedure check_theorem_map(); -- check the syntax of all the theorems written to the theorem_map_file
-- ********** inference tests, to be moved to logic_parser_pak after initial tests **********
-- procedure check_suppose_not(supnot_trip,pno,th_id); -- check the indicated suppose_not inference (initial statements of proof)
-- procedure check_a_proof(proofno); -- read a given proof and check its ELEM conclusions
-- procedure conjoin_last_neg(stat_tup); -- invert the last element of a collection of clauses and rewrite using 'ands'
-- procedure conjoin_last_neg_nosemi(stat_tup); -- invert the last element of a collection of clauses and rewrite using 'ands'
-- procedure check_discharge(statement_stack,prior_suppose_m1,stat_in_discharge,discharge_stat_no,hint); -- checks a discharge operation
use string_utility_pak,get_lines_pak,parser,logic_syntax_analysis_pak,sort_pak,proof_by_computation,logic_syntax_analysis_pak2;
use logic_parser_globals,prynter,algebraic_parser,proof_by_structure; -- need access to parser global flags,etc.
const oc_per_ms := 8000; -- approximate SETL opcodes per millisecond on windows server
const refot := {["<->"," ¥eq "],["->"," ¥imp "],["!="," /= "],["|"," or "]}; -- for conversion of Otter syntax to Ref
-- const refot := {["<->"," •eq "],["->"," •imp "],["!="," /= "],["|"," or "]}; -- for conversion of Otter syntax to Ref (Mac version)
const definition_section := 1, theorem_section := 2, proof_section := 3, theory_section := 4, apply_section := 5, enter_section := 6, unused_section := 10;
-- constant designating the kind of section we are currently in
const otter_op_precedence := {["-",11],["->",6],["<->",6],["|",7],["&",8],["=",10],["!=",10],["",22],
[",",1],["(",-1],["[",-1],["{",-1],["$0",30],["$1",30],["$2",30]};
-- otter op precedences
const otter_can_be_monadic := {"-"}; -- otter negation
const otter_refversion := {["-","ast_not"],["|","ast_or"],["->","DOT_IMP"],["<->","DOT_EQ"],["","ast_of"],
["=","ast_eq"],[",","ast_list"],["&","AMP_"],["!=","/="]};
	-- globals used in 'parse_file' and 'take_section'. See comment in 'take_section' for explanation
var sections,def_sections,theorem_sections,proof_sections,theory_sections,apply_sections,enter_sections;
var enter_secno_map := {}; -- maps theory names into section numbers of statements which ENTER the THEORY
var num_sections := 0,last_thm_num;
var is_auto; -- flag for the 'auto' special case
	var auto_gen_stat;						-- for return of AUTO generated statements during citation inferencing
var was_just_qed := false,incomplete_proofs := []; -- global flag for detecting proof ends; list of unfinished proofs
var arity_symbol := {},symbol_definition := {}; -- map each defined symbol into its arity and definition
var symbol_def := {}; -- map each defined symbol into its arity and definition
var left_of_def := {}; -- map each defined symbol defined by DEF() into the parsed left-hand-side of its definition
	var left_of_def_found;					-- nontrivial left-hand side found on definition lookup
var current_def; -- current definition global for collect_fun_and_pred_arity
var try_harder := false,squash_details := false; -- flag to allow extra time in satisfiability search
var ok_counts := ok_discharges := 0,nelocs,ndlocs; -- counts of verified cases
var conjoined_statements,neglast; -- elements of conjunction, for listing on failure
var tested_ok; -- indicates result of conjunction satisfiability test
var current_proofno; -- for selective debugging
var all_free_vars,all_fcns; -- globals for 'find_free_vars_and_fcns' function
var first_proof_to_check,last_proof_to_check; -- limits for range of proofs to be checked syntactically
var digested_proof_handle := OM,theorem_map_handle := OM,theorem_sno_map_handle := OM,theory_data_handle := OM;
-- handle for file of digested proofs, theorem_map, theorem_sno_map
var digested_proofs,theorem_map,theorem_sno_map,inverse_theorem_sno_map; -- globals for proof checking
const omit_citation_set := {}; -- theorem citation cases to be bypassed
var nelocs_total := 0, ok_total := 0; -- to get total OKs in ELEM inferences
var ndisch_total := 0, ok_disch_total := 0; -- to get total OKs in discharge inferences
	var citation_check_count := 0,citation_err_count := 0;		-- to track errors in theorem citation inferences
var ordrd_free_vars; -- for finding free variables of a formula in order of occurrence
var list_of_symbol_occurences; -- for finding free symbols of a formula
var labeled_pieces := {}; -- maps proof labels into labeled pieces of statements
var theorems_file; -- string designating the raw scenario file (path included)
var definitions_handle;
var suppose_not_map; -- maps proof number into triple [suppose_vars,suppose_stat,thm]
var optimization_mark := false; -- 1/0 on proof steps, indicating optimization desired
var optimize_whole_theorem := false; -- flag: optimize by default?
var range_to_pretty; -- string descriptor of range of theorems to be prettyprinted
-- var running_on_server := true; -- flag: are we running on the server, or standalone
const just_om_set := {"OM"}; -- pointless descriptor since it applies to everything
	var addnal_assertions := "";			-- additional assertions supplied by proof_by_structure; vars_to_descriptors used
var statement_stack; -- made global for debugging only
var use_pr_by_str_set := {}; -- collection of section numbers for proofs in which proof_by_structure is to be used
--procedure geht_lines(file); printy(["opening file: ",file]); handl := open(file,"TEXT-IN"); if handl = OM then printy(["bad open"]); stop; end if; close(handl); printy(["closed"]); end geht_lines;
-- *******************************************************************************
-- ******** MAIN ENTRY TO THE LOGIC SYSTEM, CALLED BY THE PHP SERVER CODE ********
-- *******************************************************************************
-- THIS IS SET UP TO ALLOW THE VERIFIER TO BE EXECUTED IN A 'STANDALONE' MODE ALSO
-- *******************************************************************************
--procedure unicode_unpahrse(tree); -- ******** unparse in one of two modes ********
-- return if running_on_server then unicode_unparse(tree) else unparse(tree) end if;
--end unicode_unpahrse;
procedure verifier_invoker(uid); -- ******** master invocation routine ********
		-- first determine the location of the input, working, and output files from the uid.
-- if this is OM, it is assumed that we are running standalone.
abend_trap := stoprun; -- set an abend for debugging after possible crash
uid_copy := uid;
span(uid_copy,".,0123456789"); -- determine if the supposed uid is actually a range of theorems to be checked
if uid_copy /= "" then -- we are running on the server, so the file access depends on the user id
-- otherwise the nominal uid is actually the list of proofs to be checked,
-- given in the same form as for the web environment
running_on_server := true;
command_file := uid + "F0"; -- command line for this user
-- use the command_line parameters to set up a call to the main logic verifier
-- because of command_line problems in the php version these are read from a file set up by the php
command_handle := open(command_file,"TEXT-IN");
geta(command_handle,cl); -- get the information described below
close(command_handle); -- release the small file that it comes from
-- the information supplied gives user_prefix,share_flag,range_to_check,files_list,range_to_pretty
-- in blank-delimited format
[user_prefix,share_flag,range_to_check,files_list,range_to_pretty] := breakup(cl," ");
--print("user_prefix: ",[user_prefix,share_flag,range_to_check,files_list]); stop;
files_list := breakup(files_list,",");
range_to_check := read_range(range_to_check)?[]; -- convert to list of proofs to be printed; use empty range if range is bad
dump_theorems_handle := open(uid + "theorem_summary","TEXT-OUT"); -- open handles to the working files to be used
dump_defs_handle := open(uid + "definition_summary","TEXT-OUT");
dump_theories_handle := open(uid + "theory_summary","TEXT-OUT");
dump_theorems_flag := (range_to_check = [1]); -- dump theorems in this case
--print("FILE SETUP");
--- set up the list of files to be loaded. This will include the common scenario if the share flag is set,
			-- as well as whatever supplementary scenario files the user has uploaded to the server
files_to_load := if share_flag = "Share" then ["common_scenario.txt"] else [] end if;
files_to_load +:= [user_prefix + "F" + j: fname = files_list(j) | fname /= "" ];
files_to_load with:= (upf4 := user_prefix + "F4"); -- include the final scenario segment
close(open(uid + "Err","TEXT-OUT")); -- clear the two error files that might be used
close(open(uid + "Ferr","TEXT-OUT"));
		else			-- we are running standalone in the setlide folder;
			-- the nominal uid is then actually the list of proofs to be checked,
			-- given in the same form as for the web environment
running_on_server := false;
user_prefix := file_path := "jacks_not_in_distribution/aetnaNova_paks_eug_visit/"; -- folder containing files
range_to_check := read_range(uid)?[]; -- set the range to check from the parameter
dump_theorems_handle := open(file_path + "theorem_summary","TEXT-OUT"); -- open handles for the files to which the theorems will be dumped
dump_defs_handle := open(file_path + "definition_summary","TEXT-OUT");
dump_theories_handle := open(file_path + "theory_summary","TEXT-OUT");
dump_theorems_flag := (range_to_check = [1]); -- dump theorems in this case
			dump_theorems_handle2 := open(file_path + "theorem_summary2","TEXT-OUT");		-- in the standalone case, a dump in raw (unpretty) form is also generated
			dump_theories_handle2 := open(file_path + "theory_summary2","TEXT-OUT");		-- likewise for the theory summary
--- set up the list of files to be loaded. This will include the common scenario if the share flag is set,
			-- as well as whatever supplementary scenario files the user has uploaded to the server
files_to_load := [file_path + "common_scenario.txt",file_path + "supp_scenario.txt"]; -- allow two pieces
end if;
--print("files_to_load: ",files_to_load," ",user_prefix," ",#get_lines(files_to_load(1))," ",#get_lines(files_to_load(2)));
-- concatenate all the files constituting the overall scenario. There are up to 5 pieces.
lines := [] +/ [if file /= upf4 then get_lines(file) else [fix_line(line): line in get_lines(file)] end if: file = files_to_load(j)];
--printy(["lines: "," ",files_to_load," ",[get_lines(file): file in files_to_load],"*",share_flag,"*"]); stop;
starting_time := time(); cycles_inferencing := 0; -- note the starting time of the run
-- cycles_inferencing will be accumulated
parse_Defs_w_proofs(lines); -- parse the assembled lines
if was_syntax_error then print("STOPPED ON SYNTAX ERROR *******"); stop; end if;
close(dump_theorems_handle); -- done with this file
close(dump_defs_handle); -- done with this file
close(dump_theories_handle); -- done with this file
if not running_on_server then -- also close the raw form dump files if not on server
close(dump_theorems_handle2);
close(dump_theories_handle2);
-- def_tuple, collected by parse_Defs_w_proofs, has entries [theory_name,definition];
-- reconstruct it as a map {[symbol_defined,[theory,definition]]}; check the single_valuedness of this map;
			-- and then (subsequently) generate a map {[symbol_defined,symbols_used]} for graphical display
-- 'theory' is the theory to which the symbol belongs.
--for [theory,definition] in def_tuple loop print("get_symbols_from_def: ",definition," ",get_symbols_from_def(definition)); end loop;
symbs_to_defs := {[symb,[theory,definition]]: [theory,definition] in def_tuple,symb in get_symbols_from_def(definition)};
symbs_to_defs_as_setmap := {[x,symbs_to_defs{x}]: x in domain(symbs_to_defs)};
multiply_defined := {x: [x,y] in symbs_to_defs_as_setmap | #y > 1}; -- check for symbols with more than one definition
if multiply_defined /= {} then -- there are multiply defined symbols: diagnose them and note error
printy(["\n****** Error: the following symbols are multiply defined: " + multiply_defined + ". They have the following definitions: "]);
error_count +:= 1; -- note the error
for symb in multiply_defined loop
printy(["<P>Multiple definitions for symbol",symb]);
for [theory,definition] in symbs_to_defs_as_setmap(symb) loop
printy(["<br> ",definition]);
end loop;
end loop;
symbs_to_defs := {[x,arb(y)]: [x,y] in symbs_to_defs_as_setmap}; -- force this map to be single-valued
end if;
dump_defs_handle2 := open(file_path + "definition_summary2","TEXT-OUT"); -- target file for this form of definition dump
printa(dump_defs_handle2,symbs_to_defs); -- write out the map {[symbol_defined,[theory,definition]]}
close(dump_defs_handle2); -- release the file
end if;
suppose_not_lines := get_lines(user_prefix + "suppose_not_file"); -- get the suppose_not lines for the various proofs
suppose_not_map := {}; -- convert to map by converting strings to tuples
for lin in suppose_not_lines loop reads(lin,x); suppose_not_map with:= [x(4),x(1..3)]; end loop;
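		-- each suppose_not_file line holds a 4-tuple [suppose_vars,suppose_stat,thm,proof_number]; e.g. a
		-- (hypothetical) line reading [["x"],"not(P(x))","T1",7] gives suppose_not_map(7) = [["x"],"not(P(x))","T1"]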
--print("suppose_not_map: ",suppose_not_map);
post_alg_roles("DOT_S_PLUS,+,S_0,0,S_1,1,DOT_S_MINUS,-,S_REV,--,DOT_S_TIMES,*,SI,ring");
post_alg_roles("DOT_PLUS,+,DOT_TIMES,*,0,0,1,1,DOT_MINUS,OM,ZA,ring");
post_alg_roles("DOT_RA_PLUS,+,RA_0,0,RA_1,1,DOT_RA_MINUS,-,RA_REV,--,DOT_RA_TIMES,*,RA,ring");
post_alg_roles("DOT_R_PLUS,+,R_0,0,R_1,1,DOT_R_MINUS,-,R_REV,--,DOT_R_TIMES,*,RE,ring");
-- post_alg_roles("DOT_C_PLUS,+,C_0,0,C_1,1,DOT_C_MINUS,-,C_REV,--,DOT_C_TIMES,*,CM,ring");
--print("SERVER no syntax error*********");
check_proofs(range_to_check); -- check the indicated range of proofs
--print("SERVER checked_proofs*********"); stop;
ending_time := time(); -- note the ending time of the run
[hrs,mins,secs] := [unstr(x): x in breakup(starting_time,":")]; sts := 3600 * hrs + 60 * mins + secs;
[hrs,mins,secs] := [unstr(x): x in breakup(ending_time,":")]; ets := 3600 * hrs + 60 * mins + secs;
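		-- e.g. a starting_time of "12:34:56" gives sts = 3600 * 12 + 60 * 34 + 56 = 45296 seconds since midnight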
printy(["\n<BR>Starting time of run: ",starting_time," ended at: ",ending_time," elapsed time in seconds: ",ets - sts]);
printy(["\n<BR>Nominal milliseconds of run: ",opcode_count()/ oc_per_ms," millisecs in basic inferencing: ",cycles_inferencing/oc_per_ms]);
if dump_theorems_flag then -- if the theorem list is being displayed then
citation_analysis(if running_on_server then uid else file_path end if);
			-- prepare a citation analysis table using the three semidigested files prepared from the scenarios
end if;
err_ohandle ?:= open(uid + "Err","TEXT-OUT"); -- open handle for detailed error output if not already open
printa(err_ohandle,"</DIV>"); --close the last textarea(no) and division left open by the SETL code
printa(err_ohandle,"<script>");
		for l_item = divlist(jd) loop		-- show the first error detail report and hide the rest
printa(err_ohandle,"document.getElementById('" + l_item + "').style.display = " + if jd = 1 then "'block'" else "'none'" end if + ";");
end loop;
printa(err_ohandle,"misc_style = document.getElementById('misc').style;");
printa(err_ohandle,"good_cases_style = document.getElementById('good_cases').style;");
printa(err_ohandle,"bad_summary_style = document.getElementById('bad_summary').style;");
printa(err_ohandle,"bad_details_style = document.getElementById('bad_details').style;");
printa(err_ohandle,"thm_summary_style = document.getElementById('thm_summary').style;");
printa(err_ohandle,"def_summary_style = document.getElementById('def_summary').style;");
printa(err_ohandle,"thry_summary_style = document.getElementById('thry_summary').style;");
printa(err_ohandle,"citations_style = document.getElementById('citations_div').style;");
printa(err_ohandle,"current_errcase_shown = '';"); -- keep track of the current error case being shown in detail
printa(err_ohandle,"display_bad_cases();"); -- show the bad cases division
printa(err_ohandle,"function display_good_cases() {"); -- show the good cases division
printa(err_ohandle,"clear_all_divs();");
printa(err_ohandle,"good_cases_style.display = 'block';");
printa(err_ohandle,"}");
printa(err_ohandle,"function display_bad_cases() {"); -- show the bad cases division
printa(err_ohandle,"clear_all_divs();");
printa(err_ohandle,"bad_summary_style.display = 'block';");
printa(err_ohandle,"bad_details_style.display = 'block';");
printa(err_ohandle,"}");
printa(err_ohandle,"function display_misc_info() {"); -- show the misc. info division
printa(err_ohandle,"clear_all_divs();");
printa(err_ohandle,"misc_style.display = 'block';");
printa(err_ohandle,"}");
printa(err_ohandle,"function butt_click(div_id) {"); -- turn of prior detail id and turn on new
printa(err_ohandle,"if (current_errcase_shown != '') {document.getElementById(current_errcase_shown).style.display = 'none';};");
printa(err_ohandle,"current_errcase_shown = div_id;");
printa(err_ohandle,"document.getElementById(div_id).style.display = 'block';");
printa(err_ohandle,"}");
printa(err_ohandle,"function display_thm_summary() {"); -- display the theorem summary
printa(err_ohandle,"clear_all_divs();");
printa(err_ohandle,"thm_summary_style.display = 'block';");
printa(err_ohandle,"}");
printa(err_ohandle,"function display_def_summary() {"); -- display the definitions summary
printa(err_ohandle,"clear_all_divs();");
printa(err_ohandle,"def_summary_style.display = 'block';");
printa(err_ohandle,"}");
printa(err_ohandle,"function display_thry_summary() {"); -- display the definitions summary
printa(err_ohandle,"clear_all_divs();");
printa(err_ohandle,"thry_summary_style.display = 'block';");
printa(err_ohandle,"}");
printa(err_ohandle,"function display_citations() {"); -- display the definitions summary
printa(err_ohandle,"clear_all_divs();");
printa(err_ohandle,"citations_style.display = 'block';");
printa(err_ohandle,"}");
printa(err_ohandle,"function clear_all_divs() {"); -- clear the main divisions of the output window
printa(err_ohandle,"misc_style.display = 'none';");
printa(err_ohandle,"good_cases_style.display = 'none';");
printa(err_ohandle,"bad_summary_style.display = 'none';");
printa(err_ohandle,"bad_details_style.display = 'none';");
printa(err_ohandle,"thm_summary_style.display = 'none';");
printa(err_ohandle,"def_summary_style.display = 'none';");
printa(err_ohandle,"thry_summary_style.display = 'none';");
printa(err_ohandle,"citations_style.display = 'none';");
printa(err_ohandle,"}");
printa(err_ohandle,"</script>");
end verifier_invoker;
procedure fix_line(line); return join(breakup(line,char(149)),char(165)); end fix_line; -- undo HTML character scramble
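	-- i.e. every char(149) is replaced by char(165); presumably this restores the special operator
	-- marker used in the refot table above after HTML transmission has altered its character code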
	procedure citation_analysis(uid);		-- prepare a citation analysis table using the three semidigested files prepared from the scenarios
-- utility for preparing inter-theorem citation graph
-- uses digested_proof_file, theorem_sno_map_file, and theorem_map_file,
		-- which are all prepared by parse_and_check.stl
		-- prepares an inverted citation index of the form illustrated by:
		-- Theorem 17: Ult_membs({S}) = {S} + Ult_membs(S) is cited by: {"T18", "Ttransfinite_member_induction1"}
		-- note that if we are running standalone, then 'uid' is actually the file path being used
-- the set of theorems never cited is also reported.
		-- a direct citation index is available by enabling a print loop disabled below.
handle := open(uid + "digested_proof_file","TEXT-IN"); reada(handle,digested_proofs); close(handle);
handle := open(uid + "theorem_sno_map_file","TEXT-IN"); reada(handle,theorem_sno_map); close(handle);
handle := open(uid + "theorem_map_file","TEXT-IN"); reada(handle,theorem_name_to_stat); close(handle);
citations_handle := open(uid + "citations_file","TEXT-OUT");
sno_theorem_map := {[y,x]: [x,y] in theorem_sno_map};
thm_secnos_to_citations := {};
for pf_secno_then_hints_and_stats in digested_proofs(2..) loop
thm_name := sno_theorem_map(thm_secno := abs(pf_secno_then_hints_and_stats(1)) - 1);
hints_and_stats := pf_secno_then_hints_and_stats(2..);
thm_secnos_to_citations(thm_name) := [thm_secno,{thm_cited: [hint,-] in hints_and_stats | (thm_cited := citation_of(hint)) /= OM}];
end loop;
thms_with_citations := [[x,theorem_name_to_stat(x),y]: [-,x,y] in merge_sort([[tsecno,tname,t_cites]: [tname,[tsecno,t_cites]] in thm_secnos_to_citations])];
all_citations := {} +/ [z: [-,-,z] in thms_with_citations];
uncited_thms := [x: [x,-,-] in thms_with_citations | x notin all_citations];
-- now prepare the inverted citation index
thms_to_cited_by := {[yy,x]: [x,-,y] in thms_with_citations, yy in y};
thms_to_cited_by := merge_sort([[theorem_sno_map(yy),yy,theorem_name_to_stat(yy),thms_to_cited_by{yy}]: yy in domain(thms_to_cited_by) | theorem_sno_map(yy) /= OM]);
printa(citations_handle,"<B>Theorems never cited are:<P></B>",merge_sort(uncited_thms));
printa(citations_handle,"<B><P>Inverted citation index<P></B>");
for [-,name,stat,cited_by] in thms_to_cited_by loop
printa(citations_handle,"<B><P>Theorem ",name(2..),": </B>",stat,"<P> <B>is cited by: </B>",cited_by);
end loop;
close(citations_handle);
end citation_analysis;
procedure get_symbols_from_def(def_stg); -- get the symbol or symbols defined from the text of the definition
-- this routine must handle various styles of definition: simple definitions of the form name(params) := expn;
-- operator definitions of the form Def(X •op Y) := expn; and the corresponding monadic form;
-- definitions by theory application, of the form APPLY(thryvar1:output_symbol1,...).
-- this last form of definition can define multiple symbols simultaneously.
-- we return the tuple of defined symbols, which is a singleton in most cases.
span(def_stg,": \t"); -- scan whitespace
mayapp := match(def_stg,"APPLY_otter"); -- see if we have an APPLY case. Note that these have no label
if mayapp = "" then mayapp := match(def_stg,"APPLY"); end if; -- see if we have an APPLY case. Note that these have no label
if mayapp /= "" then -- we have an APPLY case
span(def_stg," \t"); match(def_stg,"("); outlist := break(def_stg,")"); -- get the output list of the definition
outlist := breakup(breakup(suppress_chars(outlist," \t"),","),":"); -- redo the outlist as a tuple of pairs
--print("outlist: ",outlist);
return [y: [-,y] in outlist]; -- return the list of output symbols
end if;
break(def_stg,":"); span(def_stg,": \t"); -- drop the definition label
lbr := match(def_stg,"[");
if lbr /= "" then break(def_stg,"]"); span(def_stg,"] "); end if; -- drop the definition comment if any
-- at this point we should have reached the left side of the definition
defleft := break(def_stg,":"); rspan(def_stg," \t"); -- otherwise find the separating ':=" of the definition
--print("defleft: ",defleft);
left_parsed := parse_expr(defleft + ";")(2); -- get the parse tree of the left-hand side
if is_string(left_parsed) then return [left_parsed]; end if; -- this is the definition of a constant, like Za; return it directly
opleft := left_parsed(2); -- the 'top operator' of the definition, which must be [ast_of,op,[list,args]]
if opleft /= "DEF" then return [opleft]; end if; -- if this is not "DEF", return it directly as a unary tuple
-- otherwise we must have DEF(expn) := ..., expn being either monadic or binary
return [left_parsed(3)(2)(1)]; -- e.g. [ast_of,op,[list,[op,x,y]]]; return this as a unary tuple
end get_symbols_from_def;
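-- Hedged examples (hypothetical definitions, shown only to illustrate the three styles handled above):
--   get_symbols_from_def("Def 12: next(X) := X + 1;")  -- simple definition; returns the parsed symbol as a singleton, e.g. ["NEXT"]
--   get_symbols_from_def("Def 13: Def(X •plus Y) := ...;")  -- infix operator definition; returns the operator symbol, e.g. ["DOT_PLUS"]
--   get_symbols_from_def("APPLY(thv1:card_fn,thv2:card_ord) some_theory(f->g)")  -- returns both output symbols, ["card_fn","card_ord"]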
procedure citation_of(hint); -- find the theorem cited by a hint, or return OM if it is not a theorem citation
pieces := segregate(suppress_chars(hint,"\t "),"-->T");
return if exists piece = pieces(j) | piece = "-->T" then "T" + thm_part(pieces(j + 1)) else OM end if;
end citation_of;
procedure thm_part(thm_and_constraint); -- strip any parenthesized constraint, returning the bare theorem number of a citation
front := break(thm_and_constraint,"(");
return front;
end thm_part;
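-- Hedged example (hypothetical hint): for hint = "ELEM --> T123(Stat2)", blank suppression and
-- segregation isolate the "-->T" mark, so citation_of returns "T" + thm_part("123(Stat2)") = "T123";
-- thm_part simply breaks off the part preceding the parenthesized constraint.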
procedure stoprun(); print("Stopped by error"); stop; end stoprun;
-- **************************** Procedures **************************
procedure miscellaneous_tests(); -- repository for miscellaneous top-level logic verifier tests under development
--abend_trap := lambda; for j in [1..2000000] loop x := j + 1; end loop; end lambda;
init_logic_syntax_analysis(); -- initialize for logic syntax-tree operations
-- ******************************************************************
-- *************** Auxiliary tests of single formulae ***************
-- ******************************************************************
--stg := "(Ord(s) and Ord(t) and t •incin s and t /= s and t /= arb(s - t)) and (s - t = 0 and arb(s - t) = 0 or" +
-- "(arb(s - t) in (s - t) and arb(s - t) * (s - t) = 0)) and ((not(arb(s - t) in s and arb(s - t) * (s - t) = 0)));";
--printy([parse_expr(stg)]); printy([sne := setl_num_errors()]);if sne > 0 then printy([setl_err_string(0)]); end if;
printy(["stopped due to: stop in test"]); stop;
printy([find_free_vars(parse_expr("Card(#S) and (FORALL f in {} | one_1_map(f) and range(f) = S and domain(f) = #S);"))]);
printy([(parse_expr("Card(#S) and (FORALL f in {} | one_1_map(f) and range(f) = S and domain(f) = #S);"))]);
printy(["stopped due to: stop in test"]); stop;
printy([time()," ",tree := parse_expr("{{c},{{d},d}} - {{c}} /= {{{{d},d}}};")]); -- INFERENCE CASE WHICH IS FAILING ******
printy([(model_blobbed(tree(2))?"UNSATISFIABLE")," ",time()," "]);
printy(["stopped due to: stop in test"]); stop;
get_hints(1,400);
printy(["stopped due to: stop in test"]); stop;
view_theorem_citations(241,340); stop;
-- ******************************************************************
-- **************** Major tests of multiple formulae ****************
-- ******************************************************************
--->reparse
-- parse_Defs_w_proofs();
set_output_phase(1); -- direct the following output to the main output file
printy(["<P>*********** Run Ended **********"]);
printy(["stopped due to: end of run"]); stop; -- ********* parse the Defs_w_proofs file
check_theorem_map();
printy(["stopped due to: end of run"]); stop; -- check the syntax of all the theorems written to the theorem_map_file
end miscellaneous_tests;
-- ******************************************************************
-- ***************** Top level parsing procedures *******************
-- ******************************************************************
--procedure parse_scenario(file_name,first_proof,last_proof); -- parse a specified Defs_w_proofs file
-- printy(["Parsing of: ",theorems_file := file_name," starts at;",time()]);
-- parse_Defs_w_proofs();
-- note the range of proofs to be checked syntactically
--end parse_scenario;
procedure parse_Defs_w_proofs(lines_tup); -- parse the Defs_w_proofs file
--->scenario source
-- theorems_file ?:= "Diana:Pub:Logic_repository:Defs_w_proofs_modif.pro";
-- theorems_file ?:= "Diana:Pub:Logic_repository:Defs_w_proofs_modif";
-- lines_tup := get_lines(theorems_file); -- read the full theorems_file (with definitions and theories)
printy(["Proof scenario file consists of ",#lines_tup," lines"]);
first_proof_to_check := 1; last_proof_to_check := 1000000;
parse_file(lines_tup); -- extract the sequence of definitions, theorems, proofs, and theories from the scenario file
read_proof_data(); -- ensure that the list of digested_proofs,
-- the theorem_map of theorem names to theorem statements,
-- the theorem_sno_map of theorem section numbers to theorem statements,
-- its inverse inverse_theorem_sno_map,
-- the theory-related maps parent_of_theory,theors_defs_of_theory,assumps_and_consts_of_theory
-- and the theory_of map sending each theorem and definition into its theory
-- are all available. NOTE: this checks internally to ensure that it is done only once
pretty_range := read_range(range_to_pretty?"")?[]; -- convert to list of proofs to be printed; use empty range if range is bad
-- print("<BR>Ready for prettyprinting ******** "," ",pretty_range);
for ptno in pretty_range loop print(pretty_theorem(ptno)); end loop;
end parse_Defs_w_proofs;
-- procedure find_includes(tup_of_sets,test_set);
-- return [j: set = tup_of_sets(j) | set incs test_set];
-- end find_includes;
--
-- procedure find_included(tup_of_sets,test_set);
-- return [j: set = tup_of_sets(j) | test_set incs set];
-- end find_included;
procedure pretty_theorem(theorem_num); -- returns [thm_name (with 'T'),thm_statement,thm_proof (digested into pairs)]
if (dpj := digested_proofs(theorem_num)) = OM then return ["TUNDEF","UNDEF",["UNDEF"]]; end if;
if (dpj(1) > 0) then -- allow for theorems marked for use of proof by structure, where these numbers are reversed
thm_name := othm_name := inverse_theorem_sno_map(dpj(1) - 1)?("TUNDEF" + dpj(1)); -- name of the theorem
else
thm_name := othm_name := inverse_theorem_sno_map(-(dpj(1) + 1))?("TUNDEF" + dpj(1)); -- name of the theorem
end if;
thm_name := "Theorem " + thm_name(2..) + ": ";
thm_stat := theorem_map(othm_name); -- theorem statement
thm_proof := dpj(2..); -- proof [hint,labeled_stat]s
return "<P>" + thm_name + unicode_unpahrse(parse_expr(thm_stat + ";")) + ". Proof:<BR>" +/ [pretty_line(line): line in thm_proof];
end pretty_theorem;
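-- Hedged sketch of the returned string (hypothetical theorem): a result might look roughly like
--   "<P>Theorem 17: <unicoded statement>. Proof:<BR>" followed by one pretty_line rendering per proof step.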
procedure pretty_line(proof_line_pair); -- create the pretty version of a single proof line
[hint,stat] := proof_line_pair;
-- if there is a '-->' in the hint, make sure that this is prettuprinted
pieces := segregate(hint,"->");
if pieces(2) = "-->" then -- prettyprint the prefixed section
piece1 := pieces(1);
span(piece1," \t"); rspan(piece1," \t"); -- remove any encoling whitespace
piece1a := if piece1 = "" then piece1 elseif piece1(1) = "(" then unicode_unpahrse(parse_expr("F" + piece1 + ";"))(2..) else
unicode_unpahrse(parse_expr("F(" + piece1 + ");"))(2..) end if;
hint := "" +/ ([piece1a] + pieces(2..));
--print("prefixed section hint: ",pieces(1)," ",piece1a);
end if;
-- we must also unicode the substituted functions and constants of 'APPLY statements'
span(hint," \t"); -- removed prefixed whitespace;
hint_copy := hint;
ap := match(hint_copy,"APPLY");
if ap /= "" then -- we have an APPLY statement: APPLY(..) theory_name(assumed-->replacement,..)
[theory_name,apply_params,apply_outputs] := decompose_apply_hint(hint)?[];
--print("[theory_name,apply_params,apply_outputs]: ",[theory_name,apply_params,apply_outputs]);
hint := "APPLY(" + apply_outputs + ") " + theory_name + "(" + join([unicode_apply_param(para): para in apply_params],",") + ") ";
end if;
[unlab_stat,lablocs,labs] := drop_labels(stat); -- get the unlabeled form, the label location, and the labels
--print("[unlab_stat,lablocs,labs]: ",[unlab_stat,lablocs,labs]);
if lablocs = [] then
stat := unicode_unpahrse(parse_expr(unlab_stat + ";")); -- no label
elseif lablocs = [1] then
stat := labs(1) + " " + unicode_unpahrse(parse_expr(unlab_stat + ";")); -- just one label
else
pieces := [to_unicode(unlab_stat(ll..lablocs(j + 1)?#unlab_stat),labs(j)): ll = lablocs(j)];
--print("the pieces: ",pieces);
stat := "" +/ pieces; -- multiple labels; in this case we break the satement at the label positions
-- and convert the pieces separately into unicode form,
-- then re-inserting the labels and concatenating
end if;
return 4 * " " + hint + " ==> " + stat + "<BR>";
end pretty_line;
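-- Hedged example (using the hint style shown earlier): for the pair ["a --> Stat1","x in s"]
-- the prefix "a" is unicoded via a dummy "F(...)" wrapper, the statement is reparsed and unicoded,
-- and the line comes back roughly as "    (a)--> Stat1 ==> x ∈ s<BR>".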
procedure unicode_apply_param(stg_param); -- convert item from theory application substitution list to unicode form
front := break(stg_param,">"); mid := match(stg_param,">");
return front + mid + unicode_unpahrse(parse_expr(stg_param + ";"));
end unicode_apply_param;
procedure to_unicode(stg_piece,lab); -- convert piece of formula, possibly ending in ampersand, to unicode
conj_sign := rspan(stg_piece," \t&"); -- break off the trailing ampersand
return lab + " " + unicode_unpahrse(parse_expr(stg_piece + ";")) + conj_sign;
end to_unicode;
procedure parse_file(lines_tup); -- extracts the sequence of definitions, theorems, proofs, and theories from a scenario file
-- This procedure reads and parses a raw scenario file, converting it to the 'digested' set of files
-- used during subsequent logical checking of the scenario. These are: theory_data_file, theorem_sno_map_file,
-- theorem_map_file, suppose_not_file, digested_proof_file, definition_tup_file.
-- The work of this procedure is complete when these files have been written. All the files are written in readable
-- 'plain text' format.
-- The main file read to check various kinds of inferences in the verifier is 'digested_proof_file'.
-- (1) digested_proof_file is a large list of tuples, each tuple representing one of the successive lines of a proof as a pair
-- [step_descriptor,formula], for example ["ELEM", "{z} * s_inf /= 0 & {z} * s_inf = {z}"]. The labels of internally labeled
-- statements are left in the 'formula' part of this pair, for example
-- ["a --> Stat1", "Stat2: (a in {cdr([x,f(x)]): x in s} & a notin {f(x) : x in s})
-- or (a notin {cdr([x,f(x)]): x in s} & a in {f(x) : x in s})"]
-- ******* Note however ******* that each proof-representing tuple is prefixed by an integer,
-- which is the 'section number' of the corresponding proof.
-- This number is one more than the section number of the corresponding theorem statement, which is used in the theorem_sno_map_file
-- (2) suppose_not_file is a list of quadruples [subst_list,statement_in_suppose_not,statement_in_theorem,proof_number] used to check the correctness
-- of the Suppose_not statements used to open proofs by contradiction.
-- (3) theorem_map_file contains a set of pairs giving a map of abbreviated theorem names (often of the form Tnnn)
-- into the corresponding theorem statement.
-- (4) theorem_sno_map_file contains a set of pairs giving a map of abbreviated theorem names (often of the form Tnnn)
-- into the corresponding theorem section numbers. It also contains pairs which map symbol definition names into their associated
-- section numbers, and which map theory names into their section numbers.
-- (5) definition_tup_file contains a set of triples, each giving the name of a definition, the body of the definition,
-- and the theory to which it belongs
-- (6) theory_data_file is comprised of three subparts. The first is a map of theories into their parent theories.
-- The second maps the name of each theory into the list of names of all the theorems and definitions belonging to the theory.
-- The third maps the name of each theory into a pair giving the assumed function and the assumptions of the theory.
-- This routine handles about 1,700 lines/second on a 1.6 GHz Mac G5, and about 7,000 lines/second on a modern Pentium.
-- Once syntax analysis is successful, this routine does various more detailed semi-syntactic, semi-semantic checks.
-- We allow segments of the scenario file input to be indicated by special lines starting with --BEGIN HERE
-- and --PAUSE HERE (which suspends analysis of scenario lines); also --END HERE, which terminates scenario processing.
-- Lines can be grouped together into single logical lines by including a '¬' (option l) mark at the end. Over the web this is Â
-- Comment lines starting with -- are ignored.
-- Definitions must start with either "Def " or "Def:".
-- Theorems must start with either "Theorem " or "Theorem:".
-- Theorem statements can contain a prefixed comment in square brackets, as in Theorem 251: [Zorn's Lemma] .... Proof:...;
-- Theorems without a number are given dummy numbers of the form "TUnnn";
-- Theorem statements must consist either of a single extended line, or of two such lines, the first containing the
-- theorem comment in square brackets.
-- Proofs must start with "Proof:"
-- We now start to assemble (extended) lines into sections, which can be either definition_sections,
-- theorem_sections, proof_sections, or unused_sections. Section starts are marked
-- by the appearance of one of the significant marks "Def ","Def:","Theorem ","Theorem:", or "Proof:"
-- Definition and theorem sections are also terminated by the first empty line after their opening,
-- and all following lines without one of the above headers are skipped.
-- These sections are collected into an overall tuple called 'sections'; the indices of elements
-- of various kinds (theorems, definitions, proofs, etc.) are collected into auxiliary tuples called
-- theorem_sections, def_sections, theory_sections, apply_sections, enter_sections, and proof_sections
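-- Illustrative scenario fragment (hypothetical, condensed to show the conventions above):
--   --BEGIN HERE
--   Def 3: [squaring] sq(X) := X * X;
--
--   Theorem 7: sq(0) = 0. Proof:
--   Suppose_not() ==> sq(0) /= 0
--   ELEM ==> false
--   QED
--   --END HERE
-- This fragment would yield one definition_section, one theorem_section, and one proof_section.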
var
lines_count,
use_lines,
n_ltup, line_no;
init_logic_syntax_analysis(); -- initialize for logic syntax-tree operations
lines_count := 0;
use_lines := false; -- needs to be turned on by a "--BEGIN HERE" switch
sections := []; -- Will collect the usable lines, parsing them into sections consisting of definitions, theorems, theories, and
-- theory entry and display directives. Theories consist of theory headers and theory bodies. A theory body runs from
-- the end of its header or entry directive to the next following header or entry directive.
current_section := def_sections := theorem_sections := proof_sections := theory_sections := apply_sections := enter_sections := [];
-- initialize the tuples used to collect section lines and section indices
n_ltup := #lines_tup; line_no := 0;
while (line_no +:= 1) <= n_ltup loop -- process all the scenario lines till EOF or --END HERE
--print("line_no: ",line_no," ",lines_tup(line_no));
line := lines_tup(line_no); -- get the next line
rspan(line," \t"); -- remove trailing whitespace
--while line_no < n_ltup and (nl := #line) > 0 and (line(nl) = "¬") loop -- group using line continuation symbols
while line_no < n_ltup and (nl := #line) > 0 and (line(nl) = "Â") loop -- group using line continuation symbols
line(nl..nl) := lines_tup(line_no +:= 1); -- continue as long as there is a continuation mark
rspan(line," \t"); -- remove trailing whitespace
--printy(["CONTINUING: ",line]);
end loop;
-- examine for --BEGIN HERE, --PAUSE HERE, and --END HERE marks, which delimit the ranges of lines to be considered
if not use_lines then -- look for "--BEGIN HERE"
ism := match(line,"--BEGIN HERE");
if ism /= "" then
nprint(" - processing starts at line: ",line_no);
use_lines := true;
end if; -- if found, then switch on line collection
continue;
end if;
is_off := match(line,"--PAUSE HERE");
if is_off /= "" then use_lines := false; print( " - suspended at line: ",line_no); continue; end if;
-- if found, then switch off line collection
is_end := match(line,"--END HERE");
if is_end /= "" then -- collect the final section and exit
print(" - ends at line: ",line_no);
exit;
end if;
-- Here we start assembling (the already extended) lines into sections, which can either be
-- a definition_section, theorem_section, proof_section, or unused_section depending on the
-- setting of the 'now_in' value. This value is set, and the preceding run of lines collected,
-- by the appearance of one of the significant marks "Def ","Def:","Theorem ","Theorem:", or "Proof:".
-- Definition and theorem sections are also terminated by the first empty line after their opening,
-- and all following lines without one of the above headers are skipped.
-- The 'take_section' routine called whenever a prior section is ended by the start of a new section
-- collects all the sections into an overall tuple called 'sections', and also (depending on their type) into
-- auxiliary tuples called def_sections, theorem_sections, and proof_sections
break_at_minus := breakup(line,"-"); -- ignore all comments, which must start with '--', followed by a blank or tab
if exists b = break_at_minus(kk) | b = "" and ((nb := break_at_minus(kk + 1)) = OM or (#nb > 0 and nb(1) notin ">BPE"))
then
line := join(break_at_minus(1..kk - 1),"-");
rspan(line," \t");
if line = "" then continue; end if; -- ignore lines containing comment only
end if;
line_copy := line; span(line_copy," \t"); rspan(line_copy," \t"); -- scan off whitespace
-- is_comment := match(line_copy,"--"); -- look for initial comment marks
-- if is_comment /= "" then
-- continue; -- bypass comments
-- end if;
is_def := match(line_copy,"Def "); is_def2 := match(line_copy,"Def:"); -- look for a definition header
-- check for start of definition, which may have one of two forms
-- ******* note that the 'take_section' routine takes full lines, with all tags and comments *******
if is_def /= "" or is_def2 /= "" then -- end the old section and start a new one
take_section(now_in,current_section); now_in := definition_section; current_section := [];
end if; -- the definition now runs up to the next significant or blank line
is_theorem := match(line_copy,"Theorem "); is_theorem2 := match(line_copy,"Theorem:"); -- look for a Theorem header
-- check for start of Theorem, which may have one of two forms
if is_theorem /= "" or is_theorem2 /= "" then -- end the old section and start a new one
take_section(now_in,current_section); now_in := theorem_section; current_section := [];
tpa_to_just_tp_num with:= (ntheorem_w_proofs +:= 1);
-- increment, and extend the vector mapping the all-theorems number (which counts top-level APPLYs) to the ntheorem_w_proofs number
end if; -- the theorem statement header now runs up to the next significant or blank line,
-- or to the next line (including this) ending with 'Proof'
is_proof := match(line_copy,"Proof+:"); -- look for a Proof header, allowing for proof_by_structure flag
if is_proof = "" then is_proof := match(line_copy,"Proof:"); else use_pr_by_str_set with:= (num_sections + 1); end if;
-- look for a Proof header; if it flags for use_proof_by_structure, then invert the section number prepended to the proof
if is_proof /= "" then -- end the old section and start a new one
take_section(now_in,current_section); now_in := proof_section; current_section := [];
end if; -- the theorem statement header now ends
is_theory := match(line_copy,"THEORY"); -- look for a THEORY header
if is_theory /= "" then -- end the old section and start a new one
take_section(now_in,current_section); now_in := theory_section; current_section := [];
end if; -- the theory statement header now runs up to the next significant or blank line,
is_apply := match(line_copy,"APPLY"); -- look for an APPLY statement
if is_apply /= "" and now_in /= proof_section then -- note that APPLY headers can appear within proofs
take_section(now_in,current_section); now_in := apply_section; current_section := [];
tpa_to_just_tp_num with:= ntheorem_w_proofs; -- add to vector, but don't increment
end if; -- the apply statement now runs up to the next significant or blank line,
is_enter := match(line_copy,"ENTER_THEORY"); -- look for an ENTER_THEORY statement
if is_enter /= "" then -- end the old section and start a new one
take_section(now_in,current_section); now_in := enter_section; current_section := [];
end if; -- the ENTER_THEORY statement now runs up to the next significant or blank line
if line = "" and (now_in in {definition_section,theorem_section,theory_section,apply_section,enter_section}) then
-- end the section and go to an unused_section
take_section(now_in,current_section); now_in := unused_section; current_section := [];
end if;
current_section with:= line; -- collect the line into the current section, now that we know in what section it belongs
if now_in = theorem_section then -- look for 'Proof:' at the end of lines. These terminate the statement of a theorem.
is_proof := rmatch(line_copy,"Proof+:"); -- look for a Proof header, allowing for proof_bY_structure flag
if is_proof = "" then is_proof := rmatch(line_copy,"Proof:"); else use_pr_by_str_set with:= (num_sections + 1); end if;
-- look for a Proof header; if it flags for use_proof_by_structure, then invert the section number prepended to the proof
if is_proof /= "" then -- end the old section and start a new empty one
take_section(now_in,current_section); now_in := proof_section; current_section := [];
end if; -- the theorem statement header now ends
end if;
if now_in = proof_section then -- look for 'QED' at the end of lines. These terminate the specification of a proof.
is_qed := rmatch(line_copy,"QED"); is_qed2 := rmatch(line_copy,"QED.");
if is_qed /= "" or is_qed2 /= "" then
was_just_qed := true; -- global flag for detecting proof ends
take_section(now_in,current_section); now_in := unused_section; current_section := [];
end if; -- the theorem statement header now ends
end if;
end loop; -- main while-loop of scenario processing ends here; sections have
-- been put together and will now be analyzed individually
take_section(now_in,current_section); -- take the final section if any
-- print statistics on the scenario file just analyzed
nds := #def_sections;
nts := #theorem_sections;
nps := #proof_sections;
-- printy(["Number of source lines and sections: ",#lines_tup," ",ns := #sections," num defs: ",nds," num thms: ",nts," num proofs: ",nps]);
if nds + nts + nps = 0 then
print("**** NO THEOREMS, DEFINITIONS, OR PROOFS HAVE BEEN DETECTED, possibly due to missing '--BEGIN HERE line.'");
stop;
end if;
thms_with_proofs := {n - 1: n in proof_sections}; thms_without_proofs := [n: n in theorem_sections | n notin thms_with_proofs];
printy(["thms_without_proofs: ",thms_without_proofs]); --for n in thms_without_proofs loop printy([sections(n)]); end loop;
printy(["incomplete_proofs: ",incomplete_proofs]); --for n in incomplete_proofs loop printy([sections(n)]); end loop;
-- Begin syntactic and semantic checks of all the theorems, and definitions.
-- Make sure that no symbol is defined twice. Collect the 'arity' of all defined symbols.
-- then go on to parse all the proof lines.
-- printy(["Starting theorem parse: ",time()]); -- check the syntax of all the theorems
for tno in theorem_sections loop -- 'theorem_sections' gives the section numbers of all the theorems, in order
if (pt := pahrse_expr((tt := theorem_text(tno)) + ";")) = OM then
printy(["***** Illformed theorem statement: "," ",tt]); printy(["prior theorems are: ",[theorem_text(k): k in [(tno - 10) max 1..tno]]]);
printy(["stopped due to: syntax error"]); was_syntax_error := true; return;
end if;
collect_fun_and_pred_arity(pt(2),0);
end loop;
-- printy(["Done theorem parse: ",time()]);
-- printy(["Starting definitions parse: ",time()]);
for dno in def_sections loop
dt := definition_text(dno); span(dt," \t"); -- get the text of the definition
if (ndt := #dt) > 5 and dt(1..5) = "APPLY" then check_apply_syntax(dt); continue; end if;
-- special processing for definitions by "APPLY"
if ndt = 0 then continue; end if; -- bypass blank text
dt_start := break(dt,":"); may_eq := match(dt,":•eq"); if may_eq = "" then may_eq := match(dt,":¥eq"); end if;
-- reduce predicate definitions to function form to keep parser happy
dt := dt_start + if may_eq /= "" then ":=" else "" end if + dt;
if (pt := pahrse_expr(dt + ";")) = OM then
printy(["***** Syntactically illformed definition: "," ",dt + ";"]);
printy(["stopped due to: syntax error"]); was_syntax_error := true; return;
end if;
collect_fun_and_pred_arity(pt,dno);
end loop;
--printy(["definition_text: ",["\n" + definition_text(dno): dno in def_sections]]);
-- printy(["Done definitions parse: ",time(),"\n"]);
-- print diagnostic messages for symbols with multiple arities and symbols with multiple definition
multip_arity := {[symb,arities]: symb in domain(arity_symbol) | #(arities := arity_symbol{symb}) > 1};
printy(["symbols with multiple arities: ",multip_arity]); -- check for symbols with multiple, conflicting arities
-- printy([merge_sort(arity_symbol)]); printy([merge_sort(symbol_definition)]); --
-- Now go on to parse all the proof lines............; these lines are broken up into pairs
-- [step_descriptor,formula], and the assembled list of pairs for all sections will be written to a file
-- 'digested_proof_file". This is a list of tuples of such pairs, each such tuple representing statements
-- of a proof broken up into a pair of strings [hint, body]; there is one such tuple for each hint in the scenario.
-- We also begin to write out the "suppose_not_file", consisting of quadruples
-- [subst_list,statement_in_suppose_not,statement_in_theorem,proof_number] used to check the correctness
-- of the Suppose_not statements often used to open proofs by contradiction.
-- printy(["\nStarting proofs parse: ",time()]);
num_checks := 0;
digested_proofs := []; -- will collect all the digested proofs
suppose_not_handle := open(user_prefix + "suppose_not_file","TEXT-OUT"); -- open file used for suppose_not collection
for pno = proof_sections(proofno) | pno > 0 loop -- process all the proof sections; pno is the section number
digested_proofs with:= (dpl := digest_proof_lines(pno)); -- break the proof lines into pairs [step_descriptor,formula]
labeled_pieces := {}; -- will collect labeled pieces of statements to check for duplicate labels
for hstat = dpl(k) loop -- process the lines of the proof, parsing them and collecting all the "Suppose_not" pairs
if k = 1 then continue; end if; -- ignore the theorem section number prefixed to the list of pairs
[hint,stat] := hstat; -- break pair into hint, statement
if k = 2 and #hint >= 11 and hint(1..11) = "Suppose_not" then -- Special processing for the initial 'Suppose_not' statement in the proof
hintvars := hint(12..); span(hintvars,"("); rspan(hintvars,")");
-- drop the parentheses surrounding the list of variables in the hint
-- write a quadruple representing a theorem and its following 'Suppose_not' statement to the suppose_not_file
-- the first component of the quadruple is the list of variables to be substituted for the
-- universally quantified variables of the theorem statement
printa(suppose_not_handle,[hintvars,drop_labels(stat)(1),theorem_text(pno - 1),proofno]);
end if;
[clean_stat,lab_locs,labs] := drop_labels(stat); -- take note of the labeled portions of the statement if any
for lab = labs(j) loop -- use the auxiliary 'labeled_pieces' map built here to check for duplicate labels
if labeled_pieces(lab) /= OM then printy(["****** Error: label is duplicated --- ",lab," ",clean_stat(lab_locs(j)..)]); end if;
labeled_pieces(lab) := csj := join(breakup(clean_stat(lab_locs(j)..),"&")," and ");
--print("csj: ",csj," ",parse_expr(csj + ";"));
if (pt := parse_expr(csj + ";")) = OM then
printy(["***** Syntactically illformed labeled statement portion in proof: ",dpl," ",csj]);
printy(["stopped due to: syntax error"]); was_syntax_error := true; return;
end if;
end loop;
--print("<P>clean_stat: ",clean_stat);
if proofno >= first_proof_to_check and proofno <= last_proof_to_check and (pt := pahrse_expr(clean_stat + ";")) = OM then
printy(["***** SYNTACTICALLY ILLFORMED STATEMENT in proof:<BR>\nStatement number is ",k - 1,"\n<BR>Statement is: \n<BR>",hint," ==> ",stat,
"<BR>\n<BR>\n***** Statements in proof are:<BR>\n",
join(["[" + kk + "] " + if x(1) = "" then str(x(2)) else join([str(y): y in x(1..#x - 1)]," ==> ") end if: x = dpl(2..)(kk)],"<BR>\n")]);
-- in the preceding line, 'x(1..#x - 1)' drops an appended 0
printy(["<BR>\nSTOPPED DUE TO SYNTAX ERROR; internal proofno and section number are: ",proofno," ",pno," raw form of hinted statement is: <BR>\n",hstat]); was_syntax_error := true; return;
end if;
num_checks +:= 1;
end loop;
--if proofno > 5 then printy(["\n",proofno,"\n",drop_labels(stat)(1)]); end if; if proofno > 10 then stop; end if;
-- if proofno mod 300 = 0 then printy(["Done: ",proofno]); end if; -- note progress of the operation
end loop;
-- printy(["Done parse of all proof lines: ",time()," ",num_checks," proof steps checked syntactically"]);
-- note conclusion of proof-lines parse and Suppose_not file writeout
close(suppose_not_handle); -- close file used for suppose_not collection
-- ********** ********** ********** ********** ********** ********** ********** ********** **********
-- Now prepare to write out the remaining files produced by this routine. These represent various of the maps
-- that have just been collected. They are digested_proof_file,theorem_map_file,theory_data_file,
-- and (a bit subsequently) definition_tup_file and theorem_sno_map_file.
-- ********** ********** ********** ********** ********** ********** ********** ********** **********
digested_proof_handle := open(user_prefix + "digested_proof_file","TEXT-OUT"); -- first write out the digested proofs.....
printa(digested_proof_handle,digested_proofs); close(digested_proof_handle);
theorem_map := {}; -- will map theorem numbers, generally in the form 'Tnnn", into the corresponding formulae
th_sno_map := {}; -- will map theorem numbers, generally in the form 'Tnnn", into the corresponding section numbers
-- also has corresponding pairs for definitions and theories
unnumbered_ctr := 0; -- counter for theorems not specifically numbered
for th_sno in theorem_sections loop -- process the theorem sections, building the theorem_map to be written out below
if #(ttup := sections(th_sno)) = 1 then -- theorem consists of a single extended line
stg := ttup(1); span(stg," \t"); match(stg,"Theorem"); span(stg," \t"); -- scan off the "Theorem" header
tnum := break(stg,":"); -- get the actual theorem number
span(stg," \t:"); -- past colon and whitespace
if #stg > 1 and stg(1) = "[" then break(stg,"]"); match(stg,"]"); span(stg," \t"); end if;
-- drop any prefixed comment in square brackets
rspan(stg," \t."); rmatch(stg,"Proof+:"); rmatch(stg,"Proof:"); rspan(stg," \t.");
-- drop the concluding 'Proof:', and any final period
theorem_map with:= pair := [th_finding_key := if tnum = "" then "TU"+ str(unnumbered_ctr +:= 1) else "T" + tnum end if,stg];
th_sno_map with:= [th_finding_key,th_sno]; -- record the theorem's section number in th_sno_map
--if parse_expr(stg + ";") = OM then printy(["Bad Theorem formula: ",stg]); stop; end if; printy([pair]);
else -- theorem consists of two parts
pref := ttup(1); -- get prefix and following part
stg := join(ttup(2..)," ");
span(pref," \t"); match(pref,"Theorem"); span(pref," \t"); tnum := break(pref,":"); -- get the actual theorem number
match(pref,":"); span(pref," \t");
if #pref > 1 and pref(1) = "[" then break(pref,"]"); match(pref,"]"); span(pref," \t"); end if;
-- drop any prefixed comment in square brackets
stg := pref + " " + stg; -- prepend remaining part of prefix to stg
if #stg > 1 and stg(1) = "[" then break(stg,"]"); match(stg,"]"); span(stg," \t"); end if;
-- drop any prefixed comment in square brackets
rspan(stg," \t."); rmatch(stg,"Proof+:"); rmatch(stg,"Proof:"); rspan(stg," \t."); -- drop the concluding 'Proof:', and any final period
theorem_map with:= (pair := [th_finding_key := if tnum = "" then "TU"+ str(unnumbered_ctr +:= 1) else "T" + tnum end if,stg]);
th_sno_map with:= [th_finding_key,th_sno]; -- record the theorem's section number in th_sno_map
--if parse_expr(stg + ";") = OM then printy(["Bad Theorem formula:: ",stg]); stop; end if; printy([pair]);
end if;
end loop; -- print the theorem sections
--printy(["theorem sections: ",{[x,tmx]: x in domain(theorem_map) | #(tmx := theorem_map{x}) > 1}]);
check_theorem_map(); -- check that the theorem map is single-valued
handle := open(user_prefix + "theorem_map_file","TEXT-OUT"); printa(handle,theorem_map); close(handle);
-- write out the theorem_map_file, containing 'theorem_map'.....
handle := open(user_prefix + "theory_data_file","TEXT-OUT");
enter_secno_map := {p: p in enter_sections}; enter_secno_map := {[x,enter_secno_map{x}]: x in domain(enter_secno_map)};
printa(handle,[parent_of_theory,theors_defs_of_theory,assumps_and_consts_of_theory,enter_secno_map]); close(handle);
-- ************* DEFINITIONS PROCESSING *************
-- Now we collect all the 'global' symbol definitions. These can have various sources:
-- (1) explicit definitions, which can be recursive
-- (2) definitions by Skolemization of a statement in Prenex normal form
-- (3) uses of the Theory mechanism, via 'APPLY'. The definition of functions originating in this way
-- is taken to be the 'APPLY' text itself. Note that an 'APPLY' statement has the syntax
-- APPLY(new_symbol:defined_symbol_of_theory,...) theory_name(list_of_replacements_for_assumed_symbols),
-- where the symbols being defined are the new_symbols that appear. The list_of_replacements_for_assumed_symbols
-- has the syntax
-- func_or_pred__assumed_in_theory->replacement_expresssion-supplied,...
-- Each func_or_pred__assumed_in_theory must appear in the THEORY line heading the THEORY being applied,
-- and must have the arity with which it is used in the THEORY.
-- 'APPLY' statements can appear either in the body of a proof, or externally. In the former case the symbols
-- that they define are local to the proof in which they appear, in the second case they are global.
-- THEORYs with empty defined_symbol_of_theory lists are possible, and are used as repositories
-- for frequently occurring proof strategies that do not require combinatorial search.
-- Several theory checks resemble the checks performed during definition processing and must be performed
-- in the same way. Introduction of a THEORY must precede any of its applications. Each item in the
-- list_of_replacements_for_assumed_symbols supplied by an 'APPLY' is in effect a definition for the
-- func_or_pred__assumed_in_theory appearing in it and must obey the rules for an ordinary algebraic definition.
-- A THEORY can be entered repeatedly, to prove new theorems and make additional definitions.
-- ****************************************************
-- Build a map of all symbols defined explicitly or by a theory application into the number of
-- the section in which they are defined.
-- This will be used subsequently to check that all Use_defs and other symbol uses
-- follow the definition or APPLY which defines them
-- ****************************************************
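-- Hedged example (hypothetical theory and symbols, shown only to fix the APPLY syntax described above):
--   APPLY(quot_map:class_of) equiv_classes(E->same_card)
-- Here 'quot_map' is the new symbol being defined, 'class_of' is the symbol defined inside the
-- theory 'equiv_classes', and 'same_card' is the replacement supplied for its assumed predicate 'E'.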
for j in def_sections loop
ds := trim_front("" +/ sections(j)); -- get the definition or apply section
if #ds >= 5 and ds(1..5) = "APPLY" then -- we have a definition by 'APPLY'
null; -- **************** FILL IN ****************
else -- we have an ordinary definition
ds_pieces := segregate(ds,":="); -- find the left/right separator of the definition
if (not (exists piece = ds_pieces(ix) | piece = ":=")) or ix = 1 then
printy(["\n****** Error: illformed definition: ",ds]);
error_count +:= 1; continue;
end if;
-- get the part of the definition before the assignment mark
before := obefore := ds_pieces(ix - 1); rspan(before," \t"); span(before," \t");
-- if this opens with a start-tuple bracket, then break off everything up to the following end-tuple-bracket
if #before > 0 and before(1) = "[" then break(before,"]"); span(before," \t]"); end if;
-- if this is an infix-operator definition
if #before > 4 and before(1..4) = "Def(" then -- have infix operator definition
before := before(5..); -- take everything on the left side of the definition following 'Def('
rspan(before," \t)"); -- remove the enclosing parentheses and any whitespace
if #before > 1 then
break(before,"¥•@~#"); -- remove everything up to the operator mark, e.g. '•' or '@'
-- for the special case [x,y] we have before = [x,y]
if #before > 0 and before(1) notin "¥•" then -- the operator mark is not '•'
ma1 := any(before,"@~#");
before := ma1; -- break it out as a single character
elseif #before > 0 and before(1) = "[" then
before := "[]"; -- enumerated tuple special case
else -- but if the operator mark is '•'
befront := breakup(before," \t")(1); -- take everything following the operator mark,
-- up to the next whitespace, as the operator
if before = "" then before := "[]"; else before := befront; end if;
end if;
end if;
--printy(["before: ",before," ",obefore]);
else -- we have a definition of a constant or function symbol
befront := break(before,"("); before := befront; span(before," \t"); span(before," \t");
-- take part before open parenthesis and trim blanks
end if;
symb_referenced := -- convert the prefix characters of infix operators to 'name_' form and capitalize
if before in {"range","domain","pow"} then case_change("ast_" + before,"lu")
elseif #before >= 1 and before(1) in "¥•" then "DOT_" + case_change(before(2..),"lu") -- infixes
elseif #before >= 1 and before(1) = "~" then "TILDE_" + case_change(before(2..),"lu")
elseif #before >= 1 and before(1) = "@" then "AT_" + case_change(before(2..),"lu")
elseif #before >= 1 and before(1) = "#" then case_change("ast_nelt","lu")
elseif before = "[]" then case_change("ast_enum_tup","lu")
else case_change(before,"lu") end if;
--print("before: ",before," ",symb_referenced);
th_sno_map with:= ["D" + symb_referenced,j]; -- prefix the defined operator or function with a 'D',
-- and note the section number of the definition
end if;
-- ds := "" +/ sections(j); span(ds,"\t ");
-- if #ds >= 5 and ds(1..5) = "APPLY" then printy(["**** with following theorem: ","" +/ sections(j) + get_theorem_name("" +/ sections(j + 1))]); end if;
end loop;
def_tuple := [[theory_of_section_no(j),(ds := trim_front("" +/ sections(j))) + if #ds >= 5 and ds(1..5) = "APPLY" then get_theorem_name("" +/ sections(j + 1)) else "" end if]: j in def_sections];
-- Form the tuple of all definitions, with the theories to which they belong:
-- Note that this includes global 'APPLY' statements, for which the hint, followed by the name of the deduced theorem
-- (no space) is written out
handle := open(user_prefix + "definition_tup_file","TEXT-OUT"); printa(handle,def_tuple); close(handle);
-- write out the definition_tup_file, containing all definitions .....
handle := open(user_prefix + "theorem_sno_map_file","TEXT-OUT"); printa(handle,th_sno_map); close(handle);
-- write out the theorem_sno_map_file, mapping all theorems and definitions into their section numbers .....
printy(["Done: ",time()," Files suppose_not_file, digested_proof_file, theorem_map_file, definition_tup_file were written with prefix ",user_prefix]);
-- note conclusion of this whole syntax checking and file writeout operation
end parse_file;
-- ************* division of proof lines into 'hint' and 'body' portions ***************
procedure digest_proof_lines(proof_sect_num); -- digests the proof lines into triples
-- if (proof_sect_num - 1) in use_pr_by_str_set then print("detected use_pr_by_str_set case: ",use_pr_by_str_set," ",proof_sect_num); end if;
proof_sect_trips := [if (proof_sect_num - 1) in use_pr_by_str_set then -proof_sect_num else proof_sect_num end if]; cur_stg := cur_pref := "";
-- We collect triples [prefix,formula,marked_for_optimize]; the formula may need to be collected
-- across several lines. But note that each proof is prefixed by its section number, which is made negative to indicate use_proof_by_structure
--if proof_sect_num = 1124 then printy([sections(proof_sect_num)]); end if;
for line in sections(proof_sect_num) loop -- iterate over the lines in the section
for piece in breakup(line,";") loop -- the lines are broken by ;'s if several inference statements are put on one line
if (lb := loc_break(piece)) = OM then cur_stg +:= piece; continue; end if;
-- loc_break finds the location of the last character of the mark ==> or ===> in the string, if there is any such;
-- this mark separates the hint from the formula of an inference line. If there is no such mark, OM is returned
-- otherwise we collect the preceding pair and start a new one
if cur_stg /= "" then -- remove surrounding whitespace and terminal dot
span(cur_stg," \t"); rspan(cur_stg,". \t"); span(cur_pref," \t"); rspan(cur_pref,". \t");
proof_sect_trips with:= [cur_pref,cur_stg,marked_for_optimize]; cur_stg := "";
end if;
marked_for_optimize := if lb >= 4 and piece(lb - 3..lb) = "===>" then 1 else 0 end if;
cur_pref := piece(1..lb - (3 + marked_for_optimize)); cur_stg := piece(lb + 1..);
--if marked_for_optimize = 1 then print("<P>marked_for_optimize: ",line," ",cur_pref," ",cur_stg); end if;
end loop;
end loop;
if cur_stg /= "" then
span(cur_stg," \t"); rspan(cur_stg,". \t"); span(cur_pref," \t"); rspan(cur_pref,". \t");
proof_sect_trips with:= [cur_pref,cur_stg,marked_for_optimize]; cur_stg := "";
end if;
return proof_sect_trips;
end digest_proof_lines;
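-- Hedged example (hypothetical proof line): the piece
--   "ELEM --> Stat2 ==> {z} * s /= 0."
-- digests into the triple ["ELEM --> Stat2","{z} * s /= 0",0]; had the mark been '===>'
-- instead of '==>', marked_for_optimize would be 1 in the collected triple.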
-- ******** Analysis of the text of theorems *******
procedure theorem_text(sect_no); -- gets stripped text of theorem
stg := "" +/ sections(sect_no); thead := break(stg,":"); tcol := match(stg,":"); twhite := span(stg," \t");
if #stg > 0 and stg(1) = "[" then tcom := break(stg,"]"); trb := match(stg,"]"); span(stg," \t"); end if;
tpref := thead + tcol + twhite + (tcom?"") + (trb?"");
rspan(stg," \t"); rmatch(stg,"Proof+:"); rmatch(stg,"Proof:"); rspan(stg," \t.");
return stg;
end theorem_text;
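-- Hedged example (hypothetical section text): for the single-line section
--   "Theorem 7: [squaring] sq(0) = 0. Proof:"
-- theorem_text strips the header, the bracketed comment, the trailing 'Proof:' and the final dot,
-- returning just "sq(0) = 0".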
-- ******** Analysis of the text of definitions *******
procedure definition_text(sect_no); -- gets stripped text of definition
stg := stg_copy := "" +/ sections(sect_no);
span(stg_copy," \t"); mayap := match(stg_copy,"APPLY");
if mayap /= "" then rspan(stg," \t."); return mayap + stg; end if;
thead := break(stg,":"); tcol := match(stg,":"); twhite := span(stg," \t");
if #stg > 0 and stg(1) = "[" then tcom := break(stg,"]"); trb := match(stg,"]"); span(stg," \t"); end if;
tpref := thead + tcol + twhite + (tcom?"") + (trb?"");
rspan(stg," \t.");
return stg;
end definition_text;
-- !!!!!************* Collection and numbering of theory sections ***************!!!!!
procedure take_section(now_in,current_section); -- collects section
-- this routine is called whenever a prior section is ended by the start of a new section. It collects
-- all the sections into an overall tuple called 'sections', and also (depending on their type)
-- collects their section numbers into auxiliary tuples called def_sections, theorem_sections,
-- theory_sections, apply_sections, enter_sections, and proof_sections
-- This routine also keeps track of incomplete or missing proofs
--print("take_section: ",sections," ",current_section);
if current_section = [] or now_in = unused_section then
--if ("" +/ current_section) /= "" then printy(["\nunused_section: ",current_section]); end if;
return;
end if;
sections with:= current_section; num_sections +:= 1; -- put the section itself into the list of all sections
theory_of_section_no(num_sections) := last_theory_entered; -- note the theory to which this section belongs
-- and now record the section number into a list of section indices of appropriate type
if now_in = definition_section then -- collect a definition
def_sections with:= num_sections;
def_start := current_section(1); span(def_start,"\t "); def_name := break(def_start,":");
theors_defs_of_theory(last_theory_entered) := (theors_defs_of_theory(last_theory_entered)?{}) with def_name;
if dump_theorems_flag then -- dump definitions in this case
full_def_statement := "" +/ current_section;
rspan(full_def_statement," \t");
full_def_copy := full_def_statement;
front := break(full_def_copy,":"); mid := span(full_def_copy,": \t");
if full_def_copy(1) = "[" then -- pull of the comment
comment := break(full_def_copy,"]"); aft_comment := span(full_def_copy,"] \t");
--print("full_def_copy: ",full_def_copy);
if (pe := parse_expr(full_def_copy + ";")) /= OM then
full_def_copy := front + mid + comment + aft_comment + unicode_unpahrse(pe);
else
full_def_copy := full_def_statement;
end if;
elseif (pe := parse_expr(full_def_copy + ";")) /= OM then
full_def_copy := front + mid + unicode_unpahrse(pe);
else
full_def_copy := full_def_statement;
end if;
printa(dump_defs_handle,"(",theorem_count,") ",full_def_copy,"<P>");
end if;
elseif now_in = theorem_section then -- collect a theorem statement
theorem_sections with:= last_thm_num := num_sections;
theorem_start := current_section(1);
if dump_theorems_flag then -- dump theorems in this case; use prettyprinted form if possible
full_thm_statement := "" +/ current_section; -- assemble the full theorem statement
rspan(full_thm_statement," \t"); rmatch(full_thm_statement,"Proof+:"); rmatch(full_thm_statement,"Proof:"); -- remove possible clutter
rspan(full_thm_statement," \t.");
full_thm_copy := full_thm_statement; -- break the theorem statement from its label, remove possible clutter again
front := break(full_thm_copy,":"); mid := span(full_thm_copy,": \t");
if full_thm_copy(1) = "[" then -- if the label has a comment then pull it off
comment := break(full_thm_copy,"]"); aft_comment := span(full_thm_copy,"] \t");
--print("full_thm_copy: ",full_thm_copy);
if (pe := parse_expr(full_thm_copy + ";")) /= OM then -- if it parses, then reassemble it in unicode form
save_running_on_server := running_on_server; -- since the immediately following dump is always wanted in prettyprinted form
running_on_server := true; -- pretend for the moment that we are running on the server, even if not
full_thm_copy := front + mid + comment + aft_comment + unicode_unpahrse(pe);
running_on_server := save_running_on_server; -- but drop the pretense immediately
else -- otherwise just use the theorem in its original form (in a wellformed scenario, this should never happen)
full_thm_copy := full_thm_statement;
end if;
elseif (pe := parse_expr(full_thm_copy + ";")) /= OM then -- handle the no-comment case similarly
save_running_on_server := running_on_server; -- since the immediately following dump is always wanted in prettyprinted form
running_on_server := true; -- pretend for the moment that we are running on the server, even if not
full_thm_copy := front + mid + unicode_unpahrse(pe);
running_on_server := save_running_on_server; -- but drop the pretense immediately
else
full_thm_copy := full_thm_statement;
end if;
theorem_count +:= 1; -- count the theorems as we go along
num_to_show := if theorem_count > 1 and tpa_to_just_tp_num(theorem_count - 1) = (stc := tpa_to_just_tp_num(theorem_count)) then
str(stc) + "+" else str(stc?1) end if;
printa(dump_theorems_handle,"(",num_to_show,") ",full_thm_copy,"<P>");
-- print the theorem in its prettyprinted form
if not running_on_server then printa(dump_theorems_handle2,"(",num_to_show,") ",full_thm_statement,"<P>"); end if;
-- also print the theorem in its plain form if running standalone
end if;
span(theorem_start,"\t "); theorem_name := break(theorem_start,":");
match(theorem_name,"Theorem"); span(theorem_name,"\t "); rspan(theorem_name,"\t ");
if theorem_name /= "" then
theors_defs_of_theory(last_theory_entered) :=
(theors_defs_of_theory(last_theory_entered)?{}) with ("T" + theorem_name);
end if;
--printy(["theorem_name: ",theorem_name]);
elseif now_in = proof_section then -- collect a proof
proof_sections with:= num_sections; -- note that the current section is a proof section
if not was_just_qed then incomplete_proofs with:= num_sections; end if; was_just_qed := false;
if num_sections - 1 /= last_thm_num then printy(["lost proof: ",last_thm_num," ",num_sections," ",sections(last_thm_num..num_sections)]); end if;
elseif now_in = theory_section then -- collect a theory declaration
theory_sections with:= num_sections;
cur_sect := current_section(1); -- get the header line of the theory section
if dump_theorems_flag then -- dump theories in this case
full_theory_statement := join(current_section,"<BR>");
printa(dump_theories_handle,"(",theorem_count,") ",full_theory_statement,"<P>");
end if;
front := break(cur_sect,"["); cur_sect := front; -- break off possible trailing comment
--print(printy(["theory_section: ",current_section]);
match(cur_sect,"THEORY"); trailing_underbar := match(cur_sect,"_"); -- look for underbar indicating external theory name
if trailing_underbar /= "" then external_theory_name := break(cur_sect,"\t "); end if;
span(cur_sect,"\t ");
theory_name := break(cur_sect,"(\t "); span(cur_sect,"\t (");
if trailing_underbar /= "" then theory_name := "_" + external_theory_name + "_" + theory_name; end if;
-- if the theory is external, prefix its name with the name of the responsible prover
rspan(cur_sect,"\t "); rmatch(cur_sect,")"); rspan(cur_sect,"\t ");
theory_params := split_at_bare_commas(cur_sect);
parent_of_theory(theory_name) := last_theory_entered;
assumps_and_consts_of_theory(theory_name) := [theory_params,[trim(line): line in current_section(2..#current_section - 1)]];
--printy(["theory_name: ",theory_name," ",assumps_and_consts_of_theory(theory_name)]);
--printy(["theory_section: ",current_section]);
elseif now_in = enter_section then -- collect an ENTER statement
cur_sect := current_section(1);
match(cur_sect,"ENTER_THEORY"); span(cur_sect,"\t ");
last_theory_entered := break(cur_sect,"\t ");
--printy(["enter_section: ",last_theory_entered]);
enter_sections with:= [last_theory_entered,num_sections];
elseif now_in = apply_section then -- collect an 'APPLY' instance
apply_sections with:= num_sections; -- we put the 'APPLY' itself into 'apply_sections'
def_sections with:= num_sections; -- but also record it as a definition in the 'def_sections' list
-- since it may define various symbols
--printy(["TAKING APPLY SECTION: ",current_section]);
end if;
end take_section;
-- ************* Enforcement of special Ref syntactic/semantic rules ***************
procedure tree_check(node); -- checks that there are no compound functions, enumerated tuples of length > 2,
-- and unrestricted or improperly restricted iterators in setformers
--printy(["tree_check: ",node]);
if is_string(node) then return true; end if; -- case of bottom-level name; disallow null tuples
if not is_tuple(node) then return false; end if; -- armoring: monadic occurrences of operators like '&' expected to be binary
[n1,n2,n3] := node; -- break node into operands and operator: generally infix (but not always)
if n1 = "ast_null" then return true; end if;
case abbreviated_headers(n1) -- handle all allowed nodes
when "if" => return tree_check(n2) and tree_check(n3) and tree_check(node(4)); -- if expression
when "and","or","==","=",":=","incs","incin","imp","+","*","-","in","notin","/=","/==","@" =>
return tree_check(n2) and tree_check(n3);
when "{-}","[]","itr","Etr" => return true and/ [tree_check(nj): nj in node(2..)]; -- enumerated set, list, iterator list
when "[-]" => if #node > 3 then return false; end if; -- triples and greater are disallowed
return if n3 = OM then tree_check(n2) else tree_check(n2) and tree_check(n3) end if;
when "arb","not","pow","#","domain","range" => return tree_check(n2);
when "EX","ALL" => -- existential and universal
if not (tree_check(n2) and tree_check(n3)) then return false; end if; -- check iterator and condition
return true and/ [is_string(itr) or (It1 := itr(1)) = "ast_in" or It1 = "DOT_INCIN": itr in n2(2..)];
when "{/}" => -- setformer, no expr; verify presence of restrictions in iterators
if not (tree_check(n2) and tree_check(n3)) then return false; end if; -- check iterator and condition
return true and/ [(It1 := itr(1)) = "ast_in" or It1 = "DOT_INCIN": itr in n2(2..)];
when "{}" => -- setformer; verify presence of restrictions in iterators
if not (tree_check(n2) and tree_check(n3) and tree_check(node(4))) then return false; end if; -- check iterator and condition
return true and/ [(It1 := itr(1)) = "ast_in" or It1 = "DOT_INCIN": itr in n3(2..)];
when "()" => return is_string(n2) and tree_check(n3);
-- rule out compound functions and predicates
otherwise => return if n3 = OM then tree_check(n2) else tree_check(n2) and tree_check(n3) end if;
end case;
end tree_check;
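-- Hedged examples (hypothetical formulae): tree_check would accept a properly restricted
-- setformer such as {x: x in s | P(x)}, but reject an unrestricted iterator as in {x | P(x)},
-- a compound application such as (f(x))(y) (whose '()' node has a non-string operator),
-- and an enumerated tuple of length > 2 such as [a,b,c].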
procedure get_theorem_name(stg); -- extracts theorem name from string
span(stg," \t"); match(stg,"Theorem"); span(stg," \t"); name := break(stg,":"); return name;
end get_theorem_name;
procedure check_theorem_map(); -- check the syntax of all the theorems written to the theorem_map_file
dup_theorems := {tn: tn in domain(theorem_map) | #theorem_map{tn} > 1};
if dup_theorems /= {} then
printy(["\n******* ERROR: the following theorem names are duplicated: ",dup_theorems,"\n"]);
error_count +:= #dup_theorems;
printy(["One version of each duplicated theorem will be used arbitrarily"]);
for tn in dup_theorems loop theorem_map(tn) := arb(theorem_map{tn}); end loop;
end if;
end check_theorem_map;
-- ************* Interfaces to native SETL parser ***************
procedure parze_expr(stg); -- preliminary printing/diagnosing parse
-- echoes the line being parsed, and calls the standard parser
printy(["\nparsing: ",stg]);
if paren_check(stg) and (ps := pahrse_expr(stg)) /= OM then printy([" OK"]); return ps; end if;
printy(["\n",setl_num_errors()," ************* ERRORS"]); abort(setl_err_string(1));
end parze_expr;
procedure pahrse_expr(stg); -- parse with check of syntactic restrictions
-- called initially in place of parse_expr, to enforce Ref syntactic rules.
ps := parse_expr(stg);
--print("ps: ",abs(stg(3))," ",abs("•")," ",stg = "P •imp ((P •imp Q) •imp Q);"," ",ps);
if ps /= OM and tree_check(ps) then return ps; end if;
printy(["\n",setl_num_errors()," ************* ERRORS"]);
end pahrse_expr;
-- ******** Analysis of function and symbol arities - enforcement of arity rules (***incomplete***) *******
-- It is intended that these routines should ensure that no symbol is used with multiple arities. They need to be completed
-- in a manner reflecting the Ref convention that constant, function, and predicate names can have either THEORY-wise, global, or proof-local scope.
procedure collect_fun_and_pred_arity(node,dno); -- collect the arity of functions and predicates (main entry)
current_def := dno;
collect_fun_and_pred_arity_in(node,[]);
end collect_fun_and_pred_arity;
procedure collect_fun_and_pred_arity_in(node,bound_vars); -- collect the arity of functions and predicates (workhorse)
-- also collects locations of symbol definitions
if is_string(node) then if node notin bound_vars then null; end if; return; end if; -- string leaf; the bound-variable test is a placeholder, pending completion of these routines
case (ah := abbreviated_headers(n1 := node(1))?n1)
when "and","or","==","+","-","{-}","in","notin","/==","=","/=","[]","[-]","{.}","itr","Etr","inc","incin","imp","*","->","not","null" => -- ordinary operators
for sn in node(2..) loop collect_fun_and_pred_arity_in(sn,bound_vars); end loop;
when "arb","domain","range","@","#","incs","<","<=",">",">=","pow" => -- more ordinary operators
for sn in node(2..) loop collect_fun_and_pred_arity_in(sn,bound_vars); end loop;
when ":=" => -- definition operators
-- if the symbol at the very left of a definition is 'def', it is being used to define an infix or prefix operator
if (left_side := node(2))(2) = "DEF" then
if is_string(ls32 := left_side(3)(2)) then
arity_symbol with:= [ls32,0];
if current_def /= 0 then symbol_definition with:= [ls32,current_def]; current_def := 0; end if;
-- collect the operator definition
collect_fun_and_pred_arity_in(node(3),bound_vars); return; -- process the right side of the definition and return
else
[op,a1,a2] := ls32; -- unpack
arity_symbol with:= [op,if a2 = OM then 1 else 2 end if];
if current_def /= 0 then symbol_definition with:= [op,current_def]; current_def := 0; end if; -- collect the operator definition
collect_fun_and_pred_arity_in(node(3),bound_vars); return; -- process the right side of the definition and return
end if;
elseif is_string(left_side) then -- definition of a constant object
arity_symbol with:= [left_side,0];
if current_def /= 0 then symbol_definition with:= [left_side,current_def]; current_def := 0; end if; return; -- no arguments
elseif (ahl := abbreviated_headers(left_side(1))) = "()" then -- definition of a standard predicate or operator
args := if is_string(n3 := left_side(3)) then [n3] else left_side(3)(2..) end if;
arity_symbol with:= [ls2 := left_side(2),#args]; -- note the number of arguments
if current_def /= 0 then symbol_definition with:= [ls2,current_def]; current_def := 0; end if; -- collect the symbol definition
elseif ahl in specials_1 then -- special one-variable operator
arity_symbol with:= [ahl,1];
if current_def /= 0 then symbol_definition with:= [ahl,current_def]; current_def := 0; end if; return;
else
printy(["******* ",left_side]); return;
end if;
for sn in args loop collect_fun_and_pred_arity_in(sn,bound_vars); end loop;
when "if" => -- if expression
for sn in node(2..) loop collect_fun_and_pred_arity_in(sn,bound_vars); end loop;
when "()" => -- this is the case of functional and predicate application; the second variable is a reserved symbol, not a set
args := [arg := node(3)]; -- the single arg, or list of args
nargs := if is_string(arg) then 1 else #(args := arg(2..)) end if;
arity_symbol with:= [n2 := node(2),nargs]; -- maps each defined symbol into its arity
-- if current_def /= 0 then symbol_definition with:= [n2,current_def]; current_def := 0; end if; -- collect the symbol definition
for sn in args loop collect_fun_and_pred_arity_in(sn,bound_vars); end loop;
when "{}","{/}","EX","ALL" => bound_vars +:= find_bound_vars(node); -- setformer or quantifier; note the bound variables
for sn in node(2..) loop collect_fun_and_pred_arity_in(sn,bound_vars); end loop; -- now collect free variables as before
otherwise =>
is_dot := match(ah,"DOT_"); if is_dot /= "" then return; end if; -- here we should process the arity of infix and prefix operators
printy(["shouldn't happen collect_fun_an_pred_arity_in: ",ah," ",node]); -- shouldn't happen
end case;
end collect_fun_and_pred_arity_in;
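-- Example (hypothetical symbols): processing "f(x,y) := {x,y}" adds ["f",2] to
-- arity_symbol, "c := 0" adds ["c",0], and a later application "f(x)" adds ["f",1];
-- the resulting multi-valued map exposes the arity clash that these routines are
-- intended (once completed) to diagnose.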
-- ******************************************************************
-- ******** Input and checking of digested proofs - top level *******
-- ******************************************************************
-- ******* initial input of digested proof data *******
procedure read_proof_data(); -- ensure that the list of digested_proofs,
-- the theorem_map of theorem names to theorem statements,
-- the theorem_sno_map of theorem names to theorem section numbers,
-- its inverse inverse_theorem_sno_map,
-- the theory-related maps parent_of_theory,theors_defs_of_theory,assumps_and_consts_of_theory
-- and the theory_of map sending each theorem and definition into its theory
-- are all available
if was_syntax_error then print("\n\nwas_syntax_error -- STOPPED\n\n"); stop; end if; -- armoring
if digested_proof_handle = OM then -- read the full tuple of digested proofs if they have not already been read
init_logic_syntax_analysis(); -- obligatory initialization
digested_proof_handle ?:= open(user_prefix + "digested_proof_file","TEXT-IN");
reada(digested_proof_handle,digested_proofs); close(digested_proof_handle);
printy(["number of digested proofs is: ",#digested_proofs]);
end if;
if theorem_map_handle = OM then -- read the theorem_map if it has not already been read
theorem_map_handle ?:= open(user_prefix + "theorem_map_file","TEXT-IN");
reada(theorem_map_handle,theorem_map); close(theorem_map_handle);
-- printy(["number of theorem_map entries is: ",#theorem_map]);
end if;
if theorem_sno_map_handle = OM then -- read the theorem_sno_map if it has not already been read
theorem_sno_map_handle ?:= open(user_prefix + "theorem_sno_map_file","TEXT-IN");
reada(theorem_sno_map_handle,theorem_sno_map); close(theorem_sno_map_handle);
inverse_theorem_sno_map := {[y,x]: [x,y] in theorem_sno_map};
end if;
if theory_data_handle = OM then -- read the theory_data if it has not already been read
theory_data_handle ?:= open(user_prefix + "theory_data_file","TEXT-IN");
reada(theory_data_handle,theory_data); close(theory_data_handle);
[parent_of_theory,theors_defs_of_theory,assumps_and_consts_of_theory,enter_secno_map] := theory_data;
-- form map of theorems and definitions into their theories
theory_of := {[thm_or_def,theory]: [theory,th_def_set] in theors_defs_of_theory,thm_or_def in th_def_set};
end if;
theorem_list := [x: [-,x] in merge_sort(inverse_theorem_sno_map)]; -- list of theorem names in order
theorem_name_to_number := {[x,j]: x = theorem_list(j)}; -- map of theorem names to their numbers in theorem order
defs_of_theory := {}; -- maps each theory into the names of the definitions appearing in the theory
defsymbs_of_theory := {}; -- maps each theory into the symbols defined in the theory
defconsts_of_theory := {}; -- maps each theory into the parameterless symbols defined in the theory
end read_proof_data;
-- ************* read-in of digested proof and theory-related files ***************
-- subset of above reads, used when only digested_proofs and theory data are needed.
procedure init_proofs_and_theories();
-- ensures initialization of digested_proofs, parent_of_theory,theors_defs_of_theory,assumps_and_consts_of_theory
if digested_proof_handle = OM then -- read the full tuple of digested proofs if they have not already been read
init_logic_syntax_analysis(); -- obligatory initialization
digested_proof_handle ?:= open(user_prefix + "digested_proof_file","TEXT-IN");
reada(digested_proof_handle,digested_proofs); close(digested_proof_handle);
printy(["number of digested proofs is: ",#digested_proofs]);
end if;
if theory_data_handle = OM then -- read the theory_data if it has not already been read
theory_data_handle ?:= open(user_prefix + "theory_data_file","TEXT-IN");
reada(theory_data_handle,theory_data); close(theory_data_handle);
[parent_of_theory,theors_defs_of_theory,assumps_and_consts_of_theory,enter_secno_map] := theory_data;
-- form map of theorems and definitions into their theories
theory_of := {[thm_or_def,theory]: [theory,th_def_set] in theors_defs_of_theory,thm_or_def in th_def_set};
end if;
end init_proofs_and_theories;
-- ******* main external entry to proof checking *******
procedure check_proofs(list_of_proofs); -- check ELEM and discharge inferences in given range,
-- by calling the single-proof verifier as often as necessary.
check_definitions(1,1 max/ list_of_proofs);
-- read and check all the definitions that might be relevant to the proofs in question
nelocs_total := ok_total := ndisch_total := ok_disch_total := 0;
--printy(["\n<br>checked definitions, starting read_proof_data: "]);
read_proof_data(); -- ensure that the list of digested_proofs,
-- the theorem_map of theorem names to theorem statements,
-- the theorem_sno_map of theorem names to theorem section numbers,
-- its inverse inverse_theorem_sno_map,
-- the theory-related maps parent_of_theory,theors_defs_of_theory,assumps_and_consts_of_theory
-- and the theory_of map sending each theorem and definition into its theory
-- are all available
prepare_theorem_templates(); -- convert the theorem_map into a map 'theorem_templates' of theorem names into pairs
--printy(["\n<br>read_proof_data, starting check of individual proofs: ",#list_of_proofs]);
for j = list_of_proofs(jix) loop
check_a_proof(j);
end loop; -- call the single-proof verifier as often as necessary.
set_output_phase(1); -- direct the following output to the main output file
--printy(["\n<br>checked proofs: ",list_of_proofs]);
printy(["<P>*********** Run Ended ********** total inference errors = ",total_err_count," total fully verified proofs = ",total_fully_verified_proofs," ",time()]);
end check_proofs;
procedure prepare_theorem_templates(); -- convert the theorem_map into a map 'theorem_templates'
-- of theorem names into pairs [set_of_freevs_theorem,parsed_thm]
-- the aim of the family of procedures of which this is part is to detect all the theorems which might be cited to
-- establish some conclusion in a proof. Let s_all be the set of function and constant names which occur in a theorem
-- (including its hypotheses) and let s be the set of function and constant names which occur in its conclusion.
-- Then s_all must be included in the set of function and constant names which occur somewhere on the stack
-- when it is set up to prove a desired conclusion (since otherwise there would be no way of proving
-- all the hypotheses of the theorem), and s must be included in the set of function and constant names which occur in
-- the desired statement (since otherwise the desired statement could not result from the theorem conclusion by substitution).
if theorem_templates /= OM then return; end if; -- do operation just once
theorem_templates := {}; -- otherwise initialize map
for theorem_cited = theorem_map(thm_tname) loop
-- theorem_map is a map from external theorem names to the string forms of theorems;
-- we convert all theorems to the 'theorem_templates' form
theorem_cited := join(breakup(theorem_cited,"&")," and "); -- replace ampersands with 'ands' in the theorem cited
parsed_thm := parse_expr(theorem_cited + ";")(2); -- parse the entire theorem; drop the 'list' prefix
[freevs_theorem,funcs_all] := find_free_vars_and_fcns(parsed_thm); -- get its free variables and all its functions and constants
-- if the main operation is an implication, take only the conclusion
if parsed_thm(1) = "DOT_IMP" then parsed_thm := parsed_thm(3); end if;
-- if the main operation of the conclusion is now a conjunction, break it up into the list of its conjuncts
[freevs_theorem,funcs_conclusion] := find_free_vars_and_fcns(parsed_thm); -- get the functions and constants of the theorem conclusion
list_of_conjuncts := []; -- will collect
while parsed_thm(1) = "ast_and" loop
list_of_conjuncts with:= parsed_thm(2); parsed_thm := parsed_thm(3);
end loop;
list_of_conjuncts with:= parsed_thm; -- capture the last conjunct
theorem_templates(thm_tname) := [freevs_theorem,parsed_thm];
-- maintain mapping of top symbols of conjuncts to name of theorem in which they appear
-- this will be used to prune the search for theorems matching a given conclusion
for conjnct in list_of_conjuncts loop
-- convert function applications at top into their function names
if (keyconj := conjnct(1)) = "ast_of" then keyconj := conjnct(2); end if;
topsym_to_tname(keyconj) := (topsym_to_tname(keyconj)?{}) with thm_tname;
end loop;
if thm_tname = "T463c" then print("<P>freevs_theorem,parsed_thm: ",thm_tname,"<BR>",[freevs_theorem,funcs_conclusion,funcs_all]); end if;
end loop;
--print("<BR>domain topsym_to_tname: ",domain(topsym_to_tname)); print("<BR>",topsym_to_tname("FINITE"));
end prepare_theorem_templates;
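-- Example (hypothetical): a theorem "FINITE(s) •imp FINITE(pow(s))" with one
-- conclusion conjunct is stored in theorem_templates under its name as
-- [freevs_theorem,parsed_conclusion], and since the conjunct's top symbol is the
-- application of "FINITE", its name is added to topsym_to_tname("FINITE"); goals
-- whose top symbol differs are thereby never matched against this theorem.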
-- ************* Master entry for checking inferences in a single proof ***************
procedure check_a_proof(proofno); -- read a given proof and check all its inferences
-- This routine iterates over the successive statements of a single proof, determining their type by examining the attached hint,
-- and then decomposing the hint to determine what statement, theorem, or theory (if any) is to be invoked, what substitutions
-- are to be performed during any such invocation, and what preceding statements are to be used as the logical context of the
-- inference to be attempted. Once this preliminary analysis has been done, its results are generally assembled into a conjunction,
-- which is passed to our standard MLSS decision algorithm for satisfiability checking.
-- We also enforce the condition that no variable used in a proof can be existentially instantiated more than once.
-- This routine works in the following way to check 'Discharge' steps, which affect the proof context more seriously
-- than the other kinds. The negative of the statement appearing in the last preceding 'Suppose' step is formed,
-- and added to the context open just before the 'Suppose'. Then the conclusion of the 'Discharge' must be an ELEM consequence
-- of the statements available in that context. ["Discharge", "QED"] steps require that 'false' be an ELEM consequence
-- of statements available in that context, and that the context must have been opened by a "Suppose_not" at the very start of the proof.
-- "Suppose_not" steps are to be checked separately, by verifying that they are INSTANCEs of the negative of the theorem being proved.
-- Note that theorem statements use a syntax not used elsewhere, in which capitalized free variable names are understood to be
-- universally quantified (by a prefixed universal quantifier).
-- The context of an ELEM deduction can be constrained in a simple way by prefixing a prior statement label to the ELEM command,
-- as in (Stat_n)ELEM. If this is done, only the statements in the range back to the indicated statement will be used as context
-- for blobbing. By appending a '*' to such a restriction clause one 'coarsens' the blobbing by forcing blobbing of functions that
-- the back-end extended MLSS routines might otherwise attempt to handle (see the global const 'unblobbed_functions' in the
-- 'logic_parser_globals package'). Likewise, by appending a '+' to such a restriction clause,
-- one increases the time allowed for the work of the MLSS routines, after which they abandon the inference currently being attempted.
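-- To illustrate the restriction syntax just described (labels hypothetical):
--   (Stat3)ELEM   -- use only the context back to the statement labeled Stat3
--   (Stat3*)ELEM  -- same, but also force blobbing of the 'unblobbed_functions'
--   (Stat3+)ELEM  -- same, but allow the MLSS routines additional time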
error_count := 0; -- initialize the accumulated error count for this proof, which will be reported
step_time_list := step_kind_list := []; -- start to collect lists of times and kinds for verification of steps in this proof
hint_stat_tup := digested_proofs(proofno); -- get the tuple of hints and statements for the current proof; this is a tuple of [hint,statement] pairs
--print("proofno ********",proofno," ",hint_stat_tup); --if hint_stat_tup = OM then end if;
-- separate out the section number of the theorem and use it to get the theorem id
if hint_stat_tup = OM then
printy(["******* PROOF CHECK ABORTED: theorem number " + str(proofno) + " apparently out of range"]);
return;
elseif inverse_theorem_sno_map = OM then
printy(["******* PROOF CHECK ABORTED: since proof of theorem number " + str(proofno) + " does not have required form (possibly lacks 'Suppose-not')"]);
return;
end if;
-- detect the per-proof use_proof_by_structure flags
use_proof_by_structure := false; -- a negative prepended theorem section number is used as the proof_by_structure flag
if (hst1 := hint_stat_tup(1)) < 0 then use_proof_by_structure := true; hst1 := -hst1; end if;
theorem_id := inverse_theorem_sno_map(theorem_section_number := hst1 - 1);
theory_of_this_proof := theory_of(theorem_id); -- get the theory to which this proof belongs
--printy(["theory_of_this_proof: ",theory_of_this_proof," ",theorem_id]);
--printy(["check_a_proof:: ",proofno]);
hint_stat_tup := hint_stat_tup(2..); -- bypass the prefixed theorem identifier
-- e.g. ["Use_Def(C_0)", "m /= [R_0,R_0]"], ["ELEM", "not(car(m) = R_0 & cdr(m) = R_0)"]
if hint_stat_tup = OM then return; end if; -- don't try nonexistent cases
--printy(["hint_stat_tup: ",hint_stat_tup]); stop;
set_output_phase(2); -- begin second output phase
printy(["\n+++++++++++ starting verifications for: ",theorem_id,"#",current_proofno := proofno," -- ",verif_start_time := opcode_count()/oc_per_ms]);
-- note that group of verifications is starting
number_of_statement_theorem := proofno; -- record proofno for later diagnostic
--printy(["suppose_not_map: ",proofno," ",suppose_not_map(proofno)]);
negated_theorem := check_suppose_not(suppose_not_map(proofno),proofno,theorem_id);
-- check the suppose_not_lines entry, for this proof; OM is returned if the suppose_not is not AUTO
--printy(["<P>check_suppose_not return: ",negated_theorem?"undef"]);
elem_inf_locs := [j: [hint,stat] = hint_stat_tup(j) | #hint >= 4 and hint(#hint - 3..) = "ELEM"]; -- find the ELEM statements in the current proof
suppose_locs := [j: [hint,stat] = hint_stat_tup(j) | #hint >= 7 and hint(1..7) = "Suppose"]; -- find the Suppose statements in the current proof
discharge_locs := [j: [hint,stat] = hint_stat_tup(j) | #hint >= 9 and hint(1..9) = "Discharge"]; -- find the Discharge statements in the current proof
nelocs_total +:= (nelocs := #elem_inf_locs); -- get the counts of these significant locations
ndisch_total +:= (ndlocs := #discharge_locs);
nslocs := #suppose_locs;
ok_counts := ok_discharges := 0; -- to get total OKs in ELEM and discharge inferences
-- we will count the number of successful verifications
count := 0; -- count of number of theorem citations
--printy(["lists of theorem sections: ",elem_inf_locs,suppose_locs,discharge_locs]); stop;
-- rebuild the 'labeled_pieces' map of statement-piece labels to statement pieces (already examined during parse)
labeled_pieces := {}; -- 'labeled_pieces' maps the label of each labeled conjunct of a theorem into the string form of that conjunct
for [hint,stat] = hint_stat_tup(kk) loop -- iterate over the lines of the proof
[clean_stat,lab_locs,labs] := drop_labels(stat); -- take note of the labeled portions of the statement if any
for lab = labs(j) loop -- iterate over all the conjunct labels in the statement
rspan(lab," \t:"); -- clean the label by dropping terminal ':' and whitespace
labeled_pieces(lab) := csj := clean_stat(lab_locs(j)..); -- map the label into the immediately following clause
end loop;
end loop;
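-- Example (illustrative): a proof line "Stat2: x in s & x notin t" contributes
-- labeled_pieces("Stat2") = "x in s & x notin t", so that a later hint such as
-- "(Stat2)ELEM" or a citation "-->Stat2" can retrieve the clause by its label.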
--printy(["<P>labeled_pieces: ",labeled_pieces," ",lab_locs]); stop;
relev_hint_stat_tup := []; -- for checking existentially instantiated variables for duplicates
statement_stack := []; context_start_stack := []; -- statement_stack will be a stack of the statements not yet discharged
step_start_time := opcode_count()/ oc_per_ms; -- note start time for verification of first step of this proof
--printy(["starting iteration over statements of proof"]);
for [hint,stat,op_mark] = hint_stat_tup(j) loop -- iterate over the hint, stat, optimization_mark triples of the current proof,
-- determining their kind
--print("<P>[hint,stat,op_mark] ",j," ",[hint,stat,op_mark]); if j > 7 then print("<P>check_a_proof lab_locs loop: ",proofno); end if;
is_auto := false; -- assume that the statement being processed is not an 'auto' case
optimization_mark := (op_mark = 1); -- convert 1/0 to boolean
if optimization_mark then optimize_whole_theorem := false; end if;
-- an optimization mark on a statement subsequent to the initial 'Suppose_not' ends optimization-by-default
--print("<BR>optimization_mark: ",optimization_mark," ",op_mark?"UNDEF"," ",optimize_whole_theorem," ",stat);
--print(["<BR>[hint,stat]: ",[hint,stat]]);
statement_being_tested := hint + " ==> " + stat; -- keep track of the statement being tested
number_of_statement := j; -- to report verification errors
number_of_statement_theorem := theorem_id; -- to report verification errors
name_of_statement_theorem := "??????"; -- to report verification errors
step_time_list with:= ((ntime := opcode_count()/ oc_per_ms) - step_start_time);
-- note the time required for verifying the immediately preceding step
step_kind_list with:= step_kind; -- note the kind of the step
step_start_time := ntime; -- update the starting time
match(hint,"Proof+:"); match(hint,"Proof:"); span(hint," \t"); -- remove whitespace and possible "Proof:" prefix from hint
squash_details := try_harder := false; -- determine whether special function info, as for 'car', is to be retained in inference
hbup := breakup(hint,">"); --if #hbup = 1 then continue; end if; -- since there is no '>'
-- first check for inferences with parameters, which begin with --> or with (...) -->
if exists piece = hbup(k) | #piece >= 2 and piece(#piece - 1..) = "--" and #(hbup(k + 1)?"") > 1 then
-- we have an inference with parameters: note that 'piece' has become
-- everything preceding the '>' in '-->'
hbk := hbup(k + 1); span(hbk,"\t "); -- hbk is the rest of the hint, remove initial whitespace
if hbk = "" then hbk := " "; end if;
-- check for '--> T'
if hbk(1) = "T" then -- we have a theorem citation
step_kind := "T"; -- note that step is theorem citation
statement_stack with:= stat; -- always keep the result of this statement
if disable_tsubst then continue; end if;
count +:= 1; -- count the number of citations
--print("<P>optimization_mark before1: ",optimization_mark?"UNDEFINED"); if optimization_mark notin {true,false} then stop; end if;
--print("<P>labeled_pieces: ",labeled_pieces);
check_a_tsubst_inf(count,theorem_id,statement_stack,hbk,piece,j); -- check a single tsubst inference
-- ***** optimization code is contained in check_a_tsubst_inf routine
if is_auto then -- in the 'AUTO' case, just replace the stack top with the generated statement
statement_stack(#statement_stack) := auto_gen_stat;
end if;
-- check for '--> S'
elseif hbk(1) = "S" then -- we have a statement citation
step_kind := "S"; -- note that step is statement citation
statement_stack with:= stat; -- always keep the result of this statement
if disable_citation then continue; end if;
span(hbk," \t"); rspan(hbk," \t"); -- remove leading and trailing whitespace
[hbk,restr] := breakup(hbk,"()"); -- break out restriction clause if any
if (statement_cited := labeled_pieces(hbk)) = OM then -- make sure that the reference is valid
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nCitation of missing labeled statement"]);
error_count +:= 1; continue;
end if;
relev_hint_stat_tup with:= ["Stat_subst",hint,statement_cited]; -- might define a variable
check_a_citation_inf(count,statement_cited,statement_stack,hbk,restr,piece,j);
-- check a single statement citation inference
--print("<P>auto_gen_stat: ",auto_gen_stat?"undef"); -- ***** optimization code is contained in check_a_citation_inf routine
if is_auto then -- in the 'AUTO' case, just replace the stack top with the generated statement
statement_stack(#statement_stack) := auto_gen_stat;
end if;
end if;
elseif #hint >= 4 and hint(#hint - 3..) = "ELEM" then -- we have an ELEM deduction
step_kind := "E"; -- note that step is 'ELEM'
-- otherwise our standard MLSS-based procedure must be used
statement_stack with:= stat; -- stack the result of the deduction (of course, not to be used in its own deduction)
if disable_elem then continue; end if;
-- note that the stacked statements still carry their labels
parsed_stat := parse_expr(drop_labels(stat)(1) + ";")(2);
--printy(["testing: ",stat," "]);
if (ccv := compute_check(parsed_stat)) = true then -- try proof by computation as a preliminary, using just the current statement as input
--printy(["theorem established using proof by contradiction: ",stat]);
ok_counts +:= 1; ok_total +:= 1; -- count the number of successful verifications
tested_ok := true; -- note that this was correctly handled
continue; -- done with this statement
end if;
-- but if fast proof by computation doesn't work, use the standard blobbing test procedure.
conj := form_elem_conj(hint,statement_stack); -- build conjunction to use in ELEM-type deductions
if show_details then printy(["\nconj: ",conj]); end if;
test_conj(conj); -- test this conjunct for satisfiability
if not tested_ok then error_count +:= 1; continue; end if;
-- ******** try automated optimization of context ********
if optimization_mark or optimize_whole_theorem then -- optimization is desired
--print("<BR>ELEM statement_stack: "); for stat in statement_stack loop print("<BR>",stat); end loop;
save_optimizing_now := optimizing_now; optimizing_now := true; -- set optimization flag to suppress diagnosis;
print("<BR>The lines of context required for proof of line ",j," ",search_for_all(statement_stack)," line is: ",hint," ==> ",stat);
--print(" Optimized pure inference time for this inference step is: ",best_time_so_far);
optimizing_now := save_optimizing_now; -- restore optimization flag
end if;
elseif #hint >= 7 and hint(1..7) = "Suppose" then -- a context is opening
step_kind := if j = 1 then "N" else "U" end if; -- note that step kind is "Suppose"
if j = 1 then optimize_whole_theorem := optimization_mark; end if; -- optimization mark on 'Suppose_not' marks the whole theorem
relev_hint_stat_tup with:= [if #hint >= 11 and hint(1..11) = "Suppose_not" then "Suppose_not" else "Suppose" end if,hint,stat];
-- might define a variable sneakily
context_start_stack with:= #statement_stack; -- note the last statement before the new context
statement_stack with:= if j = 1 then negated_theorem?stat else stat end if;
-- stack the first statement of the new context, or the expanded AUTO if this has occurred in a Suppose_not
if negated_theorem /= OM and ":" in stat then -- we have an AUTO case. If the Suppose_not conclusion is labeled,
-- the negated theorem must replace the AUTO in the 'labeled_pieces' map.
rspan(stat," \t"); rmatch(stat,"AUTO"); rspan(stat," \t:"); span(stat," \t");
labeled_pieces(stat) := negated_theorem; -- the negated theorem replaces AUTO in the 'labeled_pieces' map
--print("<P>AUTO case: ",stat," ",negated_theorem);
end if;
if disable_discharge then continue; end if;
elseif #hint >= 9 and hint(#hint - 8..) = "Discharge" then -- a context is closing
step_kind := "D"; -- note that step kind is "Discharge"
if context_start_stack = [] then
print("<BR>Discharge without start *** ",elem_inf_locs," ",suppose_locs," ",discharge_locs," ",hint_stat_tup);
print("<BR>stopped due to: syntax error"); was_syntax_error := true; stop;
end if;
last_prior_start_m1 from context_start_stack; -- get the last statement position before the context being closed
if show_details then printy(["\nstatement_stack at discharge: ",statement_stack]); end if;
--print("<P>about to check discharge: ",hint," ",stat);
if (not disable_discharge) and check_discharge(statement_stack,last_prior_start_m1,stat,j,hint) then -- check the discharge operation
-- ***** optimization code (two units) is contained in check_a_tsubst_inf routine
ok_discharges +:= 1; -- otherwise report on failed discharge
end if; -- but always treat the discharge as if it succeeded, to try to diagnose later situations
-- the statements dropped by the discharge must also be dropped from the 'labeled_pieces' map,
-- so that they cannot be cited subsequently
stack_part_dropped := statement_stack(last_prior_start_m1 + 1 ..); -- note the range of statements dropped by the discharge
for stat_dropped in stack_part_dropped loop
[-,-,labs] := drop_labels(stat_dropped); -- find the labels in the dropped statement
for each_lab in labs loop labeled_pieces(each_lab) := OM; end loop; -- drop the labeled pieces that are discharged
end loop;
--print("<P>stack_part_dropped: ",stack_part_dropped);
statement_stack := statement_stack(1..last_prior_start_m1); -- remove all later statements
if is_auto then -- this was an AUTO case; use the AUTO-generated statement
statement_stack with:= auto_gen_stat;
--print("AUTO case: ",statement_stack);
else -- not an AUTO case; use the statement as written
statement_stack with:= stat; -- stack the result of the discharge
end if;
elseif #hint > 0 and hint(1) = "T" then -- theorem quotation with no citation
-- Note: if this defines a variable sneakily it must be in error
step_kind := "Q"; -- note that step kind is "quotation"
statement_stack with:= stat; -- always keep the result of this statement
if disable_tsubst then continue; end if;
count +:= 1; -- count the number of citations
--print("<P>optimization_mark before2: ",optimization_mark?"UNDEFINED"); if optimization_mark notin {true,false} then stop; end if;
--print("<P>labeled_pieces:: ",labeled_pieces);
check_a_tsubst_inf(count,theorem_id,statement_stack,hint,"",j); -- check a single tsubst inference
-- ***** optimization code is contained in check_a_tsubst_inf routine
elseif (#hint >= 9 and hint(1..9) = "Set_monot") or (#hint >= 10 and hint(1..10) = "Pred_monot") then
step_kind := "M"; -- note that step kind is "Set_monot"
statement_stack with:= stat; -- always keep the result of this statement
if disable_monot then continue; end if;
check_a_monot_inf(count,statement_stack,hint(10..),theorem_id); -- check a single Set_monot/Pred_monot inference
if tested_ok and (optimization_mark or optimize_whole_theorem) then -- ******** optimization is desired ********
save_optimizing_now := optimizing_now; optimizing_now := true; -- set optimization flag to suppress diagnosis;
print("<BR>Lines of context required for proof of Set/pred_monot statement ",j," namely ",
statement_stack(#statement_stack)," are ",search_for_all(statement_stack));
--print(" Optimized pure inference time for this inference step is: ",best_time_so_far);
optimizing_now := save_optimizing_now; -- restore optimization flag
end if;
elseif #hint >= 7 and hint(1..7) = "ALGEBRA" then -- algebraic inference
step_kind := "A"; -- note that step kind is "ALGEBRA"
statement_stack with:= stat; -- always keep the result of this statement
if disable_algebra then continue; end if;
rspan(hint,"\t "); -- remove possible whitespace
restr := if (hint_tail := hint(8..)) = "" then "" else hint_tail + "ELEM" end if;
conj := form_elem_conj(restr,statement_stack); -- build conjunction to use in ELEM-type deductions
-- the hint passed should either be an empty string or have the form appropriate to "ELEM", namely (Stat..,Stat..,..)ELEM
check_an_algebra_inf(conj,[stat]); -- handle this single algebraic inference; make tuple to signal that on server
if tested_ok and (optimization_mark or optimize_whole_theorem) then -- ******** optimization is desired ********
save_optimizing_now := optimizing_now; optimizing_now := true; -- set optimization flag to suppress diagnosis;
print("<BR>The lines of context required for proof of algebra statement ",j," namely ",
statement_stack(#statement_stack)," are ",search_for_all(statement_stack));
--print(" Optimized pure inference time for this inference step is: ",best_time_so_far);
optimizing_now := save_optimizing_now; -- restore optimization flag
end if;
elseif #hint >= 5 and hint(1..5) = "APPLY" then -- theory application inference
step_kind := "L"; -- note that step kind is "APPLY"
statement_stack with:= stat; -- always keep the result of this statement
if disable_apply then continue; end if;
[theory_name,apply_params,apply_outputs] := decompose_apply_hint(hint)?[];
-- apply_params is a list of pairs [assumed_fcn(vars),replacement_expn]
-- apply_outputs is the colon-and-comma punctuated string defining the functions to be generated
relev_hint_stat_tup with:= ["APPLY",apply_outputs,stat]; -- might define one or more variables
if theory_name = OM then
printy(["********** Error - llformed APPLY statement. Hint is: ",hint]); continue;
end if; -- done with this APPLY case
--printy(["APPLY: ",stat," ",[theory_name,apply_params,apply_outputs]]);
if theory_name = "Skolem" then
if check_a_skolem_inf_inproof(statement_stack,theory_name,apply_params,apply_outputs) = OM then
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,"\nSkolem inference failed"]);
error_count +:= 1;
end if;
continue; -- done with this APPLY case
end if;
--print("check_an_apply_inf_inproof: [theory_name,apply_params,apply_outputs]: ",[theory_name,apply_params,apply_outputs]);
if check_an_apply_inf_inproof(theorem_id,statement_stack,theory_name,apply_params,apply_outputs) = OM then
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nApply inference using ",theory_name," failed."] + extra_message?[" Sorry."]);
error_count +:= 1;
end if;
-- handle this single apply inference
-- ******** optimization code needs to be supplied for this case ********
-- check for '--> Assump'
elseif #hint >= 6 and hint(1..6) = "Assump" then -- inference by theory assumption
step_kind := "P"; -- note that step kind is "Assump"
statement_stack with:= stat; -- always keep the result of this statement
if disable_assump then continue; end if;
rspan(hint,"\t "); -- remove possible whitespace
restr := if (hint_tail := hint(7..)) = "" then "" else hint_tail + "ELEM" end if;
conj := form_elem_conj(restr,statement_stack); -- build conjunction to use in ELEM-type deductions
-- the hint passed should either be an empty string or have the form appropriate to "ELEM", namely (Stat..,Stat..,..)ELEM
check_an_assump_inf(conj,stat,theorem_id); -- handle this single assumption inference
-- ******** no optimization code needed for this case, since inference is immediate ********
elseif #hint >= 6 and hint(1..6) = "SIMPLF" then -- inference by set-theoretic simplification
step_kind := "F"; -- note that step kind is "SIMPLF"
statement_stack with:= stat; -- always keep the result of this statement
if disable_simplf then continue; end if;
ohint := hint; -- keep the original hint
rspan(hint,"\t "); -- remove possible whitespace
restr := if (hint_tail := hint(7..)) = "" then "" else hint_tail + "ELEM" end if;
-- the hint passed should either be an empty string or have the form appropriate to "ELEM", namely (Stat..,Stat..,..)ELEM
check_a_simplf_inf(statement_stack,stat,j,ohint,restr); -- handle this single simplification inference
-- ***** optimization code is contained in check_a_simplf_inf routine
elseif #hint >= 7 and hint(1..7) = "Loc_def" then
step_kind := "O"; -- note that step kind is "Loc_def"
relev_hint_stat_tup with:= ["Loc_def",hint,stat]; -- always defines a variable
statement_stack with:= stat; -- always keep the result of this statement
if disable_loc_def then continue; end if;
check_a_loc_def(statement_stack,theorem_id); -- check a single Loc_def inference
-- ******** no optimization code needed for this case, since inference is immediate ********
elseif #hint >= 5 and hint(1..5) = "EQUAL" then -- check an 'EQUAL' inference
step_kind := "="; -- note that step kind is "EQUAL"
statement_stack with:= stat; -- always keep the result of this statement
if disable_equal then continue; end if;
rspan(hint,"\t "); -- remove possible whitespace
restr := if (hint_tail := hint(6..)) = "" then "" else hint_tail + "ELEM" end if;
conj := form_elem_conj(restr,statement_stack); -- build conjunction to use in ELEM-type deductions
-- the hint passed should either be an empty string or have the form appropriate to "ELEM", namely (Stat..,Stat..,..)ELEM
check_an_equals_inf(conj,stat,statement_stack,hint,j); -- handle this single equality inference
-- ***** optimization code is contained in check_an_equals_inf routine
elseif #hint >= 9 and hint(1..8) = "Use_def(" then -- check a 'Use_def' inference
-- recursive definitions are treated in a special, limited way. if the definition
-- involved in a Use_def inference is seen to be recursive, then the conclusion
-- of the inference should be a conjunction of terms of the form f(arg..) = right_side (or P(arg..) •eq right_side),
-- where f (resp. P) is the symbol defined. In this case the args are collected and substituted into the right-hand
-- side of the definition, and this must give the right-hand side of the corresponding term of the conclusion.
step_kind := "Z"; -- note that step kind is "Use_def"
orig_stat := stat; -- keep copy of statement
rspan(stat," \t"); auto_tag := rmatch(stat,"AUTO"); -- determine if this is an AUTO case
if auto_tag /= "" then -- this is an AUTO case
-- in AUTO cases, the hint can have one of 4 forms:
-- Use_def(symbol)
-- Use_def(symbol-->Statnnn)
-- Use_def(symbol(p_1,...,p_k))
-- Use_def(symbol(p_1,...,p_k)-->Statnnn)
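-- e.g. (hypothetical instances): Use_def(Un), Use_def(Un(s,t)-->Stat3)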
rspan(stat," \t:"); span(stat," \t"); -- 'stat' is now the label attached to the AUTO, or the nullstring if none
[symb_referenced,arglist,stat_label] := decompose_ud_auto_hint(hint); -- the arglist list may be []
-- the 4 cases listed above correspond to the four combinations of arglist = [] or not, and stat_label = OM or not.
parsed_args := []; -- will collect
if exists arg = arglist(k) | (parsed_args(k) := parse_expr(arg + ";")) = OM then -- arg list is bad, so issue diagnostic and quit
printy(["\n****** Error verifying step: ",j,", namely ",hint," ==> ",orig_stat,"\nsyntactically illformed parameter ",
k,"\nin Use_def parameter list"]);
continue; -- done with this case
end if;
--print("<P>auto_tag: ",auto_tag," ",hint," ",[symb_referenced,arglist,stat_label]);
if (def_of_symbol := get_def_of_symbol(symb_referenced,theorem_id)) = OM then
printy(["\n****** Error verifying step: ",j,", namely ",hint," ==> ",orig_stat,
"\ncannot find definition of symbol ",symb_referenced,"\nbypassing statement "]); stop;
continue; -- done with this case
end if;
-- diagnostic will have been issued within 'get_def_of_symbol'
[def_vars,def_body] := def_of_symbol; -- get the list of definition arguments and the parsed definition right side
if #parsed_args /= #def_vars then
printy(["\n****** Error verifying step: ",j,", namely ",hint," ==> ",orig_stat,
"\nmismatch between number of parameters supplied in Use_def and number of parameters in definition argument list,\nwhich is ",
def_vars]);
continue; -- done with this case
end if;
def_op := if apparently_pred(def_body) then "DOT_EQ" else "ast_eq" end if; -- is this a predicate or a function definition?
defleft := left_of_def_found?["ast_of",symb_referenced,["ast_list"] + def_vars]; -- use special lefts for DEF cases
-- this is passed as a global from 'get_symbol_def' procedure
subst_map := {[vj,parsed_args(j)]: vj = def_vars(j)}; -- map of definition variables to their replacements
reconstructed_def := [def_op,defleft,def_body]; -- the definition as an equality or equivalence
substituted_def := unparse(substitute(reconstructed_def,subst_map));
statement_stack with:= (stat + " " + substituted_def); -- the substituted definition is the conclusion of the AUTO Use_def
--print("<P>substituted_def AUTO case: ",substituted_def," ",stat," ",labeled_pieces);
if stat /= "" then -- if the AUTO is labeled, we must update 'labeled_pieces' to show the substituted_def
labeled_pieces(stat) := substituted_def;
end if;
continue; -- done with this case (AUTO case)
end if; -- end if auto_tag /= "" (AUTO case)
statement_stack with:= stat; -- always keep the result of this statement
if disable_use_def then continue; end if;
check_a_use_def(statement_stack,stat,theorem_id,hint,j); -- check a single Use_def inference
-- ***** optimization code is contained in check_a_use_def routine
-- Note: uses of recursive definitions are not optimized
else -- 'unexpected' inference operation; just stack the claimed result
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nUnknown inference mode encountered"]);
error_count +:= 1;
statement_stack with:= stat;
end if;
end loop;
--printy(["<BR>check_a_proof done iteration over the lines of the proof: ",proofno]);
-- record the time for the final step
step_time_list with:= ((ntime := opcode_count()/ oc_per_ms) - step_start_time); -- collect time for last step
step_kind_list with:= step_kind; -- collect step_kind for last step
-- check that the proof doesn't redefine variables already defined in its enveloping theory
lovd := list_of_vars_defined(theory_of_this_proof,relev_hint_stat_tup); -- get the list of variables defined locally in this proof
--printy(["lovd: ",lovd]);
noccs := {};
for x in lovd loop noccs(x) := (noccs(x)?0) + 1; end loop;
if (multip_def := {x: x in domain(noccs) | noccs(x) > 1}) /= {} then
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nexistentially instantiated variables reused in proof: ",multip_def]);
printy(["Sequence of variables existentially defined is: ",lovd]);
error_count +:= 1;
end if;
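-- Example (illustrative): if lovd = ["x","y","x"] then noccs = {["x",2],["y",1]},
-- so multip_def = {"x"} and the reuse of the existentially instantiated
-- variable x is reported as an error.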
-- ***** display a success or failure report for the proof *****
if error_count > 0 then
printy([" NOKtime: ",opcode_count()/oc_per_ms - verif_start_time,step_time_list(2..),step_kind_list]);
-- drop the dummy initial time; signal error
printy(["****** ",error_count," inference errors in this proof ****\n"]);
total_err_count +:= error_count;
else
printy([" OKtime: ",opcode_count()/oc_per_ms - verif_start_time,step_time_list(2..),step_kind_list]);
-- drop the dummy initial time
total_fully_verified_proofs +:= 1;
end if;
ok_disch_total +:= ok_discharges; -- total the number of discharges correctly handled
--printy(["<BR>check_a_proof return: ",proofno]); stop;
end check_a_proof;
-- ************* conjunction building for inference checking ***************
-- The following procedure prepares for invocation of our back-end satisfiability routines by transforming the stacked
-- (and possibly restricted) collection of prior statements forming the context of a desired conclusion into a conjunction
-- suitable for submission to those routines.
procedure form_elem_conj(hint,statement_stack); -- build conjunction to use in ELEM-type deductions
-- the hint passed should either be an empty string or have the form appropriate to "ELEM", namely (Stat..,Stat..,..)ELEM
return form_ed_conj(true,"ELEM",hint,statement_stack);
end form_elem_conj;
procedure form_discharge_conj(hint,statement_stack); -- build conjunction to use in Discharge-type deductions
nss := #statement_stack;
--print("<BR>form_discharge_conj: "); for s in statement_stack loop print("<BR>",unicode_unpahrse(s)); end loop; print("<BR>end stack listing: <P>");
if (fl := front_label(ssnss := statement_stack(nss))) /= "" then
match(ssnss,fl); span(ssnss,": \t"); statement_stack(nss) := fl + ": (not(" + ssnss + "))"; -- invert the last statement on the stack
else
statement_stack(nss) := "(not(" + ssnss + "))"; -- invert the last statement on the stack
end if;
return form_ed_conj(true,"Discharge",hint,statement_stack);
end form_discharge_conj;
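-- Example (illustrative): if the stack top is "Stat4: x in s", the code above
-- replaces it by "Stat4: (not(x in s))" before forming the conjunction, since a
-- Discharge must show that the supposition is untenable in the enclosing context.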
procedure form_discharge_conj_nodiagnose(hint,statement_stack); -- build conjunction to use in Discharge-type deductions
-- version suppressing 'label not seen' diagnostic messages; no inversion of the final element in this case
nss := #statement_stack;
res := form_ed_conj(false,"Discharge",hint,statement_stack);
rspan(res," \t"); rmatch(res,";"); -- remove terminating semicolon
return res;
end form_discharge_conj_nodiagnose;
procedure form_ed_conj(diagnose,ed,hint,statement_stack); -- build conjunction to use in ELEM-type deductions
-- the hint passed should either be an empty string or have the form appropriate to "ELEM", namely (Stat..,Stat..,..)ELEM
ned := #ed; -- length of the keyword ('ELEM' or 'Discharge')
if #hint > ned and hint(close_posn := #hint - ned) = ")" then -- the context of the deduction is constrained
--->analysis of hints
constraint_label := hint(2..close_posn - 1); -- get the constraint_label
if (ncl := #constraint_label) > 0 and (lcl := constraint_label(ncl)) in "+*" then
if lcl = "*" then
squash_details := true; -- set the squash_details flag, forcing blobbing of functions with some known properties
elseif lcl = "+" then
try_harder := true; -- set the try_harder flag, allowing additional Davis-Putnam cases to be examined
end if;
constraint_label := constraint_label(1..ncl - 1); -- and drop the indicator character just used
end if;
-- now examine the remaining constraint labels to see which of the previous proof statements are to be used
if constraint_label = "" then -- null constraint label; use just the statement
conj := conjoin_last_neg(statement_stack(#statement_stack..));
else -- non-null constraint label, restricting the set of statements to be used
constraint_labels := [x: x in breakup(constraint_label," \t,;*+") | x /= ""]; -- restriction clauses can be separated by commas or semicolons
conjoined_statements := []; -- will collect
--print("<P>form_ed_conj constraint_labels: ",constraint_labels," ",join(statement_stack,"<BR>"));
for constraint_label in constraint_labels loop
-- if the label is not found on the stack, somewhere below the top of the stack (which ends with the negative of the desired conclusion)
-- if not (exists kk in [nss := #statement_stack - 1,nss - 1..1] |
if not (exists kk in [nss := #statement_stack - if ed = "ELEM" then 1 else 0 end if,nss - 1..1] |
front_label(statement_stack(kk)) = constraint_label) then
if diagnose then -- diagnose missing constraint labels if 'diagnose' flag is set
printy(["Constraint label ",constraint_label," appears in " + ed + " deduction ",statement_stack(nss)," but not seen as earlier prefix"]);
end if;
continue; -- bypass this ELEM deduction
end if;
if #constraint_labels = 1 then -- if there is just one constraint label,
-- use everything back to the last previous statement carrying this label
conjoined_statements := statement_stack(kk..nss);
-- take the stack items back to the constraint label
else
conjoined_statements with:= statement_stack(kk); -- collect just the stack items with the constraint labels
end if;
end loop;
conj := conjoin_last_neg(conjoined_statements with:= statement_stack(#statement_stack));
-- add the negation of the desired conclusion
end if;
else -- the context of the ELEM is not constrained; use it all
conj := conjoin_last_neg(conjoined_statements := statement_stack);
end if;
return conj;
end form_ed_conj;
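-- Example (illustrative, assuming conjoin_last_neg negates its final element):
-- with statement_stack = ["a in b","c = a"] and no restriction clause, the
-- conjunction submitted for satisfiability testing has the shape
-- "(a in b) and (not (c = a));", i.e. the prior statements conjoined with the
-- negation of the desired conclusion (cf. build_conj below).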
-- ************* context-stack to conjuction conversion for inferencing in general ***************
procedure build_conj(citation_restriction,statement_stack,final_stat);
-- build conjunction, either of entire stack or of statements indicated by conjunction restriction
-- we collect the undischarged statements (either back to the start
-- of the current theorem, or as constrained by the constraint labels given),
-- append the substituted theorem or statement citation and the negative of the current conclusion to it, and
-- perform a standardized ELEM check
--printy(["\nbefore substitution: ",theorem_cited," ",replacement_map]);
--printy(["\nafter substitution: ",unparse(stt),"\n",stt]); -- make substitution
-- now examine the remaining constraint label to see which of the previous proof statements are to be used
if citation_restriction /= "" then -- non-null constraint label; use just the statements indicated
constraint_labels := [x: x in breakup(citation_restriction," \t,;") | x /= ""]; -- restriction clauses can be separated by commas or semicolons
conjoined_statements := []; -- will collect
--print("<P>citation_restriction: ",citation_restriction," ",constraint_labels);
for constraint_label in constraint_labels loop
if not (exists kk in [nss := #statement_stack,nss - 1..1] | front_label(statement_stack(kk)) = constraint_label) then
printy(["Constraint label ",constraint_label," appears in theorem citation deduction ",stat," but not seen as earlier prefix"]);
continue; -- ignore this constraint label
end if;
if #constraint_labels = 1 then -- if there is just one constraint label,
-- use everything back to the last previous statement carrying this label
conjoined_statements := statement_stack(kk..nss - 1);
-- take the stack items back to the constraint label
else
conjoined_statements with:= statement_stack(kk); -- collect just the stack items with the constraint labels
end if;
--printy(["conjoined_statements: ",conjoined_statements]);
end loop;
else -- there is no restriction; take the whole earlier part of the stack
conjoined_statements := statement_stack(1..(nss := #statement_stack) - 1);
end if;
conjoined_statements with:= (" not (" + statement_stack(nss) + ")"); -- add the negation of the conclusion to be drawn
conjoined_statements with:= final_stat; -- add the string form of the substituted theorem
conjoined_statements := [drop_labels(stat)(1): stat in conjoined_statements]; -- drop all the embedded labels
return conj := "(" + join(conjoined_statements,") and (") + ");"; -- build into conjunction
end build_conj;
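-- Example (illustrative): build_conj("",["Stat1: a in b","c = a"],"a = a") yields,
-- up to whitespace, "(a in b) and (not (c = a)) and (a = a);": the labels are
-- dropped, the stack top (the desired conclusion) is negated, and the substituted
-- theorem is appended as the final conjunct.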
procedure strip_white(stg); span(stg," \t"); rspan(stg," \t"); return stg; end strip_white;
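-- Example: strip_white("  a b \t") returns "a b"; only leading and trailing blanks and tabs are removed.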
-- ************* definition checking ***************
procedure check_definitions(start_point,end_point); -- check the definition citation inferences in the indicated range
-- note that the verifier system accepts three forms of definition:
-- (i) ordinary algebraic definitions, whose right side is a setformer or other valid expression not involving the
-- symbol being defined
-- (ii) (transfinite) recursive definitions, which have the same form as ordinary algebraic definitions,
-- but with the defined functions symbol appearing on the right, in a manner subject to detailed syntactic
-- restrictions described below
-- (iii) implicit definition, by appearance as a defined symbol in an APPLY statement (this includes 'definition by Skolemization')
-- Moreover, each definition can either be global (to 'Set_theory'), global to some subtheory of 'Set_theory',
-- or local to some proof.
printing := (ostart := start_point) < -999; -- flag to control printing of diagnostic messages
start_point max := 1;
init_proofs_and_theories();
-- ensure initialization of digested_proofs, parent_of_theory,theors_defs_of_theory,assumps_and_consts_of_theory
--printy(["\nStarting check_definitions and theories"]);
if definitions_handle = OM then -- read the file of definitions if they have not already been read
init_logic_syntax_analysis(); -- obligatory initialization
definitions_handle ?:= open(user_prefix + "definition_tup_file","TEXT-IN");
reada(definitions_handle,definitions_tup); close(definitions_handle);
printy(["number of symbol definitions is: ",#definitions_tup]);
-- convert the definitions_tup to a map from symbol names to the definition of the symbol in question,
-- which must be given the form of a theorem asserting an equality or equivalence.
-- we also build an auxiliary map from defined symbols to the section numbers of their definitions.
symbol_def := {}; -- map from symbol names to the definition of the symbol in question
--printy(["\nStarting loop over definitions tup, length is ",#definitions_tup]);
for [th_of_def,deftext] = definitions_tup(nn) loop
--if nn >= #definitions_tup then printy(["Have checked: ",nn]); end if;
span(deftext," \t");
mayapply := #(orig_deftext := deftext) >= 5 and deftext(1..5) = "APPLY"; -- check for definition by theory application
--if nn > 148 then print("mayapply: "); end if;
if mayapply then -- for a definition by theory application, capture the fact that the output arguments are defined by the APPLY
break(deftext,"("); match(deftext,"("); output_args_list := break(deftext,")"); -- isolate the output args list
output_args_list := breakup(breakup(output_args_list,","),":");
if exists p in output_args_list | (not is_tuple(p) or #p /= 2) then
printy(["***** Syntax error in APPLY output argument list: ",orig_deftext]); continue;
end if;
things_defined := {case_change(y,"lu"): [x,y] in output_args_list};
if #things_defined /= #output_args_list then
printy(["***** Duplicated outputs in APPLY output argument list: ",orig_deftext]); continue;
end if;
for thing in things_defined loop symbol_def(th_of_def + ":" + thing) := [[],"defined_by_theory_application"]; end loop;
continue; -- done with this case
end if; -- end if mayapply
--if nn > 148 then print("here1"); end if;
orig_defstart := defstart := break(deftext,"="); -- otherwise isolate the part of the definition preceding ":="
if #deftext /= 0 and deftext(1) in "¥•" then -- workaround ***********
-- defstart2 := match(deftext,"•"); defstart3 := break(deftext,"="); defstart +:= (defstart2 + defstart3); -- Mac version
defstart2 := match(deftext,"¥"); defstart3 := break(deftext,"="); defstart +:= (defstart2 + defstart3);
end if;
mustco := rmatch(defstart,":");
if mustco = "" then if printing then printy(["***** Definition error. Def start is ",orig_defstart," orig is ",deftext]); end if; continue; end if;
rspan(defstart," \t");
pieces := segregate(defstart,"Def(");
if exists piece = pieces(j) | #piece > 3 and piece(1..4) = "Def(" then
cleandef := "" +/ pieces(j..) + " :" + deftext;
else
symbdef := rbreak(defstart," \t"); cleandef := symbdef + " :" + deftext;
end if;
--if th_of_def = "wellfounded_recursive_fcn" then if printing then printy(["cleandef: ",cleandef]); end if; end if;
if (deftree := parse_expr(cleandef + ";")) = OM then
if printing then printy(["***** Syntax error in definition: ",cleandef]); end if; continue;
end if;
[defop,left_part,right_part] := deftree(2);
if defop /= "ast_assign" then
if printing then printy(["***** Error - definition does not have obligatory 'assignment' form: ",cleandef]); end if;
continue;
end if;
if is_string(left_part) then
symbdef := left_part; arglist := [];
elseif left_part(1) = "ast_pow" then
symbdef := "AST_POW"; arglist := left_part(2..);
elseif left_part(1) = "ast_domain" then
symbdef := "AST_DOMAIN"; arglist := left_part(2..);
elseif left_part(1) = "ast_range" then
symbdef := "AST_RANGE"; arglist := left_part(2..);
elseif left_part(1) /= "ast_of" then
if printing then printy(["***** Error - definition does not have obligatory 'function assignment' form: ",cleandef]); end if;
continue;
else
[-,symbdef,arglist] := left_part;
if symbdef = "DEF" then -- defining a binary infix operator
symbdef := case_change((al2 := arglist(2))(1),"lu");
arglist := if symbdef /= "TILDE_" then al2(2..) else [al2(2)] + al2(3)(2..) end if;
--print("<BR>al2: ",al2);
left_of_def(th_of_def + ":" + symbdef) := al2;
if symbdef = "AST_ENUM_TUP" then left_of_def(th_of_def + ":[]") := al2; end if;
if symbdef = "AST_NELT" then left_of_def(th_of_def + ":#") := al2; end if;
if symbdef = "TILDE_" then left_of_def(th_of_def + ":~") := al2; end if;
if symbdef = "AT_" then left_of_def(th_of_def + ":@") := al2; end if;
else
arglist := arglist(2..);
end if;
end if; -- end if is_string
-- now we (re-)check that no symbol has been defined twice in a given theory or any of its ancestors,
-- and that all the functions and free variables on the right-hand side of the definition are either
-- previously defined symbols or function arguments. function arguments cannot appear as function symbols.
-- function arguments cannot appear twice, and must be simple strings.
-- if the defined symbol itself appears in the right-hand side, it must appear as a function symbol
-- or infix operator, and it is then subject to the special checks that apply to recursive definitions
if symbol_def(th_of_def + ":" + symbdef) /= OM then
if printing then printy(["***** Error - symbol: ",symbdef," has previous definition in same theory"]);
printy(["Will ignore new definition: ",cleandef]); end if;
continue;
end if; -- check also that there is no definition in ancestral theory
ancestor_theories := []; cur_th := th_of_def;
while (cur_th := parent_of_theory(cur_th)) /= OM loop -- construct the chain of ancestor theories
ancestor_theories with:= cur_th;
end loop;
if exists anc_th in ancestor_theories | symbol_def(anc_th + ":" + symbdef) /= OM then
if printing then printy(["***** Error - symbol: ",symbdef," has previous definition in ancestor theory ",anc_th]);
printy(["Will ignore new definition: ",cleandef]); end if;
continue;
end if; -- check also that there is no definition in an ancestral theory
if exists x in arglist | not is_string(x) then
if printing then printy(["***** Error - argument symbols: ",{x in arglist | not is_string(x)}," are not simple variables ",left_part]);
printy(["Will ignore new definition: ",cleandef]); end if;
continue;
end if;
if exists x in arglist | get_symbol_def(x,th_of_def) /= OM then
if printing then printy(["***** Error - argument symbols: ",{x in arglist | symbol_def(x) /= OM}," have previous definition"]);
printy(["Will ignore new definition: ",cleandef]); end if;
continue;
end if;
if #arglist /= #(set_of_args := {x: x in arglist}) then
if printing then printy(["***** Error - repeated argument symbol in function definition: "]);
printy(["Will ignore definition: ",cleandef]); end if;
continue;
end if;
[free_variables,function_symbols] := find_free_vars_and_fcns(right_part);
--if printing then printy(["[free_variables,function_symbols]: ",free_variables," ",function_symbols]); end if;
--if printing then printy(["assumps_and_consts_of_theory: ",assumps_and_consts_of_theory(th_of_def)," ",th_of_def," symbdef: ",symbdef]); end if;
free_variables := {x in free_variables | get_symbol_def(x,th_of_def) = OM};
-- drop defined constants from the set of free variables
assumed_symbols_of_th_of_def := {breakup(case_change(x,"lu"),"(")(1): x in (acctd := assumps_and_consts_of_theory(th_of_def))(1)};
-- get the set of assumed symbols of theory in which definition appears
--printy(["acctd: ",acctd]);
defconsts_in_ancestors := {"O","U","V","R","R_0","R_1"}; -- ****************** Temporary - fix ********* *********
if (badfrees := free_variables - set_of_args - assumed_symbols_of_th_of_def - defconsts_in_ancestors) /= {} then
if true or printing then
printy(["***** Error - free variable on right of definition that is not definition argument: ",badfrees," right-hand part of definition is: ",unparse(right_part)]);
--printy(["assumed_symbols_of_th_of_def: ",assumed_symbols_of_th_of_def]);
-- printy(["Will (not really) ignore definition: ",cleandef]); total_err_count +:= 1;
null;
end if;
continue; -- ******** disable ******** until assumed variables of theories are handled properly
end if; -- end if badfrees
if (badfsymbs := {f in function_symbols | get_symbol_def(f,th_of_def) = OM
and f /= symbdef and #f > 4 and f(1..4) /= "ast_"} - assumed_symbols_of_th_of_def) /= {} then
-- if true or printing then printy(["***** Error - undefined function symbols ",badfsymbs," are used on right of definition of ",symbdef]);
-- printy(["Will accept definition, but only provisionally : ",cleandef]); end if; total_err_count +:= 1;
null;
end if; -- end if badfsymbs
if symbdef in function_symbols then -- definition is recursive
if not recursive_definition_OK(symbdef,arglist,right_part) then -- recursive definition does not have valid structure
if true or printing then
printy(["***** Error - recursive definition of ",symbdef,"(",join(arglist,","),")"," does not have valid structure. "]);
printy(["Will ignore this definition"]); end if;
total_err_count +:= 1;
continue;
end if;
--printy(["recursive definition::::::: ",cleandef]);
end if;
--if th_of_def = "wellfounded_recursive_fcn" then printy(["********* on record: ",th_of_def + ":" + symbdef," ",right_part]); end if;
symbol_def(th_of_def + ":" + symbdef) := [arglist,right_part];
-- place the new definition on record
end loop; -- end for [th_of_def,deftext] = definitions_tup(nn) loop
end if; -- end if definitions_handle = OM
--printy(["number of symbol definitions in specified range is: ",#symbol_def]);
for hint_stat_tup = digested_proofs(proofno) loop -- get the tuple of hints and statements for the proof
if proofno < start_point then continue; end if;
if proofno > end_point or hint_stat_tup = OM then exit; end if;
--printy(["hint_stat_tup: ",hint_stat_tup]);
-- separate out the section number of the theorem and use it to get the theorem id
theorem_section_number := abs(hint_stat_tup(1)) - 1; -- take absolute value to allow for the role of the sign as the use_proof_by_structure flag
hint_stat_tup := hint_stat_tup(2..);
-- now iterate over all the 'Use_def' statements within the proof.
-- For each one of them, first verify that the definition precedes its use,
-- and then verify that the use produces the conclusion asserted.
end loop;
if ostart < -9900 then return; end if; -- do not process APPLYs if this routine was called from an APPLY
def_in_theory := {breakup(x,":"): x in domain(symbol_def)}; -- maps each theory into the set of symbols defined in theory
-- but need to add in the symbols assumed by the theory, plus all the symbols defined by theorem-level APPLYs
-- in the theory
printy(["definition analysis complete, starting theory analysis: "]);
for [theorie,assump_and_consts] in assumps_and_consts_of_theory, symb in assump_and_consts(1) | "•" notin symb and "¥" notin symb and "(" notin symb loop
def_in_theory with:= [theorie,symb];
end loop;
def_in_theory := {[x,def_in_theory{x}]: x in domain(def_in_theory)}; -- same, as set-valued map
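-- illustrative note (not executed): the set former above converts a multimap, i.e. a set of
-- [theory,symbol] pairs, into a single-valued map from each theory to its set of symbols,
-- since f{x} denotes the image set of x under f. For example, under these assumed values,
--   def_in_theory := {["T1","f"],["T1","g"],["T2","h"]};
--   {[x,def_in_theory{x}]: x in domain(def_in_theory)}
-- would yield {["T1",{"f","g"}],["T2",{"h"}]}.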
--printy(["ostart: ",ostart]);
for [th_of_def,deftext] = definitions_tup(nn) loop -- now we iterate once more over the definitions, handling the APPLYs
span(deftext," \t");
mayapply := #(orig_deftext := deftext) >= 5 and deftext(1..5) = "APPLY"; -- check for definition by theory application
if not mayapply then continue; end if; -- handling the APPLYs only
[theory_name,apply_params,apply_outputs,resulting_thm] := decompose_apply_hint(deftext)?[];
--printy(["APPLY: ",[theory_name,apply_params,apply_outputs,resulting_thm]]);
if apply_outputs = OM then continue; end if;
if (split_apply_outputs := get_apply_output_params(apply_outputs,deftext)) = OM then continue; end if;
-- decompose and validate apply_outputs, returning them as a tuple of pairs
-- if apply_outputs are not valid, then bypass remainder of processing for this definition
def_in_theory(th_of_def) := (def_in_theory(th_of_def)?{}) + {symb: [-,symb] in split_apply_outputs};
-- add the symbols defined by theory application to the set of defined symbols of a theory
-- otherwise we will also have a collection of syntactically validated substitution expressions.
-- check that substitution expressions have been provided for all the assumed functions of the theory and only these
--printy(["theory_name: ",theory_name]);
number_of_statement := "theory " + theory_name + "-related"; -- dummies for subsequent failure diagnostic
number_of_statement_theorem := "APPLY statement";
statement_being_tested := resulting_thm;
if theory_name /= "Skolem" then -- top-level non-Skolem inference
if check_an_apply_inf(resulting_thm,theory_name,apply_params,apply_outputs) = OM then
printy(["<BR>resulting_thm,theory_name,apply_params,apply_outputs:<BR>",resulting_thm,theory_name,apply_params,apply_outputs]);
printy(["******* attempt to deduce theorem by top-level APPLY inference failed: ",resulting_thm]);
else
null; -- printy(["Deduction of ",resulting_thm," by APPLY inference successful...."]);
end if;
else -- top-level Skolem inference
if (theorem_to_verify := theorem_map(resulting_thm)) = OM then
printy(["******* Illformed thorem name in top-level Skolem inference: ",resulting_thm]);
end if;
if check_a_skolem_inf(resulting_thm,theorem_to_verify,apply_params,apply_outputs) = OM then
printy(["******* Skolem inference failed: ",resulting_thm]);
else
null; --printy(["Deduction of ",resulting_thm," by top-level Skolem inference successful...."]);
end if;
end if;
end loop;
printy(["Checking assumps_and_consts_of_theory, length is : ",#assumps_and_consts_of_theory]);
--printy(["def_in_theory: ",def_in_theory]); stop;
-- Now check the assumed functions and assumptions of all theories.
-- The assumed functions must be wellformed and have simple, non-repeating arguments.
-- The assumptions must be well-formed formulae without free variables, and must involve no functions or constants
-- other than those assumed in the theory or assumed or defined in its parent theories.
assumed_symbs := {}; -- will collect set of assumed symbols
debug_n := -1;
for [assumed_fcns,assumps] = assumps_and_consts_of_theory(th) | th /= "Set_theory" loop
--if (debug_n +:= 1) >= 52 then printy(["have checked ",debug_n," #assumed_fcns: ",#assumed_fcns]); end if;
--if debug_n >= 52 then printy(["before check_an_external_theory ",debug_n]); end if;
if th(1) = "_" then check_an_external_theory(th,assumed_fcns,assumps); continue; end if;
--if debug_n >= 52 then printy(["after check_an_external_theory ",debug_n]); stop; end if;
for af in assumed_fcns loop
if (afp := parse_expr(af + ";")) = OM then
printy(["******* illformed assumed function in THEORY ",th,": ",af]);
total_err_count +:= 1;
continue;
end if;
if debug_n >= 53 then printy(["at is_string ",debug_n]); stop; end if;
afp := afp(2); -- drop the 'list' prefix
if is_string(afp) then -- assumed constant
assumed_symbs(th) := (assumed_symbs(th)?{}) with afp; -- associate assumed constant with theory
elseif (lop := afp(1)) /= "ast_of" then -- check to see if the assumed function has infix form
if #lop < 5 or lop(1..4) /= "DOT_" then -- error, since must have simple function
printy(["******* illformed assumed function in THEORY ",th,": ",af," is not a simple function"]);
total_err_count +:= 1;
continue;
elseif #afp /= 3 then -- we must have an infix operator, but don't
printy(["******* illformed assumed function in THEORY ",th,": ",af," is not an infix function"]);
total_err_count +:= 1;
continue;
elseif not (is_string(a1 := afp(2)) and is_string(a2 := afp(3))) then -- the two arguments must be simple variables, but aren't
printy(["******* illformed assumed function in THEORY ",th,": ",af," does not have simple variables as arguments"]);
total_err_count +:= 1;
continue;
elseif a1 = a2 then -- the two arguments must be distinct, but aren't
printy(["******* illformed assumed function in THEORY ",th,": ",af," has two identical arguments"]);
total_err_count +:= 1;
continue;
end if;
assumed_symbs(th) := (assumed_symbs(th)?{}) with lop; -- associate assumed infix operator with theory
continue;
elseif exists x in (fcn_args := afp(3)(2..)) | (not is_string(x)) then -- error, since must have simple arguments
printy(["******* illformed argument of assumed function in THEORY ",th,": ",af," has a compound argument"]);
total_err_count +:= 1;
continue;
elseif #fcn_args /= #{x: x in fcn_args} then -- error, since must have non-repeated arguments
printy(["******* assumed function in THEORY ",th,": ",af," has a repeated arguments"]);
total_err_count +:= 1;
continue;
end if;
assumed_symbs(th) := (assumed_symbs(th)?{}) with afp(2); -- associate assumed function symbol with theory
-- at this point the list of assumed functions of the theory has passed all checks
-- now we check that each theory assumption is well-formed, and fully quantified,
-- and involves no symbol not either assumed in the theory, or defined or assumed
-- in one of its ancestor theories.
-- Note that every ancestor theory of a theory must have been declared before the theory.
--->working_definitions
ancestor_theories := []; -- find the ancestor theories of the theory being applied
theory_nm := th;
while (parent_theory := parent_of_theory(theory_nm)) /= OM loop
ancestor_theories := [parent_theory] + ancestor_theories; theory_nm := parent_theory;
end loop;
for assump in assumps loop
if (assp := parse_expr(assump + ";")) = OM then
printy(["******* illformed assumption in THEORY ",th,": ",assump]);
total_err_count +:= 1;
continue;
end if;
assp := assp(2); -- drop the 'list' prefix
freevs := find_free_vars(assp); -- find the free variables of the assumption
end loop;
--printy(["ancestor_theories: ",th," ",ancestor_theories]);
end loop;
end loop;
printy(["\n<BR>Done checking definitions and theories"]);
--printy(["assumed_symbs for theories: ",assumed_symbs]); -- report on assumed symbs for theories if desired
end check_definitions;
-- ************* Checking of recursive definitions ***************
procedure recursive_definition_OK(symbdef,arglist,right_part); -- check a recursive definition for syntactic validity
-- for a recursive definition to be syntactically valid, the function 'symbdef' being defined must have
-- at least one 'properly restricted' argument. That is, there must exist an argument position (e.g. the first)
-- such that every right-hand occurrence of symbdef(y,..) is within a scope in which y is bound by an iterator
-- of the form 'y in x', where x is the definition argument in the same (e.g. the first) position.
-- theories which support more general forms of recursive definition can relax this restriction.
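-- illustrative example (assumed definition syntax, for orientation only): the definition
--   f(s) := {f(y): y in s}
-- is properly restricted in its first (and only) argument position, since the sole right-hand
-- occurrence f(y) has y bound by the iterator 'y in s', where s is the argument in that position;
-- whereas f(s) := f(s) + 1 would be rejected, since s is not rebound by any membership iterator.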
var arg_positions_OK; -- global used during execution of interior workhorse:
-- remaining set of argument positions that are properly restricted.
--printy(["recursive_definition_OK: ",symbdef," ",arglist," ",right_part]);
if (arg_positions_OK := {1..#arglist}) = {} then return false; end if; -- initially assume that all positions are OK
recursive_definition_OK_in(right_part,{}); -- call the inner workhorse, which will prune the acceptable set of positions
--printy(["arg_positions_OK: ",arg_positions_OK]);
return arg_positions_OK /= {}; -- definition is OK if any acceptable argument positions remain
procedure recursive_definition_OK_in(node,var_bindings); -- check a recursive definition for syntactic validity (inner workhorse)
if is_string(node) then return; end if; -- down to a simple variable, so nothing more to do
case (ah := abbreviated_headers(node(1)))
when "and","or","==","+","-","{-}","in","notin","/==","=","/=","[]","[-]","{.}","itr","Etr","incs","incin","imp","*","->","not","null" =>
-- ordinary operators; just descend recursively to check the operator arguments
for sn in node(2..) loop recursive_definition_OK_in(sn,var_bindings); end loop;
when "arb","range","domain" => -- ordinary operators; just descend recursively to check the operator arguments
for sn in node(2..) loop recursive_definition_OK_in(sn,var_bindings); end loop;
when "()" => -- this is the case of functional and predicate application; the second variable is a reserved symbol, not a set
if node(2) = symbdef then -- we have an occurrence of the symbol being defined.
-- examine its arguments and prune out all those which are not properly restricted
--printy(["node(2) = symbdef: ",node," ",symbdef]);
ap_OK_copy := arg_positions_OK; -- we need only examine argument positions which are still candidates for being
-- the position to which recursion is being applied.
for n in ap_OK_copy loop -- look at positions which are still candidates
-- if the argument in the position being examined is not a simple variable, the position is unsuitable for recursion
if not is_string(param_n := node(3)(n + 1)) then arg_positions_OK less:= n; end if;
-- if the binding of the simple variable in the position being examined does not restrict it to
-- membership in the left-hand variable originally in this position or if that left-hand variable
-- is no longer free, the position is unsuitable for recursion
if var_bindings(param_n) /= ["in",aln := arglist(n)] or var_bindings(aln) /= OM then arg_positions_OK less:= n; end if;
end loop;
end if;
--printy(["var_bindings: ",var_bindings," node: ",node," arg_positions_OK: ",arg_positions_OK]);
-- also examine the function arguments recursively
for sn in node(3..) loop recursive_definition_OK_in(sn,var_bindings); end loop;
when "{}","{/}","EX","ALL" => var_bindings +:= find_quantifier_bindings(node); -- setformer or quantifier; note the bound variables
for sn in node(2..) loop recursive_definition_OK_in(sn,var_bindings); end loop; -- check the operation arguments
when "@" => -- function composition
for sn in node(2..) loop recursive_definition_OK_in(sn,var_bindings); end loop; -- check the operation arguments
when "if" => -- conditional expression
for sn in node(2..) loop recursive_definition_OK_in(sn,var_bindings); end loop; -- check the operation arguments
otherwise => -- additional infix and prefix operators
all_fcns with:= node(1);
for sn in node(2..) loop recursive_definition_OK_in(sn,var_bindings); end loop; -- check the operation arguments
end case;
end recursive_definition_OK_in;
end recursive_definition_OK;
-- ************* removal of internal labels from proof lines ***************
-- Proof lines as found in an original scenario file, and also in its digested form, can contain labels of the form
-- Statnnn, which are used for citation of statements (or statement parts), and for restricting proof contexts
-- when needed to improve inference efficiency. Before statements are parsed, these labels must be removed
-- by the following routine.
procedure drop_labels(stg); -- finds location of Statnnn: in string, if any.
-- These labels are dropped, and positions of first characters are returned
-- drop_locdef(stg); -- drop "Loc_def:" if it appears
stg := join(breakup(stg,":"),": "); -- put a blank after each colon
tup := segregate(stg,"Stabcdefghijklmnopqrstuvwxyz:0123456789");
newt := lablocs := labs := [];
for sect = tup(j) loop
if #sect > 5 and sect(1..4) = "Stat" and sect(#sect) = ":" then -- we have a label; record its location
lablocs with:= 1 +/ [#piece: piece in newt]; --[#piece: piece in tup(1..j - 1)];
labs with:= sect;
else -- collect the piece of the formula outside labels
newt with:= sect;
end if;
end loop;
--print("drop_labels: ",["" +/ newt,lablocs,labs],tup);
return ["" +/ newt,lablocs,labs];
end drop_labels;
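-- illustrative example (behavior inferred from the code above, not executed):
--   drop_labels("Stat5: x = y")
-- should return a triple of the form ["  x = y",[1],["Stat5:"]], i.e. the statement text with the
-- label removed, the character position at which the label stood, and the label itself.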
-- ************************************************************
-- ************* Interface to the MLSS routines ***************
-- ************************************************************
-- after a conjunction intended for satisfiability testing has been built by the routines shown above
-- it is prepared for submission to the MLSS decision routines of the back end by the following procedure.
-- this first rechecks the syntax of the conjunction, and sets flags to control blobbing coarseness and time allowed for
-- inferencing. The formula is then blobbed, following which its blobbed form is 'boiled down'
-- to eliminate clauses which obviously contribute nothing to a satisfiability test.
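-- in outline, the steps taken below are:
--   (1) parse the conjunction if it arrives as a string (parse_expr);
--   (2) blob it (blob_tree or blob_tree_in, depending on allow_blob_simplify);
--   (3) simplify the blobbed form (boil_down_blobbed);
--   (4) submit the result to the MLSS satisfiability tester (model_blobbed);
-- an OM result from model_blobbed means 'no model', i.e. the conjunction with negated goal is
-- contradictory and the verification step succeeds.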
procedure test_conj(conj); -- test a conjunct for satisfiability
starting_occ := opcode_count(); -- opcode count at start of inferencing
--printy(["Parsing: ",conj]); printa(debug_handle := open("debug_file","TEXT-OUT"),"Parsing: ",conj); close(debug_handle);
debug_conj := conj; abend_trap := show_conj; -- set a printout to trigger in case this routine aborts
-- debug_conj2 is used to indicate the conjunct being tried when inferences are getting slow
if (tree := if is_string(conj) then parse_expr(conj) else conj end if) = OM then
print("STOPPED by syntax error in conj: ",conj); stop;
end if;
debug_conj := "Not a parse problem";
if tree = OM then
printy(["PARSE ERROR: ",conj]); printy([statement_stack]);
tested_ok := false;
cycles_inferencing +:= (opcode_count() - starting_occ); -- accumulate the inferencing cycles
return;
end if;
--printy(["Done Parsing: ",allow_blob_simplify]);
if is_string(conj) then tree := tree(2); end if; -- if parameter was string then take component 2 of parsed form (to drop 'ast_list')
-- debug_conj2 := unicode_unpahrse(tree);
if squash_details then allow_unblobbed_fcns := false; end if; -- temporarily drop global flag if details not wanted in inference
--print("<BR>about to blob: ",tree);
blobbed_tree := if allow_blob_simplify then blob_tree(tree) else blob_tree_in(tree) end if;
-- blob the conjunction, with last element reversed; for equals inferences don't restart blob counter and map
--print("<BR>blobbed_version: ",blobbed_tree);
allow_unblobbed_fcns := true; -- restore global flag
kbl := branches_limit; if try_harder then branches_limit := 40000; end if; -- set larger branches_limit for model testing
start_time := time();
blobbed_tree := boil_down_blobbed(otree := blobbed_tree); -- Added March 26,2002: simplify before further processing
--print("<BR>boil_down_blobbed: ",blobbed_tree);
formula_after_blobbing := otree; -- to generate report when inference is slow
formula_after_boil_down := blobbed_tree; -- to generate report when inference is slow
if show_details then -- print out details of an inference being attempted, for debugging purposes.
printy(["conj: ",conj]);
printy(["\notree: ",unicode_unpahrse(otree)]);
printy(["\nblobbed_tree: ",unicode_unpahrse(blobbed_tree)]);
printy(["\notree after step 1 of first simplification: ",unicode_unpahrse(simp3)]);
printy(["\notree after step 2 of first simplification: ",unicode_unpahrse(simp2)]);
printy(["\notree after first full simplification: ",unicode_unpahrse(simp1)]);
printy(["\nnumber of occurrences of blobs: ",{[x,y] in num_occurences_of | y /= 999999}]);
end if;
tested_ok := false; -- assume not OK. This flag will be set to 'true' by the back-end satisfiability testing
-- routines if the inference being attempted succeeds. If an inference is abandoned,
-- those routines will return the string value "???????". Given this information,
-- we can report on the success or failure of the inference.
--print("<BR>testing model_blobbed: ",blobbed_tree);
if (mb := model_blobbed(blobbed_tree)) = OM then -- no model, since verification succeeds
-- first test to see if inference succeeds without use of proof by structure
ok_counts +:= 1; ok_total +:= 1; -- count the number of successful verifications
tested_ok := true;
-- if not, we try proof by structure, provided the use_proof_by_structure flag is set
elseif use_proof_by_structure then -- conjoin extra proof_by_structure clauses to the basic conjunction, see if inference succeeds this way
-- *******************************************************
-- *********** interface to proof_by_structure ***********
-- *******************************************************
addnal_conjs := {[x,y] in extract_relevant_descriptors(otree := tree) | y /= just_om_set};
-- extract additional clauses from the initial conjunction, which represents all the clauses available at this stage of the proof.
if addnal_conjs /= {} and (addnal_assertions := get_assertions_from_descriptors(addnal_conjs)) /= "" then
addnal_assertions_tree := parse_expr(addnal_assertions + ";")(2);
tree := ["ast_and",tree,addnal_assertions_tree]; -- conjoin the additional assertions supplied by proof_by_structure
end if;
debug_conj2 := unicode_unpahrse(tree);
--print("<P>debug_conj2: ",debug_conj2,"<P>addnal_conjs: ",addnal_conjs,"<P>addnal_assertions: ",addnal_assertions,"<P>addnal_assertions_tree: ",addnal_assertions_tree,"<P>tree: ",tree);
if squash_details then allow_unblobbed_fcns := false; end if; -- temporarily drop global flag if details not wanted in inference
blobbed_tree := if allow_blob_simplify then blob_tree(tree) else blob_tree_in(tree) end if;
-- blob the conjunction, with last element reversed; for equals inferences don't restart blob counter and map
allow_unblobbed_fcns := true; -- restore global flag
kbl := branches_limit; if try_harder then branches_limit := 40000; end if; -- set larger branches_limit for model testing
start_time := time();
blobbed_tree := boil_down_blobbed(otree := blobbed_tree); -- Added March 26,2002: simplify before further processing
formula_after_blobbing := otree; -- to generate report when inference is slow
formula_after_boil_down := blobbed_tree; -- to generate report when inference is slow
if show_details then -- print out details of an inference being attempted, for debugging purposes.
printy(["conj: ",conj]);
printy(["\notree: ",unicode_unpahrse(otree)]);
printy(["\nblobbed_tree: ",unicode_unpahrse(blobbed_tree)]);
printy(["\notree after step 1 of first simplification: ",unicode_unpahrse(simp3)]);
printy(["\notree after step 2 of first simplification: ",unicode_unpahrse(simp2)]);
printy(["\notree after first full simplification: ",unicode_unpahrse(simp1)]);
printy(["\nnumber of occurrences of blobs: ",{[x,y] in num_occurences_of | y /= 999999}]);
end if;
tested_ok := false; -- assume not OK. This flag will be set to 'true' by the back-end satisfiability testing
-- routines if the inference being attempted succeeds. If an inference is abandoned,
-- those routines will return the string value "???????". Given this information,
-- we can report on the success or failure of the inference.
if (mb := model_blobbed(blobbed_tree)) = OM then -- no model, since verification succeeds
ok_counts +:= 1; ok_total +:= 1; -- count the number of successful verifications
tested_ok := true;
end if;
end if;
if tested_ok then -- one of the two preceding verification attempts succeeded, so there is nothing more to do
null;
elseif show_error_now then -- else verification failed, so may need to diagnose
if is_string(mb) and mb(1..7) = "???????" then -- the inference was abandoned, so diagnose as abandoned
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested, "\nAbandoned attempt to derive MLS contradiction\n",
"\n<P>reduced blobbed statement was:\n\n\t",unicode_unpahrse(blobbed_tree)]);
-- the statement stack from which the blob being tested was derived
printy(["<P>Statement stack with negated last statement is: "]);
for x in conjoined_statements?["Stack not available"] loop printy([unicode_unpahrse(parse_expr(drop_labels(x)(1) + ";"))]); end loop;
if addnal_conjs /= {} and addnal_assertions?"" /= "" then printy(["Additional clauses supplied by proof_by_structure: ",addnal_assertions]); end if;
if not show_details then -- print additional details concerning failure
printy(["\nblobbed formula tested for satisfiability:\n",unicode_unpahrse(otree)]); -- the blob being tested
printy(["\nblobed formula after first full simplification:\n",unicode_unpahrse(simp1)]); -- the blob after simplification
printy(["\nconj:\n",if is_tuple (conj) then unicode_unpahrse(conj) else conj end if]);
printy(["\nblob_name map is:\n",blob_name]);
end if;
elseif not optimizing_now then -- otherwise issue an 'inference failed' diagnosis, provided not optimizing
if number_of_statement_theorem = OM then print("****** Error verifying step:"); return; end if;
-- allow for external testing where less output is wanted
printy(["\n****** Error verifying step: " + number_of_statement?"number_of_statement" + " of theorem "
+ number_of_statement_theorem?"number_of_statement_theorem" + "\n<P>\t", statement_being_tested,"\n<P>Attempt to derive MLS contradiction has failed\n",
"\n<P>reduced blobbed statement was:\n\n\t", unicode_unpahrse(blobbed_tree)]);
-- the statement stack from which the blob being tested was derived
printy(["<P>Statement stack with negated last statement is:\n"]);
for x in conjoined_statements?["Stack not available"] loop printy([unicode_unpahrse(parse_expr(drop_labels(x)(1) + ";"))]); end loop;
-- for x in conjoined_statements?["Stack not available"] loop printy([unparse(parse_expr(x + ";"))]); end loop;
--print("addnal_conjs: ",addnal_conjs," otree: ",otree," statement_stack: ",statement_stack);
if addnal_conjs /= {} and addnal_assertions?"" /= "" then printy(["Additional clauses supplied by proof_by_structure: ",addnal_assertions," vars_to_descriptors: ",vars_to_descriptors," type: ",type(vars_to_descriptors)," debug_extract: ",debug_extract," debug_conj_tree: ",debug_conj_tree,""]); end if;
if not show_details then -- print additional details concerning failure
printy(["\nblobbed formula tested for satisfiability:\n",unicode_unpahrse(otree)]); -- the blob being tested
printy(["\nblobbed formula after first full simplification:\n",unicode_unpahrse(simp1)]); -- the blob after simplification
printy(["\nconj:\n",if is_tuple (conj) then unicode_unpahrse(conj) else unicode_unpahrse(parse_expr(conj)) end if]);
printy(["\nblob_name map is:\n",blob_name]);
end if;
end if;
end if;
branches_limit := kbl; -- restore default branches_limit for model testing
cycles_inferencing +:= (opcode_count() - starting_occ); -- accumulate the inferencing cycles
end test_conj;
procedure show_conj(); printy(["conj being tested: ",debug_conj]); end show_conj;
-- abend routine for test_conj, used to debug the situation if an attempted inference crashes.
-- the following small routines are used during conjunction building.
procedure conjoin_last_neg(stat_tup); -- invert the last element of a collection of clauses and rewrite as a conjunction using 'and'
stat_tup := [drop_labels(stat)(1): stat in stat_tup];
stat_tup(#stat_tup max 1) := neglast := "(not(" + stat_tup(#stat_tup max 1)?"false" + "))";
return join(breakup("(" + join(stat_tup,") and (") + ");","&")," and ");
end conjoin_last_neg;
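-- illustrative example (assumed behavior, not executed):
--   conjoin_last_neg(["x in s","x /= y"])
-- should yield the string "(x in s) and ((not(x /= y)));", i.e. the conjunction of all clauses
-- with the final clause negated and a terminating semicolon appended.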
procedure conjoin_last_neg_nosemi(stat_tup);
-- invert the last element of a collection of clauses and rewrite using 'and'
stat_tup := [drop_labels(stat)(1): stat in stat_tup];
stat_tup(#stat_tup) := "(not(" + stat_tup(#stat_tup) + "))";
return join(breakup("(" + join(stat_tup,") and (") + ")","&")," and "); -- no terminating semicolon
end conjoin_last_neg_nosemi;
procedure conjoin_last(stat_tup); -- rewrite a collection of clauses as a conjunction using 'and'
stat_tup := [drop_labels(stat)(1): stat in stat_tup];
return join(breakup("(" + join(stat_tup,") and (") + ");","&")," and "); -- include terminating semicolon
end conjoin_last;
procedure collect_conjuncts(tree); -- extract all top-level terms from a conjunction
--printy([tree]);
if is_string(tree) then return [tree]; end if;
[n1,n2,n3] := tree;
return if n1 = "ast_and" then collect_conjuncts(n2) + collect_conjuncts(n3) else [tree] end if;
end collect_conjuncts;
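-- illustrative example (assumed AST layout, not executed):
--   collect_conjuncts(["ast_and",["ast_and","a","b"],"c"])
-- returns ["a","b","c"], flattening the conjunction tree into its top-level terms.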
-- the following small routine is used during equality inferencing
procedure collect_equalities(tree); -- extract all top-level equalities and equivalences from a conjunction
--printy([tree]);
if is_string(tree) then return []; end if;
[n1,n2,n3] := tree;
return if n1 = "ast_and" then collect_equalities(n2) + collect_equalities(n3) elseif n1 = "ast_eq" or n1 = "DOT_EQ" then [tree] else [] end if;
end collect_equalities;
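-- illustrative example (assumed AST layout, not executed):
--   collect_equalities(["ast_and",["ast_eq","x","y"],["ast_in","x","s"]])
-- returns [["ast_eq","x","y"]]: only top-level equalities (ast_eq) and equivalences (DOT_EQ) are kept.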
-- ******* disable/enable checking of particular classes of inferences *******
-- the following routine, intended for use during initial system development and testing,
-- accepts a comma-delimited string argument, which it uses to disable particular inference classes
-- especially before they have been implemented. Disabled inferences will always appear to succeed.
procedure disable_inferences(stg); -- disables and re-enables the various kinds of inferences
-- include a '*' in the list to disable by default
list := breakup(stg,",");
-- first set the default for every inference mode: disable all if '*' is in the list, else enable all
ntv := not (tv := "*" in list);
disable_elem := disable_tsubst := disable_algebra := disable_simplf := tv;
disable_discharge := disable_citation := disable_monot := disable_apply := tv;
disable_loc_def := disable_equal := disable_use_def := disable_assump := tv;
for item in list loop
case item
when "elem" => disable_elem := ntv;
when "tsubst" => disable_tsubst := ntv;
when "algebra" => disable_algebra := ntv;
when "simplf" => disable_simplf := ntv;
when "discharge" => disable_discharge := ntv;
when "citation" => disable_citation := ntv;
when "monot" => disable_monot := ntv;
when "apply" => disable_apply := ntv;
when "loc_def" => disable_loc_def := ntv;
when "equal" => disable_equal := ntv;
when "use_def" => disable_use_def := ntv;
when "assump" => disable_assump := ntv;
end case;
end loop;
end disable_inferences;
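-- illustrative usage (assumed, not executed):
--   disable_inferences("*,elem,discharge");  -- disable every inference class except ELEM and Discharge
--   disable_inferences("tsubst");            -- enable everything, then disable theorem citation only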
-- *********************************************************************
-- ** Library of routines for checking specific classes of inferences **
-- *********************************************************************
-- Each of the routines in the following subsection of this file handles a particular kind of inference by analyzing
-- its hint and body, converting these and the context of the statement to an appropriate conjunction,
-- and passing this to the routines seen above for ultimate transmission to the satisfiability testing procedures
-- of the verifier system back end.
-- ************* Discharge inference checking ***************
-- The following inferencing routine is a bit untypical, since it first checks that a discharge inference succeeds,
-- in that its full context is contradictory; if this is so, it must also check that the stated conclusion
-- of the discharge is an elementary consequence of the negative of the last statement supposed and of the
-- statements which precede that statement in the available context.
-- Note that the context-stack manipulation needed in connection with the suppose-discharge mechanism
-- is managed not by this routine but by code found at its point of call.
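-- schematic example (assumed scenario syntax, for orientation only):
--   Suppose ==> x in s; ...intermediate steps...; Discharge ==> x notin s
-- the first check below conjoins the whole stacked context (with 'false' appended) and verifies
-- that it is contradictory; the second check verifies that 'x notin s' follows elementarily from
-- the context preceding the Suppose together with the negation 'not(x in s)'.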
procedure check_discharge(statement_stack,prior_suppose_m1,stat_in_discharge,discharge_stat_no,hint);
-- checks a discharge operation; see comment below
--print("<P>Checking discharge of: ",stat_in_discharge," ",statement_stack);
-- First we conjoin the collection of statements that should be contradictory at a Discharge.
-- This is the set of all statements stacked up to the point just prior to the Discharge statement itself.
-- Then we check that the statement in the Discharge is an ELEM consequence of the statements on the stack
-- prior to last statement Supposed, plus the negative of the last statement Supposed.
-- The statement in the Discharge can be labeled, so any labels that it may contain must be dropped.
hint_copy := hint; rspan(hint_copy," \t"); rmatch(hint_copy,"Discharge"); -- get the context section alone
blob_and_try_hard_tag := rspan(hint_copy," \t=>)*+"); -- section of hint indicating coarse blobbing
--print("<BR>blob_and_try_hard_tag: ",blob_and_try_hard_tag," ",hint);
-- we conjoin everything stacked so far, as dictated by the context; this should be a contradiction
-- conj1a := conjoin_last(statement_stack);
conj1 := form_discharge_conj(hint,statement_stack with ["false"]); -- build conjunction to use in Discharge-type deductions
-- we add 'false' as a final statement, since this is the 'desired conclusion' in the first step of a discharge
--print("<BR>conj1: ",conj1);
--printy(["start parse: ",conj1]);
if (tree := parse_expr(conj1)) = OM then
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,"\nBad Discharge syntax in proof step "]);
printy(["syntactically illformed conjunction is\n",conj1]);
error_count +:= 1;
return false;
end if;
--printy(["unparse(tree(2)): ",unicode_unpahrse(tree(2))]);
save_squash_details := squash_details; squash_details := "*" in blob_and_try_hard_tag;
blobbed_tree := blob_tree(tree(2)); -- blob the conjunction
blobbed_tree := boil_down_blobbed(otree := blobbed_tree); -- Added March 26,2002: simplify before further processing
formula_after_blobbing := otree; -- to generate report when inference is slow
formula_after_boil_down := blobbed_tree; -- to generate report when inference is slow
mb := model_blobbed(blobbed_tree); -- see if we have a contradiction using everything stacked so far
--print("<P>squash_details1: ",squash_details," ",mb?"UNDEFINED");
squash_details := save_squash_details; -- restore the prior squash_details setting
--printy(["\n\ncheck_discharge: ",statement_stack," stat_in_discharge: ",stat_in_discharge," conj1: ",conj1," otree: ",otree]);
--printy([" blobbed_tree: ",blobbed_tree," ",type(blobbed_tree)," mb: ",mb]);
if mb /= OM then -- this is the failure case, first step
if mb = "??????? Probably can't decide without excess work ??????? " then
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nAbandoned attempt to find contradiction in Discharge\n",unicode_unpahrse(blobbed_tree)]);
else
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,"\nFailure to find contradiction in Discharge\n"]);
printy(["\notree: ",unicode_unpahrse(otree)]);
error_count +:= 1;
printy(["\notree after first simplification: ",unicode_unpahrse(simp1)]);
end if;
printy(["\nStatement stack with negated last statement is: "]);
for x in statement_stack loop printy([unicode_unpahrse(parse_expr(x + ";"))]); end loop;
return false; -- note first failure mode: the conjunction formed from the statements stacked preceding the discharge is not contradictory
elseif optimization_mark or optimize_whole_theorem then -- ******* optimization is desired
save_optimizing_now := optimizing_now; optimizing_now := true; -- set optimization flag to suppress diagnosis;
print("<BR>lines of context required for justification of the discharge statement on line ",discharge_stat_no,", namely ",hint," ==> ",stat_in_discharge," are: ", search_for_all(statement_stack with ["false"]));
--print(" Optimized pure inference time for this inference step is: ",best_time_so_far);
optimizing_now := save_optimizing_now; -- restore optimization flag
end if; -- otherwise we must check that the statement appearing in the Discharge
-- is an ELEM consequence of the statements on the stack prior to last
-- statement Supposed, plus the negative of the last statement Supposed.
if stat_in_discharge = "QED" then -- there is a claimed contradiction with an original Suppose_not;
if prior_suppose_m1 = 0 then -- this is legitimate if no prior suppose exists,
return true; -- so the discharge just verified concludes the proof
else -- but otherwise the QED is illegitimate
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,"\nClaimed 'QED' preceded by undischarged 'Suppose', does not terminate proof\n"]);
error_count +:= 1;
return false; -- gotcha, you trickster!
end if;
end if;
top_stat := stat_in_discharge; -- prepare to check for the 'AUTO' case
rspan(top_stat," \t"); autmatch := rmatch(top_stat,"AUTO");
--print("top_stat: ",top_stat," ",autmatch);
if autmatch /= "" or top_stat = "AUTO" then -- in the 'AUTO' case, just take the negative of the last statement supposed and return
-- note workaround for rmatch bug: rmatch fails to match when the pattern constitutes the entire string
is_auto := true;
stat_supposed := drop_labels(statement_stack(prior_suppose_m1 + 1))(1); -- get the last previous statement supposed, dropping label if any
auto_gen_stat := (top_stat + " " + "(not(" + stat_supposed + "))");
return true; -- 'true' indicates inference success
--print("<P>statement_stack: ",statement_stack);
end if;
-- the statements prior to the last statement Supposed, plus the negative of the last statement Supposed.
conj2 := form_discharge_conj_nodiagnose("",trimmed_stack := statement_stack(1..prior_suppose_m1) with ("(not(" + statement_stack(prior_suppose_m1 + 1) + "))") with drop_labels(stat_in_discharge)(1));
-- note: we null the hint to use the unrestricted context in this second step
-- note: this routine must remove final semicolon
-- note: the 'Suppose' itself is now inverted, since it has been refuted by the prior argument
--print("stack_to_last_suppose: ",stack_to_last_suppose,"\nstack_to_last_suppose2: ",stack_to_last_suppose2);
--print("<BR>conj2: ",conj2);
if (tree := parse_expr(conj2 + ";")) = OM then
printy(["<BR>Bad Discharge syntax in conj2:\n",conj2,"\n",statement_stack]);
printy(["<BR>stopped due to: syntax error"]); was_syntax_error := true; stop;
end if;
save_squash_details := squash_details; squash_details := "*" in blob_and_try_hard_tag;
--print("<P>squash_details2: ",squash_details);
blobbed_tree := blob_tree(tree(2)); -- blob the conjunction, its last element having been reversed
--printy(["\ncheck_discharge: ",unicode_unpahrse(blobbed_tree)]);
blobbed_tree := boil_down_blobbed(otree := blobbed_tree); -- Added March 26,2002: simplify before further processing
formula_after_blobbing := otree; -- to generate report when inference is slow
formula_after_boil_down := blobbed_tree; -- to generate report when inference is slow
mb := model_blobbed(blobbed_tree);
squash_details := save_squash_details; -- restore the prior squash_details setting
-- printy(["\n starting discharge verification: ",discharge_stat_no," ",time()]); printy([mb?" OK-INCONS. AS EXPECTED ",time()]);
if mb = OM then -- this is the success case
if optimization_mark or optimize_whole_theorem then -- ******* optimization is desired
save_optimizing_now := optimizing_now; optimizing_now := true; -- set optimization flag to suppress diagnosis;
print("<BR>lines of context required to draw conclusion of discharge statement on line ",discharge_stat_no,", namely ",hint," ==> ",stat_in_discharge," are: ", search_for_all(trimmed_stack));
--print(" Optimized pure inference time for this inference step is: ",best_time_so_far);
optimizing_now := save_optimizing_now; -- restore optimization flag
end if;
return true;
end if;
if mb = "??????? Probably can't verify Discharge statement without excess work ??????? " then
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nAbandoned attempt to verify blobbed Discharge statement\n",unicode_unpahrse(blobbed_tree)]);
else
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nDischarge inference error\n\t", unicode_unpahrse(blobbed_tree)]);
printy(["\nPreceding context is inconsistent, but conclusion does not follow from negative of supposition. Blobbed version is ",
unicode_unpahrse(blobbed_tree)]);
printy(["\notree: ",unicode_unpahrse(otree)]);
printy(["\notree after first simplification: ",unicode_unpahrse(simp1)]);
error_count +:= 1;
end if;
printy(["\nStatement stack to point of last suppose with negated Suppose and Discharge statement is: "]);
for x in statement_stack(1..prior_suppose_m1 + 1) loop printy([x]); end loop;
return false; -- note second failure mode
end check_discharge;
-- ************* Statement citation inference checking ***************
-- The following routine first substitutes the list of replacements supplied as statement citation inference
-- parameters into the (ordinarily quantified) body of the statement being cited, following which it checks to see if the
-- conclusion of the citation inference follows as an elementary conclusion from the substituted result and from
-- other statements available in the context of the citation inference.
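-- schematic example (assumed hint syntax, for orientation only): a citation such as
--   e1,e2 --> Stat7 ==> conclusion
-- strips two quantifiers from the statement labeled Stat7, substitutes e1 and e2 for the freed
-- variables, and then checks that 'conclusion' follows elementarily from the substituted statement
-- together with the rest of the available context.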
procedure check_a_citation_inf(count,statement_cited,statement_stack,hbk,citation_restriction,piece,statno); -- check a single citation inference
span(hbk," \t"); rspan(hbk," :\t"); -- remove enclosing whitespace and possible terminating colon
-- check to see if the statement citation inference is restricted.
-- if so, the restriction will appear in parentheses at the right of the statement reference,
-- as e.g. in x --> Stat66(Stat1,Stat7*) ==> ...
--printy(["hbk: ",hbk]);
is_restricted := (citation_restriction := citation_restriction?"") /= ""; -- is the 'ELEM' phase of the citation inference restricted
blob_and_try_hard_tag := rspan(citation_restriction,"*+"); -- see if there is a blobbing restriction or a try_hard flag
-- now decompose the citation hint into the expressions to be substituted into the statement
--if piece = OM then printy(["piece: ",statement_cited," ",statement_stack]); end if;
preflist := piece(1..#piece - 2); rspan(preflist,"\t "); span(preflist,"\t "); -- clean the list of substitution items from the hint
lp := match(preflist,"("); rp := rmatch(preflist,")");
if lp = "" then preflist := [preflist]; else preflist := split_at_bare_commas(preflist); end if;
-- decompose the list into its comma-separated pieces
statement_cited := join(breakup(statement_cited,"&")," and "); -- replace ampersands with 'ands' in the statement cited
[stat_with_freevars,freevars] := strip_quants(parse_expr(statement_cited + ";")(2),npl := #preflist);
-- attempt to strip off required number of quantifiers
if #freevars < npl then
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nWrong number of variables supplied in statement citation"]);
printy(["Statement details are: ",hbk," ",statement_cited," ",preflist]); --," ",[stat_with_freevars,freevars]);
error_count +:= 1;
--if (citation_err_count +:= 1) > 2 then stop; end if;
return;
end if;
for form = preflist(jkj) loop -- parse all the expressions in the list of things to be substituted
if (paexp := parse_expr(form + ";")) = OM then
printy(["syntax of item ",jkj," to be substituted in statement is bad, items are ",preflist,
", statement cited is ",statement_cited]);
printy(["stopped due to: syntax error"]); was_syntax_error := true;return;
end if;
end loop;
--printy(["replacement map: ",preflist]);
replacement_map := {[v,parse_expr(plj + ";")(2)]: v = freevars(j) | (plj := preflist(j)) /= OM};
--printy(["replacement map after: ",{[v,preflist(j)]: v = freevars(j)}]);
sts := substitute(stat_with_freevars,replacement_map); -- build the appropriately substituted form of the statement
--printy(["sts: ",sts]);
top_stat := statement_stack(nss := #statement_stack); -- prepare to check for the 'AUTO' case
rspan(top_stat," \t"); autmatch := rmatch(top_stat,"AUTO");
if autmatch /= "" then -- in the 'AUTO' case, just take the generated statement and return
is_auto := true; auto_gen_stat := (top_stat + " " + (gen_stat_body := unparse(sts)));
-- the generated statement carries the label if any
if ":" in top_stat then -- labeled case
rspan(top_stat," \t:"); span(top_stat," \t"); -- strip the label of whitespace and colon
labeled_pieces(top_stat) := gen_stat_body; -- replace AUTO by the statement automatically generated
end if;
return;
end if;
conj := build_conj(citation_restriction,statement_stack,substituted_statement := unparse(sts));
-- build conjunction, either of entire stack or of statements indicated by conjunction restriction
--printy(["conj: ",is_restricted," ",citation_restriction," ",conj]);
-- try proof by computation as a preliminary, using the conjunct as input, but only in restricted case
if is_restricted and (ccv := compute_check(parse_expr(conj)(2))) = false then -- the conjunct is inconsistent
tested_ok := true; -- note that this was correctly handled
return; -- done with this statement
end if;
if citation_check_count > detail_limit then
printy(["check_a_citation_inf: preflist is: ",preflist," statement_cited is: ",statement_cited,
"\ndesired conclusion is: ",statement_stack(#statement_stack),"\nsubstituted citation is: ",unicode_unpahrse(sts)]);
printy(["preflist: ",preflist," ",conj]);
printy(["stopped due to: excessive detail"]); return;
end if;
save_squash_details := squash_details; squash_details := "*" in blob_and_try_hard_tag;
-- details can be squashed during theorem substitution inferences
test_conj(conj); -- test this conjunct for satisfiability
squash_details := save_squash_details; -- restore the prior squash_details setting
if citation_check_count > detail_limit then printy(["done test_conj"]); end if;
if not tested_ok then
printy([str(citation_check_count +:= 1)," Checking citation of statement: ",hbk]);
error_count +:= 1;
printy(["statement_cited: ",hbk," ",statement_cited,"\ndesired conclusion: ",
statement_stack(#statement_stack),"\nsubstituted citation:: ",unicode_unpahrse(sts)]);
elseif optimization_mark or optimize_whole_theorem then -- ******** optimization is desired ********
--print("<BR>statement_stack: ");for stat in statement_stack loop print("<BR>",stat); end loop;
save_optimizing_now := optimizing_now; optimizing_now := true; -- set optimization flag to suppress diagnosis;
ns := #statement_stack;
needed_with_substituted := search_for_all(statement_stack(1..ns - 1) +:= [substituted_statement,statement_stack(ns)]);
-- do the optimization search. The stack supplied carries the cited statement after substitution as its next-to-last component
if (needed_less_substituted := [k in needed_with_substituted | k /= ns]) = [] then
print("<BR>Only the statement cited (" + hbk + ") is needed to prove citation result on line ",statno,", namely: ",
statement_stack(#statement_stack));
--print(" Optimized pure inference time for this inference step is: ",best_time_so_far);
else
print("<BR>The lines of context required for proof of citation of " + hbk + " in line ",statno,", namely: ",
statement_stack(#statement_stack)," are " + hbk + " plus ",needed_less_substituted);
--print(" Optimized pure inference time for this inference step is: ",best_time_so_far);
end if;
optimizing_now := save_optimizing_now; -- restore optimization flag
end if;
end check_a_citation_inf;
-- ************* Theorem citation inference checking ***************
-- The following routine first substitutes the list of replacements supplied as theorem citation inference
-- parameters into the (ordinarily quantified) body of the theorem being cited, following which it checks to see if the
-- conclusion of the theorem citation inference follows as an elementary conclusion from the substituted result and from
-- other statements available in the context of the citation inference.
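-- schematic example (assumed hint syntax, for orientation only): a citation such as
--   e1 --> T42 ==> conclusion
-- substitutes e1 for the first free variable of theorem T42 (free variables are taken in order of
-- first appearance in the theorem text), then checks that 'conclusion' follows elementarily;
-- the routine also checks that T42 precedes the theorem in which the citation occurs.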
procedure check_a_tsubst_inf(count,theorem_id,statement_stack,hbk,piece,stat_no); -- check a single tsubst inference
--print("<P>check_a_tsubst_inf labeled_pieces: ",labeled_pieces);
span(hbk," \t"); rspan(hbk," :\t"); -- remove enclosing whitespace and possible terminating colon
if #hbk = 0 or hbk(#hbk) = "x" then return; end if; -- bypass citations like 'Txxx'
blob_and_try_hard_tag := ""; -- but might be reset just below if there is a restriction
if "(" in hbk then -- the 'ELEM' phase of the citation inference is restricted
citation_restriction := rbreak(hbk,"("); rmatch(hbk,"("); rmatch(citation_restriction,")");
blob_and_try_hard_tag := rspan(citation_restriction,"*+"); -- see if there is a blobbing restriction or a try_hard flag
--printy(["citation_restriction: ",citation_restriction]);
else
citation_restriction := ""; -- citation restriction is null
end if;
theorem_cited := theorem_map(hbk); -- find the theorem which has been cited
if theorem_cited = OM then -- look for undefined theorems cited
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nReference to undefined Theorem"]);
error_count +:= 1; return;
end if;
theorem_cited := join(breakup(theorem_cited,"&")," and "); -- replace ampersands with 'ands' in the theorem cited
freevs_theorem := find_free_vars(parsed_thm := parse_expr(theorem_cited + ";")(2)); -- get the free variables of the 'theorem' part
firstloc_theorem := {}; -- we will determine the order in which the free variables appear
pieces := [case_change(p,"lu"): p in breakup(theorem_cited,",./?><;'\":][}{\\| \t=-+_)(*&^%$#@!~`") | p /= ""];
-- break at all punctuation marks
for p = pieces(j) loop firstloc_theorem(p) := firstloc_theorem(p)?j; end loop;
freevs_theorem := merge_sort({[firstloc_theorem(case_change(v,"lu"))?0,v]: v in freevs_theorem | (not check_def_of_symbol(v,theorem_id))});
freevs_theorem := [v: [-,v] in freevs_theorem | not((nv := #v) >= 8 and v(nv - 7..) = "_THRYVAR")];
-- _THRYVAR variables should never count as substitutable free variables
-- now decompose the citation hint into the expressions to be substituted for the free variables of the theorem
if piece = "" then -- theorem with no free variables
preflist := [];
else -- theorem with list of expressions to be substituted for its free variables
preflist := piece(1..#piece - 2); rspan(preflist,"\t "); span(preflist,"\t "); -- clean the list of substitution items from the hint
lp := match(preflist,"("); rp := rmatch(preflist,")");
if lp = "" then preflist := [preflist]; else preflist := split_at_bare_commas(preflist); end if;
-- decompose the list into its comma-separated pieces
end if;
if #freevs_theorem < #preflist then
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nWrong number of variables supplied in theorem citation"]);
error_count +:= 1;
printy([" freevs_theorem: ",freevs_theorem," ",preflist,"\nproof line is: ",[hint,stat],"\ntheorem_cited is: ",theorem_cited]);
end if;
-- here we have a theorem citation inference; we check all the substitution items for syntactic validity
parsed_substs := []; -- will collect all the parsed items to be substituted
for form = preflist(jj) loop -- parse all the expressions in the list of things to be substituted
if (paexp := parse_expr(form + ";")) = OM then
printy(["syntax of item ",jj," to be substituted is bad"," hint is ",piece]);
printy(["stopped due to: syntax error"]); was_syntax_error := true;return;
else
parsed_substs with:= paexp;
end if;
end loop;
replacement_map := {[varb,if (psj := parsed_substs(j)) /= OM then psj else varb end if]: varb = freevs_theorem(j)};
--print("<BR>freevs_theorem: ",freevs_theorem);
-- parse the formulae to replace the variables; variables for which no substituend is given remain unchanged
stt := substitute(parsed_thm,replacement_map); -- build the appropriately substituted form of the theorem
theorem_id_no := theorem_sno_map(theorem_id); cited_theorem_no := theorem_sno_map(hbk);
-- get the section numbers of the two theorems involved
if ((cited_theorem_no?100000) >= (theorem_id_no?-1)) then
error_count +:= 1;
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\ntheorem cited does not precede theorem in which citation occurs"]);
end if;
top_stat := statement_stack(nss := #statement_stack); -- prepare to check for the 'AUTO' case
rspan(top_stat," \t"); autmatch := rmatch(top_stat,"AUTO");
if autmatch /= "" then -- in the 'AUTO' case, just take the generated statement and return
is_auto := true; auto_gen_stat := (top_stat + " " + (gen_stat_body := unparse(stt)));
if ":" in top_stat then -- labeled case
rspan(top_stat," \t:"); span(top_stat," \t"); -- strip the label of whitespace and colon
--print("<BR>labeled_pieces: ",labeled_pieces," ",gen_stat_body);
labeled_pieces(top_stat) := gen_stat_body; -- replace AUTO by the statement automatically generated
end if;
return;
end if;
conj_build_start := opcode_count();
if (conj := build_conj(citation_restriction,statement_stack,substituted_statement := unparse(stt))) = OM then
print("STOPPED by build_conj failure: ",citation_restriction,statement_stack,substituted_statement); stop;
end if;
-- build conjunction, either of entire stack or of statements indicated by conjunction restriction
--print("<BR>conj build time for theorem: ",((oc_after := opcode_count()) - conj_build_start)/oc_per_ms);
save_squash_details := squash_details; squash_details := "*" in blob_and_try_hard_tag;
-- details can be squashed during theorem substitution inferences
test_conj(conj); -- test this conjunct for satisfiability
--print("<BR>test_conj time for theorem: ",(opcode_count() - oc_after)/oc_per_ms," ",conj);
squash_details := save_squash_details; -- restore the prior squash_details setting
--print("<P>optimization_mark in tsubst_inf: ",optimization_mark?"UNDEFINED"); if optimization_mark notin {true,false} then stop; end if;
if not tested_ok then
error_count +:= 1;
elseif optimization_mark or optimize_whole_theorem then -- ******** optimization is desired ********
save_optimizing_now := optimizing_now; optimizing_now := true; -- set optimization flag to suppress diagnosis;
ns := #statement_stack;
needed_with_substituted := search_for_all(statement_stack(1..ns - 1) +:= [substituted_statement,statement_stack(ns)]);
-- do the optimization search. The stack supplied carries the cited statement after substitution as its next-to-last component
if (needed_less_substituted := [k in needed_with_substituted | k /= ns]) = [] then
print("<BR>Only the theorem cited (" + hbk + ") is needed to prove theorem citation result on line ",stat_no,", namely: ",
statement_stack(#statement_stack));
--print(" Optimized pure inference time for this inference step is: ",best_time_so_far);
else
print("<BR>The lines of context needed to prove citation of theorem " + hbk + " in line ",stat_no,", namely: ",
statement_stack(#statement_stack)," are " + hbk + " plus ",needed_less_substituted);
--print(" Optimized pure inference time for this inference step is: ",best_time_so_far);
end if;
optimizing_now := save_optimizing_now; -- restore optimization flag
end if;
end check_a_tsubst_inf;
-- ************* Equality inference checking ***************
-- The following routine differs from ordinary elementary inference checking principally in that
-- it uses a slightly expanded and potentially more expensive form of blobbing.
-- Prior to this blobbing, the routine searches the conjunct to be tested (using the 'extract_equalities'
-- call seen below) to find and return all equalities and equivalences present at its top level.
-- These are passed to the blobbing routine through the global variable 'equalities_rep_map';
-- the blobbing routine then uses this information to unify the blobs of all items known to be equal
-- (and hence of all otherwise identical structures in which these may appear). After this modified
-- version of blobbing has been applied, the resulting conjunction is tested for satisfiability in the standard way.
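-- schematic example (for orientation only): given top-level clauses 'x = y' and 'f(x) in s',
-- the equalities_rep_map built below lets the blobber assign the same blob to f(x) and f(y),
-- so a conclusion such as 'f(y) in s' can be verified even though ordinary blobbing would treat
-- f(x) and f(y) as unrelated constants.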
procedure check_an_equals_inf(conj,stat,statement_stack,hint,stat_no); -- handle a single equality inference
--printy(["check_an_equals_inf: ",conj]);
if (pec := parse_expr(conj)) = OM then
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nSyntax error in 'EQUAL' conjunction"]);
printy(["Conjunction is: ",conj]);
error_count +:= 1;
return;
end if;
allow_unblobbed_card := true; -- don't strong-blob the cardinality operator during equality inferencing
allow_blob_simplify := false; -- turn off the blobbing simplifications
show_error_now := false; -- turn off error print in following initial trial
equalities_rep_map := extract_equalities(conj); -- first try without standardizing bound variables
test_conj(conj); -- test this conjunct for satisfiability
show_error_now := true;
allow_unblobbed_card := false; -- turn strong-blobbing of the cardinality operator back on
if tested_ok then -- restore preceding state and return
if optimization_mark or optimize_whole_theorem then -- ******** optimization is desired ********
save_optimizing_now := optimizing_now; optimizing_now := true; -- set optimization flag to suppress diagnosis;
allow_unblobbed_card := true; -- don't strong-blob the cardinality operator during equality inferencing
needed := search_for_all(statement_stack); -- do the optimization search
allow_unblobbed_card := false; -- turn strong-blobbing of the cardinality operator back on
print("<BR>The lines of context (in addition to all extracted equalities) required for proof of equality inference " + hint + " ==> " + stat + " in line ",stat_no," are ",needed);
--print(" Optimized pure inference time for this inference step is: ",best_time_so_far);
end if;
equalities_rep_map := {}; -- blank the equalities_rep_map to avoid use in other inferences
allow_blob_simplify := true; -- restore the blobbing simplifications
return;
end if;
conj := unparse(standardize_bound_vars(pec(2))) + ";";
-- standardize the conjunct
equalities_rep_map := extract_equalities(conj);
-- extract all conjoined equalities and equivalences from the conjunction
--printy(["checking an 'EQUAL' inference: ",stat]);
--printy(["conj: ",conj]); printy(["after blobbing: ",unicode_unpahrse(blob_tree(parse_expr(conj)(2)))]);
--printy(["equalities_rep_map: ",{[x,unicode_unpahrse(y)]: [x,y] in equalities_rep_map}]);
--printy(["blob_name: ",blob_name]);
test_conj(conj); -- test this conjunct for satisfiability
if not tested_ok then
error_count +:= 1;
printy(["equalities_rep_map is: ",{[x,unicode_unpahrse(y)]: [x,y] in equalities_rep_map}]);
end if;
equalities_rep_map := {}; -- blank the equalities_rep_map to avoid use in other inferences
allow_blob_simplify := true; -- restore the blobbing simplifications
end check_an_equals_inf;
procedure extract_equalities(conj); -- extract and arrange all top-level equalities and equivalences from a conjunction
-- the equalities are arranged as a map which sends all of the blobs in a class of items found to be equivalent
-- into a single one of them, used as a representative of the entire group.
--printy(["extract_equalities: ",conj]);
pconj := parse_expr(conj)(2); -- blob the conjunct, along with the equalities in it
eq_tuple := [];
conj_tuple := collect_conjuncts(pconj); -- collect all conjuncts by descent of top level of conjunct tree
--printy(["conj_tuple: ",[unicode_unpahrse(x): x in conj_tuple]]);
blob_name_ctr_was := -1;
for jj in [1..4] loop
pconj_unblobbed := conj_tuple(1); for conj in conj_tuple(2..) loop pconj_unblobbed := ["ast_and",pconj_unblobbed,conj]; end loop;
-- reformat as a conjunct
--printy(["pconj_unblobbed: ",unicode_unpahrse(pconj_unblobbed)]);
pconj := if jj = 1 then blob_tree(pconj_unblobbed) else blob_tree_in(pconj_unblobbed) end if;
-- blob the reformatted conjunct to get a compatible blobbing of all terms.
--printy(["pconj: ",unicode_unpahrse(pconj)]);
eq_tuple +:= collect_equalities(pconj); -- collect the equalities from the uniformly blobbed conjunct
--printy(["--------------- extract_equalities: ",[unicode_unpahrse(t): t in eq_tuple]]);
downhill_map := {}; -- will map each element in an equality into a smaller equal element
-- the elements in this map are represented in string form
for [n1,n2,n3] in eq_tuple | (sn2 := unparse(n2)) /= (sn3 := unparse(n3)) loop -- iterate over all the equalities
-- handling their left and right sides as strings
if #sn2 < #sn3 or (#sn2 = #sn3 and sn2 < sn3) then [n2,n3] := [n3,n2]; [sn2,sn3] := [sn3,sn2]; end if; -- force left-hand side to be larger
if (dn2 := downhill_map(sn2)) = OM then downhill_map(sn2) := sn3; continue; end if; -- since no previous downhill value
while (dn_further := downhill_map(dn2)) /= OM loop dn2 := dn_further; end loop;
-- chain down to the ultimate representative of sn2
if dn2 = sn3 then continue; end if; -- since just confirms prior downhill value
if #dn2 < #sn3 or (#dn2 = #sn3 and dn2 < sn3) then downhill_map(sn3) := dn2; continue; end if;
-- since in this case the step sn3 -> dn2 is downhill
downhill_map(dn2) := sn3; -- since in this case the path step -> sn3 is downhill
end loop;
-- now map each element in the domain of downhill_map into its ultimate representative
for nd in domain(downhill_map) loop -- chain down to the ultimate representative
path_down := [];
while (dn_further := downhill_map(nd)) /= OM loop
path_down with:= nd; nd := dn_further;
end loop;
for n in path_down loop downhill_map(n) := nd; end loop;
-- everything along the path is mapped to this ultimate representative
end loop;
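-- E.g. (illustrative): given the string-form equalities "x+y" = "z" and "z" = "w", the equality
-- loop records downhill_map("x+y") = "z" and downhill_map("z") = "w" (shorter, or lexicographically
-- smaller, strings always lie downhill); the compression pass just above then remaps "x+y"
-- directly to its ultimate representative "w".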
--printy(["downhill_map after: ",downhill_map]);
equalities_rep_map := {[x,parse_expr(y + ";")(2)]: [x,y] in downhill_map};
--printy(["blob_name_counter_was: ",blob_name_ctr_was," ",blob_name_ctr]);
if blob_name_ctr_was = blob_name_ctr then exit; end if; -- exit if no new blobs have appeared
blob_name_ctr_was := blob_name_ctr;
end loop;
return {[x,parse_expr(y + ";")(2)]: [x,y] in downhill_map};
-- return the map of each left and right side of an equality into its parsed ultimate representative
end extract_equalities;
-- ************* Algebra inference checking ***************
-- The following routine extends the equality inferencing seen just above by adding
-- a mechanism which recognizes elementary algebraic identities. To this end, it is made aware
-- (via declarations which invoke the procedure 'post_alg_roles' of the 'logic_elem_supplement' file)
-- of families of algebraic operations, each including associative and commutative addition
-- and multiplication operations for which multiplication distributes over addition.
-- Given this information, the routine searches the syntax tree of any conjunction presented to it
-- for all 'algebraic nodes', i.e. nodes headed by an operator known to belong to some algebraic
-- family of operators, and below this node for the largest possible subtree of nodes all of which
-- belong to the same algebraic family F as the top node. This subtree can be regarded
-- as defining an algebraic expression whose base level 'variables' are whatever non-algebraic expressions
-- are found at its bottom nodes. We then generate a collection of equalities and auxiliary conditions.
-- An auxiliary condition is generated for each non-algebraic expression exp found
-- at the bottom nodes of one of the algebraic subtrees just described, and must state that this expression
-- is a member of the ring associated with the top level of the subtree. If all of these auxiliary
-- conditions can be seen to hold in the logical context of the algebraic inference,
-- then an equality identifying the algebraic subtree with its canonical, fully expanded and simplified
-- algebraic form is generated and added to the conjunction to be tested for satisfiability.
-- After addition of all such equalities, the resulting conjunction is passed to our standard
-- equality-inferencing procedures.
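-- For example (an illustrative sketch, assuming •PLUS and •TIMES have been declared as the addition
-- and multiplication of a ring family): the algebraic subtree (a •PLUS b) •TIMES c, with a, b, and c
-- known ring members, yields auxiliary conditions asserting that a, b, and c belong to the ring,
-- plus the generated equality (a •PLUS b) •TIMES c = (a •TIMES c) •PLUS (b •TIMES c) identifying the
-- subtree with its expanded canonical form; this equality is conjoined to the hypothesis before the
-- standard equality test is run.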
procedure check_an_algebra_inf(conj,stat); -- handle a single algebraic inference
-- this routine is passed two strings, one the conjunctive hypothesis for an algebraic deduction, the
-- other the desired conclusion.
-- We begin by collecting all the algebraic subexpressions (algebraic nodes) from both
-- the hypothesis and the conclusion supplied.
-- Then we construct all the algebraic equalities arising from the standardization of these algebraic nodes,
-- add these to the available hypothesis, and then use equality inference. (The routine tracks ring
-- membership (perhaps later also fields) transitively, and is able to draw conclusions asserting
-- the membership of an element in a ring or field.)
-- collect the ring membership assertions at the top conjunctive level of the conjunctive hypothesis
-- Note: this will include various standard membership statements for all known rings
if (tree := parse_expr(conj)) = OM then
print("conj fails to parse; algebra inference aborted. ", conj);
error_count +:= 1;
return "failed\n";
end if;
dma := domain(memb_asserts := find_ring_members(tree(2)));
--print("check_an_algebra_inf: ",conj," ",stat);
algnodes := find_algebraic_nodes(tree); -- find the algebraic nodes in the tree of the hypothesis
--print("memb_asserts after find_algebraic_nodes: ",memb_asserts," algnodes: ",algnodes," ring_from_addn: ",ring_from_addn," ",conj);
--print("<P>algnodes in hypothesis for all known algebraic operators: ",[[x,unparse(y)]: [x,y] in algnodes]);
-- process the algebraic nodes, and if their bottoms are known to belong to the relevant ring,
-- add membership assertions for them to memb_asserts
for plus_op in dma loop -- handle all relevant algebraic sets one after another, starting with their identifying '+' signs
mapo := memb_asserts(plus_op); -- get the membership assertions for the algebraic set defined by the plus_op
ring_for_plus_op := ring_from_addn(plus_op); -- get the ring corresponding to the plus_op
known_memb_stgs := {unparse(memb_assertion(2)): memb_assertion in mapo};
-- get the initially known membership assertions for the current ring
for alg_node in (algnodes_for_this := algnodes(plus_op)?{}) loop
-- iterate over all the algebraic nodes in the hypothesis for the current ring
anbs := alg_node_bottoms(alg_node,plus_op);
-- get the bottom level blobs, whose top operators do not belong to the current algebraic set
-- see if any of these blobs involve subexpressions not known to be ring members, in
-- view of the available membership hypotheses and the membership conclusions just drawn from them,
-- provided that the blob is not a known constant associated with the '+' operator and ring being processed
if (bad_anbs := {stg_anb: anb in anbs | (stg_anb := unparse(anb)) notin known_memb_stgs
and alg_of(stg_anb) /= plus_op}) = {} then -- allow for ring constants like '1'
-- insert a membership assertion for the algebraic node into the available set of membership assertions
memb_asserts(plus_op) := memb_asserts(plus_op) with ["ast_in",alg_node,ring_for_plus_op];
end if;
end loop;
end loop;
-- find the membership assertions made at the top conjunctive level of the conclusion
-- Note: this will also include various standard membership statements for all known rings
-- this is a mapping from the '+' signs of rings to the membership statements found
if not is_tuple(stat) then -- stand-alone testing
concluding_memb_asserts := find_ring_members(tree2 := parse_expr(stat + ";")(2));
algnodes2 := find_algebraic_nodes(tree2); -- find the algebraic nodes in the tree of the conclusion
--print("stat: ",unparse(tree2)," algnodes2: ",[unparse(x): x in algnodes2]);
else
concluding_memb_asserts := find_ring_members(parse_expr(drop_labels(stat(1))(1) + ";")(2));
-- extract stat from tuple into which it was put and find top level membership assertions
algnodes2 := {};
end if;
-- note: these are returned as a map from the collection of all '+' operators of known rings to the
-- parsed subtree of nodes algebraic for this ring, i.e. nodes whose top operator is a known algebraic operator of the ring
for x in domain(algnodes2) loop algnodes(x) := algnodes(x)?{} + algnodes2(x); end loop;
-- now algnodes holds all the algebraic nodes from hypothesis and conclusion, grouped by ring
dma2 := domain(concluding_memb_asserts); -- this is the set of '+' signs of all known rings
good_algnodes := bad_algnodes := []; -- in case following loop is skipped entirely
rspan(conj,"; \t"); -- remove semi from conj before it is used to build extended conjunct. Thi is present in standalone case
extended_conjunct := if is_tuple(stat) then conj -- Note: 'stat' will only be a tuple if this routine is operating in server mode
else "((" + conj + ") and (not(" + stat + ")));" end if; -- algebraic equalities will be added to this progressively
--print("<P>concluding_memb_asserts: ",concluding_memb_asserts," algnodes: ",algnodes);
memb_assertions_remaining_to_to := []; -- membership assertions in the conclusion which may require an equality deduction
for plus_op in dma loop -- handle all relevant algebraic sets one after another, starting with their identifying '+' signs
mapo := memb_asserts(plus_op); -- get the membership assertions for the algebraic set defined by the plus_op
known_memb_stgs := {unparse(memb_assertion(2)): memb_assertion in mapo};
-- the string variants of available statements concerning items known to belong to the ring associated with the '+' sign
--print("<P>known_memb_stgs: ",known_memb_stgs," <BR>mapo ",mapo," memb_asserts ",memb_asserts);
--printy(["known members of ring: ",#known_memb_stgs," ",known_memb_stgs," concluding_memb_asserts: ",{unicode_unpahrse(x): x in concluding_memb_asserts(plus_op)?{}}]);
-- now examine all membership assertions in the conclusion being tested,
-- and if they follow from the collection of membership conjuncts available from the hypothesis,
-- add them to the set of membership assertions available; otherwise diagnose them
for memb_assertion in concluding_memb_asserts(plus_op)?{} loop -- iterate over all these membership assertions
anbs := alg_node_bottoms(memb_assertion(2),plus_op);
-- get the bottom level blobs, whose top operators do not belong to this algebraic set
--print("anbs: ",anbs);
-- see if any of these blobs involve subexpressions not known to be ring members, in
-- view of the available membership hypotheses and the membership conclusions just drawn from them,
-- provided that the blob is not a known constant associated with the '+' operator and ring being processed
if (bad_anbs := {stg_anb: anb in anbs | (stg_anb := unparse(anb)) notin known_memb_stgs
and alg_of(stg_anb) /= plus_op}) /= {} then -- allow for ring constants like '1'
-- if so, algebraic deduction fails
--if number_of_statement_theorem = OM then print("****** Error verifying step:"); continue; end if; -- allow for testing outside main environment
memb_assertions_remaining_to_to with:= memb_assertion;
else -- otherwise we accept this membership assertion into the set of membership assertions being collected
memb_asserts(plus_op) := memb_asserts(plus_op) with memb_assertion;
end if;
end loop; -- at this point we have collected all the membership statements available for the algebraic deduction to be made
-- we do this for each of the known rings, one by one
--print("memb_asserts: ",memb_asserts);
algnodes_for_this := algnodes(plus_op)?{}; -- access the algebraic nodes, from the hypothesis and conclusion, for this algebraic operator set
--if (na := #(after := {unparse(ma(2)): ma in memb_asserts(plus_op)} - known_memb_stgs)) /= 0 then printy(["new members of ring: ",after," ",na]); end if;
--print("<P>algnodes_for_this: ",plus_op," ",[unicode_unpahrse(algnode): algnode in algnodes_for_this],"<BR>membrelns after: ",{unicode_unpahrse(ma(2)): ma in memb_asserts(plus_op)});
-- filter out those algebraic nodes whose base elements are not known to be in the ring of the current plus_op
good_algnodes := bad_algnodes := []; -- will separate the acceptable algebraic nodes from the rest
for algnode in algnodes_for_this loop
anbs := alg_node_bottoms(algnode,plus_op);
if (bad_anbs := {stg_anb: anb in anbs | (stg_anb := unparse(anb)) notin known_memb_stgs
and alg_of(stg_anb) /= plus_op}) /= {} then -- allow for ring constants like '1'
bad_algnodes with:= algnode; -- algnode is bad
else
good_algnodes with:= algnode; -- otherwise accept this algnode as good
end if;
end loop;
-- now generate an equality for each good algebraic node
-- this equality sets the node equal to its standardized form
--print("algnodes_for_this: ",algnodes_for_this," good_algnodes: ",good_algnodes," bad_algnodes: ",bad_algnodes," known_memb_stgs: ",known_memb_stgs);
alg_equalities := [generate_algebraic_identity(algnode): algnode in good_algnodes];
--print("<BR>alg_equalities and memb_asserts: ",[unparse(x): x in alg_equalities],"\ngood_algnodes: ",[unparse(x): x in good_algnodes],"\nmemb_asserts: ",[unparse(x): x in memb_asserts(plus_op)],"\nbad_algnodes: ",[unparse(x): x in bad_algnodes],"\n\n");
-- add all the membership statements available for the current ring, together with all the generated identities
-- standardizing the algebraic nodes, to the conjunct. This is done progressively for the known rings, one after another
extended_conjunct := join([unparse(item): item in memb_asserts(plus_op)] + [unparse(item): item in alg_equalities] + [extended_conjunct]," and ");
end loop;
-- now that all available algebraic identities have been added to the conjunct,
-- try an equality inference
--printy(["extended_conjunct: ",extended_conjunct]);
--print("extended_conjunct: ",extended_conjunct);
rspan(extended_conjunct,"; \t"); extended_conjunct +:= ";"; -- force a single semicolon at the end prior to parsing
-- and now apply equality processing to the extended conjunct
if (peec := parse_expr(extended_conjunct)) = OM then -- an unexpected parsing problem
print("extended_conjunct in algebra inference fails to parse: ",extended_conjunct); error_count +:= 1; return "failed\n";
end if;
extended_conjunct := unparse(standardize_bound_vars(peec(2))) + ";";
-- standardize the extended conjunct
allow_blob_simplify := false; -- turn off the blobbing simplifications
equalities_rep_map := extract_equalities(extended_conjunct);
-- extract all conjoined equalities from the extended conjunction
--printy(["checking an 'ALGEBRA' inference: ",stat]);
--printy(["conj: ",conj]); printy(["after blobbing: ",unparse(blob_tree(parse_expr(conj)(2)))]);
--printy(["equalities_rep_map: ",{[x,unparse(y)]: [x,y] in equalities_rep_map}]);
--printy(["blob_name: ",blob_name]);
--print("checking an 'ALGEBRA' inference: ",conj," desired conclusion is ",stat); --," extended_conjunct ",extended_conjunct);
--print("conj: ",conj,"conj after blobbing: ",unparse(blob_tree(parse_expr(conj)(2))));
--print("equalities_rep_map: ",{[x,unparse(y)]: [x,y] in equalities_rep_map});
--print("blob_name: ",blob_name);
test_conj(extended_conjunct); -- test the extended conjunct for satisfiability
if not tested_ok then
error_count +:= 1;
printy(["equalities_rep_map is: ",equalities_rep_map]);
printy(["extended_conjunct is: ",extended_conjunct]);
printy(["bad_algnodes: ",[unparse(item): item in bad_algnodes]]);
return "failed\n";
end if;
--printy(["extended_conjunct is: ",extended_conjunct]); -- DEBUGGING ONLY
equalities_rep_map := {}; -- blank the equalities_rep_map to avoid use in other inferences
allow_blob_simplify := true; -- restore the blobbing simplifications
return "verified\n";
end check_an_algebra_inf;
procedure alg_node_bottoms(tree,plus_op); -- return list of algebraic node base elements for given node
var list_of_bottoms := []; -- will be collected by recursive workhorse
alg_node_bottoms_in(tree,plus_op); -- call recursive workhorse
--printy(["alg_node_bottoms: ",tree," ",list_of_bottoms]);
return list_of_bottoms;
procedure alg_node_bottoms_in(tree,plus_op); -- recursive workhorse
if is_string(tree) and alg_role_of(tree) /= "ring" then -- collect twigs if reached
list_of_bottoms with:= tree; return;
end if;
[n1,n2,n3] := tree;
if n1 = "ast_of" then -- we have a 'Rev' function, hich is a function and not an operator (in the scenario source)
-- here we check the function symbol, which comes in the second position
if alg_of(n2) /= plus_op or #(n3?[]) > 2 then list_of_bottoms with:= tree; return; end if;
-- collect non-algebraic nodes reached (non-algebraic function or more than 1 parameter)
alg_node_bottoms_in(n3(2),plus_op); -- check the argument, skipping the prefixed 'ast_list'
else -- otherwise we must have a binary algebraic operator written as an infix
if alg_of(n1) /= plus_op then
list_of_bottoms with:= tree; return;
end if;
alg_node_bottoms_in(n2,plus_op); -- process algebraic subnodes recursively
if n3 /= OM then alg_node_bottoms_in(n3,plus_op); end if; -- allow for monadic operator case
end if;
end alg_node_bottoms_in;
end alg_node_bottoms;
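-- E.g. (illustrative): relative to the '+' operator •PLUS of a ring family, the tree of
-- (a •PLUS f(b)) •PLUS c has bottoms a, f(b), and c, since 'f' does not belong to the family.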
-- ************* Simplification inference checking ***************
-- The following routine handles set theoretic simplification, of the kind invoked by Ref's
-- 'SIMPLF' inferencing primitive.
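-- Illustrative sketch of the mechanism used below: for each setformer node S in the tree whose
-- simplified form S' differs textually from S, the equality S = S' (or, for quantifier nodes, the
-- equivalence S •eq S') is conjoined to the hypothesis, after which an 'EQUAL'-style satisfiability
-- test is run on the extended conjunct.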
procedure check_a_simplf_inf(statement_stack,stat,stat_no,hint,restr); -- handle a single set-theoretic standardization inference
-- we find all the equalities arising from the standardization of nodes,
-- add these to the start of the conjunct, and then use equality inference
conj := form_elem_conj(restr,statement_stack); -- build conjunction to use in ELEM-type deductions
if (pec := parse_expr(conj)) = OM then
printy(["\n****** Error verifying step: "+ number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nSyntax error in 'Simplf' conjunction"]);
printy(["Conjunction is: ",conj]);
error_count +:= 1;
return;
end if;
pec_2 := pec(2);
-- find all the setformer and quantifier nodes within the tree
--print("simplify_builtins: ",simplify_builtins);
-- pec_2 := simplify_builtins(pec_2); -- added June 24; simplify builtin operators to further standardize
-- elements within the setformers and quantifiers to be processed
setfs_and_quants := find_setformers_and_quants(tree := standardize_bound_vars(pec_2));
--printy(["********* SIMPLF used to deduce: ",stat]);
-- form equalities or equivalences as appropriate. Such an equality is formed for
-- each setformer and quantifier node in the tree, and states that the expression
-- represented by the subtree is equal to its simplified form
equalities := [if item(1) in {"ast_genset","ast_genset_noexp"} then ["ast_eq",item,simpitem]
else ["DOT_EQ",item,simpitem] end if: item in setfs_and_quants |
unparse(simpitem := simplify_setformer(item)) /= unparse(item)];
if equalities = [] then -- SIMPLF cannot be applied
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nSIMPLF inappropriate in proof step"]);
printy([" since there are no simplifiable setformers or quantifiers"]);
printy(["setformers and quantifiers found: ",[unparse(item): item in setfs_and_quants]]);
error_count +:= 1;
return;
end if;
-- adjoin all these auxiliary equalities to the conjunct to be tested for satisfiability
equalities_clause := "(" + join([unparse(item): item in equalities],") and (") + ")";
extended_conjunct := equalities_clause + " and " + conj;
--print("extended_conjunct before standardization: ",extended_conjunct);
extended_conjunct := unparse(standardize_bound_vars(parse_expr(extended_conjunct)(2))) + ";";
-- standardize the extended conjunct
-- and now apply equality processing to the extended conjunct
allow_unblobbed_card := true; -- don't strong-blob the cardinality operator during equality inferencing
allow_blob_simplify := false; -- turn off the blobbing simplifications
equalities_rep_map := extract_equalities(extended_conjunct);
-- extract all conjoined equalities from the extended conjunction
--printy(["checking an 'SIMPLF' inference: ",stat]);
--printy(["conj: ",conj]); printy(["after blobbing: ",unparse(blob_tree(parse_expr(conj)(2)))]);
--printy(["equalities_rep_map: ",{[x,unparse(y)]: [x,y] in equalities_rep_map}]);
--printy(["blob_name: ",blob_name]);
test_conj(extended_conjunct); -- test the extended conjunct for satisfiability
if not tested_ok then
error_count +:= 1;
printy(["equalities are: ",[unparse(item): item in equalities]]);
printy(["equalities_rep_map is: ",equalities_rep_map]);
printy(["extended_conjunct is: ",extended_conjunct]);
printy(["setformers and quantifiers found: ",[unparse(item): item in setfs_and_quants]]);
elseif optimization_mark or optimize_whole_theorem then -- ******** optimization is desired ********
save_optimizing_now := optimizing_now; optimizing_now := true; -- set optimization flag to suppress diagnosis;
needed := search_for_all_w_extra(statement_stack,equalities_clause);
-- do the optimization search, passing the equalities_clause as an 'extra'
print("<BR>The lines of context required for proof of simplification inference " + hint + " ==> " + stat + " in line ",stat_no," are ",needed);
--print(" Optimized pure inference time for this inference step is: ",best_time_so_far);
end if;
allow_unblobbed_card := false; -- restore strong-blobbing of the cardinality operator
equalities_rep_map := {}; -- null to avoid interference with other inferences
allow_blob_simplify := true; -- restore the blobbing simplifications
end check_a_simplf_inf;
-- ************* Use_def inference checking ***************
-- The following routine handles Ref Use_def inferences. It begins by finding the definition of the symbol being used,
-- which must belong either to the THEORY within which the inference appears, or within one of the ancestral
-- theories of that THEORY. The variables and the right-hand side of the definition are then examined,
-- and all the free variables and function or predicate symbols appearing in the right-hand side are collected.
-- If the function or predicate being defined is among them, the definition is recursive,
-- and is handled by the special procedure 'recursive_definition_OK' seen below. Otherwise we simply check to
-- verify that all of the free variables of the definition's right-hand side appear as parameters of the
-- function or predicate symbol being defined, and that all the other function, predicate, and constant symbols
-- appearing on the right have been defined previously, either in the THEORY containing the definition
-- or in one of its ancestor THEORYs. Note that for this purpose symbols assumed by a theory must count
-- as symbols defined in it.
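-- For example (illustrative only, with a hypothetical definition written schematically): given a
-- prior definition of sing(x) as {x}, the inference Use_def(sing) ==> sing(a) = {a} is checked by
-- expanding 'sing' into its defining right-hand side throughout the conjunct and then testing the
-- result in the usual ELEM fashion.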
procedure lookup_def_of_symbol(symb_referenced,theorem_id); -- look up the definition of a symbol referenced in a given theorem
thry_of_th := theory_of(theorem_id); -- get the theory of the current theorem
symb_referenced := -- convert the prefix characters of special operator names to 'name_underscore' form
if symb_referenced in {"range","domain","pow"} then "ast_" + symb_referenced
elseif #symb_referenced >= 1 and symb_referenced(1) in "¥•" then "DOT_" + case_change(symb_referenced(2..),"lu") -- infixes
elseif #symb_referenced >= 1 and symb_referenced(1) = "~" then "TILDE_" + case_change(symb_referenced(2..),"lu")
elseif #symb_referenced >= 1 and symb_referenced(1) = "@" then "AT_" + case_change(symb_referenced(2..),"lu")
elseif #symb_referenced >= 1 and symb_referenced(1) = "#" then case_change("ast_nelt","lu")
elseif symb_referenced = "[]" then case_change("ast_enum_tup","lu")
else case_change(symb_referenced,"lu") end if;
res := [get_symbol_def(symbol_defined := if symb_referenced in {"ast_range","ast_domain","ast_pow"} then
case_change(symb_referenced,"lu") else symb_referenced end if,thry_of_th),symb_referenced,symbol_defined];
--print(["lookup_def_of_symbol symb_referenced: ",symb_referenced," theorem_id: ",theorem_id," res: ",symb_referenced," ",symbol_defined," ",res]);
return res;
end lookup_def_of_symbol;
procedure get_def_of_symbol(symb_referenced,theorem_id); -- get definition of indicated symbol of theorem
[def_of_symbol,symb_referenced,symbol_defined] := lookup_def_of_symbol(symb_referenced,theorem_id); -- look up the definition of a symbol referenced in this theorem
if (sec_of_symbdef := theorem_sno_map("D" + case_change(symb_referenced,"lu"))) = OM and (nsr := #symb_referenced) > 8 and symb_referenced(nsr - 7..nsr) = "_THRYVAR"
and def_of_symbol /= OM then -- we have a '_THRYVAR' defined in the current theory
null; -- suppress diagnostic for _thryvar's defined in theorem of theory
elseif (sec_of_symbdef?1000000) > (sec_of_thm := theorem_sno_map(theorem_id)?0) then
printy(["\n****** Error verifying step: "+ number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nsymbol referenced ",symb_referenced," is defined after use: ",sec_of_symbdef," ",sec_of_thm]);
error_count +:= 1;
printy(["symbol_defined: ",symbol_defined," ",theorem_sno_map]);
return;
end if;
if def_of_symbol = OM then
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nUse_def error, undefined symbol",
"\nsymbol ",symbol_defined," is referenced but undefined"]);
error_count +:= 1;
return;
end if;
return def_of_symbol;
end get_def_of_symbol;
procedure check_def_of_symbol(symb_referenced,theorem_id); -- determine whether indicated symbol of theorem is defined
[def_of_symbol,symb_referenced,symbol_defined] := lookup_def_of_symbol(symb_referenced,theorem_id); -- look up the definition of a symbol referenced in this theorem
if (sec_of_symbdef := theorem_sno_map("D" + case_change(symb_referenced,"lu"))) = OM and (nsr := #symb_referenced) > 8 and symb_referenced(nsr - 7..nsr) = "_THRYVAR"
and def_of_symbol /= OM then -- we have a '_THRYVAR' defined in the current theory
return true; -- symbol is defined
elseif (sec_of_symbdef?1000000) > (sec_of_thm := theorem_sno_map(theorem_id)?0) then
return false; -- symbol not yet defined
end if;
if def_of_symbol = OM then
return false;
end if;
return true;
end check_def_of_symbol;
procedure check_a_use_def(statement_stack,stat,theorem_id,hint,stat_no); -- check a single Use_def inference
--print("check_a_use_def: ",statement_stack);
tail := hint(9..); symb_referenced := break(tail,")");
symb_referenced := case_change(symb_referenced,"lu"); -- find the symbol whose definition is referenced
if symb_referenced(1) = "¥" then symb_referenced := "DOT_" + symb_referenced(2..); end if;
if symb_referenced(1) = "•" then symb_referenced := "DOT_" + symb_referenced(2..); end if;
if symb_referenced(1) = "@" then symb_referenced := "AT_" + symb_referenced(2..); end if;
if symb_referenced(1) = "~" then symb_referenced := "TILDE_" + symb_referenced(2..); end if;
symb_referenced := lowcase_form(symb_referenced)?symb_referenced;
match(tail,")"); break(tail,"("); rspan(tail," \t"); -- extract any context hint which may attach to the Use_def
context_hint := if tail = "" then "" else "ELEM" + tail end if; -- if a parenthesized contruct remains, reconstruct it as an "ELEM" hint
if (def_of_symbol := get_def_of_symbol(symb_referenced,theorem_id)) = OM then print("Undefined symbol in Use_def check"); return; end if;
[def_vars,def_body] := def_of_symbol; -- get the list of definition arguments and the parsed definition right side
[free_variables,function_symbols] := find_free_vars_and_fcns(def_body);
--print("<P>[free_variables,function_symbols]: ",[free_variables,function_symbols]," ",symb_referenced);
if symb_referenced in function_symbols then -- definition is recursive; check that it has required form
--print("<P>recursive definition[free_variables,function_symbols]: ",[free_variables,function_symbols]);
stat_tree := parse_expr(drop_labels(stat)(1) + ";")(2);
--printy([(is_tuple(stat_tree) and ((st1 := stat_tree(1)) = "ast_eq" or st1 = "DOT_EQ"))," ",stat_tree]);
--printy([is_tuple(conc_left := stat_tree(2)) and conc_left(1) = "ast_of" and conc_left(2) = symb_referenced]);
--printy([use_def_act_args := conc_left(3)(2..)," ",def_vars," ",def_body]);
if not (is_tuple(stat_tree) and ((st1 := stat_tree(1)) = "ast_eq" or st1 = "DOT_EQ")
and (is_tuple(conc_left := stat_tree(2)) and conc_left(1) = "ast_of" and conc_left(2) = symb_referenced)
and #(use_def_act_args := conc_left(3)(2..)) = #def_vars) then
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nIllegal recursive Use_def inference in proof step",
"\nStatement does not have required equality/equivalence form and right number of left-hand arguments "]);
error_count +:= 1;
return;
else -- conclusion is of legal form; make substitutions for argument of recursive definition
subst_map := {[vj,use_def_act_args(j)]: vj = def_vars(j)};
reconstructed_def := [stat_tree(1),["ast_of",symb_referenced,["ast_list"] + def_vars],def_body];
substituted_def := unparse(substitute(reconstructed_def,subst_map));
-- now conjoin the substituted_def and the negation of the statement, and test it for satisfiability
conj := "(" + substituted_def + ") and (not(" + drop_labels(stat)(1) + "));";
test_conj(conj); -- test this conjunct for satisfiability
if not tested_ok then
printy(["Checking recursive Use_def(",symb_referenced,") inference: ",stat," substituted definition is: ",substituted_def]);
printy(["Recursive 'Use_def' inference failed in: ",theorem_id," ",stat]);
error_count +:= 1;
end if;
end if;
--printy(["use of recursive definition: "]);
return; -- done with this case (use of recursive definition)
end if;
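-- E.g. (illustrative): for a recursive definition with left side f(x) and right side e(x,f),
-- a conclusion of the form f(c) = e(c,f) is verified by substituting c for x in the reconstructed
-- equality f(x) = e(x,f) and testing the conjunction of the substituted definition with the
-- negated conclusion for satisfiability, as just seen.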
equalities_rep_map := {}; -- no equalities used in this inference mode
allow_blob_simplify := true; -- blob simplification and bound var standardization turned on
-- otherwise handle like ELEM deduction,
-- but replace all occurrences of referenced symbol by the defining formula
conj := debug_conj := form_elem_conj(context_hint,statement_stack);
-- build conjunction to use in ELEM-type deductions; if the context is restricted, pass it as an 'ELEM' hint
-- the hint passed should either be an empty string or have the form appropriate to "ELEM", namely ELEM(Stat..,Stat..,..)
mismatched_args := false; -- will be set to true if a mismatched argument number is detected
conj := replace_fcn_symbol(orig_conj := parse_expr(conj)(2),symb_referenced,def_vars,def_body); -- expand the definition within the conjunction
--print("<BR>orig_conj: ",unparse(orig_conj)," conj: ",unparse(conj)," symb_referenced: ",symb_referenced);
if mismatched_args then -- issue diagnosis and return
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nmismatched argument number in definition use",
"\nvariables in definition are ",mismatched_symbvars," but definition use is ",mismatched_expn]);
error_count +:= 1;
return;
end if;
--printy(["after replace_fcn_symbol conj is: ",unparse(conj)," orig_conj is: ",unparse(orig_conj)," symb_referenced is: ",symb_referenced," def_vars: ",def_vars," def_body: ",unparse(def_body)]);
if show_details then printy(["\nconj: ",conj]); end if;
test_conj(conj); -- test this conjunct for satisfiability
if not tested_ok then
print(["Checking Use_def(",symb_referenced,") inference: ",stat," definition is: ",unparse(def_body)]);
print(["'Use_def' inference failed in: ",theorem_id," ",stat]); error_count +:= 1;
print("conj tested in use_def was: ",unparse(conj));
elseif optimization_mark or optimize_whole_theorem then -- ******** optimization is desired ********
save_optimizing_now := optimizing_now; optimizing_now := true; -- set optimization flag to suppress diagnosis;
-- replace the statement_stack entries by their substituted versions
statement_stack := [unparse(replace_fcn_symbol(parse_expr(drop_labels(stack_elt + ";")(1))(2),symb_referenced,def_vars,def_body)):
stack_elt in statement_stack];
-- and now treat as ELEM deduction
print("<BR>Lines of context required for proof of Use_def statement ",stat_no,", namely ",
hint," ===> ",statement_stack(#statement_stack)," are ",search_for_all(statement_stack));
--print(" Optimized pure inference time for this inference step is: ",best_time_so_far);
optimizing_now := save_optimizing_now; -- restore optimization flag
end if;
end check_a_use_def;
procedure get_symbol_def(symb,thry); -- get the definition of a symbol, in 'thry' or any of its ancestors
thry_list := [thry];
while (thry := parent_of_theory(thry)) /= OM loop thry_list with:= thry; end loop;
if exists thry in thry_list | (the_def := symbol_def(thry_and_symb := thry + ":" + symb)) /= OM then
left_of_def_found := left_of_def(thry_and_symb); -- get the left part of the symbol's definition, if there is any
--print("<BR>thry_and_symb: ",thry_and_symb," ",left_of_def_found?"UNDEF");
return the_def;
end if;
return OM;
end get_symbol_def;
-- ************* Suppose-not inference checking ***************
-- The following routine verifies that the Suppose_not statement appearing at the very start of a proof
-- is a proper equivalent to the negative of the theorem asserted. As presently arranged,
-- the procedure handles all of the proofs and their Suppose_nots in a stated range by iterating over them.
-- The free variables of the Theorem to which each Suppose_not attaches are found in left-to-right order,
-- the variables supplied in the Suppose_not are substituted for them, and the resulting formula negated
-- to produce a formula S. We also check to verify that all of these variables are distinct. Finally,
-- we use our back-end inferencing mechanisms to check that the stated form of the Suppose_not is equivalent
-- to the substituted formula S.
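-- For example (illustrative only): for a theorem of the form P(x) •imp Q(x), with x its only free
-- variable, the opening step Suppose_not(c) ==> P(c) and (not Q(c)) is accepted, since substituting
-- c for x and negating the implication yields a statement equivalent to the stated 'suppose' form.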
procedure check_suppose_not(supnot_trip,pno,th_id); -- check a suppose_not inference, represented by a triple
init_logic_syntax_analysis(); -- initialize for logic syntax-tree operations
-- the supnot data was collected earlier (probably during an earlier run) and consists of pairs
-- [negated_form,discharge_result]; here 'negated_form' is the negation (with possible substitution
-- of constants for the universally quantified variables of the original theorem, which is
-- 'discharge_result') of 'discharge_result'.
if supnot_trip = OM then
error_count +:= 1;
printy(["\n****** Error verifying step: " + 1 + " of theorem "
+ pno + "\n\t", str(statement_being_tested),"\nMissing suppose not\n"]);
return;
end if;
[replacement_vars,sn_form,theorem_form] := supnot_trip; -- break the triple into the 'suppose' and the 'theorem' part
thm_no := pno;
thry_of_th := theory_of(th_id); -- get the theory of the current theorem
if (aac := assumps_and_consts_of_theory(thry_of_th)) = OM then
print("Stopped since theory of theorem ",th_id?"UNDEFINED"," namely ",thry_of_th?"UNDEFINED_THEORY"," seems to have no defined assumps_and_consts"); stop;
end if;
[assumed_symbols,-] := aac;
assumed_vars := {case_change(x,"lu"): x in assumed_symbols | "(" notin x}; -- the set of simple assumed variables of the theory of the theorem
statement_being_tested := "Suppose_not(" + replacement_vars + ") ==> " + sn_form; -- reconstruct the Suppose_not statement, for reporting
sn_form := join(breakup(sn_form,"&")," and "); theorem_form := join(breakup(theorem_form,"&")," and "); -- replace ampersands with 'ands'
freevs_theorem := find_free_vars(parse_expr(theorem_form + ";")(2)); -- get the free variables of the 'theorem' part
freevs_theorem := [v: v in freevs_theorem | v notin assumed_vars];
-- the assumed variables of the theory of the theorem do not count as free variables of the theorem
--print("***** thry_of_th: ",thry_of_th," ",freevs_theorem," assumed_vars: ",assumed_vars);
vars_suppose := breakup(replacement_vars," ,;"); -- get the replacement variables supplied with the 'suppose' part
firstloc_theorem := firstloc_suppose := {}; -- will determine the first appearance of each free variable, so as to locate them in left_to_right order
pieces := breakup(sn_form,",./?><;'\":][}{\\| \t=-+_)(*&^%$#@!~`");
for p = pieces(j) | p /= "" loop pc := case_change(p,"lu"); firstloc_suppose(pc) := firstloc_suppose(pc)?j; end loop;
pieces := breakup(theorem_form,",./?><;'\":][}{\\| \t=-+_)(*&^%$#@!~`");
for p = pieces(j) | p /= "" loop pc := case_change(p,"lu"); firstloc_theorem(pc) := firstloc_theorem(pc)?j; end loop;
-- get the tuple of free variables in the statement to be deduced by the discharge and in the 'supposed' form
freevs_theorem := merge_sort({[firstloc_theorem(case_change(v,"lu"))?0,v]: v in freevs_theorem});
freevs_theorem := [v: [-,v] in freevs_theorem | not((nv := #v) >= 8 and v(nv - 7..) = "_THRYVAR")]; -- eliminate the thryvars, which are not free
freevs_theorem := [v in freevs_theorem | lookup_def_of_symbol(v,th_id)(1) = OM]; -- eliminate the symbols with definitions, which are not free
-- eliminate the assumed variables of the theory of this theorem, which are not free either
if #freevs_theorem = #vars_suppose then -- both formulae contain the same number of free variables, so we can set up a substitution map
subst_map := {[v,vars_suppose(j)?v]: v = freevs_theorem(j)}; -- set up the substitution map which should generate the 'suppose not' clause
-- from the discharge_result being deduced
theorem_tree := parse_expr(theorem_form + ";")(2);
theorem_is_imp := theorem_tree(1) = "DOT_IMP"; -- see if the theorem is an implication
theorem_subst := unparse(tsub := substitute(theorem_tree,subst_map));
-- get the parse tree of the theorem being deduced and make the inferred substitution in it
--print("<P>check_suppose_not:: ",sn_form," ",#(sn_form)," ",trim(sn_form) = "AUTO"," ",theorem_subst);
if trim(sn_form) = "AUTO" then -- return the negated theorem, or its sligly transoformed version if it is an implicatioon
if not theorem_is_imp then
return "(not(" + theorem_subst + "))";
else -- the cited theorem is an implication, so return its negation as 'a and (not b)'
return "(" + unparse(tsub(2)) + ") and (not (" + unparse(tsub(3)) + "))";
end if;
end if; -- accept the suppose_not without further ado, returning the negated theorem. In all other cases, OM will be returned
--printy(["before negated inequivalence"]);
if running_on_server then -- use the PC character for equivalence
negated_inequivalence := "not((not(" + sn_form + ")) •eq (" + theorem_subst + "));";
-- form what should be an impossible inequivalence
else -- use the Mac character for equivalence
negated_inequivalence := "not((not(" + sn_form + ")) •eq (" + theorem_subst + "));";
-- form what should be an impossible inequivalence (Mac version)
end if;
--print("negated_inequivalence: ",negated_inequivalence);
ne_tree := parse_expr(negated_inequivalence)(2); -- parse it
-- test the negated inequivalence to see if it has a model
save_allow_unblobbed_fcns := allow_unblobbed_fcns; allow_unblobbed_fcns := false;
bt := blob_tree(ne_tree);
formula_after_blobbing := otree; -- to generate report when inference is slow
formula_after_boil_down := ["No_boildown"]; -- to generate report when inference is slow
mb := model_blobbed(bt);
allow_unblobbed_fcns := save_allow_unblobbed_fcns; -- restore
if mb /= OM then
error_count +:= 1;
--print(unparse(ne_tree),"\n",unparse(bt)," mb: ",mb);
printy(["\n****** Error verifying step: " + 1 + " of theorem "
+ thm_no + "\n\t", statement_being_tested," ",unparse(bt)," ",negated_inequivalence," ",running_on_server,"\nSuppose_not inference failed\n"]);
end if;
else -- mismatch between the number of free variables of the theorem and the number of substituends in the Suppose_not
error_count +:= 1;
printy(["\n****** Error verifying step: " + 1 + " of theorem "
+ thm_no + "\n\t", statement_being_tested,"\nSuppose_not inference has mismatched variable set: ",
freevs_theorem," ",vars_suppose," ",suppose_not_tup,"\n"]);
end if;
end check_suppose_not;
-- ************* Monotonicity inference checking ***************
-- The following routine handles inferences by monotonicity. It can make use of previously proved
-- information concerning the monotonicity properties of particular functions and predicates,
-- supplied to it by declarations of the kind illustrated by the example DECLARE monotone("Un,++;Pow,+;#,+;•PLUS,+,+").
-- The routine calls the underlying 'monotone_infer' procedure (found in the logic_elem_supplement file),
-- to which only formulae of one of the forms 'e1 incs e2', 'e1 •incin e2', 'e1 = e2', 'e1 •imp e2',
-- and 'e1 •eq e2' can be submitted. Hence our routine can only handle formulae of this kind.
-- See the lengthy comment preceding 'monotone_infer' to see how it expands the statement submitted
-- to it into the modified statement which the following procedure then tests for satisfiability using our standard
-- back-end satisfiability-testing routines.
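-- For example (illustrative only, relying on the 'Pow,+' declaration shown above, which marks 'Pow'
-- as monotone increasing): from s •incin t a step may infer Pow(s) •incin Pow(t); 'monotone_infer'
-- expands the final statement of the step into a form making this monotonicity relationship
-- explicit, which the code below then conjoins with the prior context and tests for satisfiability.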
procedure check_a_monot_inf(count,statement_stack,hbk,theorem_id); -- check a single Set_monot inference
if "(" in hbk then -- the 'ELEM' phase of the citation inference is restricted
inference_restriction := rbreak(hbk,"("); rmatch(inference_restriction,")");
else
inference_restriction := ""; -- citation restriction is null
end if;
--printy(["sts::: ",unparse(parse_expr(drop_labels(statement_stack(nss := #statement_stack))(1) + "];")(2)));
sts := "(" + unparse(monotone_infer(parse_expr(drop_labels(statement_stack(nss := #statement_stack))(1) + ";")(2))) + ")";
-- invert the final statement
--printy(["sts: ",sts]);
conj := build_conj(inference_restriction,statement_stack(1..nss - 1) with sts,OM);
-- final OM parameter indicates no cited theorem or statement
-- build conjunction, either of entire stack or of statements indicated by conjunction restriction
test_conj(conj); -- test this conjunct for satisfiability
--printy(["tested_ok: ",tested_ok," ",conj]);
if not tested_ok then
printy(["Conjunct formed using expanded Set_monot-statement fails unsatisfiability check ***"]);
error_count +:= 1;
printy(["Citation check number: ",citation_check_count," error number: ",citation_err_count +:= 1]);
end if;
end check_a_monot_inf;
-- ************* Loc_def inference checking ***************
procedure check_a_loc_def(statement_stack,theorem_id); -- check a single Loc_def inference, or conjoined collection thereof
conjoined_statements := [drop_labels(stat)(1): stat in statement_stack];
loc_def_tree := parse_expr((def_stat := conjoined_statements(ncs := #conjoined_statements)) + ";");
-- get the last stacked statement, which is the definition itself, and parse it
if loc_def_tree = OM then
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nBad loc_def syntax"]);
error_count +:= 1; return;
end if;
loc_def_tree := loc_def_tree(2); [n1,-,-] := loc_def_tree;
if n1 /= "ast_and" and n1 /= "AMP_" then return check_a_loc_def_in(statement_stack,theorem_id); end if;
-- treat using workhorse routine if not a conjunct
-- if we have several conjoined definitions, walk the tree to collect them into a tuple
tup := conj_to_tuple(loc_def_tree);
-- then unparse the elements of this tuple, substitute them successively for the
-- last element of the stack, and pass them to the workhorse for checking
otup := tup; tup := [unparse(x): x in tup];
save_error_count := error_count;
nss := #statement_stack; save_stack_top := statement_stack(nss);
--print("loc_def_tree: ",loc_def_tree," tup: ",tup);
for stg in tup loop
statement_stack(nss) := stg;
check_a_loc_def_in(statement_stack,theorem_id); -- call the workhorse
if error_count > save_error_count then -- stop at the first detected error,
return;
end if;
end loop;
statement_stack(nss) := save_stack_top; -- restore the stack
-- if no errors were found, check that all the symbols defined are distinct.
list_of_targets := [x(2): x in otup];
if #{x: x in list_of_targets} /= #list_of_targets then -- some target variable is duplicated
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nIllegal repeated target variable in local definition, these must all be distinct."]);
error_count +:= 1; return;
end if;
end check_a_loc_def;
procedure conj_to_tuple(tree); -- walks a syntax tree of a conjunct, converting it to a tuple
var the_tup;
the_tup := []; -- initialize
conj_to_tuple_in(tree); -- call workhorse
return the_tup;
procedure conj_to_tuple_in(tree); -- workhorse: walks a syntax tree of a conjunct, converting it to a tuple
[n1,n2,n3] := tree; -- unpack
if n1 /= "ast_and" and n1 /= "AMP_" then the_tup with:= tree; return; end if; -- just collect non-conjuncts
conj_to_tuple_in(n2); conj_to_tuple_in(n3); -- otherwise walk the two branches of the conjunct
end conj_to_tuple_in;
end conj_to_tuple;
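-- E.g.: the parse tree of (a and b) and c is converted into the tuple [a,b,c] of its three
-- parsed conjuncts, since top-level 'ast_and' nodes are walked rather than collected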
-- The following simple routine handles Ref inferences of Loc_def type. It checks
-- to ensure that the statement passed to it has the form of an equality between a
-- left-hand side which is simply a previously unused function or predicate symbol
-- with distinct arguments, and a right-hand expression having no other free variables and
-- whose other function and predicate symbols have all been defined previously.
procedure check_a_loc_def_in(statement_stack,theorem_id); -- check a single Loc_def inference
-- procedure check_a_loc_def(statement_stack,theorem_id); -- check a single Loc_def inference
-- we check that the left side of the local definition is a simple, argument-free variable never used previously
-- that no free variables appear on the right hand side of the local definition, which must be a valid
-- expression, and that all other function and predicate symbols appearing in the right hand side
-- have been defined previously, either in the THEORY containing the Loc_def or in one of its ancestor THEORYs.
-- Note that no symbol appearing on the right which has been defined in this way should count as
-- a free variable of the right hand side.
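-- For example (illustrative only, hypothetical symbols): the step Loc_def: c = {x in s0 | x /= 0},
-- where s0 is a previously defined constant and c has not been used earlier in the proof, passes
-- these checks; a right-hand side mentioning c itself, or containing a genuinely free variable,
-- would be rejected.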
conjoined_statements := [drop_labels(stat)(1): stat in statement_stack];
loc_def_tree := parse_expr((def_stat := conjoined_statements(ncs := #conjoined_statements)) + ";");
-- if loc_def_tree = OM then
-- printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
-- + number_of_statement_theorem + "\n\t", statement_being_tested,
-- "\nBad loc_def syntax"]);
-- error_count +:= 1; return;
-- end if;
loc_def_tree := loc_def_tree(2);
if not is_tuple(loc_def_tree) or loc_def_tree(1) /= "ast_eq" or not is_string(def_var := loc_def_tree(2)) then
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nLocal definition does not have the required form"]);
printy(["Local definition " + def_stat + " must be simple equality with a variable not used previously ***"]);
error_count +:= 1; return;
end if;
if def_var in find_free_vars(loc_def_tree(3)) then
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nLocal definition cannot be recursive ***"]);
error_count +:= 1; return;
end if;
conj_tree := parse_expr("(" + join(conjoined_statements(1..ncs - 1),") and (") + ");")(2); -- build into parsed conjunction
if def_var in find_free_vars(conj_tree) then -- check the prior context built just above for an earlier use of the defined variable
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"Bad defined variable in local definition",
"\nDefined variable of local definition ",def_stat," has been used previously in theorem ",theorem_id," ***"]);
error_count +:= 1; return;
end if;
-- we must also check that the target variable of the local definition does not appear within this theorem as a function
-- symbol, and that it is not a defined variable, either in the current theory or in any of its ancestral theories.
end check_a_loc_def_in;
-- ************* Assumption inference checking for theorems within theories ***************
-- The following routine handles assumption inference checking for theorems within theories.
-- We simply collect all the assumptions of the THEORY containing the 'Assump' inference and of all the
-- ancestor THEORYs of that, and then verify that the stated conclusion of the 'Assump'
-- is a valid consequence of their conjunction.
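-- For example (illustrative only, hypothetical theory): if the enclosing THEORY assumes s0 /= {},
-- then within one of its theorems the step Assump ==> s0 /= {} is verified by conjoining the
-- negation of the stated conclusion with the collected assumptions of the THEORY and its ancestors
-- and testing this conjunction for satisfiability.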
procedure check_an_assump_inf(conj,stat,theorem_id); -- handle a single 'Assumption' inference
thy := theory_of_inf := theory_of(theorem_id); -- get the theory to which the inference belongs
assumps := assumps_and_consts_of_theory(thy)(2); -- get the assumptions of the theory
--printy(["theorem_id,thy: ",theorem_id," ",thy," assumps: ",assumps]);
-- include the assumptions of each ancestor theory
while (thy := parent_of_theory(thy)) /= OM loop
assumps +:= assumps_and_consts_of_theory(thy)(2);
end loop;
-- conjoin negative of statement with collected assumptions
conj := "(" + join(assumps,") and (") + ") and (not(" + drop_labels(stat)(1) + "));";
--printy(["conj-assumps: ",conj]);
if show_details then printy(["\nconj: ",conj]); end if;
test_conj(conj); -- test this conjunct for satisfiability
if not tested_ok then
printy(["'Assump' inference failed in: ",theorem_id," ",stat]);
error_count +:= 1;
end if;
end check_an_assump_inf;
-- ************* THEORY-application inference checking (outside proofs) ***************
-- Our next group of routines collectively handles all allowed forms of proof by THEORY application.
-- Four basic routines are provided, one to handle non-Skolem APPLY inferences at the Theorem level
-- (hence outside of any proof), and another to handle APPLY inferences within proofs.
-- Two additional routines are provided to handle Skolem inferences, which are treated as if they were
-- applications of a built-in theory called 'Skolem'.
-- Only applications of internal Theories are handled by this group of routines. Collections of theorems
-- supplied by external verification systems, which Ref also allows, are handled by other routines seen below.
-- Theory applications at the Theorem level consist of an APPLY clause of the ordinary form followed
-- immediately by a standard-form Theorem statement, which has however no other proof than the APPLY clause itself.
-- The justifying clauses required to validate such an APPLY must be found among the 20 preceding theorems
-- in the same THEORY as the APPLY.
-- Theory applications internal to proofs consist of an APPLY clause of the ordinary form followed
-- by an ordinary Ref formula. The justifying clauses required to validate such an APPLY must be found in
-- the normal logical context of the APPLY inference.
-- Skolem applications at the Theorem level consist of an APPLY(..) Skolem... clause followed
-- immediately by a standard-form Theorem statement, which has no other proof than the
-- APPLY(..) Skolem... clause itself. The new function symbols defined by such a 'Skolem APPLY'
-- (none of which can have been defined previously) are shown as replacements for nominal variables
-- of the form vnnn_thryvar nominally defined within the built-in theory 'Skolem'. The
-- conclusion of such a Skolem inference must be a universally quantified statement involving only
-- previously defined functions and constants, along with the symbols listed as being defined
-- by the Skolem inference. We check that the conclusion of the Skolem inference is a universally
-- quantified formula whose bound variables appear without attached limits, and that each occurrence of
-- every function defined by the Skolem inference has simple variables as arguments, these
-- variables always being a prefix of the quantified variables found at the start of the Skolem conclusion.
-- Once this condition has been verified, the Skolem conclusion can be converted to a Skolem assumption
-- replacing every appearance of each such function by a bound variable having the same name as the function,
-- inserting an existentially quantified occurrence of this variable at the appropriate point in the
-- list of universal quantifiers originally prefixed to the Skolem conclusion. For example, a
-- Skolem conclusion originally having a form like
-- (FORALL x,y,z | ... f(x,y)...g(x,y,z)..f(x,y)..h(x)..)
-- converts in this way to the Skolem assumption
-- (FORALL x | (EXISTS h | (FORALL y | (EXISTS f |
-- (FORALL z | (EXISTS g | ... f...g..f..h..))))))
-- Once this Skolem assumption has been constructed in the manner described, justifying clauses validating it
-- must be found among the 20 preceding theorems in the same THEORY as the APPLY.
-- Skolem applications internal to proofs consist of an APPLY clause of the ordinary form followed
-- by a universally quantified Ref formula involving only previously defined functions and constants,
-- along with the symbols listed as being defined by the Skolem inference. A Skolem assumption is constructed
-- from this Skolem conclusion in much the same way as for Skolem applications at the Theorem level.
-- However, the justifying clauses validating a Skolem application internal to a proof must be found in
-- the normal logical context of the Skolem inference.
procedure check_an_apply_inf(next_thm_name,theory_name,apply_params,apply_outputs);
-- handle a single, non-skolem, theory-application inference
-- To handle these inferences, we first separate out the defined-parameter list and the substitution-list of the
-- APPLY. These are coarsely validated (prior to the call of this routine), and then the apply_params are further
-- validated by verifying that their individual parts have forms corresponding to valid definitions.
-- perform comprehensive syntactic check of APPLY statements
if (res := test_apply_syntax(theory_name,apply_params,apply_outputs)) = OM then return OM; end if;
[assumps_and_consts,apply_params_parsed] := res;
-- We must also check that none of the apply_outputs, which are defined by the APPLY,
-- have been defined previously, either in the present theory or in any of its ancestral theories.
-- Definitions made at this top level count as 'permanent' definitions in whatever theory is current,
-- but because their syntactic forms are different from those of algebraic or recursive definitions,
-- these definitions are not accessible via Use_def, but only through citation of the APPLY-generated
-- theorem which introduces them. The same is true for APPLY statements in proofs which define
-- symbols, except that these symbols are local to the proof. But within the proof they are
-- subject to the same non-repetition rules as described above.
-- Note also that these rules also apply to Loc_def definitions inside proofs.
-- next we generate the 'required hypothesis' for the theory application and check that it is available
-- in the context of the application. This is done by verifying that it follows by ELEM from the conjunction of
-- the 20 theorems preceding the APPLY. (In the unusual situations in which this is not the case, it may
-- be necessary to insert an artificial Theorem simply to satisfy this condition.)
hypotheses_list := assumps_and_consts(2); -- the list of all hypotheses of the theory being applied
if hypotheses_list /= [] then -- the theory being applied has hypotheses, whose availability must be checked
required_hypothesis := build_required_hypothesis(hypotheses_list,apply_params_parsed);
-- verify that the 'required hypothesis (parsed_conj)' thus generated is available in the context of the APPLY
if test_conclusion_follows(next_thm_name,required_hypothesis) = OM then return OM; end if;
end if;
return conclusion_follows(theory_name,apply_outputs,apply_params_parsed,theorem_map(next_thm_name));
-- verify that the theorem following the (global) apply is a valid consequence of the substituted theorems of the theory
end check_an_apply_inf;
procedure check_apply_syntax(text_of_apply); -- special processing for global definitions by "APPLY"
-- printy(["text_of_apply: ",text_of_apply]); -- FILL IN ************
end check_apply_syntax;
-- ************* APPLY conclusion checking (for THEORY application outside proofs) ***************
procedure test_conclusion_follows(next_thm_name,desired_concl); -- test desired conclusion of a top-level APPLY inference
-- this must follow from nearby prior theorems, plus assumptions of theory containing the APPLY
if definitions_handle = OM then check_definitions(-10000,100000); end if; -- make sure that the symbol_def map is available
-- 'symbol_def' maps symbol names, e.g. "Set_theory:Z", to their definitions
read_proof_data(); -- ensure that the list of digested_proofs,
-- the theorem_map of theorem names to theorem statements,
-- the theorem_sno_map_handle of theorem names to theorem statements,
-- its inverse inverse_theorem_sno_map,
-- the theory-related maps parent_of_theory,theors_defs_of_theory,assumps_and_consts_of_theory,
-- defs_of_theory,defsymbs_of_theory,defconsts_of_theory,
-- and the theory_of map sending each theorem and definition into its theory
-- the theorem_list, and the theorem_name_to_number (list of theorem names in order, and inverse)
-- are all available
ntn := theorem_name_to_number(next_thm_name);
current_theory := theory_of(next_thm_name); -- the theory containing the APPLY
assumps_of_current_theory := assumps_and_consts_of_theory(current_theory)(2); -- these assumptions are also available
ancestor_theories := [current_theory]; cur_th := current_theory; -- construct the chain of ancestor theories
while (cur_th := parent_of_theory(cur_th)) /= OM loop -- of the theory containing the theorem
ancestor_theories with:= cur_th;
end loop;
prior_theorems_list := [[thry,tln]: n in [(ntn - 21) max 1..ntn - 1] | (thry := theory_of(tln := theorem_list(n))) in ancestor_theories];
--printy(["prior_theorems_list: ",prior_theorems_list," ",allowed_theories]);
prior_theorems_list := [fully_quantified(thry,theorem_map(tln)): [thry,tln] in prior_theorems_list];
-- form universally quantified version of the theorems in this list.
--printy(["prior_theorems_list:: "]); for x in prior_theorems_list loop printy([x]); end loop;
-- now verify that the required hypothesis is a consequence of an available nearby theorem
conj_to_test := build_conj("",(assumps_of_current_theory + prior_theorems_list) with desired_concl,OM);
test_conj(conj_to_test); -- test this conjunct for satisfiability
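-- note that build_conj arranges for the final formula passed (here the desired conclusion)
-- to be negated, so 'tested_ok' means that the resulting conjunction is unsatisfiable,
-- i.e. that the conclusion follows from the available statements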
--printy(["tested_ok: ",tested_ok]);
if not tested_ok then
printy(["testing hypothesis availability for APPLY or Skolem inference: ",next_thm_name," ",time()]);
error_count +:= 1;
printy(["Conjunct formed using available nearby theorems fails unsatisfiability check ***"]); return OM;
end if;
return true;
end test_conclusion_follows;
-- ************* THEORY-application inference checking (within proofs) ***************
procedure check_an_apply_inf_inproof(thm_name,stat_stack,theory_name,apply_params,apply_outputs);
-- handle a single, non-skolem, theory-application inference
-- To handle these inferences, we first separate out the defined-parameter list and the substitution-list of the
-- APPLY. These are coarsely validated (prior to the call of this routine), and then the apply_params are further
-- validated by verifying that their individual parts have forms corresponding to valid definitions.
-- perform comprehensive syntactic check of APPLY statements
if (res := test_apply_syntax(theory_name,apply_params,apply_outputs)) = OM then return OM; end if;
[assumps_and_consts,apply_params_parsed] := res;
-- next we generate the 'required hypothesis' for the theory application and check that it is available
-- in the context of the application. This is done by verifying that it follows by ELEM from the conjunction of
-- the 20 theorems preceding the APPLY. (In the unusual situations in which this is not the case, it may
-- be necessary to insert an artificial Theorem simply to satisfy this condition.
hypotheses_list := assumps_and_consts(2); -- the list of all hypotheses of the theory being applied
if hypotheses_list /= [] then -- the theory being applied has hypotheses, whose availability must be checked
required_hypothesis := build_required_hypothesis(hypotheses_list,apply_params_parsed);
-- verify that the 'required hypothesis (parsed_conj)' thus generated is available in the context of the APPLY
conj_to_test := build_conj("",stat_stack with required_hypothesis,OM);
squash_details := true; -- apply inferences always use coarse blobbing
test_conj(conj_to_test); -- test this conjunct for satisfiability
--printy(["tested_ok: ",tested_ok]);
if not tested_ok then
extra_message := [" Failed hypothesis availability check for APPLY or Skolem inference in proof ",thm_name," conjuction tested was: ",unicode_unpahrse(conj_to_test)," ",time()];
return OM; error_count +:= 1;
end if;
end if;
res := conclusion_follows(theory_name,apply_outputs,apply_params_parsed,stat_stack(#stat_stack));
if res = true then return res; end if;
-- verify that the theorem following the (global) apply is a valid consequence
-- of the substituted theorems of the theory
printy(["Required hypotheses are available but conclusion does not follow from application of theory; ",theory_name," res is: ",res?"OM"]);
extra_message := ["Required hypotheses are available but conclusion does not follow from application of theory ",theory_name," ; res is: ",res?"OM"];
return OM; -- return OM in the failure case
end check_an_apply_inf_inproof;
-- ************* Syntax checking for THEORY application ***************
procedure test_apply_syntax(theory_name,apply_params,apply_outputs);
-- comprehensive syntactic check of APPLY statements
-- replacements must be supplied for all the assumed symbols of the theory being applied.
-- The theory being applied must have a chain of ancestors which is a prefix of the theory
-- open at the point of the APPLY.
-- the pairs being_defined->defining_formula which appear are subject to the same restrictions as
-- would apply to definitions of the form being_defined := defining_formula. That is,
-- (i) each left-side must be a function symbol (or constant) followed by non-repeating simple arguments.
-- (ii) every defining_formula must be a syntactically valid formula all of whose free variables and functions
-- are either parameters of the left-hand side or symbols already defined in the theory
-- containing the APPLY or in one of its ancestor theories.
--printy(["check_an_apply_inf: ",theory_name," ",apply_params," ",apply_outputs]);
--printy(["**** Unknown inference mode: APPLY used to deduce: ",stat]); error_count +:= 1;
-- Once all of the preceding checks have been passed, we start to build the list of conclusions_available
-- from the THEORY being applied. This consists of all the theorems of the THEORY not containing any symbol
-- defined within it, other than assumed symbols and symbols listed in the apply_outputs list.
-- The assumed symbols are replaced by their defining expressions, in exactly the same way as for
-- a Use_def inference, using the 'replace_fcn_symbol' operation. Then all the symbols listed in the apply_outputs list,
-- and all the assumed constants, are replaced by their new names. This gives a derived formula which we append to the
-- conj supplied, which is then tested for consistency.
-- We then note that all of the replacement symbols in the defined-parameter list, none of which
-- may have been defined previously, are defined by the APPLY statement.
init_proofs_and_theories();
-- ensure initialization of digested_proofs, parent_of_theory,theors_defs_of_theory,assumps_and_consts_of_theory
apply_params_parsed := {}; -- decompose the apply_params at their '->' separator, and check validity
assumps_and_consts := assumps_and_consts_of_theory(theory_name); -- get the data relevant to the theory
if assumps_and_consts = OM then
printy(["****** Error: Cannot find THEORY ",theory_name," apply_outputs are specified as: ",apply_outputs," next_thm_name is: ",next_thm_name]); return OM;
end if;
--printy(["assumps_and_consts: ",assumps_and_consts]);
nargs_of_assumed_symbol_name := {}; -- will build map of assumed symbol names of theory into their number of arguments
for assmd_symb_w_args in assumps_and_consts(1) loop
symb_name := case_change(break(assmd_symb_w_args,"("),"lu");
nargs_of_assumed_symbol_name(symb_name) := if assmd_symb_w_args /= "" then #breakup(assmd_symb_w_args,",") else 0 end if;
end loop;
ancestor_theories := []; -- find the ancestor theories of the theory being applied
theory_nm := theory_name;
while (parent_theory := parent_of_theory(theory_nm)) /= OM loop
ancestor_theories := [parent_theory] + ancestor_theories; theory_nm := parent_theory;
end loop;
theorems_and__defs_of_theory := theors_defs_of_theory(theory_name);
for stg in apply_params loop
orig_stg := stg; -- save for diagnosis
front := break(stg,"-"); nxt_char := match(stg,"->"); -- stg is now the part of orig_stg following the '->' mark; front is the part before
if nxt_char = "" then printy(["****** Error: missing '->' in APPLY replacement list: ",apply_params," ",next_thm_name]); return OM; end if;
if (front := parse_expr((orig_front := front) + ";")) = OM then -- the front must parse as a simple function name (or constant),
-- and the back 'stg' as an expression with no more free variables than the front's parameters
printy(["****** Error: illformed assumed_symbol part in APPLY ",theory_name," replacement list: ",orig_stg," ",orig_front," ",next_thm_name]);
return OM;
end if;
if (back := parse_expr(stg + ";")) = OM then
printy(["****** Error: illformed symbol-definition part in APPLY ",theory_name," replacement list: ",stg," ",next_thm_name]);
return OM;
end if;
if (not is_string(front := front(2))) and (not front(1) = "ast_of") then
printy(["****** Error: function not supplied for assumed_symbol part in APPLY ",theory_name," replacement list: ",orig_front," ",front," ", next_thm_name]);
return OM;
end if;
if (not is_string(front)) then
[-,replacement_symbol_name,front_args] := front;
front_args := front_args(2..); -- drop the prefixed 'ast_list'
if exists fa in front_args | not is_string(fa) then
printy(["****** Error: illegal compound arguments in symbol-definition part of APPLY ",theory_name," replacement list: ",orig_stg," ",next_thm_name]); return OM;
end if;
if (nfa := #front_args) > #{x: x in front_args} then
printy(["****** Error: repeated arguments in symbol-definition part of APPLY ",theory_name," replacement list: ",orig_stg," ",next_thm_name]); return OM;
end if;
-- we now check that every replacement_symbol_name is an assumed symbol of the theory being applied
if (naa := nargs_of_assumed_symbol_name(replacement_symbol_name)) = OM then
printy(["****** Error: replacements can only be specified for assumed symbol of theory: ",theory_name," ",replacement_symbol_name,
" is not an assumed symbol of this theory: ",orig_stg," ",next_thm_name]); return OM;
end if;
-- check the number of arguments supplied for the replacement_symbol_name
if (nfa /= naa) then
printy(["****** Error: wrong number of arguments supplied for ",replacement_symbol_name,": ",orig_stg," which has ",naa," arguments, but ",nfa," were supplied"]);
return OM;
end if;
-- and that no replacement_symbol_name appears twice
if apply_params_parsed(replacement_symbol_name) /= OM then
printy(["****** Error: only one replacement can be specified for assumed symbol ",replacement_symbol_name," of theory: "," ",theory_name]);
return OM;
end if;
apply_params_parsed(replacement_symbol_name) := [front_args,back(2)];
-- capture the parameters and the right_hand_side definition for the assumed_symbol
-- check that the list of ancestors of the theory being applied is contained in the list of ancestors of the current theory ***TODO***
-- check that every free variable and function name appearing in the back part of the definition is defined ***TODO***
-- in the theory containing the APPLY or in one of its parent theories
else -- a parameterless constant has been supplied
replacement_symbol_name := front;
-- check that replacement_symbol_name is an assumed symbol of the theory being applied
if (naa := nargs_of_assumed_symbol_name(replacement_symbol_name)) = OM then
printy(["****** Error: replacements can only be specified for assumed symbol of theory: ",theory_name,". ",replacement_symbol_name,
" is not an assumed symbol of this theory: ",orig_stg," ",next_thm_name]); return OM;
end if;
-- check the number of arguments supplied for the replacement_symbol_name
if (0 /= naa) then printy(["****** Error: arguments missing for ",replacement_symbol_name,": ",orig_stg]); return OM; end if;
-- and that no replacement_symbol_name appears twice
if apply_params_parsed(replacement_symbol_name) /= OM then
printy(["****** Error: only one replacement can be specified for assumed symbol ",replacement_symbol_name," ",next_thm_name]); return OM;
end if;
apply_params_parsed(replacement_symbol_name) := [[],back(2)];
end if;
--printy(["front,back: ",front(2)," ",back(2)]);
end loop;
-- now check that every assumed symbol of the theory being applied has a replacement_symbol_name
if (no_defs_for := {symb: [symb,-] in nargs_of_assumed_symbol_name | apply_params_parsed(symb) = OM}) /= {} then
printy(["****** Error: no replacement has been specified for the following assumed symbols: ",no_defs_for," ",next_thm_name]);
return OM;
end if;
--printy(["assumps_and_consts: ",assumps_and_consts]);
--printy(["ancestor_theories: ",ancestor_theories]);
--printy(["nargs_of_assumed_symbol_name: ",nargs_of_assumed_symbol_name]);
--printy(["theorems_and__defs_of_theory: ",theorems_and__defs_of_theory]);
return [assumps_and_consts,apply_params_parsed]; -- at this point the APPLY has passed all its syntactic checks.
end test_apply_syntax;
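-- A hedged illustration of the returned map (hypothetical names): an APPLY parameter
--      PLUS(x,y) -> x ¥plus y
-- contributes the entry apply_params_parsed("PLUS") = [["X","Y"],<tree of x ¥plus y>]
-- (identifier case as produced by parse_expr), while a parameterless replacement c -> e
-- contributes apply_params_parsed("C") = [[],<tree of e>].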
-- ************* Analysis of APPLY hints within proofs ***************
procedure decompose_apply_hint(hint); -- decomposes the 'hint' portion of an APPLY statement
-- returns [theory_name,apply_params,apply_outputs]
-- the split_apply_params is a list of pairs [assumed_fcn(vars),replacement_expn]
-- the apply_outputs is the colon-and-comma punctuated string defining the functions to be generated
-- printy(["decompose_apply_hint: ",hint]);
rspan(hint,"\t "); -- remove possible whitespace
hint_tail := hint(6..); apply_outputs := break(hint_tail,")"); match(hint_tail,")"); match(apply_outputs,"(");
--printy(["apply_outputs: ",apply_outputs," ",hint_tail]);
theory_name := break(hint_tail,"("); span(theory_name," \t");
match(hint_tail,"("); tail_end := rbreak(hint_tail,")"); attached_thmname := rbreak(tail_end,">");
span(attached_thmname," \t"); rspan(attached_thmname," \t");
apply_params := hint_tail; rmatch(apply_params,")");
apply_params := split_at_bare_commas(apply_params);
split_apply_params := []; -- split the apply parameters at "->" and collect the resulting pairs
for ap in apply_params loop
ap_seg := segregate(ap,"->");
if exists (segp = ap_seg(j)) | segp = "->" then
split_apply_params with:= [+/ap_seg(1..j - 1),+/ap_seg(j + 1..)];
end if;
end loop;
span(theory_name," \t"); rspan(theory_name," \t"); -- trim whitespace
return [theory_name,apply_params,apply_outputs,if #attached_thmname = 0 then "" else "T" end if + attached_thmname];
end decompose_apply_hint;
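-- A hedged worked example (hypothetical names): the hint
--      APPLY(v1_thryvar:car) orderedGroups(PLUS(x,y) -> x ¥plus y)
-- decomposes into theory_name 'orderedGroups', apply_params ['PLUS(x,y) -> x ¥plus y'],
-- apply_outputs 'v1_thryvar:car', and an empty attached theorem name.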
-- ************* Finding the variables to be substituted of APPLY _thryvars ***************
procedure get_apply_output_params(apply_outputs,hint); -- decompose and validate apply_outputs, returning them as a tuple of pairs
-- this routine expects its apply_outputs parameter to be a comma-separated list of colon-separated pairs thryvar:replacement
-- derived from an APPLY hint. It checks the format of the hint crudely, and verifies that no thryvar occurs repeatedly in the hint.
-- if all of these crude tests are passed, return the list of pairs of thryvars and their replacements
split_apply_outputs := breakup(breakup(apply_outputs,","),":"); -- break into a tuple of pairs
--printy(["split_apply_outputs: ",split_apply_outputs]);
-- check that we have only pairs, whose first component has the required xxx_thryvar form
if exists p = split_apply_outputs(j) | #p /= 2 or (p1 := p(1))(1) notin "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz¥•"
or not_all_alph(p1) or (np1 := #p1) < 9 or p1(np1 - 7..) /= "_thryvar" then
printy(["****** Error: illformed defined-variables list in APPLY. ",p," ",hint]); return OM;
end if;
-- check that the second components of these pairs have characters legitimate in function and/or operator names
if exists p = split_apply_outputs(j) | (p2 := p(2))(1) notin "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz¥•"
or not_all_alph(p2) then
printy(["****** Error: illformed defined variable in APPLY. ",p(2)," ",hint]); return OM;
end if;
numoccs := {};
-- now check that none of the thryvar names supplied occur more than once
for [thryvar,-] = split_apply_outputs(j) | thryvar /= OM loop
numoccs(thryvar) := (numoccs(thryvar)?0) + 1;
end loop;
if exists n = numoccs(v) | n > 1 then
printy(["****** Error: Repeatedly defined thryvar variable in APPLY. ",v," ",hint]); return OM;
end if;
return split_apply_outputs; -- if all of these crude tests are passed, return the list of pairs of thryvars and their replacements
end get_apply_output_params;
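-- For instance (hedged, hypothetical names), apply_outputs of the form
--      v1_thryvar:car,v2_thryvar:times
-- splits into [["v1_thryvar","car"],["v2_thryvar","times"]], which passes the crude
-- format checks provided no thryvar name is repeated.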
-- ************* Skolemization inference checking (outside proofs) ***************
procedure check_a_skolem_inf(next_thm_name,theorem_to_derive,apply_params,apply_outputs);
-- checks a skolem inference
-- we first separate out the defined-parameter list of the APPLY.
-- These are coarsely validated (prior to the call of this routine), and then the apply_params are further
-- validated by verifying that the individual parts have forms corresponding to valid definitions.
-- apply_outputs is the comma-and-colon separated string of pairs thryvar:replacement taken from the APPLY hint.
-- apply_params is the list of input symbols required for the APPLY, taken from APPLY(apply_outputs) theory_name(apply_params)
if (tree_and_split_outputs := check_skolem_conclusion_tree(theorem_to_derive,apply_params,apply_outputs)) = OM then return OM; end if;
-- check_skolem_conclusion_tree returns the parse tree of the Skolem desired_conclusion and the list of apply outputs
-- as pairs [thryvar,replacement].
-- a coarse lexical validity test is applied to (partially) validate the format of the APPLY hint supplying the apply_outputs.
[tree,split_apply_outputs] := tree_and_split_outputs; -- get syntax tree of theorem_to_derive, and list of [thryvar,replacement] pairs
if (required_hypothesis := build_Skolem_hypothesis(theorem_to_derive,tree,split_apply_outputs)) = OM then return OM; end if;
-- get hypothesis required for Skolem conclusion after first applying relevant syntactic checks
--print("<P>check_a_skolem_inf: ",next_thm_name," ",theorem_to_derive," apply_params: ",apply_params," apply_outputs: ",apply_outputs);
--print("<P>required_hypothesis: ",required_hypothesis,"<P>");
return test_conclusion_follows(next_thm_name,required_hypothesis);
end check_a_skolem_inf;
procedure check_skolem_conclusion_tree(desired_conclusion,apply_params,apply_outputs);
-- returns the parse tree of Skolem desired_conclusion and the list of apply outputs as pairs [thryvar,replacement].
-- a coarse lexical validity test is applied to (partially) validate the format of the APPLY hint supplying the apply_outputs.
if apply_params /= [] then -- assumed symbols are improperly being supplied for APPLY Skolem, which requires none
printy(["\n****** Error verifying step: in proof step \n\t: "
+ number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nNo substitutions appear in APPLY Skolem."]);
error_count +:= 1; return OM;
end if;
-- decompose and validate apply_outputs, returning them as a tuple of pairs
-- get_apply_output_params expects its apply_outputs parameter to be a comma-separated list of colon-separated pairs thryvar:replacement
-- derived from an APPLY hint. It checks the format of the hint crudely, and verifies that no thryvar occurs repeatedly in the hint.
-- if all of these crude tests are passed, return the list of pairs of thryvars and their replacements
if (split_apply_outputs := get_apply_output_params(apply_outputs,hint)) = OM then
printy(["\n****** Error verifying step: in proof step \n\t: "
+ number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nBad substitution in APPLY Skolem."]);
error_count +:= 1; return OM;
end if;
-- probably redundant check of format of initial thryvar components of pairs
if exists p = split_apply_outputs(j) | (p1 := p(1))(1) /= "v" or p1(2..#p1 - 8) /= str(j) then -- check for 'vnnnn_thryvar' format
printy(["\n****** Error verifying step: in proof step \n\t: "
+ number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nIllformed defined-variables list in Skolem APPLY. ",apply_params]);
error_count +:= 1; return OM;
end if;
--printy(["check_a_skolem_inf: "," ",split_apply_outputs," desired_conclusion: ",desired_conclusion]);
-- we parse the statement to be inferred
if (tree := parse_expr(desired_conclusion + ";")) = OM then
printy(["\n****** Error verifying step: in proof step \n\t: "
+ number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nSkolem conclusion has bad syntax"]);
error_count +:= 1; return OM;
end if;
return [tree(2),split_apply_outputs]; -- drop the 'list' prefix
end check_skolem_conclusion_tree;
procedure build_Skolem_hypothesis(desired_conclusion,tree,split_apply_outputs);
-- applies syntactic checks and builds hypothesis required for Skolem conclusion
-- desired_conclusion is a string representing the desired conclusion; 'tree' is its syntax tree
-- split_apply_outputs is a tuple of pairs [thryvar,replacement]. These are pairs of strings.
if running_on_server then desired_conclusion := unparse(tree); end if;
-- avoid '•' character problem on windows
--print("running_on_server: ",desired_conclusion," • ¥ ");
if tree(1) /= "ast_forall" then -- the Skolem required_hypothesis will be existentially quantified, and
-- all occurrences of the Skolem symbols must be parameterless
-- find all the occurrences of the replacement symbols in the syntax tree of the desired_conclusion
relevant_nodes := symbol_occurences(tree,skolem_outputs := {case_change(y,"lu"): [-,y] in split_apply_outputs});
if exists rn in relevant_nodes | #(rn(3)?[]) > 0 then -- error: in this case Skolem constants cannot have parameters
printy(["\n****** Error verifying step: in Skolem proof step \n\t: "
+ number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nSkolem constants cannot have parameters"]);
error_count +:= 1; return OM;
end if;
-- build the required hypothesis by prefixing purely existential quantifiers to the conclusion
required_hypothesis := "(EXISTS " + join([y: y in {x: [-,x,-] in relevant_nodes}],",") + " | " + desired_conclusion + ")";
return required_hypothesis;
end if;
if exists v in (varlist := tree(2)(2..)) | not is_string(v) then
printy(["\n****** Error verifying step: in Skolem proof step \n\t: "
+ number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nInitial iterator of Skolem conclusion must be simple"]);
error_count +:= 1; return OM;
end if;
if (nvl := #varlist) /= #{x: x in varlist} then
printy(["\n****** Error verifying step: in Skolem proof step \n\t: "
+ number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nRepeated variables in initial iterator of Skolem conclusion."]);
error_count +:= 1; return OM;
end if;
--printy(["tree: ",varlist," ",{case_change(y,"lu"): [-,y] in split_apply_outputs}]);
relevant_nodes := symbol_occurences(t3 := tree(3),skolem_outputs := {case_change(y,"lu"): [-,y] in split_apply_outputs});
--printy(["relevant_nodes: ",relevant_nodes]);
if exists rn in relevant_nodes, v in rn(3) | (v in rn(1)) then
printy(["\n****** Error verifying step: in Skolem proof step \n\t: "
+ number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nSkolem functions and constants can only be in scope of initial quantifier."]);
error_count +:= 1; return OM;
end if;
if exists rn in relevant_nodes | (nskoa := #(sko_args := rn(3)?[])) > nvl or varlist(1..nskoa) /= sko_args then
printy(["\n****** Error verifying step: in Skolem proof step \n\t: "
+ number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nArguments of Skolem functions must be initial prefix of variable in prefixed quantifier list."]);
error_count +:= 1; return OM;
end if;
nargs_of_skolem_fcns := {[rn(2),#(sko_args := rn(3)?[])]: rn in relevant_nodes}; -- mapping of skolem functions into their number of arguments
-- note that only the symbols in the domain of this function, i.e. the declared skolem fcns
-- which actually appear in the stated conclusion, are actually used in what follows
if (nsko := #nargs_of_skolem_fcns) /= #domain(nargs_of_skolem_fcns) then
printy(["\n****** Error verifying step: in Skolem proof step \n\t: "
+ number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nAll occurrences of Skolem functions must have the same number of arguments."]);
error_count +:= 1; return OM;
end if;
-- at this point the Skolem inference has passed all its syntactic tests
skolem_functions_in_order := merge_sort([[n,x]: [x,n] in nargs_of_skolem_fcns]);
-- arrange the Skolem functions in order of their number of arguments
-- Next we generate the 'required hypothesis' for the Skolem application. This is done by first building the
-- prefixed quantifier for it.
varlist_in_order := [[j,x]: x = varlist(j)];
--printy(["skolem_functions_in_order: ",skolem_functions_in_order," ",varlist]);
prior_was := OM; -- was the last symbol scanned a skolem_function name, or a variable in the varlist
skix := vix := 1; -- indices for the scan
numquants := 0; -- will count number of quantifiers
quantifier_string := ""; -- will build the pre-Skolemization quantifier string
while skix <= nsko or vix <= nvl loop
if skix <= nsko and vix <= nvl then -- both indices are in range
take := if skolem_functions_in_order(skix)(1) < varlist_in_order(vix)(1) then 1 else 2 end if;
elseif skix <= nsko then -- skix is in range
take := 1;
else -- vix is in range
take := 2;
end if;
--printy(["skix,vix: ",skix," ",vix," nsko: ",nsko," nvl: ",nvl," take: ",take," prior_was: ",prior_was," ",quantifier_string]);
if take = 1 then -- take from skolem_functions_in_order
if prior_was = 1 then -- just append the Skolem function name, prefixed by a comma
quantifier_string +:= (", " + skolem_functions_in_order(skix)(2));
elseif prior_was = 2 then
quantifier_string +:= (" | (EXISTS " + skolem_functions_in_order(skix)(2));
numquants +:= 1;
else -- prior_was = OM; like prior_was = 2, but omit '|'
quantifier_string +:= ("(EXISTS " + skolem_functions_in_order(skix)(2));
numquants +:= 1;
end if;
prior_was := 1; skix +:= 1; -- advance the skolem index
else
if prior_was = 2 then -- just take variable name, prefixed by a comma
quantifier_string +:= (", " + varlist_in_order(vix)(2));
elseif prior_was = 1 then
quantifier_string +:= (" | (FORALL " + varlist_in_order(vix)(2));
numquants +:= 1;
else -- prior_was = OM; like prior_was = 1, but omit '|'
quantifier_string +:= ("(FORALL " + varlist_in_order(vix)(2));
numquants +:= 1;
end if;
prior_was := 2; vix +:= 1; -- advance the variables index
end if;
end loop;
return quantifier_string + " | " + unparse(remove_arguments(t3,[y: [-,y] in skolem_functions_in_order])) + numquants * ")";
end build_Skolem_hypothesis;
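-- Tracing the example given in the header comments above: for the Skolem conclusion
--      (FORALL x,y,z | ... f(x,y)...g(x,y,z)..f(x,y)..h(x)..)
-- skolem_functions_in_order is [[1,h],[2,f],[3,g]] and varlist is [x,y,z], so the
-- interleaving loop builds the quantifier prefix
--      (FORALL x | (EXISTS h | (FORALL y | (EXISTS f | (FORALL z | (EXISTS g |
-- which is then closed with numquants = 6 right parentheses after the de-parameterized body.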
-- ************* Skolemization inference checking (within proofs) ***************
procedure check_a_Skolem_inf_inproof(stat_stack,theory_name,apply_params,apply_outputs);
-- handle a single Skolem inference occurring within a proof
-- To handle these inferences, we first separate out the defined-parameter list of the APPLY
-- (a Skolem APPLY supplies no assumed-symbol substitutions); the apply_outputs are coarsely
-- validated, and the stated Skolem conclusion is checked for the required syntactic form.
if (tree_and_split_outputs := check_skolem_conclusion_tree(desired_concl := drop_labels(stat_stack(nss := #stat_stack))(1),apply_params,apply_outputs)) = OM then
return OM;
end if;
-- get parse tree of Skolem conclusion after applying checks
[tree,split_apply_outputs] := tree_and_split_outputs;
if (required_hypothesis := build_Skolem_hypothesis(desired_concl,tree,split_apply_outputs)) = OM then return OM; end if;
-- get hypothesis required for Skolem conclusion after first applying relevant syntactic checks
conj_to_test := build_conj("",stat_stack(1..nss - 1) with required_hypothesis,OM);
test_conj(conj_to_test); -- test this conjunct for satisfiability
--printy(["conj_to_test tested_ok: ",tested_ok]);
if not tested_ok then
printy(["\n****** Error verifying step: in Skolem proof step \n\t: "
+ number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nHypothesis for Skolem inference are not available."]);
error_count +:= 1;
end if;
return true; -- otherwise the conclusion follows
end check_a_Skolem_inf_inproof;
procedure conclusion_follows(theory_name,apply_outputs,apply_params_parsed,conclusion_wanted);
-- Here, knowing that the 'required hypothesis' of an APPLY is available, we collect all those Theorems of the THEORY
-- being applied which involve no constant or function defined in the theory,
-- other than symbols in the assumed functions/constants list or in the generated-symbols list.
-- These are conjoined in universally quantified form; then 'replace_fcn_symbol' is used repeatedly
-- to replace every assumed and generated symbol with its appropriate replacement. The result of this substitution
-- becomes the clause which the APPLY operation makes available.
thms_of_theory := {x in theors_defs_of_theory(theory_name) | x(1) = "T"}; -- the theorems of the theory being applied
span(apply_outputs," \t("); rspan(apply_outputs," \t)");
--printy(["thms_of_theory: ",thms_of_theory]);
apply_outputs_map := {x: x in breakup(breakup(apply_outputs,","),":")};
apply_outputs_map := {[case_change(x,"lu"),y]: [x,y] in apply_outputs_map}; -- quoted theorems of theory being applied are upper case July 6, 2005
--print("apply_outputs_map after uppercasing: ",apply_outputs_map);
if exists x in apply_outputs_map | #x /= 2 then
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nIllformed APPLY output list",
"\nApply outputs are: ",apply_outputs]);
error_count +:= 1;
return OM;
end if;
defined_in_theory := (alldefs := def_in_theory(theory_name)?{}) - {case_change(x,"lu"): x in domain(apply_outputs_map)};
-- + {case_change(x,"lu"): x in range(apply_outputs_map)}; -- domain and range might overlap
-- if no symbol is defined within a theory, def_in_theory(theory_name) will be OM
fully_quantified_recovered_defs_of_theory :=
[fully_quantified(theory_name,unparse(def_as_eq(symb,sdf))): symb in alldefs | (sdf := symbol_def(theory_name + ":" + symb)) /= OM and sdf(2) /= "defined_by_theory_application"];
-- convert the definitions of the theory to theorems (****** should be only if they are available in light of symbols they use)
-- ignore the supplementary definitions coming from assumed theory constants and implicit definitions by APPLY
--printy(["alldefs: ",alldefs," ",theory_name," fully_quantified_recovered_defs_of_theory: ",fully_quantified_recovered_defs_of_theory]);
-- the symbols defined in the theory which are not output symbols
thms_available := [fully_quantified(theory_name,tstg): t in thms_of_theory | free_vars_and_fcns(tstg := theorem_map(t)) * defined_in_theory = {}]
+ fully_quantified_recovered_defs_of_theory;
-- get all those theorems of the theory which do not involve any symbol defined in the theory which is not a theory output
if thms_available = [] then thms_available := ["true"]; end if; -- make sure that at least one theorem is available
--printy(["thms_of_theory: ",thms_of_theory," ",thms_available," ",defined_in_theory," ",[free_vars_and_fcns(theorem_map(t)) * defined_in_theory: t in thms_of_theory | free_vars_and_fcns(tstg := theorem_map(t)) * defined_in_theory /= {}]]);
-- form the conjunction of the fully quantified thms_available, and in it replace all the output symbols
-- by their replacements
--print("thms_of_theory: ",thms_of_theory," defined_in_theory: ",defined_in_theory," apply_outputs_map: ",apply_outputs_map," alldefs: ",alldefs);
conjoined_thms_avail := join(["(" + av + ")": av in thms_available]," & ");
parsed_concl := parse_expr(conjoined_thms_avail + ";")(2); -- parse the conjunction of the available theorems
-- now use 'replace_fcn_symbol' repeatedly to replace every assumed symbol
-- of the THEORY being applied by its replacement expression
for [symb_referenced,[def_vars,def_body]] in apply_params_parsed loop
parsed_concl := replace_fcn_symbol(parsed_concl,symb_referenced,def_vars,def_body);
end loop;
--print("apply_outputs_map: ",apply_outputs_map);
-- also replace the output symbols of the theory by the symbols which replace them
for [orig,replacement] in apply_outputs_map loop
--print("<BR>[orig,replacement]: ",[orig,replacement],"<BR>parsed_concl: ",parsed_concl);
parsed_concl := replace_fcn_symbol(parsed_concl,case_change(orig,"lu"),OM,case_change(replacement,"lu"));
--print("<BR>parsed_concl: ",unicode_unpahrse(parsed_concl));
end loop;
--->debug
--print("<P>parsed conjunction of theorems of theory after replacement of assumed by supplied functions:<BR>",theory_name," apply_outputs_map: ",apply_outputs_map," parsed_concl: ",unicode_unpahrse(parsed_concl)); if just_whitespace(conclusion_wanted) then print("******** conclusion wanted is empty"); else --print("<P>conclusion_wanted:<BR>",unicode_unpahrse(parse_expr(conclusion_wanted +";"))); end if;
-- we now require that the stated conclusion of the APPLY be an ELEM consequence of the parsed_concl
conj_to_test := build_conj("",[uparc := unparse(parsed_concl),conclusion_wanted],OM); -- the last formula on the 'stack' passed is negated
test_conj(conj_to_test); -- test this conjunct for satisfiability
--printy(["tested_ok: ",tested_ok," ",not tested_ok]);
if not tested_ok then
error_count +:= 1;
print("<BR>",["conj_to_test for conclusion: ",conj_to_test," tested_ok: ",tested_ok]);
print("<BR>",["Stated conclusion does not follow from theory: ",theory_name," conclusion of theory is: ",uparc," conclusion_wanted is: ",conclusion_wanted]);
return OM;
end if;
--printy(["return true: ",true]);
return true;
end conclusion_follows;
procedure just_whitespace(stg); span(stg," \t"); return stg = ""; end just_whitespace;
procedure build_required_hypothesis(hypotheses_list,apply_params_parsed);
-- build required_hypothesis for theory application; return as string
conjoined_hypotheses := join(["(" + hyp + ")": hyp in hypotheses_list]," & ");
--printy(["conjoined_hypotheses: ",conjoined_hypotheses]);
parsed_conj := parse_expr(conjoined_hypotheses + ";")(2); -- parse the conjunction of the hypotheses
-- now use 'replace_fcn_symbol' repeatedly to replace every assumed symbol
-- of the THEORY being applied by its replacement expression
for [symb_referenced,[def_vars,def_body]] in apply_params_parsed loop
parsed_conj := replace_fcn_symbol(parsed_conj,symb_referenced,def_vars,def_body);
end loop;
--printy(["parsed_conj: ",unparse(parsed_conj)]);
return unparse(parsed_conj);
end build_required_hypothesis;
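-- A hedged illustration (hypothetical theory data): if hypotheses_list is
--      ["(FORALL x | g(x) in s)"]
-- and apply_params_parsed maps "G" to [["X"],<tree of X + 1>], the returned string is
-- the unparsed form of (FORALL x | x + 1 in s), i.e. the hypothesis with the assumed
-- symbol g replaced by its supplied defining expression.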
procedure def_as_eq(symb,args_and_def); -- rebuild a definition as an equality or equivalence
[args,rt_side] := args_and_def;
left_side := if #args = 0 then symb else ["ast_of", symb,["ast_list"] + args] end if;
--printy(["def_as_eq: ",symb," ",args_and_def," ",left_side]);
return [if tree_is_boolean(rt_side) then "DOT_EQ" else "ast_eq" end if,left_side,rt_side];
end def_as_eq;
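-- For example (a hedged sketch): a definition f(x) := x + 1, presented as
-- def_as_eq("F",[["X"],<tree of X + 1>]), is rebuilt as the equality tree
--      ["ast_eq",["ast_of","F",["ast_list","X"]],<tree of X + 1>]
-- i.e. f(x) = x + 1; boolean-valued right sides yield a DOT_EQ equivalence instead.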
procedure tree_is_boolean(tree); -- test a tree to see if its value is boolean
return if is_string(tree) then tree in {"ast_true","ast_false"} else
tree(1) in {"ast_and","ast_or","ast_not","ast_eq","ast_neq","DOT_EQ","DOT_INCIN","ast_in","ast_notin","ast_incs"} end if;
end tree_is_boolean;
-- *************************************************************
-- ************* Interfacing to external provers ***************
-- *************************************************************
-- The following routines give the Ref system the ability to interface with external provers such as Otter.
-- This is done by a syntactic extension of the normal Ref 'APPLY' directive, i.e. external provers are regarded
-- as providing special kinds of Ref THEORYs. When such provers are being communicated with,
-- the normal keyword 'APPLY' used to invoke a THEORY is changed to 'APPLY_provername', where 'provername' names the
-- external prover in question. In this case, the normal Ref THEORY declaration is expanded to list Ref-syntax translations
-- of all the theorems being drawn from the external prover, and of all the external symbol definitions on which these depend.
-- An external file, also named in the modified Ref 'APPLY' directive, must be provided as certification of each such THEORY.
-- Ref examines this file to establish that it is a valid proof, by the external prover named, of all the theorems
-- which the THEORY claims.
-- The axioms and definitions assumed by the external prover are listed in the external theory declaration
-- preceding the '==>' mark which starts the list of conclusions of the external theory; its conclusions follow this mark.
-- Definitions are introduced by the token ExtDef. Ref must verify the presence of corresponding theorems and
-- definitions to justify use of the conclusions provided by the external prover.
-- See the main shared_scenario, THEORY_otter orderedGroups, for an example of these rules.
-- The first routine below merely detects what external prover is being used to derive the Ref theory in question,
-- and calls a procedure appropriate to this theory. One such procedure is provided for each external prover available to Ref.
procedure check_an_external_theory(th,assumed_fcns,assumps_and_thms); -- check declaration of an external THEORY
match(th,"_"); prover_name := break(th,"_"); match(th,"_"); -- separate out the name of the responsible prover
if not (exists ath = assumps_and_thms(j) | ath = "==>") then
printy(["******* Error: declaration of external theory ",th," for prover ",prover_name,"lacks '==> separator between axioms and theorems"]);
total_err_count +:= 1; return;
end if;
case prover_name
when "otter" => return check_an_otter_theory(th,assumed_fcns,assumps_and_thms(1..j - 1),assumps_and_thms(j + 1..));
-- check declaration of an otter THEORY
end case;
end check_an_external_theory;
-- ************* Interface to 'Otter' prover ***************
procedure otter_clean(line); -- removes otter comments and formula_list(usable)/end_of_list bracketing lines
front := break(line,"%"); span(front,"\t "); rspan(front,"\t ");
if front = "formula_list(usable)." then return ""; end if;
if front = "end_of_list." then return ""; end if;
return front;
end otter_clean;
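-- For example, the line 'x * y = y * x.  % commutativity' is returned as 'x * y = y * x.'
-- (comment and surrounding whitespace stripped), while the bracketing lines
-- 'formula_list(usable).' and 'end_of_list.' are suppressed entirely.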
procedure check_an_otter_theory(th,assumed_fcns,assumps,thms); -- check declaration of an otter THEORY
--return true; -- disable temporarily
setup_parse_priorities_and_monadics(otter_op_precedence,otter_can_be_monadic,otter_refversion);
-- prepare for conversion from Otter to Ref syntax
otter_lines := get_lines(th + "_otMA.txt"); -- read in the file of otter proofs which will be used as the external justification
if otter_lines = [] or otter_lines = OM then -- check that this read was OK
printy(["******* Error: cannot find formatted Otter file ",of_name]);
total_err_count +:= 1; return;
end if;
-- decompose the otter file into its 'FILE' sections, suppressing comments
-- these are delimited by lines starting "FILE::", e.g. for ordered_groups we have
-- FILE:: orderedGroups_axioms.txt, FILE:: orderedGroups_defs.txt, FILE:: orderedGroups_basicLaws.txt
-- etc.
otter_lines := [oc: line in otter_lines | (oc := otter_clean(line)) /= ""];
file_header_lines := [j: line = otter_lines(j) | #line > 6 and line(1..6) = "FILE::"];
file_body := {}; -- will map file descriptors into their following lines
theorems_files := defs_files := axioms_files := proofs_files := {}; -- the four categories of header lines to be segregated
for j = file_header_lines(k) loop -- iterate over the otter file, looking for its "FILE::" header lines,
-- and from these isolating the following file descriptor name
file_header_line := otter_lines(j); match(file_header_line,"FILE::"); span(file_header_line," \t");
break(file_header_line,"_"); match(file_header_line,"_");
rbreak(file_header_line,"."); rmatch(file_header_line,"."); -- file_header_line is now the file descriptor
file_body(file_header_line) := otter_lines(j + 1..(file_header_lines(k + 1)?(#otter_lines + 1)) - 1);
-- associate the following file section with its header line
-- the headers of the otter file sections are assumed to have one of several forms,
-- distinguished by the last part of their names. These are exemplified by
-- FILE:: orderedGroups_****axioms****.txt (required axioms), orderedGroups_****defs****.txt (definitions)
-- orderedGroups_basic****Laws****.txt (otter theorems), orderedGroups_A1.out.txt (otter proofs)
fhlc := file_header_line; tag := rmatch(fhlc,"Laws");
if tag /= "" then -- collect the Ottter file section headers into the four categories described above
theorems_files with:= file_header_line; -- collect otter theorem
else
fhlc := file_header_line; tag := rmatch(fhlc,".out");
if tag /= "" then -- collect otter proof
proofs_files with:= file_header_line;
else
fhlc := file_header_line; tag := rmatch(fhlc,"axioms");
if tag /= "" then -- collect otter axiom
axioms_files with:= file_header_line;
else -- collect otter definition
fhlc := file_header_line; tag := rmatch(fhlc,"defs");
if tag /= "" then defs_files with:= file_header_line; end if;
end if;
end if;
end if;
end loop;
-- convert all non-proof files (axioms, definitions, and theorems) from otter to Ref syntax
for fh in theorems_files + defs_files + axioms_files loop
ok_converts := [otr: item in file_body(fh) | (otr := otter_to_ref(item,of_name)) /= OM];
if exists otr in ok_converts | parse_expr(otr + ";") = OM then -- check the syntax of each recovered item, diagnosing failures
printy(["******* Error: bad reconverted Otter item ",otr]);
total_err_count +:= 1; return;
end if;
-- record a clean, unparsed version of each recovered formula, in canonical upper-case form
file_body(fh) := [case_change(unparse(parse_expr(otr + ";")(2)),"lu"): otr in ok_converts];
end loop;
--printy(["done otter to Ref syntax loop: ",#file_header_lines]); stop;
-- now verify that the Ref-side assumptions, definitions, and theorems agree with the
-- Otter-side assumptions, definitions, and theorems
-- get the otter assumptions, definitions, and theorems in Ref syntax
otter_assumps := {} +/ [{lin: lin in file_body(fh) | lin(#lin) /= "."}: fh in axioms_files];
otter_defs := {} +/ [{lin: lin in file_body(fh) | lin(#lin) /= "."}: fh in defs_files];
otter_theorems := {} +/ [{lin: lin in file_body(fh) | lin(#lin) /= "."}: fh in theorems_files];
--print("file_body('absLaws'): ",file_body("absLaws")," ",theorems_files," ",defs_files," ",axioms_files," ",proofs_files);
--print("otter_lines: ",file_header_lines,"\n",join(otter_lines(1..100),"\n"));
--print("assumps: ",assumps);
canonical_assumptions := {}; -- we collect the external assumptions and definitions and put them in canonical form
-- once standardized, external definitions are treated in the same way as external assumptions
for af in assumps loop -- check the syntax of the Ref-side assumps
ed := match(af,"ExtDef"); -- we deal with the Ref form of an otter definition
--->working
if ed /= "" then -- have an 'external definition' line in Ref
null;
span(af,"\t "); rspan(af,"\t "); -- process the definition, as formulated in Ref
if (afp := parse_expr(af + ";")) = OM or (afp := afp(2)) = OM or afp(1) /= "ast_assign" then
printy(["******* Error: illformed definition in external theory ",th,": ",af]);
total_err_count +:= 1; return;
end if;
--print("th: ",th);
afp(1) := "ast_eq"; af := fully_quantified_external("_otter_" + th,unparse(afp));
printy(["definition_body: ",af]);
elseif (afp := parse_expr(af + ";")) = OM then
printy(["******* Error: illformed assumption in external theory ",th,": ",af]);
total_err_count +:= 1; return;
else -- have an 'external assumption' line
afp := afp(2); -- as usual
--print("afp: ",afp);
if afp(1) = "ast_forall" then
boddie := afp(3); -- the body of the assumption
if boddie(1) = "DOT_IMP" and (bod3 := boddie(3))(1) = "ast_of" and bod3(2) = "IN_DOMAIN" then continue; end if;
-- bypass initial closure assumptions
elseif afp(1) = "ast_of" and afp(2) = "IN_DOMAIN" then
continue; -- bypass initial closure assumptions
end if;
canonical_assumptions with:= case_change(unparse(afp),"lu");
-- collect the ref assumption in canonical form
end if;
--print("assump: ",af);
end loop;
otter_assumps := {suppress_chars(join(breakup(x,char(165)),"YEN_")," \t"): x in otter_assumps}; -- ignore discrepant blanks
canonical_assumptions := {suppress_chars(join(breakup(x,char(165)),"YEN_")," \t"): x in canonical_assumptions}; -- ignore discrepant blanks
--print("otter_assumps: ",otter_assumps); print("canonical_assumptions: ",canonical_assumptions);
if (missing := otter_assumps - canonical_assumptions) /= {} then -- some assumptions are missing in the Ref form of the theory
for assump in missing loop
printy(["ERROR ******** Otter assumption: ",assump," is not present in Ref assumption list for theory ",th]);
total_err_count +:= 1;
end loop;
end if;
if (superf := canonical_assumptions - otter_assumps) /= {} then -- some Ref-side assumptions are superfluous for the Otter theory
for assump in superf loop
printy(["WARNING ******** Theory assumption: ",assump," is not required for Otter theory ",th]);
end loop;
end if;
--print("canonical_assumptions: ",canonical_assumptions);
--print("otter_theorems: ",otter_theorems);
canonical_theorems := {}; -- will collect the Ref-side theorems and put them in canonical form
tags_to_theorems := {}; -- map of theorem tags to theorem statements
printy([" "]);
for thm in thms loop -- check the syntax of the Ref-side thms
rbreak(thm,"]"); tag := rbreak(thm,"["); rmatch(thm,"["); rspan(thm,"\t ");
rspan(tag,"\t "); rmatch(tag,"]");
if (thmp := parse_expr(thm + ";")) = OM then
printy(["******* Error: illformed theorem in external theory ",th,": ",thm]);
total_err_count +:= 1; return;
end if;
canonical_theorems with:= suppress_thryvar(case_change(unparse(thmp),"lu"));
-- collect the Ref theorem in canonical form
tags_to_theorems with:= [tag,thm];
end loop;
--printy(["tags_to_theorems: ",tags_to_theorems]);
otter_theorems := {suppress_chars(join(breakup(x,char(165)),"YEN_")," \t"): x in otter_theorems}; -- ignore discrepant blanks
canonical_theorems := {suppress_chars(join(breakup(x,char(165)),"YEN_")," \t"): x in canonical_theorems}; -- ignore discrepant blanks
if (missing := canonical_theorems - otter_theorems) /= {} then -- some Ref-side theorems are missing from the Otter theorem set
for thm in missing loop
printy(["ERROR ******** theorem: ",thm," is not present in Otter theorem set for theory ",th]);
total_err_count +:= 1;
end loop;
end if;
if (superf := otter_theorems - canonical_theorems) /= {} then -- some Otter theorems are not carried over to the Ref theory
for thm in superf loop
printy(["WARNING ******** Otter theorem: ",thm," is not carried over to theory ",th]);
end loop;
end if;
--printy(["otter_theorems: ",otter_theorems," canonical_theorems: ",canonical_theorems]);
--print("check_an_otter_theory: ",th," assumed_fcns: ",assumed_fcns,"\nassumps",assumps,"\nthms",thms);
end check_an_otter_theory;
procedure suppress_thryvar(stg); -- suppresses all instances of '_THRYVAR' in string
return "" +/ [p in segregate(stg,"_THRYVA") | p /= "_THRYVAR"];
end suppress_thryvar;
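-- e.g. 'X_THRYVAR = X' becomes 'X = X'; segregate splits the string into alternating runs
-- of characters inside and outside the set '_THRYVA', and exactly those runs equal
-- to '_THRYVAR' are dropped before the remaining pieces are rejoined.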
procedure otter_to_ref(otter_item,otfile_name); -- converts an otter item to SETL syntax; returns unparsed tree or OM
--print("otter_item: ",otter_item);
orig_otter_item := otter_item;
if #otter_item > 1 and otter_item(1) = "(" then -- remove enclosing parentheses
match(otter_item,"("); span(otter_item,"\t "); rspan(otter_item,"\t "); rmatch(otter_item,")."); rspan(otter_item,"\t ");
end if;
if #otter_item > 3 and otter_item(1..3) = "all" then -- we have an axiom, definition, or theorem
rspan(otter_item,"\t "); rmatch(otter_item,"."); rspan(otter_item,"\t "); -- remove trailing dot if any
otter_item := otter_item(4..); span(otter_item,"\t "); prefix := break(otter_item,"("); rspan(prefix,"\t ");
prefix := [x in breakup(prefix," \t") | x /= ""];
if (parse_tree := alg_parse(otter_item)) = OM then -- otter item in external file has bad syntax
printy(["ERROR ******* Otter item in external file has bad syntax: ",orig_otter_item]);
total_err_count +:= 1; return OM;
end if;
-- res := "(FORALL " + join(prefix,",") + " | (" + join(["In_domain(" + p + ")": p in prefix]," & ") + ") •imp (" + unparse(parse_tree) + "))"; -- Mac version
res := "(FORALL " + join(prefix,",") + " | (" + join(["In_domain(" + p + ")": p in prefix]," & ") + ") ¥imp (" + unparse(parse_tree) + "))";
if parse_expr(res + ";") = OM then
printy(["ERROR ******* Otter item in external file has bad Ref version: ",orig_otter_item," ",res]);
total_err_count +:= 1; return OM;
end if;
--print("otter_item and translation: ",orig_otter_item,"\n",res,"\n",parse_tree);
return res;
else
return otter_item; -- return item unchanged
end if;
end otter_to_ref;
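-- A hedged worked example: the otter item
--      all x y (p(x,y) = p(y,x)).
-- yields prefix ['x','y'] and (assuming alg_parse accepts the body) a Ref version of the form
--      (FORALL x,y | (In_domain(x) & In_domain(y)) ¥imp ((p(x,y) = p(y,x))))
-- while items not beginning with 'all' are returned unchanged.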
-- **************************************************************
-- ************* Formula-feature search utilities ***************
-- **************************************************************
-- Next follows a miscellaneous collection of formula search and manipulation utilities.
procedure find_free_vars_and_fcns(node); -- find the free variables and function symbols in a tree (main entry)
all_free_vars := all_fcns := {}; find_free_vars_and_fcns_in(node,[]); return [all_free_vars,all_fcns];
-- use the recursive workhorse and a global variable
end find_free_vars_and_fcns;
procedure find_free_vars_and_fcns_in(node,bound_vars); -- find the free variables in a tree (recursive workhorse)
--if node(1) = "ast_if_expr" then printy(["find_free_vars_and_fcns_in: ",node]); end if;
if is_string(node) then -- ******** the following should be generalized so as not to assume that we are in Set_theory ********
if (isund := symbol_def("Set_theory:" + node) = OM) and node notin bound_vars and node /= "OM" and node /= "_nullset" and node notin special_set_names then
all_free_vars with:= node;
elseif not isund then -- the symbol has a definition, so count it as a parameterless constant, i.e. function
all_fcns with:= node;
end if;
return;
end if;
case (ah := abbreviated_headers(node(1)))
when "and","or","==","+","-","{-}","in","notin","/==","=","/=","[]","[-]","{.}","itr","Etr","incs","incin","imp","*","->","not","null" => -- ordinary operators
for sn in node(2..) loop find_free_vars_and_fcns_in(sn,bound_vars); end loop;
when "arb","range","domain" => -- ordinary operators
for sn in node(2..) loop find_free_vars_and_fcns_in(sn,bound_vars); end loop;
when "()" => -- this is the case of functional and predicate application; the second variable is a reserved symbol, not a set
all_fcns with:= node(2);
for sn in node(3..) loop find_free_vars_and_fcns_in(sn,bound_vars); end loop;
when "{}","{/}","EX","ALL" => bound_vars +:= find_bound_vars(node); -- setformer or quantifier; note the bound variables
--printy(["bound_vars: ",bound_vars]);
for sn in node(2..) loop find_free_vars_and_fcns_in(sn,bound_vars); end loop; -- collect free variables in args
when "@" => -- functional application
for sn in node(2..) loop find_free_vars_and_fcns_in(sn,bound_vars); end loop; -- collect free variables in args
when "if" => -- conditional expression
for sn in node(2..) loop find_free_vars_and_fcns_in(sn,bound_vars); end loop; -- collect free variables in args
otherwise => -- additional infix and prefix operators, including if-expressions
all_fcns with:= node(1);
for sn in node(2..) loop find_free_vars_and_fcns_in(sn,bound_vars); end loop; -- collect free variables in args
end case;
end find_free_vars_and_fcns_in;
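-- For instance (hedged; identifier case follows the parse trees): on the tree of
--      F(x) in {y: y in s}
-- the walk records F as a function symbol (the '()' case) and x and s as free variables,
-- while y is excluded as a bound variable of the setformer.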
procedure find_quantifier_bindings(node); -- find the variable bindings at the top of an iterator tree
case abbreviated_headers(node(1))
when "{}" => iter_list := node(3); -- setformer; get iteration list from position 3
when "EX","{/}" => iter_list := node(2); -- existential or setformer without exp; get iteration list from position 2
when "ALL" => iter_list := node(2); -- universal; get iteration list from position 2
otherwise => return {}; -- no bound variables at this node
end case; -- now process the iteration list
if is_string(itl2 := iter_list(2)) then return {[itl2,["OM"]]}; end if; -- case of an unconstrained quantifier
bindings := {}; -- start to collect ordered set of bound variables
for iter_clause in iter_list(2..) loop
case abbreviated_headers(iter_clause(1))
when "=" => bindings with:= [iter_clause(2),["OM"]]; -- x = f(y) or x = f{y} iterator.
bindings with:= [iter_clause(3)(3)(2),["OM"]];
-- from the 'functional' tail of the iterator, dig out the argument list and then its first element
-- Note: in iterator constructions like x = f(y,z,w), only the first argument is bound by the iterator
when "in" => bindings with:= [iter_clause(2),["in",iter_clause(3)]];
when "incin" => bindings with:= [iter_clause(2),["OM"]]; -- x in s or x incin s iterator; collect x
end case;
end loop;
--printy(["find_quantifier_bindings: ",node,"\nbindings: ",bindings]);
return bindings;
end find_quantifier_bindings;
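-- For instance (hedged): the tree of (FORALL x in s | P(x)) yields the bindings
--      {[<x-node>,["in",<s-node>]]}
-- while an unconstrained quantifier such as (FORALL x | P(x)) yields {[<x-node>,["OM"]]}.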
procedure list_of_vars_defined(theory_in,kind_hint_stat_tup); -- find the ordered list of variables defined in a proof
-- these are the variables in loc_defs, those in statement substitutions of existential, notin, or notincs kind
-- and the output variables of APPLYs
lovd := []; -- the list to be assembled
for [hint_kind,hint,stat] in kind_hint_stat_tup loop
--print("<BR>[hint_kind,hint,stat]: ",[hint_kind,hint,stat]);
case hint_kind
when "Suppose_not" =>
span(hint," \t"); rspan(hint," \t");
if hint /= "Suppose_not" then -- check the variables provided for substitution
if (p_hint := parse_expr(hint + ";")) = OM then
printy(["******** Error: Illformed variable list in 'Suppose_not': ",hint]); continue;
end if;
if (p_hint := p_hint(2))(1) /= "ast_of" or (exists x in (argl := p_hint(3)) | not is_string(x)) then
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nIllformed list of variables in 'Suppose_not' proof step"]);
error_count +:= 1; continue;
end if;
else -- there are no arguments
argl := ["null"];
end if;
lovd +:= argl(2..);
--printy(["Suppose_not case: ",lovd]);
when "Suppose" =>
ancestor_theories := [theory_in]; -- find the ancestor theories of the theory being applied
theory_nm := theory_in;
while (parent_theory := parent_of_theory(theory_nm)) /= OM loop
ancestor_theories := [parent_theory] + ancestor_theories; theory_nm := parent_theory;
end loop;
freevs_of_stat := if (pas := parse_expr(drop_labels(stat)(1) + ";")) = OM then {} else find_free_vars(pas(2)) end if;
-- get those free variables which have not yet been defined either globally or locally
symbols_glob_defd := {symb: symb_w_theory in domain(symbol_def) | symbol_def(symb_w_theory)(1) = []
and ([th,symb] := breakup(symb_w_theory,":"))(2)(1) notin "¥•" and th in ancestor_theories};
-- all parameterless constants defined in ancestor theories of this theory
consts_of_ancestor_theories := [case_change(x,"lu"): thry in ancestor_theories, x in (assumps_and_consts_of_theory(thry)?[[]])(1) | "(" notin x];
-- all parameterless constants assumed in this theory and its ancestors
--printy(["consts_of_ancestor_theories: ",consts_of_ancestor_theories]);
if (bad_freevs := {v: v in freevs_of_stat | (v notin lovd) and (v notin symbols_glob_defd)
and (v notin consts_of_ancestor_theories)}) /= {} then
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nvariables " + bad_freevs + " not previously defined used in 'Suppose' statement"]);
printy(["Erroneous free variables are ",bad_freevs]);
error_count +:= 1;
end if;
--printy(["Suppose case: ",freevs_of_stat," ",stat," symbols_glob_defd: ",symbols_glob_defd," theory_in: ",theory_in," ancestor_theories: ",ancestor_theories]);
when "Loc_def" => if (p_stat := parse_expr(drop_labels(stat)(1) + ";")) = OM then
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nIllformed 'Loc_def' inference in proof step"]);
error_count +:= 1; continue;
end if;
-- flatten the tree, which might be a conjunction, and test its parts separately
tup := conj_to_tuple(p_stat(2)); -- walks a syntax tree of a conjunct, converting it to a tuple
--print("tup: ",tup," ",p_stat(2));
for pconj in tup loop
if pconj(1) /= "ast_eq" or not is_string(left := pconj(2)) then
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nStatement " + stat + "in Loc_def inference is not a conjunction of simple equalities"]);
printy(["Statement must be a conjunction of simple equalities, each with simple variable on the left: ",stat]);
error_count +:= 1; continue;
end if;
lovd with:= left;
end loop;
--printy(["Loc_def case: ",stat," ",lovd]);
when "APPLY" => lovd +:= [case_change(y?("??" + hint + "??"),"lu"): [x,y] in breakup(breakup(hint,","),":")]; -- here 'hint' is actually the output variable list
--printy(["APPLY case: ",lovd]);
when "Stat_subst" => -- here 'stat' is actually the statement cited
hint_pieces := segregate(hint,"->");
if not (exists piece = hint_pieces(j) | piece = "-->") then
continue; -- bypass statement-substitution cases without supplied variables
end if;
piece := hint_pieces(1); rspan(piece," \t"); span(piece," \t");
preflist := piece; rspan(preflist,"\t "); span(preflist,"\t "); -- clean the list of substitution items from the hint
lp := match(preflist,"("); rp := rmatch(preflist,")");
if lp = "" then preflist := [preflist]; else preflist := split_at_bare_commas(preflist); end if;
-- decompose the list into its comma-separated pieces
--printy(["piece:: ",piece," ",preflist]);
-- statement_cited := join(breakup(stat,"&")," and "); -- replace ampersands with 'ands' in the statement cited
[stat_with_freevars,freevars] := strip_quants(parse_expr(stat + ";")(2),npl := #preflist);
-- the strip_quants procedure returns the list freevars of generated variables,
-- in the modified version stat_with_freevars of stat that would be used for substitution
-- if the statement substitution inference were actually performed. These are implicitly
-- marked as 'existential' or 'universal'; the existentials have the form 'VAR_nn' with odd
-- values of the integer nn, while the universals have even values.
--printy(["freevars by strip_quants: ",freevars," ",preflist," ",stat]);
if #freevars < #preflist then
--print("<BR>Error: "+ number_of_statement + " of theorem ",freevars," ",preflist," ",stat);
printy(["\n****** Error verifying step: "+ number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nToo many items to be substituted were supplied in 'Stat' substitution"]);
printy(["Surplus items will be treated as existentially instantiated"]);
error_count +:= 1;
end if;
for substitem = preflist(j) loop
if (fv := freevars(j)) = OM or (#fv > 4 and odd(unstr(fv(4..#fv - 1)))) then -- treat as existential
if (pes := parse_expr(substitem + ";")) = OM or not is_string(pes(2)) then
printy(["\n****** Error verifying step: " + number_of_statement + " of theorem "
+ number_of_statement_theorem + "\n\t", statement_being_tested,
"\nIllformed or non-simple expression supplied for existential instantiation"]);
error_count +:= 1; continue;
end if;
lovd with:= case_change(substitem,"lu"); -- capture the substitem
end if;
-- in the universal case there is nothing to do here
end loop;
--printy(["Stat_subst case: ",lovd]);
end case;
end loop;
return lovd;
end list_of_vars_defined;
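-- illustrative example for the APPLY case above: an output-variable hint of the form
-- "u:a,v:b" contributes the uppercased output names "A" and "B" to lovd.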
procedure trim_front(stg); span(stg," \t"); return stg; end trim_front; -- removes leading whitespace
procedure front_label(stg); -- finds prefixed Statnnn: in string, if any and returns it; otherwise returns an empty string
-- front labels can be any alphanumeric prefix starting with Stat and ending in a colon
-- drop_locdef(stg); -- drop "Loc_def:" if it appears
tup := segregate(stg,"Stabcdefghijklmnopqrsuvwxyz:0123456789");
newt := lablocs := [];
sect := tup(1); -- get the first section
if #sect > 5 and sect(1..4) = "Stat" and sect(#sect) = ":" then -- this is a prefixed label; return it
return sect(1..#sect - 1);
end if;
return ""; -- return an empty string, indicating no label
end front_label;
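-- illustrative example: front_label("Stat3: x = y") returns "Stat3", while
-- front_label("x = y") returns the empty string.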
procedure loc_break(stg); -- finds location of ==> in string, if any; position of last character is returned
tup := segregate(stg,">=");
return if exists sect = tup(j) | (sect = "==>" or sect = "===>") then +/ [#piece: piece in tup(1..j)] else OM end if;
end loc_break;
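-- illustrative example: loc_break("a = b ==> c") returns 9, the position of the final '>'
-- of the arrow; loc_break("a = b") returns OM since no arrow is present.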
procedure decompose_ud_auto_hint(hint); -- analyze Use_def AUTO case hint into its (up to) 3 parts
-- in AUTO cases, the hint can have one of 4 forms:
-- Use_def(symbol)
-- Use_def(symbol->Statnnn)
-- Use_def(symbol(p_1,...,p_k))
-- Use_def(symbol(p_1,...,p_k)->Statnnn)
hint := hint(9..); -- take tail only
if not (exists c = hint(k) | c = "(") then -- parameterless case
if exists j in [1..#hint - 2] | hint(j..j + 2) = "-->" then -- labeled case
tail := hint(j + 3..); hint := hint(1..j - 1); -- isolate the label
span(tail," \t"); rspan(tail," \t)");
span(hint," \t"); rspan(hint," \t"); -- clean up the remainder of the hint
return [hint,[],tail];
else -- no label
span(hint," \t"); rspan(hint," \t)"); -- clean up the symbol (only)
return [hint,[]];
end if;
else -- case with parameters
if exists j in [1..#hint - 2] | hint(j..j + 1) = "->" then -- labeled case
tail := hint(j + 2..); hint := hint(1..j - 1); -- isolate the label
span(tail," \t"); rspan(tail," \t)");
-- now extract the symbol and params
span(hint," \t"); rspan(hint," \t"); rmatch(hint,")");
symbol := break(hint,"("); rspan(symbol," \t)");
match(hint,"("); rmatch(hint,")"); -- drop outermost parens
return [symbol,split_at_bare_commas(hint),tail];
else -- no label
-- extract the symbol and params
span(hint," \t"); rspan(hint," \t"); rmatch(hint,")");
symbol := break(hint,"("); rspan(symbol," \t)");
match(hint,"("); rmatch(hint,")"); -- drop outermost parens
return [symbol,split_at_bare_commas(hint)];
end if;
end if;
end decompose_ud_auto_hint;
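-- illustrative examples, traced against the code above for the standard hint forms:
-- decompose_ud_auto_hint("Use_def(car(p)->Stat2)") returns ["car",["p"],"Stat2"];
-- decompose_ud_auto_hint("Use_def(car)") returns ["car",[]].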
procedure split_at_bare_commas(stg); -- splits a string at commas not included in brackets
newt := tup := single_out(stg,"{}[](),"); -- single out all the relevant marks
nesting := 0; -- iterate thru the resulting list, keeping track of the parenthesis level
for x = tup(j) loop
case x
when "{","[","(" => nesting +:= 1; -- increment level at each open paren
when "}","]",")" => nesting -:= 1; -- decrement level at each open paren
when "," => if nesting > 0 then newt(j) := "`"; end if; -- change nested commas to back apostrophes
end case;
end loop;
return [trim(join(breakup(stg,"`"),",")): stg in breakup("" +/ newt,",")];
-- break the resulting string at the remaining (non-nested) commas, and restore the back-apostrophes
-- in the remaining pieces to the commas originally present; remove surrounding whitespace if any
end split_at_bare_commas;
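-- illustrative example: split_at_bare_commas("f(x,y), {a,b}, z") returns
-- ["f(x,y)","{a,b}","z"]; the commas nested inside parentheses and braces are protected.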
procedure trim(line); span(line,"\t "); rspan(line,"\t "); return line; end trim; -- trim off whitespace
procedure paren_check(stg); -- preliminary parenthesis-check
for x in "{[(" loop count(x) := 0; end loop;
matching_open := {["}","{"],["]","["],[")","("]}; -- map each closing mark to its opener
for c = stg(j) | c in "{[()]}" loop
if c in "{[(" then
count(c) +:= 1;
else
if (count(matching_open(c)) -:= 1) < 0 then -- decrement the count of the matching opener
abort("Parenthesis error of type " + c + " at position " + j + " in line:\n" + stg);
end if;
end if;
end loop;
if exists x in "{[(" | count(x) /= 0 then abort("Parenthesis error of type " + x + " at end of " + stg); end if;
return true;
end paren_check;
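-- illustrative example: paren_check("{[(a,b)]}") returns true, while paren_check("(]")
-- aborts, reporting a parenthesis error of type "]".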
procedure find_defined_symbols(); -- extracts the sequence of definitions, theorems, and theories from a scenario file (currently an empty stub)
end find_defined_symbols;
procedure strip_quants(tree,nquants); -- strip a specified number of quantifiers from a formula
var var_generator := 0; -- (main entry)
--print("<P>strip_quants ",unparse(tree)," ",nquants);
res := strip_quants_in(tree,nquants); -- call internal workhorse
return res;
procedure strip_quants_in(tree,nquants); -- strip a specified number of quantifiers from a formula
-- this returns a pair [formula_tree,free_vars_list]; if there are not enough bound variables to strip,
-- then nquants > #free_vars_list upon return
-- the allowed trees must represent formulas of one of the forms
-- (FORALL ...)
-- (EXISTS ...)
-- y in {e(x): ...} which is treated as (EXISTS ... | y = e(x))
-- y notin {e(x): ...} which is treated as (FORALL ... | y /= e(x))
-- s /= t for which only a collection of special cases are treated:
-- if one of s and t, say t is explicitly null, this is treated as (EXISTS y in s)
-- if s and t are setformers with identical iterators, this is treated as (EXISTS ... | e(x) /= e'(x) or not(P(x) •eq P'(x)))
-- otherwise we merely conclude that there exists a c in one set but not the other
-- trees of forms not amenable to processing are simply returned, along with a null list of variables.
-- the elements of a conjunct are processed separately, and the generated lists of free variables
-- are made disjoint and appended.
--printy(["tree: ",tree]);
if is_string(tree) then return [tree,[]]; end if; -- in this case (basically an error) there are no bound vars to strip
[n1,n2,n3] := tree;
case n1 -- consider the various cases separately
when "ast_not" => -- negations
if not is_tuple(n2) then return [tree,[]]; end if; -- cannot process
[m1,m2,m3] := n2; -- unpack, and look for cases that can be handled
case m1 -- consider various cases separately
when "ast_not" => return strip_quants_in(m2,nquants); -- double negation
when "ast_and","AMP_" => return strip_quants_in(["ast_or",["ast_not",m2],["ast_not",m3]],nquants);
-- negated conjunction
when "ast_or" => return strip_quants_in(["ast_and",["ast_not",m2],["ast_not",m3]],nquants);
-- negated disjunction
when "DOT_IMP" => return strip_quants_in(["ast_and",m2,["ast_not",m3]],nquants); -- rewrite as conjunction
when "ast_forall" => return strip_quants_in(["ast_exists",m2,["ast_not",m3]],nquants); -- use deMorgan Law
when "ast_exists" => return strip_quants_in(["ast_forall",m2,["ast_not",m3]],nquants); -- use deMorgan Law
when "ast_in" => return strip_quants_in(["ast_notin",m2,m3],nquants); -- reverse membership
when "ast_notin" => return strip_quants_in(["ast_in",m2,m3],nquants); -- reverse membership
when "ast_eq" => return strip_quants_in(["ast_ne",m2,m3],nquants); -- convert to inequality
when "ast_ne" => return strip_quants_in(["ast_eq",m2,m3],nquants); -- convert to equality
when "ast_incs" => -- negated inclusion: not (m2 incs m3)
if m2 = "0" or m2 = "_nullset" then return strip_quants_in(["ast_ne",m2,m3],nquants); end if; -- treat as inequality
-- otherwise look for identical iterators up to the number of quantifiers required
if (m21 := m2(1)) /= (m31 := m3(1)) or (m21 /= "ast_genset" and m21 /= "ast_genset_noexp") then
-- merely assert existence of element in one set but not in other
genvar := "VAR" + ((var_generator +:= 2) + 1) + "_";
return [["ast_and",["ast_in",genvar,m3],["ast_notin",genvar,m2]],[genvar]];
end if;
-- otherwise we are comparing setformers with identical iterators
if m21 = "ast_genset" then -- if these are both setformers with lead expreesssions
[-,m22,m23,m24] := m2; [-,m32,m33,m34] := m3; -- unpack the two setformers being compared
else -- otherwise these are both setformers without lead expressions
[-,m23,m24] := m2; [-,m33,m34] := m3; -- unpack the two setformers being compared
end if;
if m23 /= m33 then -- no special handling unless the iterators are the same
genvar := "VAR" + ((var_generator +:= 2) + 1) + "_";
return [["ast_and",["ast_in",genvar,m3],["ast_notin",genvar,m2]],[genvar]];
end if;
-- in this case, say that for some variables in the range of the iterators, either the conditions in the setformers (if any) are inequivalent
-- or the lead expressions of the setformers have different values
s_andnot_f := if (m24 /= ["ast_null"]) and (m34 /= ["ast_null"]) then ["ast_and",["ast_not",m24],m34] -- second condition does not imply the first
elseif m24 /= ["ast_null"] then ["ast_not",m24]
else []
end if;
--print("<P>strip_quants_in1 ",m24," ",m34," "," ",s_andnot_f);
if m21 = "ast_genset" then -- say that leading terms are not equal or second condition does not imply the first
statbod := if s_andnot_f /= [] then ["ast_or",["ast_ne",m22,m32],s_andnot_f] else ["ast_ne",m22,m32] end if;
elseif s_andnot_f = [] then
statbod := "FALSE"; -- since the negated inclusion not ({x in s} incs {x in s | p2(x)}) is always false
else
statbod := s_andnot_f;
-- since the negated inclusion not ({x in s | p1(x)} incs {x in s | p2(x)}) is equivalent to (exists x in s | p2(x) and not p1(x))
end if;
--print("<P>strip_quants_in ",(["ast_exists",m23,statbod?"BODYUNDEFINED"])," ",nquants," ast_true: ",ast_true);
--print("<P>strip_quants_in ",unparse(["ast_exists",m23,statbod]));
return strip_quants_in(["ast_exists",m23,statbod],nquants); -- convert into existential
when "DOT_INCIN" => -- negated inclusion: not (m3 incs m2)
return strip_quants_in([n1,["ast_incs",m3,m2]],nquants); -- permute arguments and treat as preceding case
otherwise => return [tree,[]]; -- cannot process
end case;
when "ast_and","AMP_" => [left_tree,left_vars] := strip_quants_in(n2,nquants);
if (nlv := #left_vars) >= nquants then return [[n1,left_tree,n3],left_vars]; end if;
-- only first conjunct need be processed
[right_tree,right_vars] := strip_quants_in(n3,nquants - nlv); -- otherwise try to get remaining free variables
return [[n1,left_tree,right_tree],left_vars + right_vars]; -- from right-hand side of conjunction
when "ast_or" => [left_tree,left_vars] := strip_quants_in(n2,nquants);
if (nlv := #left_vars) >= nquants then return [[n1,left_tree,n3],left_vars]; end if;
-- only first conjunct need be processed
[right_tree,right_vars] := strip_quants_in(n3,nquants - nlv); -- otherwise try to get remaining free variables
return [[n1,left_tree,right_tree],left_vars + right_vars]; -- from right-hand side of disjunction
when "DOT_IMP" => return strip_quants_in(["ast_or",["ast_not",n2],n3],nquants); -- rewrite as disjunction
when "ast_forall" => iter_list := n2(2..); quant_body := n3; -- extract the iteration list
if nquants < (nil := #iter_list) then -- use only the specified number of quantifiers
il := iter_list(1..nquants);
[newvar_list,var_map] := setup_vars(il,0);
-- second parameter designates nature of variable returned (universal or existential)
-- build the formula with unsubstituted variables, and replace them with new variables
uns_form := ["ast_not",conjoin_iters(il,["ast_exists",[n2(1)] + iter_list(nquants + 1..),
if quant_body(1) = "ast_not" then quant_body(2) else ["ast_not",quant_body] end if])];
--printy(["uns_form: ",uns_form,"\n",quant_body]);
return [substitute(uns_form,var_map),newvar_list];
elseif nquants = nil then -- use all the quantifiers
[newvar_list,var_map] := setup_vars(iter_list,0);
uns_form :=["ast_not", conjoin_iters(iter_list,if quant_body(1) = "ast_not" then quant_body(2) else ["ast_not",quant_body] end if)];
return [substitute(uns_form,var_map),newvar_list];
elseif (qb1 := quant_body(1)) /= "ast_forall" and qb1 /= "ast_exists" then
-- use as many quantifiers as are available at this final level
[newvar_list,var_map] := setup_vars(iter_list,0);
uns_form := ["ast_not",conjoin_iters(iter_list,if quant_body(1) = "ast_not" then quant_body(2) else ["ast_not",quant_body] end if)];
return [substitute(uns_form,var_map),newvar_list];
else -- there are insufficiently many top level quantifiers; pursue to next level since the body starts with a quantifier
[newbod,vars_below] := strip_quants(quant_body,nquants - nil);
[newvar_list,var_map] := setup_vars(iter_list,0);
uns_form := ["ast_not",conjoin_iters(iter_list,if newbod(1) = "ast_not" then newbod(2) else ["ast_not",newbod] end if)];
return [substitute(uns_form,var_map),newvar_list + vars_below];
end if;
when "ast_exists" => iter_list := n2(2..); quant_body := n3; -- extract the iteration list
if nquants < (nil := #iter_list) then -- use only the specified number of quantifiers
il := iter_list(1..nquants);
[newvar_list,var_map] := setup_vars(il,1);
-- second parameter designates nature of variable returned (universal or existential)
-- build the formula with unsubstituted variables, and replace them with new variables
uns_form := conjoin_iters(il,[n1,[n2(1)] + iter_list(nquants + 1..),quant_body]);
return [substitute(uns_form,var_map),newvar_list];
elseif nquants = nil then -- use all the quantifiers
[newvar_list,var_map] := setup_vars(iter_list,if n1 = "ast_forall" then 0 else 1 end if);
uns_form := conjoin_iters(iter_list,quant_body);
return [substitute(uns_form,var_map),newvar_list];
elseif (qb1 := quant_body(1)) /= "ast_forall" and qb1 /= "ast_exists" then
-- use as many quantifiers as are available at this final level
[newvar_list,var_map] := setup_vars(iter_list,if n1 = "ast_forall" then 0 else 1 end if);
uns_form := conjoin_iters(iter_list,quant_body);
return [substitute(uns_form,var_map),newvar_list];
else -- there are insufficiently many top level quantifiers; pursue to next level since the body starts with a quantifier
[newbod,vars_below] := strip_quants(quant_body,nquants - nil);
[newvar_list,var_map] := setup_vars(iter_list,if n1 = "ast_forall" then 0 else 1 end if);
uns_form := conjoin_iters(iter_list,newbod);
return [substitute(uns_form,var_map),newvar_list + vars_below];
end if;
when "ast_in" =>
the_memb := n2; setformer_tree := n3;
if not is_tuple(setformer_tree) then return [tree,[]]; end if;
[m1,m2,m3,m4] := setformer_tree;
if m1 = "ast_genset" then -- general setformer; treat as disguised existential
quant_body := if m4 /= ["ast_null"] then ["ast_and",["ast_eq",the_memb,m2],m4] else ["ast_eq",the_memb,m2] end if;
-- use this as 'body' of existential
iter_list := m3(2..);
if nquants < (nil := #iter_list) then -- use only the specified number of quantifiers
il := iter_list(1..nquants);
[newvar_list,var_map] := setup_vars(il,1);
uns_form := conjoin_iters(il,["ast_exists"] + iter_list(nquants + 1..) with quant_body);
return [substitute(uns_form,var_map),newvar_list];
else -- use the whole iter_list
[newvar_list,var_map] := setup_vars(iter_list,1);
uns_form := conjoin_iters(iter_list,quant_body);
return [substitute(uns_form,var_map),newvar_list];
end if;
elseif m1 = "ast_genset_noexp" then -- setformer without lead expression (can be just one iter)
[iter_kind,iter_var,iter_restriction] := (iter_list := m2(2..))(1);
newbod := if m3 = ["ast_null"] then [iter_kind,the_memb,iter_restriction]
else ["ast_and",[iter_kind,the_memb,iter_restriction],substitute(m3,{[iter_var,the_memb]})] end if;
return [newbod,[]]; -- in this case the bound variable of the iterator has already been replaced
else -- not a setformer
return [tree,[]];
end if;
when "ast_notin" => the_nonmemb := n2; setformer_tree := n3;
the_memb := n2; setformer_tree := n3;
if not is_tuple(setformer_tree) then return [tree,[]]; end if;
[m1,m2,m3,m4] := setformer_tree;
if m1 = "ast_genset" then -- general setformer; treat as disguised existential
quant_body := if m4 /= ["ast_null"] then ["ast_and",["ast_eq",the_memb,m2],m4] else ["ast_eq",the_memb,m2] end if;
-- use this as 'body' of existential
iter_list := m3(2..);
if nquants < (nil := #iter_list) then -- use only the specified number of quantifiers
il := iter_list(1..nquants);
[newvar_list,var_map] := setup_vars(il,0);
uns_form := ["ast_not",conjoin_iters(il,["ast_exists"] + iter_list(nquants + 1..) with quant_body)];
return [substitute(uns_form,var_map),newvar_list];
else -- use the whole iter_list
[newvar_list,var_map] := setup_vars(iter_list,0);
uns_form := ["ast_not",conjoin_iters(iter_list,quant_body)];
return [substitute(uns_form,var_map),newvar_list];
end if;
elseif m1 = "ast_genset_noexp" then -- setformer without lead expression (can be just one iter)
[iter_kind,iter_var,iter_restriction] := (iter_list := m2(2..))(1);
newbod := if m3 = ["ast_null"] then [iter_kind,the_memb,iter_restriction]
else ["ast_and",[iter_kind,the_memb,iter_restriction],substitute(m3,{[iter_var,the_memb]})] end if;
return [["ast_not",newbod],[iter_var]]; -- in this case the bound variable of the iterator has already been replaced
else -- not a setformer
return [tree,[]];
end if;
when "ast_ne" =>
-- in this case only certain special cases are treated:
-- if one of s and t, say t is explicitly null, this is treated as (EXISTS y in s)
-- if s and t are setformers with identical iterators, this is treated as (EXISTS ... | e(x) /= e'(x) or not(P(x) •eq P'(x)))
-- otherwise we merely conclude that there exists a c in one set but not the other
if n2 = "0" or n2 = "_nullset" then [n2,n3] := [n3,n2]; end if; -- if nullset present, standardize it as final variable
if n3 = "0" or n3 = "_nullset" then -- treat as existential
if not is_tuple(n2) or ((n21 := n2(1)) /= "ast_genset" and n21 /= "ast_genset_noexp") then
return [["ast_in",genvar := "VAR" + ((var_generator +:= 2) + 1) + "_",n2],[genvar]];
-- convert to existential membership statement
end if;
-- otherwise treat as existential
if n21 = "ast_genset" then
[-,-,iters,cond] := n2; if cond = ["ast_null"] then cond := "true"; end if;
else -- "ast_genset_noexp" case
[-,iters,cond] := n2; if cond = ["ast_null"] then cond := "true"; end if;
end if;
return strip_quants_in(["ast_exists",iters,cond],nquants); -- convert into existential
end if;
-- otherwise look for identical iterators up to the number of quantifiers required
if (n21 := n2(1)) /= (n31 := n3(1)) or (n21 /= "ast_genset" and n21 /= "ast_genset_noexp") then
-- merely assert existence of element in one set but not in other
genvar := "VAR" + ((var_generator +:= 2) + 1) + "_";
return [["ast_or",["ast_and",["ast_in",genvar,n2],["ast_notin",genvar,n3]],["ast_and",["ast_notin",genvar,n2],["ast_in",genvar,n3]]],[genvar]];
end if;
if n21 = "ast_genset" then
[-,m22,m23,m24] := n2; [-,m32,m33,m34] := n3; -- unpack the two setformers being compared
else
[-,m23,m24] := n2; [-,m33,m34] := n3; -- unpack the two setformers being compared
end if;
if m23 /= m33 then -- no special handling unless the iterators are the same
genvar := "VAR" + ((var_generator +:= 2) + 1) + "_";
return [["ast_or",["ast_and",["ast_in",genvar,n2],["ast_notin",genvar,n3]],["ast_and",["ast_notin",genvar,n2],["ast_in",genvar,n3]]],[genvar]];
end if;
-- in this case, say that for some variables in the range of the iterators, either the conditions in the setformers (if any) are inequivalent
-- or the lead expressions of the setformers have different values
conds_ineq := if m24 /= ["ast_null"] and m34 /= ["ast_null"] then
["ast_or",["ast_and",m24,["ast_not",m34]],["ast_and",["ast_not",m24],m34]]
elseif m24 /= ["ast_null"] then
["ast_not",m24]
elseif m34 /= ["ast_null"] then
["ast_not",m34]
else
[]
end if;
if n21 = "ast_genset" then -- say that leading terms are not equal or conditions not equivalent
statbod := if conds_ineq /= [] then ["ast_or",["ast_ne",m22,m32],conds_ineq] else ["ast_ne",m22,m32] end if;
else
statbod := conds_ineq;
end if;
return strip_quants_in(["ast_exists",m23,statbod],nquants); -- convert into existential
when "ast_eq" =>
-- in this case only certain special cases are treated:
-- if one of s and t, say t is explicitly null, this is treated as (FORALL y in s | not P(y) )
-- otherwise we merely conclude that there exists no c in one set but not the other
if n2 = "0" or n2 = "_nullset" then [n2,n3] := [n3,n2]; end if; -- if nullset present, standardize it as final variable
if n3 = "0" or n3 = "_nullset" then -- treat as universal
if not is_tuple(n2) or ((n21 := n2(1)) /= "ast_genset" and n21 /= "ast_genset_noexp") then
return [["ast_notin",genvar := "VAR" + (var_generator +:= 2) + "_",tree],[genvar]];
-- convert to universal nonmembership statement
end if;
-- otherwise treat as universal
if n21 = "ast_genset" then
[-,-,iters,cond] := n2; if cond = ["ast_null"] then cond := "true"; end if;
else -- "ast_genset_noexp" case
[-,iters,cond] := n2; if cond = ["ast_null"] then cond := "true"; end if;
end if;
return strip_quants_in(["ast_forall",iters,["ast_not",cond]],nquants); -- convert into universal
end if;
genvar := "VAR" + ((var_generator +:= 2) + 1) + "_";
return [["ast_or",["ast_and",["ast_in",genvar,n2],["ast_notin",genvar,n3]],["ast_and",["ast_notin",genvar,n2],["ast_in",genvar,n3]]],[genvar]];
when "ast_incs" =>
-- in this case only certain special cases are treated:
-- if the first argument is explicitly null, this is treated as (FORALL y in s | not P(y))
-- otherwise we merely conclude that there exists no c in the first set but not in the second
if n2 = "0" or n2 = "_nullset" then -- treat as equality
return strip_quants_in(["ast_eq",n2,n3],nquants);
end if;
genvar := "VAR" + ((var_generator +:= 2) + 1) + "_";
return [["ast_not",["ast_and",["ast_in",genvar,n2],["ast_notin",genvar,n3]]],[genvar]];
when "DOT_INCIN" => return strip_quants_in(["ast_incs",n3,n2],nquants); -- treat as reversed 'incs'
when "DOT_NINCIN" => return strip_quants_in(["ast_not",["ast_incs",n3,n2]],nquants); -- treat as negated reversed 'incs'
when "DOT_NINCS" => return strip_quants_in(["ast_not",["ast_incs",n2,n3]],nquants); -- treat as negated 'incs'
otherwise => return [tree,[]]; -- cases not amenable to treatment
end case;
end strip_quants_in;
procedure setup_vars(il,offs); -- 'offs' flags nature of variable returned (universal or existential)
var_list := [if is_tuple(iter) then iter(2) else iter end if: iter in il];
newvar_list := ["VAR" + ((var_generator +:= 2) + offs) + "_": v in var_list];
var_map := {[v,newvar_list(j)]: v = var_list(j)};
return [newvar_list,var_map];
end setup_vars;
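-- illustrative example (assuming var_generator is initially 0): for iterators binding x and y,
-- setup_vars returns ["VAR2_","VAR4_"] with offs = 0 (universal, even numbering) and
-- ["VAR3_","VAR5_"] with offs = 1 (existential, odd numbering), together with the substitution
-- map from the old names to the new, e.g. {["X","VAR2_"],["Y","VAR4_"]}.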
end strip_quants;
procedure conjoin_iters(list_of_iters,quant_body); -- conjoin list of iterators to formula body
-- unrestricted iterators are ignored
for j in [nli := #list_of_iters,nli - 1..1] | is_tuple(iter := list_of_iters(j)) and iter(3) /= "OM" loop
quant_body := ["ast_and",iter,quant_body];
end loop;
--printy(["quant_body: ",quant_body]);
return quant_body;
end conjoin_iters;
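-- illustrative example: conjoin_iters([["ast_in","X","S"]],body) returns
-- ["ast_and",["ast_in","X","S"],body]; an unrestricted iterator (third component "OM")
-- is skipped, leaving the body unchanged.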
procedure ordered_free_vars(node); -- find the free variables in a tree, in order of occurrence (main entry)
ordrd_free_vars := []; ordered_free_vars_in(node,[]); return ordrd_free_vars;
-- use the recursive workhorse and a global variable
end ordered_free_vars;
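-- illustrative example (indicative only; variables are in internal capitalized form):
-- for the parse tree of "x + f(y,x)" this returns ["X","Y"]; the applied function symbol
-- is skipped, and a repeated occurrence of a variable is not listed twice.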
procedure ordered_free_vars_in(node,bound_vars); -- find the free variables in a tree (recursive workhorse)
if is_string(node) then
if node notin bound_vars and node /= "OM" and node /= "_nullset" and node notin ordrd_free_vars
and node notin special_set_names then ordrd_free_vars with:= node; end if;
return;
end if;
case (ah := abbreviated_headers(node(1)))
when "and","or","==","+","-","{-}","in","notin","/==","=","/=","[]","[-]","{.}","itr","Etr","incs","incin","imp","*","->","not","null" => -- ordinary operators
for sn in node(2..) loop ordered_free_vars_in(sn,bound_vars); end loop;
when "arb","range","domain" => -- ordinary operators
for sn in node(2..) loop ordered_free_vars_in(sn,bound_vars); end loop;
when "()" => -- this is the case of functional and predicate application; the second variable is a reserved symbol, not a set
for sn in node(3..) loop ordered_free_vars_in(sn,bound_vars); end loop;
when "{}","{/}","EX","ALL" =>
bound_vars +:= (fbv := find_bound_vars(node)); -- setformer or quantifier; note the bound variables
--printy(["ordered_free_vars_in: ",node," bound vars in node: ",fbv]);
for sn in node(2..) loop ordered_free_vars_in(sn,bound_vars); end loop; -- collect free variables in args
when "@" => -- functional application
for sn in node(2..) loop ordered_free_vars_in(sn,bound_vars); end loop; -- collect free variables in args
otherwise => -- additional infix and prefix operators, including if-expressions
for sn in node(2..) loop ordered_free_vars_in(sn,bound_vars); end loop; -- collect free variables in args
end case;
end ordered_free_vars_in;
procedure remove_arguments(node,fcn_list); -- remove argument lists from occurrences of the listed functions (main entry)
ordrd_free_vars := [];
--printy(["remove_arguments: ",unparse(node)]);
return remove_arguments_in(node,fcn_list,[]); -- use the recursive workhorse
end remove_arguments;
procedure remove_arguments_in(node,fcn_list,bound_vars); -- remove argument lists from occurrences of the listed functions (recursive workhorse)
if is_string(node) then return node; end if;
case (ah := abbreviated_headers(n1 := node(1)))
when "and","or","==","+","-","{-}","in","notin","/==","=","/=","[]","[-]","{.}","itr","Etr","incs","incin","imp","*","->","not","null" => -- ordinary operators
return [n1] + [remove_arguments_in(sn,fcn_list,bound_vars): sn in node(2..)];
when "arb","range","domain" => -- ordinary operators
return [n1] + [remove_arguments_in(sn,fcn_list,bound_vars): sn in node(2..)];
when "()" => -- this is the case of functional and predicate application
if (n2 := node(2)) in fcn_list then return n2; end if;
return [n1,n2] + [remove_arguments_in(sn,fcn_list,bound_vars): sn in node(3..)];
when "{}","{/}","EX","ALL" =>
bound_vars +:= find_bound_vars(node);
return [n1] + [remove_arguments_in(sn,fcn_list,bound_vars): sn in node(2..)];
when "@" => -- functional application
return [n1] + [remove_arguments_in(sn,fcn_list,bound_vars): sn in node(2..)];
otherwise => -- additional infix and prefix operators, including if-expressions
return [n1] + [remove_arguments_in(sn,fcn_list,bound_vars): sn in node(2..)];
end case;
end remove_arguments_in;
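-- illustrative example (indicative only): applied to the parse tree of "f(x) + g(y)" with
-- fcn_list containing "F", remove_arguments replaces the application node for f by the bare
-- symbol "F", yielding the tree of "F + g(y)"; the application of g is left intact.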
procedure symbol_occurences(tree,symbol_list);
-- finds list of free occurrences of symbols in the indicated symbol list, and returns the list of such nodes
list_of_symbol_occurences := []; -- the symbol occurrences are returned as pairs
symbol_occurences_in(tree,symbol_list,{}); -- use the recursive workhorse and a global variable
return list_of_symbol_occurences;
end symbol_occurences;
procedure symbol_occurences_in(node,symbol_list,bound_vars);
-- finds list of free occurrences of symbols in the indicated symbol list (recursive workhorse)
if is_string(node) and node in symbol_list then
if node notin bound_vars and node /= "OM" and node /= "_nullset" and node notin list_of_symbol_occurences
and node notin special_set_names then
list_of_symbol_occurences with:= [bound_vars,node];
end if;
return;
end if;
case (ah := abbreviated_headers(node(1)))
when "and","or","==","+","-","{-}","in","notin","/==","=","/=","[]","[-]","{.}","itr","Etr","incs","incin","imp","*","->","not","null" => -- ordinary operators
for sn in node(2..) loop symbol_occurences_in(sn,symbol_list,bound_vars); end loop;
when "arb","range","domain" => -- ordinary operators
for sn in node(2..) loop symbol_occurences_in(sn,symbol_list,bound_vars); end loop;
when "()" => -- this is the case of functional and predicate application; the second variable is a reserved symbol, not a set
--printy(["symbol_occurences_in: ",node," ",symbol_list]);
if (n2 := node(2)) in symbol_list then
list_of_symbol_occurences with:= [bound_vars,n2,node(3)(2..)];
end if;
for sn in node(3..) loop symbol_occurences_in(sn,symbol_list,bound_vars); end loop;
when "{}","{/}","EX","ALL" =>
bound_vars +:= {x: x in find_bound_vars(node)}; -- setformer or quantifier; note the bound variables
for sn in node(2..) loop symbol_occurences_in(sn,symbol_list,bound_vars); end loop; -- collect free variables in args
when "@" => -- functional application
for sn in node(2..) loop symbol_occurences_in(sn,symbol_list,bound_vars); end loop; -- collect free variables in args
otherwise => -- additional infix and prefix operators, including if-expressions
for sn in node(2..) loop symbol_occurences_in(sn,symbol_list,bound_vars); end loop; -- collect free variables in args
end case;
end symbol_occurences_in;
procedure free_vars_and_fcns(thm); -- get all the free variables and functions of a theorem
[fvs,fcns] := find_free_vars_and_fcns(parse_expr(thm + ";"));
return fvs + fcns;
end free_vars_and_fcns;
procedure fully_quantified(theory_nm,thm); -- construct the fully quantified form of a theorem in a theory
ancestor_theories := [theory_nm]; cur_th := theory_nm; -- construct the chain of ancestor theories
while (cur_th := parent_of_theory(cur_th)) /= OM loop -- of the theory containing the theorem
ancestor_theories with:= cur_th;
end loop;
dont_quantify := {} +/ [def_in_theory(thry)?{}: thry in ancestor_theories];
-- all the constants of the theory containing the theorem to be applied, and the ancestors of that theory
freevs := [v: v in freevars_of_theorem(theory_nm,thm) | v notin dont_quantify];
-- get all the free variables of a theorem statement (in string form) given its theory
return if freevs = [] then thm else "(FORALL " + join(freevs,",") + " | " + thm + ")" end if;
end fully_quantified;
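-- illustrative example: if the remaining quantifiable free variables are X and Y, the string
-- returned is "(FORALL X,Y | " + thm + ")"; if none remain, thm is returned unchanged.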
procedure fully_quantified_external(theory_nm,thm);
-- construct the fully quantified form of a theorem in an external theory
-- this is like the preceding procedure, but it conditions the quantified result with an 'In_domain' clause
ancestor_theories := [theory_nm]; cur_th := theory_nm; -- construct the chain of ancestor theories
while (cur_th := parent_of_theory(cur_th)) /= OM loop -- of the theory containing the theorem
ancestor_theories with:= cur_th;
end loop;
dont_quantify := {} +/ [def_in_theory(thry)?{}: thry in ancestor_theories];
-- all the constants of the theory containing the theorem to be applied, and the ancestors of that theory
freevs := [v: v in freevars_of_theorem(theory_nm,thm) | v notin dont_quantify];
-- get all the free variables of a theorem statement (in string form) given its theory
domain_membership_clause := if #freevs > 1 then "(" else "" end if + join(["In_domain(" + v + ")": v in freevs]," & ")
+ if #freevs > 1 then ")" else "" end if + " ¥imp (";
-- + if #freevs > 1 then ")" else "" end if + " •imp ("; -- Mac version
return if freevs = [] then thm else "(FORALL " + join(freevs,",") + " | " + domain_membership_clause + thm + "))" end if;
end fully_quantified_external;
procedure freevars_of_theorem(theory_nm,thm); -- find the quantifiable free variables of a theorem, given theory
fv := ordered_free_vars(parsed_thm := parse_expr(thm + ";")(2)); -- get the free variables of the theorem
--printy(["def_in_theory(theory_nm): ",theory_nm," ",def_in_theory(theory_nm)]);
assumed_symbols_of_theory := {};
for fcn_with_args in assumps_and_consts_of_theory(theory_nm)(1) loop
symb_part := break(fcn_with_args,"("); assumed_symbols_of_theory with:= case_change(symb_part,"lu");
end loop;
--printy(["assumed_symbols_of_theory: ",assumed_symbols_of_theory]);
return [x: x in fv | x notin (def_in_theory(theory_nm)?{}) and x notin assumed_symbols_of_theory];
-- return the free variables, eliminating defined constants, outputs, and assumed symbols
end freevars_of_theorem;
procedure not_all_alph(stg); span(stg,"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_¥•"); return stg /= ""; end not_all_alph;
-- ***************************************************************************************
-- ************* Utilities for statistical analysis of the proof scenarios ***************
-- ***************************************************************************************
procedure get_hints(proofno1,proofno2); -- examine the hints which occur in a given range of proofs and report statistics
if digested_proof_handle = OM then -- read the full tuple of digested proofs if they have not already been read
init_logic_syntax_analysis(); -- obligatory initialization
digested_proof_handle ?:= open(user_prefix + "digested_proof_file","TEXT-IN");
reada(digested_proof_handle,digested_proofs); --printy([#digested_proofs]);
end if;
counts := {};
for proofno in [proofno1..proofno2],[hint,stat] in digested_proofs(proofno)(2..) loop
span(hint," \t"); match(hint,"Proof+:"); match(hint,"Proof:"); span(hint," \t"); rspan(hint," \t"); num := rspan(hint,"0123456789ax_"); nh := #hint;
if num /= "" and nh > 0 and hint(nh) = "T" then hint := "Thm_citation";
elseif num /= ""and nh > 4 and hint(nh - 3..nh) = "Stat" then hint := "Stat_Instance";
elseif nh > 5 and hint(nh - 4..nh) = "Ax_ch" then hint := "Ax_ch";
elseif nh > 6 and hint(nh - 5..nh) = "Assump" then hint := "Theory_Assumption";
elseif nh > 11 and hint(nh - 10..nh) = "KBAlgebraic" then hint := "KBAlgebraic";
elseif nh > 5 and hint(1..5) = "APPLY" then hint := "Theory_Application";
elseif hint(nh) = "." then hint := "Theorem_cite_in_theory";
elseif nh > 0 and hint(nh) = ")" then rbreak(hint,"("); rmatch(hint,"(");
elseif nh > 0 and hint(1) = "(" then break(hint,")"); match(hint,")"); end if;
counts(hint) := (counts(hint)?0) + 1;
end loop;
tup := merge_sort([[y,x]: [x,y] in counts]); for j in [#tup,#tup - 1..1] loop printy([tup(j)]); end loop;
end get_hints;
procedure view_theorem_citations(proofno1,proofno2); -- count the number of theorem citations in a given range, and print them
if digested_proof_handle = OM then -- read the full tuple of digested proofs if they have not already been read
init_logic_syntax_analysis(); -- obligatory initialization
digested_proof_handle ?:= open(user_prefix + "digested_proof_file","TEXT-IN");
reada(digested_proof_handle,digested_proofs); --printy([#digested_proofs]);
end if;
if theorem_map_handle = OM then -- read the theorem_map file if it has not already been read
init_logic_syntax_analysis(); -- obligatory initialization
theorem_map_handle ?:= open("theorem_map_file","TEXT-IN");
reada(theorem_map_handle,theorem_map); --printy([#digested_proofs]);
end if;
count := 0;
for proofno in [proofno1..proofno2],[hint,stat] in digested_proofs(proofno)(2..) loop
span(hint," \t"); match(hint,"Proof+:"); match(hint,"Proof:"); span(hint," \t"); rspan(hint," \t"); num := rspan(hint,"0123456789ax_"); nh := #hint;
if num /= "" and nh > 0 and hint(nh) = "T" then hint := "T" + num;
printy([proofno," ",hint," ",theorem_map(hint),"\n -- ",stat]); count +:= 1;
end if;
end loop;
printy(["Number of citations: ",count]);
end view_theorem_citations;
procedure inspect_proofs(tup_of_numbers); -- inspect a specified list of proofs, from digested_proof_file
reada(handl := open(user_prefix + "digested_proof_file","TEXT-IN"),dpf);
printy(["Number of proofs in file of digested proofs: ",#dpf]);
for pfno in tup_of_numbers loop
if (pf := dpf(pfno)) = OM then
printy(["\nProof ",pfno," not found"]); continue;
end if;
printy(["\nProof ",pfno,":"]);
-- bypass the prefixed proof identifier
for [hint,prbody] in pf(2..) loop printy([(hint + 40 * " ")(1..40),prbody]); end loop; -- list the proof, with its hints
end loop;
close(handl);
end inspect_proofs;
-- ***********************************************************************
-- ********** Code for automated optimization of proof scenarios *********
-- ***********************************************************************
procedure search_for_all_w_extra(stat_tup,extra_clause); -- searches for all of the critical items in a range, with a supplementary clause
extra_conj := extra_clause; -- note extra clause
return search_for_all_gen(stat_tup); -- call general form
end search_for_all_w_extra;
procedure search_for_all(stat_tup); -- searches for all of the critical items in a range, no supplementary clause
extra_conj := OM; -- note that there is no extra clause
return search_for_all_gen(stat_tup); -- call general form
end search_for_all;
procedure search_for_all_gen(stat_tup); -- searches for all of the critical items in a range, general form
-- here stat_tup should be the full context preceding a designated statement
-- we apply a test which is passed only if we have all the critical items,
-- i.e. the conjunction of the known criticals with the tuple of line numbers passed to the
-- test_range function seen below yields a proof of the last line
best_time_so_far := 1000000; -- for determining optimal time
statement_tuple_being_searched := stat_tup; -- globalize, for use in other procedures of this group
tup := [1..#stat_tup]; -- the tuple of line numbers in the statement list passed in
known_criticals := {}; -- lines already known to be critical
nt := #tup; -- length of the tuple
search_past := 0; -- start by searching the entire tuple, known to pass the test
examine_tup := tup; -- start by examining the entire tuple
prior_critical := 0;
debug_count := 150;
while (critical := search_in(examine_tup)) < #examine_tup loop
-- this delivers a critical value, or nt if that (or none) is critical
-- the critical value found is the first in the tuple being examined
--print("<BR>critical: ",critical," ",examine_tup);
if critical < (nexat := #examine_tup - 1) and critical > 0 then
known_criticals with:= (prior_critical +:= critical);
examine_tup := examine_tup(critical + 1..);
elseif not test_range([]) then -- the last element is vital
known_criticals with:= (prior_critical + nexat);
exit; -- since in this case there is no more to be searched
else -- the last element is not vital
exit;
end if;
if(debug_count -:= 1) < 0 then print("<P>looping"); stop; end if;
end loop;
if not test_range([x + prior_critical: x in examine_tup]) then known_criticals with:= 1; end if;
--print("<P>known_criticals: ",known_criticals);
return merge_sort(known_criticals);
end search_for_all_gen;
procedure search_in(tup); -- searches a tuple for the start of a critical range,
-- located by a subfunction test_range
start_point := (nt := #tup) - 1; -- start search with smallest tuple suffix
too_large := OM; -- the smallest start point which is too large
while start_point > 1 loop -- expanding search; keep doubling the range
if test_range(tup(start_point..)) then exit; end if;
too_large := start_point; -- the current range starting point is too large
start_point -:= (nt - start_point);
end loop;
start_point max:= 1; -- start of the range that must be binary-searched
hi := too_large?(nt - 1); -- value that is probably too large
while hi - start_point > 1 loop -- standard binary search; keep halving the range
mid := (start_point + hi) /2;
if not test_range(tup(mid..)) then
hi := mid; -- the mid is too high
else
start_point := mid; -- the mid is not too high
end if;
end loop;
return if not test_range(tup(hi..)) then start_point else hi end if;
end search_in;
procedure test_range(tup); -- tests a range in a tuple to see if it is large enough
statements_to_use := [statement_tuple_being_searched(lix): lix in merge_sort(known_criticals) + tup];
conj := form_elem_conj("",statements_to_use); -- use all the statements in the collection passed,
starting_cycles := opcode_count(); -- note start time for this verification attempt
-- inverting the last
--print("<BR>conj in test_range: ",conj);
test_conj(if extra_conj /= OM then "(" + extra_conj + ") and (" + conj + ")" else conj end if);
-- test this (possibly extended) conjunct for satisfiability
if tested_ok then best_time_so_far min:= ((opcode_count() - starting_cycles) / oc_per_ms); end if;
-- keep track of best successful step time
return tested_ok;
-- return (#known_criticals + #[j: x = tup(j) | x = 1]) >= tot_num_criticals;
end test_range;
-- ***********************************************************************
-- ******************* Miscellaneous input utilities *********************
-- ***********************************************************************
procedure read_range(stg); -- convert to list of proofs to be printed; force range indicator to legal form and return it
stg := suppress_chars(stg," \t");
good := span(stg,",.0123456789");
if stg /= "" then return OM; end if;
pieces := [x: x in segregate(good,".,") | x /= ""];
if pieces = [] then pieces := ["1"]; end if; -- be prepared to handle empty pieces
if pieces(1)(1) in ".," then pieces := pieces(2..); end if; -- no separators at start
if pieces(np := #pieces)(1) in ".," then pieces := pieces(1..np - 1); end if; -- or at end
pieces := "" +/ [if x(1) = "." then ".." elseif x(1) = "," then "," else x end if: x in pieces];
pieces := [first_last(x): x in breakup("" +/ pieces,",")];
return [] +/ pieces;
end read_range;
procedure first_last(stg); -- reduce dotted ranges to first and last elements, and read
if "." notin stg then return [unstr(stg)]; end if;
lend := break(stg,"."); rend := rbreak(stg,".");
return [unstr(lend)..unstr(rend)];
end first_last;
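-- illustrative examples: read_range("1,3..5") returns [1,3,4,5] and read_range("2..4")
-- returns [2,3,4]; after blanks are suppressed, any character other than digits, commas,
-- and dots yields OM.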
end verifier_top_level;
-- *****************************************************************
-- ************ Test routines for 'Proof by Structure' *************
-- *****************************************************************
program test; -- tests for proof_by_structure
use proof_by_structure, string_utility_pak,parser,logic_syntax_analysis_pak,logic_syntax_analysis_pak2;
init_logic_syntax_analysis(); -- initialize for logic syntax-tree operations
-- test_basic_descriptor_actions(); -- test effect of basic operators on descriptors
-- test_compound_expressions(); -- test effect of compound expressions on descriptors
test_descriptor_extraction(); -- test the top level descriptor extraction routine
procedure test_descriptor_extraction(); -- test the top level descriptor extraction routine
print("\ntest_extract_relevant_descriptors for: ",stg := "(arb(s) in s) and Ord(s)");
test_extract_relevant_descriptors(stg);
print("\ntest_extract_relevant_descriptors for: ",stg := "(enum(x,t) in s) and Ord(s)");
test_extract_relevant_descriptors(stg);
print("\ntest_extract_relevant_descriptors for: ",stg := "(#t in #r) and Ord(#r)");
test_extract_relevant_descriptors(stg);
print("\ntest_extract_relevant_descriptors for: ",stg := "(x •PLUS y) in Za");
test_extract_relevant_descriptors(stg);
stop; -- note: the test calls below this point are currently disabled by this 'stop'
print("\ntest_extract_relevant_descriptors for: ",stg := "Countable(u) and Finite(v) and h = {[x + y,e(x,y)]: x in u, y in v | P(x,y)}");
test_extract_relevant_descriptors(stg);
print("\ntest_extract_relevant_descriptors for: ",stg := "Countable(u) and Countable(v) and h = {[x + y,e(x,y)]: x in u, y in v | P(x,y)}");
test_extract_relevant_descriptors(stg);
print("\ntest_extract_relevant_descriptors for: ",stg := "Finite(u) and Finite(v) and h = {[x + y,e(x,y)]: x in u, y •incin v | P(x,y)}");
test_extract_relevant_descriptors(stg);
print("\ntest_extract_relevant_descriptors for: ",stg := "Finite(u) and Countable(v) and h = {[x + y,e(x,y)]: x •incin u, y •incin v | P(x,y)}");
test_extract_relevant_descriptors(stg);
print("\ntest_extract_relevant_descriptors for: ",stg := "(u /= 0) and h = {[x,e(x)]: x in u}");
test_extract_relevant_descriptors(stg);
print("\ntest_extract_relevant_descriptors for: ",stg := "(u /= 0) and h = {[x,e(x)]: x in u | P(x)}");
test_extract_relevant_descriptors(stg);
print("\ntest_extract_relevant_descriptors for: ",stg := "(not Finite(u)) and h = {[x,e(x)]: x in u}");
test_extract_relevant_descriptors(stg);
print("\ntest_extract_relevant_descriptors for: ",stg := "(not Finite(u)) and h = {[x,e(x)]: x in u | P(x)}");
test_extract_relevant_descriptors(stg);
print("\ntest_extract_relevant_descriptors for: ",stg := "(u /= 0) and (not(v = 0)) and h = {[x + y,e(x,y)]: x in u, y in v}");
test_extract_relevant_descriptors(stg);
print("\ntest_extract_relevant_descriptors for: ",stg := "(u /= 0) and (not(v = 0)) and h = {[x + y,e(x,y)]: x in u, y in v}");
test_extract_relevant_descriptors(stg);
print("\ntest_extract_relevant_descriptors for: ",stg := "(u /= 0) and h = {[x + y,e(x,y)]: x in u, y in v}");
test_extract_relevant_descriptors(stg);
print("\ntest_extract_relevant_descriptors for: ",stg := "(not(v = 0)) and h = {[x + y,e(x,y)]: x in u, y in v}");
test_extract_relevant_descriptors(stg);
print("\ntest_extract_relevant_descriptors for: ",stg := "(u /= 0) and (not Finite(v)) and h = {[x + y,e(x,y)]: x in u, y in v}");
test_extract_relevant_descriptors(stg);
print("\ntest_extract_relevant_descriptors for: ",stg := "(not Finite(u)) and (not Finite(v)) and h = {[x + y,e(x,y)]: x in u, y in v}");
test_extract_relevant_descriptors(stg);
print("\ntest_extract_relevant_descriptors for: ",stg := "(u /= 0) and (not Finite(v)) and h = {[x + y,e(x,y)]: x in u, y in v | P(x,y)}");
test_extract_relevant_descriptors(stg);
print("\ntest_extract_relevant_descriptors for: ",stg := "(not Finite(u)) and (not Finite(v)) and h = {[x + y,e(x,y)]: x in u, y in v | P(x,y)}");
test_extract_relevant_descriptors(stg);
print("\ntest_extract_relevant_descriptors for: ",stg := "Finite(u) and Finite(v) and h = {[x + y,e(x,y)]: x in u, y in v | P(x,y)}");
test_extract_relevant_descriptors(stg);
print("\ntest_extract_relevant_descriptors for: ",stg := "Finite(x) and ((y + w) in u) and (not Finite(y)) and (Svm(y)) and (one_1_map(y)) and (Is_map(w)) and (y •incin w) and (u •incin Za) and h = {[x,e(x)]: x in u}");
test_extract_relevant_descriptors(stg);
print("\ntest_extract_relevant_descriptors for: ",stg := "Finite(x) and (not y = 0) and (Svm(y)) and (one_1_map(y)) and (Is_map(w)) and (y •incin w) and (u •incin Za) and h = {[x,e(x)]: x in u}");
test_extract_relevant_descriptors(stg);
print("\ntest_extract_relevant_descriptors for: ",stg := "Finite(x) and (y /= 0) and (Svm(y)) and (one_1_map(y)) and (Is_map(w)) and (y •incin w) and (u •incin Za) and h = {[x,e(x)]: x in u}");
test_extract_relevant_descriptors(stg);
print("\ntest_extract_relevant_descriptors for: ",stg := "(u •incin Za) and h = {[x •PLUS y,e(x,y)]: x in u, y in u - v}");
test_extract_relevant_descriptors(stg);
print("\ntest_extract_relevant_descriptors for: ",stg := "(u •incin Za) and (h = x •PLUS y) and (x in u) and (y in u - v)");
test_extract_relevant_descriptors(stg);
end test_descriptor_extraction;
procedure test_extract_relevant_descriptors(stg);
tree := parse_expr(stg + ";"); print(get_assertions_from_descriptors(erd := extract_relevant_descriptors(tree(2)))); -- ," ",erd
end test_extract_relevant_descriptors;
procedure test_compound_expressions(); -- test effect of compound expressions on descriptors
vars_to_descriptors := {["S",{{"ZA"}}]};
print("descriptor for expression ",estg := "{if a(x) then x •PLUS y elseif b(x) then x •TIMES y else x end if: x in s, y in s}"," with variable descriptors ",
vars_to_descriptors," is: \n",expression_descriptor(estg,vars_to_descriptors)); -- calculate descriptors of expression
vars_to_descriptors := {["S",{{"ZA"}}]};
print("descriptor for expression ",estg := "{if a(x) then x •PLUS y elseif b(x) then x •TIMES y else x + y end if: x in s, y in s}"," with variable descriptors ",
vars_to_descriptors," is: \n",expression_descriptor(estg,vars_to_descriptors)); -- calculate descriptors of expression
vars_to_descriptors := {["X",{"FIN","NONNULL"}],["Y",{"FIN"}]};
print("descriptor for expression ",estg := "if a then x else y end if"," with variable descriptors ",
vars_to_descriptors," is: \n",expression_descriptor(estg,vars_to_descriptors)); -- calculate descriptors of expression
vars_to_descriptors := {["X",{"FIN","NONNULL"}],["Y",{"FIN","NONNULL"}],["Z",{"FIN"}]};
print("descriptor for expression ",estg := "if a then x elseif b then y else z end if"," with variable descriptors ",
vars_to_descriptors," is: \n",expression_descriptor(estg,vars_to_descriptors)); -- calculate descriptors of expression
vars_to_descriptors := {["X",{"FIN","NONNULL"}],["Y",{"FIN","NONNULL"}],["Z",{"FIN","NONNULL"}]};
print("descriptor for expression ",estg := "if a then x elseif b then y else z end if"," with variable descriptors ",
vars_to_descriptors," is: \n",expression_descriptor(estg,vars_to_descriptors)); -- calculate descriptors of expression
vars_to_descriptors := {["X",{"FIN","NONNULL",{"ZA"}}]};
print("descriptor info ",vars_to_descriptors," translates as ", get_assertions_from_descriptors(vars_to_descriptors));
vars_to_descriptors := {["X",{"INFIN","NONNULL",{"ZA"}}]};
print("descriptor info ",vars_to_descriptors," translates as ", get_assertions_from_descriptors(vars_to_descriptors));
vars_to_descriptors := {["U", {{"ZA"}}]}; -- a set of integers
print("\ndescriptor for expression ",estg := "{[x •PLUS y,e(x,y)]: x in u, y in u - v}"," with variable descriptors ",vars_to_descriptors,
" is: \n",expression_descriptor(estg,vars_to_descriptors),"\n\n"); -- calculate
vars_to_descriptors := {["U",{{"ZA"}}]}; -- a finite, non-null, set of sets of reals
print("\ndescriptor for expression ",estg := "{[x,e(x)]: x in u}"," with variable descriptors ",vars_to_descriptors,
" is: \n",expression_descriptor(estg,vars_to_descriptors),"\n\n"); -- calculate
vars_to_descriptors := {["X",{"one_1_map",{["ZA","Re"]}}]};
print("descriptor info ",vars_to_descriptors," translates as ", get_assertions_from_descriptors(vars_to_descriptors));
vars_to_descriptors := {["X",{["ZA","Re"]}],["Y",{"ZA"}]};
print("descriptor info ",vars_to_descriptors," translates as ", get_assertions_from_descriptors(vars_to_descriptors));
vars_to_descriptors := {["M",{"ZA"}],["N",{"ZA"}]};
print("descriptor for expression ",estg := "m •PLUS n"," with variable descriptors ",
vars_to_descriptors," is: \n",expression_descriptor(estg,vars_to_descriptors)); -- calculate descriptors of expression
vars_to_descriptors := {["S",{"FIN",{"ZA"}}],["T",{"FIN",{"ZA"}}]};
print("descriptor for expression ",estg := "{m •PLUS n: m in s, n in t}"," with variable descriptors ",
vars_to_descriptors," is: \n",expression_descriptor(estg,vars_to_descriptors)); -- calculate descriptors of expression
vars_to_descriptors := {["S",{"FIN",{"Re"}}]};
print("descriptor for expression ",estg := "{R_rev(x): x in s}"," with variable descriptors ",
vars_to_descriptors," is: \n",expression_descriptor(estg,vars_to_descriptors)); -- calculate descriptors of expression
vars_to_descriptors := {["S",{"FIN",{"Re"}}]};
print("descriptor for expression ",estg := "{abs(x): x in s}"," with variable descriptors ",
vars_to_descriptors," is: \n",expression_descriptor(estg,vars_to_descriptors)); -- calculate descriptors of expression
vars_to_descriptors := {["F",{"NONNULL",{["ZA",["Re","Re"]]}}]};
print("descriptor for expression ",estg := "arb(range(f))"," with variable descriptors ",
vars_to_descriptors," is: \n",expression_descriptor(estg,vars_to_descriptors)); -- calculate descriptors of expression
vars_to_descriptors := {["F",{"NONNULL",{["ZA","Re"]}}]};
print("descriptor for expression ",estg := "arb(domain(f))"," with variable descriptors ",
vars_to_descriptors," is: \n",expression_descriptor(estg,vars_to_descriptors)); -- calculate descriptors of expression
vars_to_descriptors := {["F",{"NONNULL",{["ZA",["Re","Re"]]}}]};
print("descriptor for expression ",estg := "car(arb(range(f)))"," with variable descriptors ",
vars_to_descriptors," is: \n",expression_descriptor(estg,vars_to_descriptors)); -- calculate descriptors of expression
vars_to_descriptors := {["F",{"NONNULL",{["ZA",["Re","Re"]]}}]};
print("descriptor for expression ",estg := "cdr(arb(range(f)))"," with variable descriptors ",
vars_to_descriptors," is: \n",expression_descriptor(estg,vars_to_descriptors)); -- calculate descriptors of expression
vars_to_descriptors := {["S",{Fin,{"Re"}}],["T",{Fin,{"Re"}}],["R",{}],["U",{Fin,{{"Re"}}}]};
-- variables should have internal capitalized form
print("descriptor for expression ",estg," with variable descriptors ",vars_to_descriptors," is: \n",
expression_descriptor(estg,vars_to_descriptors)); -- calculate descriptors of expression
print("descriptor for expression ",estg := "Un({pow(s + t) - #r,u}) * v"," with variable descriptors ",
vars_to_descriptors," is: \n",expression_descriptor(estg,vars_to_descriptors)); -- calculate descriptors of expression
print("descriptor for expression ",estg := "arb(pow(s + t))"," with variable descriptors ",
vars_to_descriptors," is: \n",expression_descriptor(estg,vars_to_descriptors)); -- calculate descriptors of expression
-- these expressions have involved pow, Un, +, *, -, #. Still to do: range, domain, pair, car, cdr.
-- test setformer expressions over finite, non-null sets
vars_to_descriptors := {["S",{"FIN","NONNULL",{{"Re"}}}]}; -- a finite, non-null, set of sets of reals
print("descriptor for expression ",estg := "{x * t: x in s}"," with variable descriptors ",vars_to_descriptors,
" is: \n",expression_descriptor(estg,vars_to_descriptors)); -- calculate
vars_to_descriptors := {["S",{"FIN","NONNULL",{"Re"}}]}; -- a finite, non-null, set of reals
print("descriptor for expression ",estg := "{x * t: x •incin s}"," with variable descriptors ",
vars_to_descriptors," is: \n",expression_descriptor(estg,vars_to_descriptors)); --
vars_to_descriptors := {["S",{"FIN","NONNULL",{{"Re"}}}],["T",{"NONNULL",{{"Re"}}}]};
-- two non-null sets of sets of reals, one finite
print("descriptor for expression ",estg := "arb({x * y: x in s,y in t})"," with variable descriptors ",
vars_to_descriptors," is: \n",expression_descriptor(estg,vars_to_descriptors)); --
-- definition of the sum of two integers (as the cardinality of a tagged disjoint union)
vars_to_descriptors := {["S",{"FIN"}],["T",{"FIN"}]};
print("descriptor for expression ",estg := "#({[x,0]: x in s} + {[y,1]: y in t})"," with variable descriptors ",
vars_to_descriptors," is: \n",expression_descriptor(estg,vars_to_descriptors)); --
-- definition of the product of two integers (as the cardinality of a Cartesian product)
vars_to_descriptors := {["S",{"FIN"}],["T",{"FIN"}]};
print("descriptor for expression ",estg := "#({[x,y]: x in s, y in t})"," with variable descriptors ",
vars_to_descriptors," is: \n",expression_descriptor(estg,vars_to_descriptors)); --
end test_compound_expressions;
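-- A note on the descriptor convention exercised above (inferred from these tests, not a
-- definitive specification): a descriptor is a set mixing atomic flags such as "FIN" (finite)
-- and "NONNULL" with at most one nested item giving the type of the set's elements, e.g.
--    {"FIN",{"Re"}}             -- a finite set of reals
--    {"FIN","NONNULL",{{"Re"}}} -- a finite, non-null set of sets of reals
--    {"NONNULL",{["ZA","Re"]}}  -- a non-null map (set of pairs) from integers to reals
-- On this reading, a hypothetical further test for a finite map from integers to
-- integer-real pairs would be written:
--    vars_to_descriptors := {["F",{"FIN",{["ZA",["ZA","Re"]]}}]};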
procedure test_basic_descriptor_actions();
print("powerset of a finite set of reals: ",descriptor_action_of_pow({"FIN",{"Re"}})); --
print("Union set of a finite set of reals: ",descriptor_action_of_Un({"FIN",{"Re"}})); --
print("Union set of a finite set of sets of reals: ",descriptor_action_of_Un({"FIN",{{"Re"}}})); --
print("Union of two sets of reals, one finite: ",descriptor_action_of_union({"FIN",{"Re"}},{{"Re"}})); --
print("Union of two sets of reals, both finite: ",descriptor_action_of_union({"FIN",{"Re"}},{"FIN",{"Re"}})); --
print("Intersection of a finite set of reals with another set: ",descriptor_action_of_intersection({"FIN",{"Re"}},{})); --
print("Difference of a set of reals and another set: ",descriptor_action_of_difference({"FIN",{"Re"}},{"Re"})); --
print("Range of a finite sequence of reals: ",descriptor_action_of_range({"FIN",{["ZA","Re"]}})); --
print("Domain of a finite sequence of reals: ",descriptor_action_of_domain({"FIN",{["ZA","Re"]}})); --
print("Pair of integer and real: ",descriptor_action_of_pair({"ZA"},{"Re"})); --
print("car of pair of integer and real: ",descriptor_action_of_car({["ZA", "Re"]})); --
print("cdr of pair of integer and real: ",descriptor_action_of_cdr({["ZA", "Re"]})); --
print("arb of a non-null real-valued function: ",descriptor_action_of_arb({"NONNULL",{["OM","Re"]}})); --
print("arb of a real-valued function: ",descriptor_action_of_arb({{["OM","Re"]}})); --
print("count of a non-null finite set of reals: ",descriptor_action_of_count({"FIN","NONNULL",{"Re"}})); --
print("union of a finite set of integers: ",descriptor_action_of_Un({"FIN",{"ZA"}}));
print("cartesian product of two finite sets of integers: ",descriptor_action_of_cartprod({"FIN",{"ZA"}},{"FIN",{"ZA"}}));
print("the inverse of a map: ",descriptor_action_of_func_inv({"FIN",{["ZA","Re"]}}));
print("functional product of two maps: ",descriptor_action_of_funcprod({"FIN",{["ZA","Re"]}},{"FIN",{["Re","CARD"]}}));
-- print("functional application of a map to a set: ",descriptor_action_of_func_app({"FIN",{"ZA"}})); -- questionable?????
end test_basic_descriptor_actions;
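-- Illustrative expectations for the actions above (a sketch based on the descriptor
-- convention, not captured program output): pow should add one set level while preserving
-- finiteness, and Un should strip one level without guaranteeing finiteness, since the
-- member sets of a finite collection need not themselves be finite. For example:
--    descriptor_action_of_pow({"FIN",{"Re"}})   -- expect a finite set of sets of reals, {"FIN",{{"Re"}}}
--    descriptor_action_of_Un({"FIN",{{"Re"}}})  -- expect a set of reals, with no "FIN" flag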
end test;
-- ***********************************************************************
-- ************* Test Program collection for Ref top level ***************
-- ***********************************************************************
---> program
program test; -- some verifier tests
use verifier_top_level;
use logic_syntax_analysis_pak,parser,prynter; -- use the top level of the logic verifier collection
print("test");
-- do_tests3(); -- do tests for this package
procedure do_tests3(); -- do tests for this package
init_logic_syntax_analysis(); -- ********** REQUIRED **********
otter_item := "all x ( -(nneg(x)) -> abs(x)=rvz(x) ).";
printy(["otter_to_ref: ",otter_to_ref(otter_item,"some_file.txt")]); -- converts an otter item to SETL syntax
printy(["stopped due to: stop in test"]); stop;
printy(["ordered_free_vars: ",stg := "(FORALL s,t | Finite(s) •imp (h(s,t) = if s = 0 then f0 else g2(h(s - {arb(s)},t),s) end if));",ordered_free_vars(parse_expr(stg)(2))]);
printy(["stopped due to: stop in test"]); stop;
test_check_a_skolem_inf(); printy(["stopped due to: stop in test"]); stop; -- test of check_a_skolem_inf function
test_check_an_apply_inf(); printy(["stopped due to: stop in test"]);stop; -- test of check_an_apply_inf function
conj := "(G = {[x1,F(x1)]: x1 in ss}) and " +
"(not ({CAR(x2): x2 in G} = {CAR(x2): x2 in {[x1,F(x1)]: x1 in S}}));";
--->test_conj
check_an_equals_inf(conj,"conclude",OM,OM,OM);
printy(["stopped due to: sto in test"]); stop;
test_conj(conj);
-- parse_scenario("Diana:Pub:Logic_repository:Defs_w_proofs_modif.pro"); -- parse the Defs_w_proofs file, producing all the files used subsequently
-- parse_Defs_w_proofs(); -- parse a specified Defs_w_proofs file
-- check_proofs(1,4);
-- check_proofs(121,160); -- check ELEM and discharge inferences in given range
inspect_proofs([1,3,666]);
--get_hints(1,40); -- read the hints which occur in a given range of proofs
--view_theorem_citations(1,40); -- count the number of theorem citations in a given range, and print them
--printy([tree_starts(["ast_list", ["ast_of", "IS_MAP", ["ast_list", ["ast_genset", ["ast_enum_tup"]]]]],parze_expr("Is_map({[x,y]: x in s | P(x)});"))]);
--init_logic_syntax_analysis(); printy([parze_expr("Is_svm({[x,e(x)]: x in s | P(x)});")(2)]);
--init_logic_syntax_analysis(); printy([blob_tree(parze_expr("Is_map({[x,e(x)]: x in s | P(x)}]);")(2)));
--init_logic_syntax_analysis(); printy([blob_tree(parze_expr("Is_map({[x,e(x),y]: x in s | P(x)}) •eq Is_map({[x,e(x)]: x in s | P(x)}]);")(2)));
--init_logic_syntax_analysis(); printy([blob_tree(parze_expr("Finite(#s]);")(2)));
--init_logic_syntax_analysis(); printy([blob_tree(parze_expr("{[x,e(x)]: x in s | P(x)} = {[x,e(x),y]: x in s | P(x)}];")(2)));
--printy([blob_tree(parze_expr("not ({[x,e(x)]: x in s | P(x)} = {[x,e(x),y]: x in s | P(x)}]);")(2)));
--printy([blob_tree(parze_expr("[{[x,e(x)]: x in s | P(x)},{[x,e(x),y]: x in s | P(x)}]];")(2)));
--targ_tree := parze_expr("Is_map({[a(x,y,yy),b(x,y,yy)]: x in s, y in t, yy in u | P(x,y,yy)});");
--replacement_map := {["S", "s"], ["T", "t"], ["U", "u"]};
--printy([unparse(substitute(targ_tree,replacement_map))]);
printy(["test_conj: ",test_conj("not(car(b3) = cdr(b3));")]);
printy(["stopped due to: sstop in test"]); stop;
test_pairs := [["(FORALL x in s | x > 0)",1],
-- ["(FORALL x in s, y in t | x > y)",1],
-- ["(FORALL x in s, y | x > y)",1],
-- ["(FORALL x in s, y in OM | x > y)",1],
-- ["(FORALL x, y in t | x > y)",1],
-- ["(FORALL x in OM, y in t | x > y)",1],
--
-- ["(FORALL x in s, y in t | x > y)",2],
-- ["(FORALL x in s, y | x > y)",2],
-- ["(FORALL x in s, y in OM | x > y)",2],
-- ["(FORALL x, y in t | x > y)",2],
-- ["(FORALL x in OM, y in t | x > y)",2],
--
-- ["(FORALL x in s, y in t | x > y)",3],
-- ["(FORALL x in s, y | x > y)",3],
-- ["(FORALL x in s, y in OM | x > y)",3],
-- ["(FORALL x, y in t | x > y)",3],
-- ["(FORALL x in OM, y in t | x > y)",3],
--
-- ["(FORALL x in s | (FORALL y in t, u in w | x > y))",2],
-- ["(FORALL x in s | (FORALL y, u | x > y))",2],
-- ["(FORALL x in s | (FORALL y in OM | x > y))",3],
-- ["(FORALL x | (FORALL y in t | x > y))",2],
-- ["(FORALL x in OM | (FORALL y in t | x > y))",2],
--
-- ["(EXISTS x in s, y in t | x > y)",1],
-- ["(EXISTS x in s, y | x > y)",1],
-- ["(EXISTS x in s, y in OM | x > y)",1],
-- ["(EXISTS x, y in t | x > y)",1],
-- ["(EXISTS x in OM, y in t | x > y)",1],
--
-- ["(EXISTS x in s, y in t | x > y)",2],
-- ["(EXISTS x in s, y | x > y)",2],
-- ["(EXISTS x in s, y in OM | x > y)",2],
-- ["(EXISTS x, y in t | x > y)",2],
-- ["(EXISTS x in OM, y in t | x > y)",2],
--
-- ["(EXISTS x in s, y in t | x > y)",3],
-- ["(EXISTS x in s, y | x > y)",3],
-- ["(EXISTS x in s, y in OM | x > y)",3],
-- ["(EXISTS x, y in t | x > y)",3],
-- ["(EXISTS x in OM, y in t | x > y)",3],
--
-- ["(EXISTS x in s | (FORALL y in t, u in w | x > y))",2],
-- ["(EXISTS x in s | (FORALL y, u | x > y))",2],
-- ["(EXISTS x in s | (FORALL y in OM | x > y))",3],
-- ["(EXISTS x | (FORALL y in t | x > y))",2],
-- ["(EXISTS x in OM | (FORALL y in t | x > y))",2],
--
-- ["(EXISTS x in s | (EXISTS y in t, u in w | x > y))",2],
-- ["(EXISTS x in s | (EXISTS y, u | x > y))",2],
-- ["(EXISTS x in s | (EXISTS y in OM | x > y))",3],
-- ["(EXISTS x | (EXISTS y in t | x > y))",2],
-- ["(EXISTS x in OM | (EXISTS y in t | x > y))",2],
-- ["y in {e(x): x in s | x > 0}",1],
-- ["cos(y) in {e(x): x in s, u in t | x > u}",1],
-- ["cos(y) in {e(x): x in s, u in t | x > u}",2],
-- ["cos(y) in {e(x): x in s, u in t | x > u}",3],
-- ["cos(y) in {e(x): x in s}",1],
-- ["cos(y) in {x in s | x > 0}",1],
--
-- ["y notin {e(x): x in s | x > 0}",1],
-- ["cos(y) notin {e(x): x in s, u in t | x > u}",1],
-- ["cos(y) notin {e(x): x in s, u in t | x > u}",2],
-- ["cos(y) notin {e(x): x in s, u in t | x > u}",3],
-- ["cos(y) notin {e(x): x in s}",1],
-- ["cos(y) notin {x in s | x > 0}",1],
-- ["{e(x): x in s | x > 0} /= {e(x): x in s | x > 1}",1],
-- ["{e(x): x in s | x > 0} /= {e(x): x in s}",1],
-- ["{e(x): x in s} /= {e(x): x in s | x > 1}",1],
--
-- ["{e(x): x in s, y in t | x > y} /= {e(x): x in s, y in t | x > y + 1}",1],
-- ["{e(x): x in s, y in t | x > y} /= {e(x): x in s, y in t}",1],
-- ["{e(x): x in s, y in t} /= {e(x): x in s, y in t | x > y}",1],
--
-- ["{e(x): x in s, y in t | x > y} /= {e(x): x in s, y in t | x > y + 1}",2],
-- ["{e(x): x in s, y in t | x > y} /= {e(x): x in s, y in t}",2],
-- ["{e(x): x in s, y in t} /= {e(x): x in s, y in t | x > y}",2],
--
-- ["{x in s | x > 0} /= {x in s | x > 1}",1],
--
-- ["{e(x): x in s | x > 0} /= {e(x): x in t | x > 1}",1],
--
-- ["a /= {e(x): x in s | x > 1}",1],
-- ["{e(x): x in s | x > 1} /= a + b",1],
-- ["{e(x): x in s | x > 0} /= {}",1],
-- ["{e(x): x in s | x > 0} /= 0",2],
-- ["{} /= {e(x): x in s | x > 0}",1],
-- ["{} /= {e(x): x in s | x > 0}",2],
--
-- ["{e(x): x in s, y in t | x > y} /= {}",1],
-- ["{e(x): x in s, y in t | x > y} /= 0",2],
-- ["{} /= {e(x): x in s, y in t | x > y}",1],
-- ["{} /= {e(x): x in s, y in t | x > y}",2],
-- ["(FORALL x in s, y in t | x > y) & (FORALL u in s, v in t | u + v < u - v)",1],
-- ["(FORALL x in s, y in t | x > y) & (FORALL u in s, v in t | u + v < u - v)",2],
-- ["(FORALL x in s, y in t | x > y) & (FORALL u in s, v in t | u + v < u - v)",3],
-- ["(FORALL x in s, y in t | x > y) & (FORALL u in s, v in t | u + v < u - v)",4],
-- ["(FORALL x in s, y in t | x > y) & (FORALL u in s, v in t | u + v < u - v)",5],
--
-- ["(FORALL x in s, y in t | x > y) & (EXISTS x in s, y in t | x + y < x - y)",1],
-- ["(FORALL x in s, y in t | x > y) & (EXISTS x in s, y in t | x + y < x - y)",2],
-- ["(FORALL x in s, y in t | x > y) & (EXISTS x in s, y in t | x + y < x - y)",3],
-- ["(FORALL x in s, y in t | x > y) & (EXISTS x in s, y in t | x + y < x - y)",4],
-- ["(FORALL x in s, y in t | x > y) & (EXISTS x in s, y in t | x + y < x - y)",5],
-- ["not(FORALL x in OM | (FORALL y in t | x > y))",2],
-- ["(FORALL x in OM | (FORALL y in t | x > y))",2],
-- ["not(EXISTS x in s, y in t | x > y)",1],
-- ["not(cos(y) in {e(x): x in s, u in t | x > u})",1],
-- ["not(cos(y) notin {e(x): x in s, u in t | x > u})",2],
-- ["not({e(x): x in s, y in t | x > y} = {})",1],
-- ["{e(x): x in s | x > 0} = {}",1],
-- ["{e(x): x in s | x > 0} = 0",2],
-- ["{e(x): x in s | x > 0} •incin 0",2],
-- ["0 incs {e(x): x in s | x > 0}",2],
-- ["not ({e(x): x in s | x > 0} incs {e(x): x in s | x > 1})",2],
-- ["not ({e(x): x in s | x > 0} •incin {e(x): x in s | x > 1})",2],
-- ["not(a in {p in f | car(p) in domain(f)})",1],
-- ["not (q in {k in Z | (k •TIMES n) •incin m})",1],
-- ["o notin {x •incin o | Ord(x) and P(x)}",1],
-- ["not(a •incin t)",1],
-- ["not(a incs t)",1],
-- ["not((FORALL x in t | x •incin t) & (FORALL x in t | (FORALL y in t | (x in y or y in x or x = y))))",2],
-- ["(not(FORALL x in t | x •incin t)) or (not(FORALL x in t | (FORALL y in t | (x in y or y in x or x = y))))",2],
-- ["(not(FORALL x in t | x •incin t))",2],
-- ["(EXISTS x in t | (not(x •incin t)))",2],
--
-- ["range(f) /= #t",1],
-- ["{cdr(x): x in f} /= 0",1],
-- ["x in {[car(x),cdr(x)]: x in f}",1],
-- ["((FORALL x in t | x •incin t)) or ((FORALL x in t | (FORALL y in t | (x in y or y in x or x = y))))",2],
-- ["(FORALL x in s, y in t | x > y)",1],
-- ["(EXISTS x in s, y in t | x > y)",1],
--
-- ["(FORALL x in s, y in t | x > y) or (FORALL u in s, v in t | u + v < u - v)",1],
-- ["(FORALL x in s, y in t | x > y) or (FORALL u in s, v in t | u + v < u - v)",2],
-- ["(FORALL x in s, y in t | x > y) or (FORALL u in s, v in t | u + v < u - v)",3],
-- ["(FORALL x in s, y in t | x > y) or (FORALL u in s, v in t | u + v < u - v)",4],
-- ["(FORALL x in s, y in t | x > y) or (FORALL u in s, v in t | u + v < u - v)",5],
--
-- ["(FORALL x in s, y in t | x > y) or (EXISTS x in s, y in t | x + y < x - y)",1],
-- ["(FORALL x in s, y in t | x > y) or (EXISTS x in s, y in t | x + y < x - y)",2],
-- ["(FORALL x in s, y in t | x > y) or (EXISTS x in s, y in t | x + y < x - y)",3],
-- ["(FORALL x in s, y in t | x > y) or (EXISTS x in s, y in t | x + y < x - y)",4],
-- ["(FORALL x in s, y in t | x > y) or (EXISTS x in s, y in t | x + y < x - y)",5],
["x /= 0",1],
["true /= false",0]];
for [form,nquants] in test_pairs loop
printy([]);
sq := strip_quants(parze_expr(form + ";")(2),nquants); --printy(["sq: ",sq]);
if sq /= OM then
printy([10 * " ",unparse(sq(1))," ",sq(2)," dequant ",nquants]);
else
printy([sq]);
end if;
end loop;
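-- A worked example of the loop above (a sketch of expected behavior, assuming strip_quants
-- returns a pair [stripped_tree, binding_info] and OM on failure): for the pair
-- ["(FORALL x in s | x > 0)",1], strip_quants removes the single FORALL level, so the
-- loop prints the unparsed body "x > 0" followed by the binding information for "x in s",
-- the tag "dequant", and the quantifier count 1.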
miscellaneous_tests(); -- repository for miscellaneous top-level logic verifier tests under development
end do_tests3;
procedure test_check_a_skolem_inf(); -- test of check_a_skolem_inf function
trips := [
["T42","v1_thryvar:enum_Ord","(FORALL s | Ord(enum_Ord(s)) & (s = {enum(y,s): y in enum_Ord(s)}) & " +
"(FORALL y in enum_Ord(s), z in enum_Ord(s) | ((y /= z) •imp (enum(y,s) /= enum(z,s)))));"],
["T42","v1_thryvar:enum_Ord","(FORALL s | Ord(enum_Ord(s)) & (s = {enum(y,s): y in enum_Ord(s)}) & " +
"(FORALL y in enum_Ord(s), z in enum_Ord(s) | ((y /= z) •imp (enum(y,s) = enum(z,s)))));"],
[]];
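-- Each test triple has the form [thm_name, apply_outputs, stat]: the name of the theorem
-- supplying the Skolem inference, the "thryvar:function" output designation of the APPLY,
-- and the statement to be checked. The second triple, which asserts enum(y,s) = enum(z,s)
-- rather than enum(y,s) /= enum(z,s), appears to be a deliberately corrupted variant that
-- should make the inference fail.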
for [thm_name,apply_outputs,stat] in trips | apply_outputs /= OM loop
printy(["\ntesting: ",thm_name," ",apply_outputs," ",stat]);
if check_a_skolem_inf(thm_name,stat,[],apply_outputs) = OM then
printy(["********** Skolem inference failed"]);
else
printy(["Skolem inference successful......"]);
end if;
end loop;
end test_check_a_skolem_inf;
procedure test_check_an_apply_inf(); -- test of check_an_apply_inf function
def_in_theory := {["product_order", {"ORD1P2_THRYVAR"}], -- symbols defined in various theories (for testing only)
["setformer", {"X_THRYVAR"}],
["finite_tailrecursive_fcn2", {"H_THRYVAR"}],
["setformer2", {"XY_THRYVAR", "X_THRYVAR", "Y_THRYVAR"}],
["one_1_test_2", {"Y2_THRYVAR", "XY_THRYVAR","X2_THRYVAR", "X_THRYVAR", "Y_THRYVAR"}],
["Svm_test_2", {"YP_THRYVAR", "XY_THRYVAR", "XP_THRYVAR", "X_THRYVAR", "Y_THRYVAR"}],
["well_founded_set", {"ORD_THRYVAL", "MINREL_THRYVAR", "ORDEN_THRYVAR"}],
["finite_recursive_fcn", {"H_THRYVAR"}],
["finite_induction", {"M_THRYVAR"}],
["transfinite_member_induction", {"MT_THRYVAR"}],
["wellfounded_recursive_fcn", {"HH", "INDEX", "H_THRYVAR"}],
["ordinal_induction", {"T_THRYVAR"}],
["transfinite_induction", {"MT_THRYVAR"}],
["comprehension", {"TT_THRYVAR"}],
["transfinite_definition_0_params", {"F_THRYVAR"}],
["transfinite_definition_1_params", {"F_THRYVAR"}],
["Svm_test_3", {"YP_THRYVAR", "ZP_THRYVAR", "XY_THRYVAR", "XP_THRYVAR", "Z_THRYVAR", "X_THRYVAR", "Y_THRYVAR"}],
["ordval_fcn", {"RNG_THRYVAL"}],
["Svm_test", {"XY_THRYVAR", "X_THRYVAR", "Y_THRYVAR"}],
["one_1_test", {"XY_THRYVAR", "X_THRYVAR", "Y_THRYVAR"}],
["fcn_symbol", {"XY_THRYVAR", "X_THRYVAR", "Y_THRYVAR"}],
["equivalence_classes", {"EQC_THRYVAR", "F_THRYVAR"}],
["Set_theory", {"ast_enum_tup", "RA_SEQ_0", "ONE_1_MAP", "R_ABS_RECIP", "C_0", "RA_0",
"RF_0", "RECIP", "IS_MAP", "C_RECIP", "R_RECIP", "DD", "CDD", "CRDD", "DOT_RA_GT",
"DOT_RF_GT", "IS_OPEN_C_SET", "RED", "NEXT", "DOT_R_GT", "ORD", "CARD", "CONCAT",
"POS_PART", "DOT_PROD", "4", "INT", "SQRT", "BFINT", "IDENT", "ULEINT", "DOT_MOD",
"ast_nelt", "LINE_INT", "MEMBS_X", "INTERVAL", "ULT_MEMB_1", "RA_EQSEQ", "RA_SEQ_1",
"SHIFTED_SEQ", "C_1", "RA_1", "RA_SEQ", "1", "E", "FINITE", "DOT_R_GE", "CE", "RANGE",
"PI", "RA_CAUCHY", "SI", "ENUM", "SVM", "NORM", "CNORM", "CM", "DOT_INV_IM", "GLB",
"DOT_RAS_OVER", "DOT_C_OVER", "DOT_R_OVER", "ULTRAFILTER", "CDR", "LUB", "CAR",
"DER", "CDER", "CRDER", "FILTER", "DOT_RA_OVER", "2", "FR", "MEMBS_2", "DOT_OVER",
"IS_CD_CURV", "IS_ANALYTIC_CF", "FSIG_INF", "RF", "RBF", "C_REV", "S_REV",
"IS_CONTINUOUS_RF", "RA_REV", "RF_REV", "IS_CONTINUOUS_RENF", "IS_CONTINUOUS_CENF",
"IS_CONTINUOUS_CORF", "CF", "INV", "BL_F", "IS_CONTINUOUS_CF", "SIG_INF", "IS_CONTINUOUS_CRENF",
"ZA", "C_EXP_FCN", "UN", "DOMAIN", "DOT_ON", "DOT_RAS_TIMES", "DOT_RAS_PLUS", "DOT_RAS_MINUS",
"DOT_C_PLUS", "DOT_RA_MINUS", "DOT_F_PLUS", "DOT_RA_TIMES", "DOT_S_PLUS", "FIN_SEQS",
"DOT_C_TIMES", "DOT_C_MINUS", "DOT_F_TIMES", "DOT_F_MINUS", "DOT_CF_MINUS", "DOT_RA_PLUS",
"DOT_S_TIMES", "DOT_S_MINUS", "DOT_MINUS", "ULT_MEMBS", "DOT_TIMES", "3", "C_ABS", "S_ABS",
"SAME_FRAC", "DOT_PLUS", "POW", "IS_NONNEG", "R_IS_NONNEG", "RA_IS_NONNEG", "FR_IS_NONNEG", "AT_", "TILDE_"}]};
quads := [ -- note that the "ordinal_induction" example is not actually at top level
-- ["T17","ordinal_induction",["P(x)->not(Ult_membs(x) •incin x)", "o->oo"],"t_thryvar:t"],
-- ["T17","ordinal_induction",["P(x)-not(Ult_membs(x) •incin x)", "o->oo"],"t_thryvar:t"],
-- ["T17","ordinal_induction",["P(x->not(Ult_membs(x) •incin x)", "o->oo"],"t_thryvar:t"],
-- ["T17","ordinal_induction",["P(x)->not(Ult_membs(x) •incin x))", "o->oo"],"t_thryvar:t"],
-- ["T17","ordinal_induction",["P2(x)->not(Ult_membs(x) •incin x)", "o->oo"],"t_thryvar:t"],
-- ["T17","ordinal_induction",["P(f(x))->not(Ult_membs(x) •incin x)", "o->oo"],"t_thryvar:t"],
-- ["T17","ordinal_induction",["P(x,y)->not(Ult_membs(x) •incin x)", "o->oo"],"t_thryvar:t"],
-- ["T17","ordinal_induction",["P(x,x)->not(Ult_membs(x) •incin x)", "o->oo"],"t_thryvar:t"],
-- ["T17","ordinal_induction",["P(x)->not(Ult_membs(x) •incin x)", "o(x)->oo"],"t_thryvar:t"],
-- ["T17","ordinal_induction",["P->not(Ult_membs(x) •incin x)", "o->oo"],"t_thryvar:t"],
-- ["T17","ordinal_induction",["P(x)->not(Ult_membs(x) •incin x)", "P(x)->(Ult_membs(x) incs x)"],"t_thryvar:t"],
-- ["T17","ordinal_induction",["P(x)->not(Ult_membs(x) •incin x)"],"t_thryvar:t"],
-- ["T17","ordinal_induction",["o->oo"],"t_thryvar:t"],
-- ["T17","ordinal_induction",["P(x)->not(Ult_membs(x) •incin x)", "oi->oo"],"t_thryvar:t"],
["T343","equivalence_classes",["P(x,y)->Same_frac(x,y)","s->Fr"],"Eqc_thryvar:Ra,f_thryvar:Fr_to_Ra"],
[]];
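-- Each test quad has the form [next_thm_name, theory_name, apply_params, apply_outputs]:
-- the theorem being proved, the THEORY applied, the "formal->actual" replacement strings
-- for the theory's assumed symbols, and the "thryvar:actual" designations for its outputs.
-- The commented-out "ordinal_induction" quads appear to be deliberately malformed variants
-- (unbalanced parentheses, wrong arities, missing parameters) kept for negative testing.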
printy(["Starting tests: "]);
for [next_thm_name,theory_name,apply_params,apply_outputs] in quads | apply_outputs /= OM loop
printy([]);
res := check_an_apply_inf(next_thm_name,theory_name,apply_params,apply_outputs);
if res = OM then printy(["APPLY inference failed"]); else printy(["APPLY inference successful"]); end if;
end loop;
end test_check_an_apply_inf;
end test;
-- ******* invocation stub for standalone use of verifier *******
program test;
use verifier_top_level;
verifier_invoker("1..100"); -- give the range string that would have been used in web invocation
end test;
-- ******* invocation stubs for the library of users ******* each also needs a folder and an entry in etnanova_main.php; see the comment there
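-- All of the stubs below share one shape: a per-user program that passes the user's folder
-- prefix to verifier_invoker (in standalone use, as in the 'program test' above, the argument
-- is instead a proof-range string such as "1..100"). A stub for a hypothetical new user 'zz'
-- would read as follows, alongside the required folder and etnanova_main.php entry:
--    program zz_invoke_verifier; -- small program to invoke logic verifier zz_invoke_verifier.stl
--    use verifier_top_level; -- use the logic verifier
--    verifier_invoker("zz_folder/"); -- master invocation routine
--    end zz_invoke_verifier;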
program a_invoke_verifier; -- small program to invoke logic verifier a_invoke_verifier.stl (alberto)
use verifier_top_level; -- use the logic verifier
verifier_invoker("a_folder/"); -- master invocation routine
end a_invoke_verifier;
program b_invoke_verifier; -- small program to invoke logic verifier b_invoke_verifier.stl (Alf - Alfredo)
use verifier_top_level; -- use the logic verifier
verifier_invoker("b_folder/"); -- master invocation routine
end b_invoke_verifier;
program C_invoke_verifier; -- small program to invoke logic verifier c_invoke_verifier.stl (Eu2 - eugenio spare)
use verifier_top_level; -- use the logic verifier
verifier_invoker("c_folder/"); -- master invocation routine
end C_invoke_verifier;
program D_invoke_verifier; -- small program to invoke logic verifier d_invoke_verifier.stl (Eu3 - eugenio spare)
use verifier_top_level; -- use the logic verifier
verifier_invoker("d_folder/"); -- master invocation routine
end D_invoke_verifier;
program e_invoke_verifier; -- small program to invoke logic verifier e_invoke_verifier.stl (eugenio)
use verifier_top_level; -- use the logic verifier
verifier_invoker("e_folder/"); -- master invocation routine
end e_invoke_verifier;
program F_invoke_verifier; -- small program to invoke logic verifier f_invoke_verifier.stl (Eu4 - eugenio spare)
use verifier_top_level; -- use the logic verifier
verifier_invoker("f_folder/"); -- master invocation routine
end F_invoke_verifier;
program g_invoke_verifier; -- small program to invoke logic verifier g_invoke_verifier.stl (Guest)
use verifier_top_level; -- use the logic verifier
verifier_invoker("g_folder/"); -- master invocation routine
end g_invoke_verifier;
program H_invoke_verifier; -- small program to invoke logic verifier h_invoke_verifier.stl (Eu5 - eugenio spare)
use verifier_top_level; -- use the logic verifier
verifier_invoker("h_folder/"); -- master invocation routine
end H_invoke_verifier;
program i_invoke_verifier; -- small program to invoke logic verifier i_invoke_verifier.stl (yo keller - Ait - Alexandru Ioan Tomescu)
use verifier_top_level; -- use the logic verifier
verifier_invoker("i_folder/"); -- master invocation routine
end i_invoke_verifier;
program j_invoke_verifier; -- small program to invoke logic verifier j_invoke_verifier.stl (jack)
use verifier_top_level; -- use the logic verifier
--print("verifier_top_level: "); stop;
verifier_invoker("j_folder/"); -- master invocation routine
end j_invoke_verifier;
program k_invoke_verifier; -- small program to invoke logic verifier k_invoke_verifier.stl (yo keller)
use verifier_top_level; -- use the logic verifier
verifier_invoker("k_folder/"); -- master invocation routine
end k_invoke_verifier;
program L_invoke_verifier; -- small program to invoke logic verifier l_invoke_verifier.stl (Eu6 - eugenio spare)
use verifier_top_level; -- use the logic verifier
verifier_invoker("l_folder/"); -- master invocation routine
end L_invoke_verifier;
program m_invoke_verifier; -- small program to invoke logic verifier m_invoke_verifier.stl (mimo)
use verifier_top_level; -- use the logic verifier
verifier_invoker("m_folder/"); -- master invocation routine
end m_invoke_verifier;
program N_invoke_verifier; -- small program to invoke logic verifier n_invoke_verifier.stl (Eu7 - eugenio spare)
use verifier_top_level; -- use the logic verifier
verifier_invoker("n_folder/"); -- master invocation routine
end N_invoke_verifier;
program O_invoke_verifier; -- small program to invoke logic verifier o_invoke_verifier.stl (Eu8 - eugenio spare)
use verifier_top_level; -- use the logic verifier
verifier_invoker("o_folder/"); -- master invocation routine
end O_invoke_verifier;
program P_invoke_verifier; -- small program to invoke logic verifier p_invoke_verifier.stl (Eu9 - eugenio spare)
use verifier_top_level; -- use the logic verifier
verifier_invoker("p_folder/"); -- master invocation routine
end P_invoke_verifier;
program Q_invoke_verifier; -- small program to invoke logic verifier q_invoke_verifier.stl (EuA - eugenio spare)
use verifier_top_level; -- use the logic verifier
verifier_invoker("q_folder/"); -- master invocation routine
end Q_invoke_verifier;
program r_invoke_verifier; -- small program to invoke logic verifier r_invoke_verifier.stl (Martin)
use verifier_top_level; -- use the logic verifier
verifier_invoker("r_folder/"); -- master invocation routine
end r_invoke_verifier;
program s_invoke_verifier; -- small program to invoke logic verifier s_invoke_verifier.stl (toto)
use verifier_top_level; -- use the logic verifier
verifier_invoker("s_folder/"); -- master invocation routine
end s_invoke_verifier;
program T_invoke_verifier; -- small program to invoke logic verifier t_invoke_verifier.stl (EuB - eugenio spare)
use verifier_top_level; -- use the logic verifier
verifier_invoker("t_folder/"); -- master invocation routine
end T_invoke_verifier;
program U_invoke_verifier; -- small program to invoke logic verifier u_invoke_verifier.stl (EuC - eugenio spare)
use verifier_top_level; -- use the logic verifier
verifier_invoker("u_folder/"); -- master invocation routine
end U_invoke_verifier;
program V_invoke_verifier; -- small program to invoke logic verifier v_invoke_verifier.stl (EuD - eugenio spare)
use verifier_top_level; -- use the logic verifier
verifier_invoker("v_folder/"); -- master invocation routine
end V_invoke_verifier;
program W_invoke_verifier; -- small program to invoke logic verifier w_invoke_verifier.stl (EuE - eugenio spare)
use verifier_top_level; -- use the logic verifier
verifier_invoker("w_folder/"); -- master invocation routine
end W_invoke_verifier;
program X_invoke_verifier; -- small program to invoke logic verifier x_invoke_verifier.stl (EuF - eugenio spare)
use verifier_top_level; -- use the logic verifier
verifier_invoker("x_folder/"); -- master invocation routine
end X_invoke_verifier;
program Y_invoke_verifier; -- small program to invoke logic verifier y_invoke_verifier.stl (EuG - eugenio spare)
use verifier_top_level; -- use the logic verifier
verifier_invoker("y_folder/"); -- master invocation routine
end Y_invoke_verifier;
program Z_invoke_verifier; -- small program to invoke logic verifier z_invoke_verifier.stl (EuH - eugenio spare)
use verifier_top_level; -- use the logic verifier
verifier_invoker("z_folder/"); -- master invocation routine
end Z_invoke_verifier;