-
Notifications
You must be signed in to change notification settings - Fork 0
/
main.py
54 lines (41 loc) · 1.27 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
from FLTLf.parser import LTLfParser
from FLTLf.converter import Converter
from FLTLf import core
import input
import traceback
import torch
# --- Preliminary log manipulation and padding ---
# Build the converter from the declared predicate names and the raw traces.
converter: Converter = Converter(input.predicate_names, input.traces)
# Prepare the log as a tensor, slicing out predicates not used in the formula.
core.tensor_log = converter.log2tensor(input.formula, verbose=False)
# Number of traces in the log.
core.batch_size = converter.batch_size
# Length of the longest trace (padding target for shorter traces).
core.maxlength = converter.maxlength
# Debug flag propagated from the input module.
core.debug = input.debug

# Parse the textual formula into a formula object.
parser = LTLfParser()
try:
    pyformula = parser(input.formula)
    # Instant at which the formula is evaluated.
    i = input.i
    print(f"Evaluation of {pyformula.print()} at instant {i} :")
    # Evaluate the formula over the tensor log via the visitor.
    visitor = core.Visitor()
    visitor.visit(pyformula, i)
    print("Result:")
    # visitor.result is a 1-dimensional tensor: one value per trace.
    # (visitor.data, by contrast, is as big as the original tensor log.)
    print(visitor.result)
    if input.debug:
        # Cross-check against the legacy (non-tensor) evaluation.
        print(f"{pyformula.eval(i)} - old code")
except Exception:
    # Top-level boundary: report the full traceback rather than crashing
    # with a bare, unexplained exception.
    print(traceback.format_exc())