-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathPInBench.py
More file actions
1557 lines (1260 loc) · 60.2 KB
/
PInBench.py
File metadata and controls
1557 lines (1260 loc) · 60.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
import sys
import glob
import subprocess
import abc
import csv
import pprint
import logging
import time
import os
import argparse
import re
from threading import Timer
class BenchmarkEngine(object):
    """
    Abstract base driver for benchmarking a probabilistic-inference framework.

    Concrete subclasses configure the model-file extension, output-file
    suffixes, executable/jar name and CSV schema, and implement the abstract
    hooks (query extraction, output parsing, error detection, cleanup).

    NOTE: this class reads several module-level globals set by the CLI parser
    elsewhere in the file: ``args``, ``verbose``, ``combine_queries``,
    ``timeout_skip``.

    Overview file as class attribute.
    """
    # Handle of the per-run "overview" log file; opened in main_wrapper().
    ov_file = None

    def __init__(self, directory, extension, inference_engine, log_file_suffix, csv_file_suffix, file_overview_suffix, jar, csv_columns,
                 info_dict):
        # directory: folder to scan for model files.
        self.directory = directory
        # extension: model-file extension (without dot), e.g. "mln" / "blog".
        self.extension = extension
        # inference_engine: framework-specific engine identifier (may be "").
        self.inf_eng = inference_engine
        self.log_suffix = log_file_suffix
        self.csv_suffix = csv_file_suffix
        self.file_overview_suffix = file_overview_suffix
        # jar: path of the executable / jar checked by check_jar().
        self.jar = jar
        # csv_cols: column names for the results CSV.
        self.csv_cols = csv_columns
        # info_dict: template dict of extracted values; -1 marks "not extracted".
        self.info_dict = info_dict

    @abc.abstractmethod
    def get_start_message(self):
        """
        Returns a string describing the chosen framework to be benchmarked.
        """

    def main_wrapper(self):
        """
        Method that wraps the sequence of functions:
        checks the executable, sets up logging/CSV/overview outputs, then
        iterates over all model files and dispatches them to handle_file().
        """
        # -- 0. Check if framework file is there
        self.check_jar()
        # -- 1. Prep output files & logging (all share one timestamp prefix)
        time_str = time.strftime("%Y_%m_%d-%H_%M")
        log_file = time_str + self.log_suffix
        csv_file = time_str + self.csv_suffix
        overview_file = time_str + self.file_overview_suffix
        # Empty list for file prefixes which should be excluded due to timeouts.
        self.exclude_file_prefixes = []
        # Remove pre-existing handlers so basicConfig takes effect
        # (needed for BLOG Engine).
        # Source: https://stackoverflow.com/a/49202811
        for handler in logging.root.handlers[:]:
            logging.root.removeHandler(handler)
        logging.basicConfig(filename=log_file,
                            level=logging.DEBUG,
                            format='%(levelname)s -- %(asctime)s %(message)s',
                            datefmt='%m/%d/%Y %H:%M:%S')
        # Add handler for printing the logs on the console.
        logging.getLogger().addHandler(logging.StreamHandler())
        logging.info("Script call for reproducibility: > {0}\n".format(" ".join([x for x in sys.argv])))
        logging.debug("All settings used:")
        # 'args' is the module-level argparse namespace (defined elsewhere).
        for k,v in sorted(vars(args).items()):
            logging.debug("{0}: {1}".format(k,v))
        #logging.info(self.get_start_message())
        #if java_xmx:
        #    logging.info("'-Xmx16384M' will be used to fix the allocated java heap space.")
        logging.info("Initialising output files: [%s] and [%s].", log_file, csv_file)
        # Create output csv (with times, probabilities, ...)
        output = open(csv_file, 'w')
        writer = csv.DictWriter(output, dialect='excel', fieldnames=self.csv_cols, lineterminator='\n', delimiter=",")
        writer.writeheader()
        # Create output log (for file overview)
        self.ov_file = open(overview_file, 'w')
        # -- 2. Find model files and iterate over them
        files = self.find_files_in_dir()
        # Track the current "setting" prefix so the subsequent-fail counter
        # resets whenever a new parameter setting starts.
        curr_setting_string = self.extract_setting_string(files[0])
        self.subsequent_fail_counter = 0
        for j, fileI in enumerate(files):
            if self.extract_setting_string(fileI) != curr_setting_string:
                self.subsequent_fail_counter = 0
                curr_setting_string = self.extract_setting_string(fileI)
            logging.info("### File no. %d/%d - '%s'", j+1, len(files), fileI)
            if any([fileI.startswith(ef) for ef in self.exclude_file_prefixes]):
                # Larger model of an already-failing setting: skip entirely.
                logging.info(" Skipping file because of previous timeout / (memory) error for same but smaller model.")
                self.write_status_to_overview(fileI, "timeout or error skip")
            else:
                # Fresh per-file status counters, updated by the handlers.
                self.status_counts = {'success':0, 'errors':0, 'timeouts':0}
                self.handle_file(fileI, writer)
                # Flush so partial results survive a crash/kill.
                output.flush()
                self.handle_file_status_counters(fileI)
        logging.info(">>> Finished - all work done!")
        output.close()
        self.ov_file.close()
        self.cleanup()
        self.finish_routine()

    def check_jar(self):
        """
        Aborts the script in case the needed jar / executable file cannot be found.
        """
        if not os.path.exists(self.jar):
            sys.exit("Did not find the needed executable / jar file. Should be named '{}'.".format(self.jar))

    def finish_routine(self):
        """
        Method that is called at the very end of a run (i.e., after all work is done).
        Best-effort Telegram notification; silently degrades to a console
        message when the optional MiniBot package is absent.
        """
        try:
            from Minibot.minibot import MiniBot
        # NOTE(review): bare 'except' also swallows KeyboardInterrupt etc.;
        # 'except ImportError' would be the targeted form.
        except:
            print("No final Telegram message sent. (Could not import MiniBot Telegram)")
        else:
            mb = MiniBot(False)
            msg = "Current PInBench run finished!\n"\
                  "Directory <code>{}</code>".format(self.directory)
            mb.send_message(msg)

    def extract_setting_string(self, filename):
        # Everything before the first '#' identifies the parameter setting;
        # files sharing it are the same setting at growing sizes.
        return filename.split("#")[0]

    def handle_file_status_counters(self, filename):
        """
        Does all the status counter handling for a file.
        1.) Checks if the file's status stats are considered a "fail":
            -> When more timeouts and errors than successes.
        2.) Checks if two subsequent files have been "fails"
            -> Adds the setting string to the exclusion list.
        Arguments:
            filename {string} -- current file name
        """
        logging.info(" >> File summary: success: {}, timeouts: {}, errors: {}".format(self.status_counts['success'], self.status_counts['timeouts'], self.status_counts['errors']))
        if (self.status_counts['success'] < self.status_counts['timeouts'] + self.status_counts['errors']):
            self.subsequent_fail_counter += 1
            # 'timeout_skip' is a module-level flag (CLI argument).
            if self.subsequent_fail_counter == 2 and timeout_skip:
                # Re-append '#' so only same-setting files match as prefix.
                current_setting_string = self.extract_setting_string(filename) + ("#" if "#" in filename else "")
                self.add_prefix_to_exclusion_list(current_setting_string)
                self.subsequent_fail_counter = 0
        else:
            self.subsequent_fail_counter = 0

    def add_prefix_to_exclusion_list(self, prefix):
        logging.info(" Adding settings prefix '{}' to be excluded for future files.".format(prefix))
        self.exclude_file_prefixes.append(prefix)

    def write_status_to_overview(self, filename, status, query=""):
        """
        Appends one CSV-ish status line (filename, [query,] status) to the
        overview file and flushes immediately.
        """
        if query != "":
            message = '{}, "{}", {}\n'.format(filename, query, status)
        else:
            if combine_queries:
                message = "{}, {}\n".format(filename, status)
            else:
                message = '{}, "{}", {}\n'.format(filename, "all queries", status)
        self.ov_file.write(message)
        self.ov_file.flush()

    def find_files_in_dir(self):
        """
        Method that finds all model files with the given extension in the
        given directory, naturally sorted (numeric parts compared as numbers).
        """
        files = glob.glob(self.directory + "/*." + self.extension)
        def natural_sort(l):
            # Split names into digit / non-digit chunks so "file10" > "file2".
            convert = lambda text: int(text) if text.isdigit() else text.lower()
            alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
            return sorted(l, key = alphanum_key)
        try:
            sorted_files = natural_sort(files)
        # NOTE(review): bare 'except' — falls back to unsorted on any failure.
        except:
            sorted_files = files
        logging.info(str.format("Found {} {} file(s) in the selected directory: '{}'", len(sorted_files), self.extension,
                                self.directory))
        return sorted_files

    @abc.abstractmethod
    def extract_queries(self, file_string, filename, engine):
        """
        Method that does query processing. Implemented in the subclasses.
        """

    @abc.abstractmethod
    def extract_information(self, cmd_output):
        """
        Method that extracts needed information from the command line output. Implemented in each subclass.
        """

    @abc.abstractmethod
    def cleanup(self):
        """
        Method that implements subclass specific cleanup tasks (e.g. deletes temporary files, ...)
        """

    @abc.abstractmethod
    def handle_file(self, filename, writer_obj):
        """
        Method that needs to be implemented in concrete subclasses.
        Handles the control whether the queries are done en bloc or one by one.
        Depending on variable 'combine_queries' (which is set according to the script argument).
        If one of the modes is not yet implemented, subclasses overwrite the
        corresponding methods (handle_single_queries or handle_combined_queries) and abort there.
        :param filename: filename of the model file to be handled.
        :param writer_obj: writer_obj to write the extracted information to
        """
        # Default dispatch (usable via super() despite @abc.abstractmethod).
        if combine_queries:
            # Mode: Combine queries
            with open(filename, 'r') as f:
                file_string = f.read()
            queries = self.extract_queries(file_string, filename, self)
            infos = self.handle_combined_queries(queries, filename)
            for info in infos:
                if self.info_check_ok(info):
                    writer_obj.writerow(info)
                else:
                    logging.error("Info not okay (contains -1 value)")
        else:
            self.handle_single_queries(filename, writer_obj)

    def handle_single_queries(self, filename, writer_obj):
        """
        Standard wrapper method that handles information extraction for one file (e.g. splits up queries,
        performs console calls, ...) and saves the info to the writer_obj via writerow(info_dict)
        """
        with open(filename, 'r') as f:
            model_string = f.read()
        query_objects = self.extract_queries(model_string, filename, self)
        pp = pprint.PrettyPrinter(depth=6)
        # 3.) For each query: execute the forclift jar. Collect inference parameter information.
        for i, query in enumerate(query_objects):
            logging.info(" -- Query no. {}/{}: '{}'".format(i + 1, len(query_objects), query))
            # query.execute() returns (std_out, std_err); std_err equal to
            # the sentinel "<Timeout>" marks a killed/timed-out run.
            std_out, std_err = query.execute()
            if std_err == "<Timeout>":
                logging.warning("Timeout in script execution.")
                self.write_status_to_overview(filename, "timeout", query=query.queryString)
                self.status_counts['timeouts'] += 1
            elif self.check_for_error(std_out, std_err):
                try:
                    infos = self.extract_information(std_out)
                except Exception as e:
                    logging.error("Uncaught exception in information extraction:")
                    logging.error(e)
                    logging.error("std_out: "+std_out)
                    logging.error("std_err: "+std_err)
                    # Fall back to the -1 template so the -1 check flags it.
                    infos = self.info_dict.copy()
                infos['filename'] = str(filename)
                infos['query'] = str(query)
                if verbose:
                    print("> Extracted Information")
                    pp.pprint(infos)
                if self.info_check_ok(infos):
                    self.write_status_to_overview(filename, "all ok", query=query.queryString)
                    self.status_counts['success'] += 1
                    writer_obj.writerow(infos)
                else:
                    self.write_status_to_overview(filename, "error (info extraction)", query=query.queryString)
                    self.status_counts['errors'] += 1
            else:
                self.write_status_to_overview(filename, self.determine_error_type(std_err), query=query.queryString)
                logging.error("Error in script execution. Output:\n" + self.get_error_message(std_out, std_err).replace("\r\n\tat", "\n"))
                self.status_counts['errors'] += 1

    def handle_combined_queries(self, queries, filename):
        """
        Standard method that does the inference with combined queries.
        :param queries: List of queries
        :param filename: filename String
        :return: list of info dicts containing the extracted information
        """
        comp_string = ""
        # NOTE(review): the join below already builds the full combined string
        # from all queries; re-assigning it on every loop iteration is
        # redundant (the loop also leaks 'query' for use further down).
        for query in queries:
            # compose query string
            comp_string = ",".join([query.queryString for query in queries])
        logging.info(" -- Executing combined query string '{}' ".format(comp_string))
        std_out, std_err = self.execute_combined_query(comp_string,filename)
        if std_err == "<Timeout>":
            logging.warning("Timeout in script execution.")
            self.write_status_to_overview(filename, "timeout", query=query.queryString)
            self.status_counts['timeouts'] += 1
            return []
        elif self.check_for_error(std_out, std_err):
            # TODO: Create one info dict for each subquery
            try:
                infos = self.extract_information(std_out)
            except Exception as e:
                logging.error("Uncaught exception in information extraction:")
                logging.error(e)
                logging.error("std_out: "+std_out)
                logging.error("std_err: "+std_err)
                infos = self.info_dict.copy()
            infos['filename'] = str(filename)
            infos['query'] = str(comp_string)
            if self.info_check_ok(infos):
                self.write_status_to_overview(filename, "all ok")
                self.status_counts['success'] += 1
            else:
                self.write_status_to_overview(filename, "error (info extraction)", query=query.queryString)
                self.status_counts['errors'] += 1
            if verbose:
                pp = pprint.PrettyPrinter(depth=6)
                pp.pprint(infos)
            return [infos]
        else:
            self.write_status_to_overview(filename, self.determine_error_type(std_err))
            logging.error("Error in script execution. Output:\n" + self.get_error_message(std_out, std_err).replace("\r\n\tat", "\n"))
            self.status_counts['errors'] += 1
            return []

    def info_check_ok(self, info_dict):
        """
        Checks an info dictionary if information extraction has worked as expected (i.e. none of the values are -1 - as initially set).
        Returns True if all is okay, False otherwise.
        """
        for key in info_dict:
            if info_dict[key] == -1:
                logging.error("-1 value in info_dict with key: '{}'".format(key))
                if verbose:
                    logging.error("Complete info_dict: {}".format(info_dict))
                return False
        return True

    @abc.abstractmethod
    def execute_combined_query(self, comp_string, filename):
        """
        Executes a combined query, represented by the comp_string as composed query string
        in the given file.
        """

    def determine_error_type(self, std_err):
        """
        Maps known Java error signatures in `std_err` to a short status label
        for the overview file.
        """
        if "NegativeArraySizeException" in std_err:
            return "NegativeArraySizeException"
        elif "ArrayIndexOutOfBoundsException" in std_err:
            return "ArrayIndexOutOfBoundsException"
        elif "Java heap space" in std_err:
            return "OutOfMemoryError:JavaHeapSpace"
        else:
            return "unspecified error"

    @abc.abstractmethod
    def check_for_error(self, std_out, std_err):
        """
        Checks `std_out` and `std_err` if an error occurred. This might vary between frameworks (i.e. some might not
        use `std_err`).
        Return:
            True if all is good (= no error)
            False if error occurred.
        """

    def get_error_message(self, std_out, std_err):
        """
        Extract error message from `std_out` and `std_err`.
        Return:
            Error message
        """
        return std_err
class ForcliftEngine(BenchmarkEngine):
    """
    Benchmark driver for the Forclift WFOMC jar ('forclift-3.1.jar').

    In combined-query mode it uses Forclift's '--margs' all-marginals call;
    otherwise it runs the jar once per query via the base-class machinery.
    """

    def __init__(self, directory):
        # Class config
        inference_engine = ""
        extension = "mln"
        log_suffix = "_forclift.log"
        csv_suffix = '_forclift_times.csv'
        ov_suffix = '_forclift_overview.log'
        jar = 'forclift-3.1.jar'
        # CSV schema differs between '--margs' (combined) and per-query mode.
        if combine_queries:
            csv_cols = ['filename', 'query', 'P(query)', 't_inference']
            info_dict = {'filename': '', 'query': '', 'P(query)': -1, 't_inference': -1}
        else:
            csv_cols = ['filename', 'query', 't_evidence', 't_query', 't_inference', 'ev_nnf_size',
                        'ev_smooth_nnf_size', 'q_nnf_size', 'q_smooth_nnf_size', 'Z', 'Z(query)',
                        'P(query)']
            info_dict = {'filename': '', 'query': '', 't_evidence': -1, 't_query': -1, 't_inference': -1,
                         'ev_nnf_size': -1, 'ev_smooth_nnf_size': -1, 'q_nnf_size': -1, 'q_smooth_nnf_size': -1,
                         'Z': -1, 'Z(query)': -1, 'P(query)': -1}
        super(ForcliftEngine, self).__init__(directory=directory,
                                             extension=extension,
                                             inference_engine=inference_engine,
                                             log_file_suffix=log_suffix,
                                             csv_file_suffix=csv_suffix,
                                             file_overview_suffix = ov_suffix,
                                             jar=jar,
                                             csv_columns=csv_cols,
                                             info_dict=info_dict)

    def get_start_message(self):
        return "Running benchmark on framework: [Forclift]"

    def handle_file(self, filename, writer_obj):
        # Combined mode maps to Forclift's '--margs' call; single mode reuses
        # the generic per-query loop of the base class.
        if combine_queries:
            self.handle_margs_file(filename, writer_obj)
        else:
            super(ForcliftEngine, self).handle_single_queries(filename, writer_obj)

    def extract_information(self, fl_str):
        """
        Function that extracts relevant information from the forclift output string.
        Expects lines of the form '[key] ... {value}' starting at the
        '[t_evidence]' marker.
        Returns a dictionary containing the relevant information
        """
        info_dict = self.info_dict.copy()
        try:
            lines = fl_str[fl_str.index("[t_evidence]"):].split("\n")
        # NOTE(review): bare 'except' — only ValueError from .index() is expected here.
        except:
            logging.warning("[t_evidence] not found in return string. \
                Skipping information extraction.")
        else:
            for line in [line for line in lines if line != ""]:
                # '[key]' -> dict key, '{value}' -> dict value.
                key = line[line.find("[") + 1:line.find("]")]
                value = line[line.find("{") + 1:line.find("}")]
                info_dict[key] = value
        return info_dict

    def extract_queries(self, file_string, filename, engine):
        """
        Function that cleans up a list of queries
        (removes comment-double-slashes ('//') and empty strings).
        """
        # Reduce fileString to relevant lines
        query_lines = file_string.split("[Queries]")[1].split("\n")[3:]
        # Extract raw query strings from query lines
        query_strings = [query[3:].strip() for query in query_lines if query.startswith("//")]
        query_objects = [ForcliftQuery(query_string, filename, engine) for query_string in query_strings]
        return query_objects

    def cleanup(self):
        # no cleanup necessary
        pass

    def handle_margs_file(self, filename, writer_obj):
        """
        Wrapper function for sequence of actions, if margs argument is present.
        1. Executes inference (on the ``filename`` file, no query needed, but with ``--margs`` argument! )
        2. Extracts information
        3. Writes extracted information to csv (via ``writer_obj.writerow(dict)``
        :param filename: filename of model file to use inference on
        :param writer_obj: writer object to write the extracted information
        """
        cmd = ['java', '-jar', self.jar, '--margs', filename]
        std_out, std_err = Query.run(cmd)
        if self.check_for_error(std_out, std_err):
            dicts = self.extract_margs_info(std_out)
            # NOTE(review): loop variable 'dict' shadows the builtin.
            for dict in dicts:
                dict['filename'] = filename
                writer_obj.writerow(dict)
            self.write_status_to_overview(filename, "all ok")
            self.status_counts['success'] += 1
        else:
            self.write_status_to_overview(filename, self.determine_error_type(std_err))
            self.status_counts['errors'] += 1
            logging.error("Error in script execution. Output:\n" + std_err.replace("\r\n\tat", "\n"))

    def check_for_error(self, std_out, std_err):
        # Forclift signals errors exclusively via stderr.
        return std_err == ""

    def extract_margs_info(self, output):
        """
        Method that extracts information in allmargs mode.
        Possible output::
            Partition function is exp(-2.798187775203149E7)
            Building marginal query class for Att(X)
            Probability for class of queries Att(X) is exp(-422.95578300207853)
            Building marginal query class for Res(X)
            Probability for class of queries Res(X) is exp(-0.4700036309659481)
            [...]
            done
            [t_inference] {39140}
        Extracts the query strings and the corresponding probabilities, as well as the total time needed.
        :param output: output string from console forclift call
        :return: list of dictionaries containing the relevant information (see above)
        """
        dicts = []
        # NOTE(review): query_number is computed but never used.
        query_number = output.count("Probability for class of queries")
        lines = output.split("\n")
        start, end, t_inference = 0, 0, 0
        start_found = False
        for i, line in enumerate(lines):
            # 'start' = first "Building ..." line, 'end' = last "Probability ..." line.
            if "Building marginal query class" in line and not start_found:
                start = i
                start_found = True
            elif "Probability for class of queries" in line:
                end = i
            if "[t_inference]" in line:
                # Value between '{' and '}' (trailing '}' stripped via [:-1]).
                t_inference = line.split("{")[1].strip()[:-1]
        prob_lines = [line.strip() for line in lines[start:end+1]]
        # Lines alternate: "Building ... for <query>" / "Probability ... is <prob>".
        for i in range(0, len(prob_lines), 2):
            query = prob_lines[i].split("Building marginal query class for ")[1].strip()
            prob = prob_lines[i+1].split("Probability for class of queries "+query+" is ")[1]
            dicts.insert(0, self.info_dict.copy())
            dicts[0]['t_inference'] = t_inference
            dicts[0]['query'] = query
            dicts[0]['P(query)'] = prob
        return dicts
class AlchemyEngine(BenchmarkEngine):
    """
    Benchmark driver for the Alchemy lifted-inference executable
    ('./Alchemy_liftedinfer', Linux only).

    Parses Z values from stdout (format depends on the chosen engine:
    ptpe/lis/lvg) and reads per-query probabilities from 'result.dat'.
    """

    def get_start_message(self):
        return "Running benchmark on framework: [Alchemy]"

    def __init__(self, directory, engine, maxSampleSteps=None):
        # Class config
        inference_engine = engine
        extension = "mln"
        log_suffix = "_alchemy.log"
        csv_suffix = '_alchemy_times.csv'
        ov_suffix = '_alchemy_overview.log'
        jar = './Alchemy_liftedinfer'
        # Sampling step budget (recorded in the CSV as 'maxSteps').
        self.max_sample_steps = maxSampleSteps
        csv_cols = ['filename', 'query', 'Z', 'probs', 'maxSteps', 'total time', 'millisecs']
        info_dict = {'filename': '', 'query': '', 'Z': -1,'probs':'', 'maxSteps':-1, 'total time': -1, 'millisecs':-1}
        super(AlchemyEngine, self).__init__(directory=directory,
                                            extension=extension,
                                            inference_engine=inference_engine,
                                            log_file_suffix=log_suffix,
                                            csv_file_suffix=csv_suffix,
                                            file_overview_suffix = ov_suffix,
                                            jar=jar,
                                            csv_columns=csv_cols,
                                            info_dict=info_dict)

    def extract_information(self, output):
        """
        Extracts Z, timings and probabilities from the Alchemy console output.
        The Z format depends on the engine used (see inline examples).
        """
        info_dict = self.info_dict.copy()
        if "Exact Z =" in output:
            # Extraction of exact inference values (engine: ptpe)
            """
            Output format like:
            [...]
            done writing dump files (mlndump.dat,symbolsdump.dat)
            Exact Z = Starting Exact Lifted Model Counting
            (Actual) 2.08035e+15
            [total time] : {0.06 secs}
            [millisecs] : {73}
            """
            lines = output[output.find("Exact Z ="):].split("\n")
            for i, line in enumerate(lines):
                if str(line).startswith("Exact Z ="):
                    # Actual value is on the following line.
                    info_dict['Z'] = lines[i+1].strip()
                    break
        elif "Z-curr =" in output:
            # Extraction of sampling-based (approximate) inference values (with engine lis)
            """
            Output format like:
            [...]
            iteration 975
            Z-curr = (Actual) 9.59097e+10
            cumulative-Z = (Actual) 1.31424e+14
            Updating Distributions...
            iteration 1000
            Z-curr = (Actual) 9.59097e+10
            cumulative-Z = (Actual) 1.34819e+14>
            [total time] : {0.44 secs}
            [millisecs] : {455}
            """
            # rfind: take the LAST reported Z-curr (final iteration).
            lines = output[output.rfind("Z-curr ="):].split("\n")
            for i, line in enumerate(lines):
                if str(line).startswith("Z-curr ="):
                    info_dict['Z'] = line.replace("Z-curr = (Actual)", "").strip()
                    break
        elif "LBG Sampling Process" in output:
            # Extraction of sampling-based (approximate) inference values (with engine lvg)
            """
            Output format like:
            [...]
            LBG Sampling Process::Sampling from cluster level 0,iter=950
            LBG Sampling Process::Sampling from cluster level 0,iter=975
            LBG Sampling Process::Sampling from cluster level 0,iter=1000
            Sampling Process exiting
            [total time] : {0.84 secs}
            [millisecs] : {866}
            """
            lines = output[output.rfind("Sampling Process exiting"):].split("\n")
            info_dict['Z']=-2 # no Z to extract available
        else:
            logging.warning("'Exact Z =' or 'Z-curr =' not found in return string. \
                Skipping information extraction.")
            lines = []
        # Generic '[key] : {value}' lines (times) from the tail section.
        for line in [line for line in lines if line != "" and all(x in line for x in ["[", "]", "{", "}"])]:
            key = line[line.find("[") + 1:line.find("]")]
            value = line[line.find("{") + 1:line.find("}")]
            info_dict[key] = value
        info_dict['probs'] = self.extract_probs_from_file()
        info_dict['maxSteps'] = self.max_sample_steps
        return info_dict

    def check_for_error(self, std_out, std_err):
        # Alchemy writes "error" to stdout and "failed" to stderr on failure.
        return (std_out.find("error") == -1) and ("failed" not in std_err)

    def get_error_message(self, std_out, std_err):
        return " > std_out:\n{}\n\n > std_err:\n{}".format(std_out, std_err)

    def execute_combined_query(self, comp_str, filename):
        """
        Method that executes a combined query string.
        :param comp_str: combined (composed) query string to be executed
        :return: Tuple (std_out, std_err) from cmd call.
        """
        # Only continue if OS is linux.
        if 'linux' not in sys.platform:
            sys.exit('Stopped benchmark. Alchemy benchmark is just supported on Linux not this OS.')
        cmd = [self.jar, "-" + self.inf_eng, 'true', '-q', comp_str, '-i', filename]
        return Query.run(cmd)

    def extract_probs_from_file(self):
        """
        Function for information extraction (probabilities) from the alchemy result.dat file
        :return: a string with all queries' probabilities in the form: '{Query(A):0.123;Query(B):0.1}'
        """
        # result.dat lines look like 'Query(A) 0.123'; space becomes ':'.
        with open("result.dat", 'r') as f:
            lines = [line.strip().replace(" ", ":") for line in f if line.strip() != ""]
        return "{" + ";".join(lines) +"}"

    def extract_queries(self, file_string, filename, engine):
        """
        Function that pulls queries out of a given file and cleans them up
        (removes comment-double-slashes ('//') and empty strings).
        :param file_string: content of the file
        :param filename: filename
        :param engine: engine object (needed for passing on)
        :return: a list of query objects
        """
        # Reduce fileString to relevant lines
        query_lines = file_string.split("[Queries]")[1].split("\n")[3:]
        # Extract raw query strings from query lines
        query_strings = [query[3:].strip() for query in query_lines if query.startswith("//")]
        query_objects = [AlchemyQuery(query_string, filename, engine) for query_string in query_strings]
        return query_objects

    def cleanup(self):
        # Remove files if present:
        files = ['mlndump.dat', 'result.dat', 'symbolsdump.dat']
        for f in files:
            if os.path.exists(f):
                os.remove(f)
class GCFoveEngine(BenchmarkEngine):
    """
    Benchmark driver for the GC-FOVE jar ('gcfove.jar') on BLOG model files.

    Supports the 'fove.LiftedVarElim' (LVE) and 've.VarElimEngine' (VE)
    engines; parses '**TIME**' markers and the query-result distribution
    block from stdout.
    """

    def __init__(self, directory, inference_engine):
        # Class config
        extension = "blog"
        inf_eng_str = "LVE" if (inference_engine == "fove.LiftedVarElim") else "VE"
        log_suffix = "_gcfove_"+inf_eng_str+".log"
        csv_suffix = '_gcfove_'+inf_eng_str+'_times.csv'
        ov_suffix = '_gcfove_'+inf_eng_str+'_overview.log'
        jar = 'gcfove.jar'
        csv_cols = ['filename', 'query', 'P(query)', 'time']
        info_dict = {'filename': '', 'query': '', 'P(query)':-1, 'time': -1}
        super(GCFoveEngine, self).__init__(directory=directory,
                                           extension=extension,
                                           inference_engine=inference_engine,
                                           log_file_suffix=log_suffix,
                                           csv_file_suffix=csv_suffix,
                                           file_overview_suffix = ov_suffix,
                                           jar=jar,
                                           csv_columns=csv_cols,
                                           info_dict=info_dict)

    def get_start_message(self):
        return "Running benchmark on framework: [GCFove]\nInference Engine: [{}]".format(self.inf_eng)

    def handle_combined_queries(self, queries, filename):
        # This function is called out of the handle_file() function, queries are already extracted (via extract_queries() ) and
        # forwarded as an argument. GCFOVE evaluates all queries in the model
        # file in one run, so no composed query string is needed.
        std_out, std_err = self.execute_combined_query("", filename)
        if std_err == "<Timeout>":
            logging.warning("Timeout in script execution.")
            self.write_status_to_overview(filename, "timeout")
            self.status_counts['timeouts'] += 1
            return []
        elif self.check_for_error(std_out, std_err):
            infos = self.extract_cq_information(queries, std_out)
            for dic in infos:
                dic["filename"] = filename
            # We need to return a list of info dicts
            self.write_status_to_overview(filename, "all ok")
            self.status_counts['success'] += 1
            return infos
        else:
            self.write_status_to_overview(filename, self.determine_error_type(std_err))
            logging.error("Error in script execution. Output:\n" + self.get_error_message(std_out, std_err).replace("\r\n\tat", "\n"))
            self.status_counts['errors'] += 1
            return[]

    def execute_combined_query(self, comp_string, filename):
        cmd = ['java', '-jar', self.jar, '-e', self.inf_eng, filename]
        # 'java_xmx' is a module-level flag fixing the heap size.
        if java_xmx:
            cmd.insert(2, '-Xmx16384M')
        # TODO: insert passthrough args? problem: Function is defined in Query class.
        #cmd = self.insert_passthrough_args(cmd, passThroughArgs)
        if passThroughArgs != "":
            raise Exception("Passing arguments through not yet implemented in GCFOVE combined query mode.")
        return Query.run(cmd)

    def check_for_error(self, std_out, std_err):
        # GCFOVE signals errors exclusively via stderr.
        return std_err == ""

    def extract_information(self, output):
        """
        Function that extracts relevant information from the GCFOVE output string.
        Returns a dictionary containing the relevant information
        """
        info_dict = self.info_dict.copy()
        # Extract substring between '**TIME**' and next occurence of linebreak ('\n')
        if "**TIME**" in output:
            time = output[output.find("**TIME**") + len("**TIME**"):
                          output.find("\n", output.find("**TIME**"))].strip()
        else:
            time = -1
            logging.warning("Output from jar execution not in standard format (missing '**TIME**').")
        info_dict['time'] = time
        probs = self.extract_probabilities(output)
        # Single-query mode: exactly one (query, prob) tuple is expected.
        if len(probs) != 1:
            logging.warning("More than 1 probability output found when not in combined query mode for GCFOVE.")
            info_dict['P(query)']=-1
        else:
            info_dict['P(query)'] = probs[0][1]
        return info_dict

    def extract_cq_information(self, queries, std_out):
        """
        Extracts the information out of a combined query GCFOVE call and returns a list
        of info dicts.
        Takes into account whether the chosen engine is 'fove.LiftedVarElim' or 've.VarElimEngine'.
        The first returns 1 time per query, the latter 1 overall time.
        """
        # 1. extracting and storing times in times list.
        times = []
        if "**TIME**" in std_out:
            lines = std_out.splitlines()
            for line in lines:
                if line.startswith("**TIME**"):
                    val = line[len("**TIME**"):].strip()
                    times.append(val)
        else:
            times.append(-1)
            logging.warning("Output from jar execution not in standard format (missing '**TIME**').")
        # NOTE(review): 'tuple' shadows the builtin here.
        probs = [tuple[1] for tuple in self.extract_probabilities(std_out)]
        # Write collected information to dicts.
        ret_dicts = []
        for i,q in enumerate(queries):
            tmp_dict = self.info_dict.copy()
            tmp_dict["query"] = q.queryString
            tmp_dict["P(query)"] = probs[i]
            # One time per query (LVE) vs a single overall time (VE).
            tmp_dict["time"] = times[i if len(times)>1 else 0]
            ret_dicts.append(tmp_dict)
        return ret_dicts

    def handle_single_query_prob(self, split_output):
        """
        Handles the probability lines of a single query, e.g.
        `Distribution of values for Att(x1)`
        `0.9422090158242775 false`
        `0.05779098417572252 true`
        or
        `Distribution of values for TestQ(123)`
        `1 false`
        or
        `0.9997378024340892 false`
        `2.621975659107907E-4 true` // [note the scientific notation]
        Returns:
            A tuple of (Query, true_probability) of type (string, float)
            (probability -2.0 marks an unexpected line count).
        """
        lines = split_output.split("\n")
        if len(lines) not in [2,3]:
            logging.warning("Unexpected number of lines in probability extraction per query, expected 2 or 3, was {}: \n{}".format(len(lines), "\n".join(lines)))
            return (lines[0], float(-2))
            #raise Exception("Unexpected number of lines in probability extraction per query, expected 2 or 3, was {}.".format(len(lines)))
        query = lines[0]
        probs = lines[1:]
        found = False
        for prob in probs:
            if prob.find("true") != -1:
                # Matches plain decimals, scientific notation and NaN.
                #true_prob = re.findall(r"[-+]?\d*\.\d+|\d+|NaN", prob)[0]
                true_prob = re.findall(r"-?\d+(?:\.\d+)?(?:E-?\d+)|NaN|-?\d+(?:\.\d+)?", prob)[0]
                found = True
                break
        # If no 'true' line exists, the true-probability is 0.
        true_prob = true_prob if found else 0
        return (query, float(true_prob))

    def extract_probabilities(self, output):
        """
        Extracts the calculated probabilities from the generated console output.
        Returns:
            A list of (Query, Probability) tuples of type (string, float)
        """
        query_sep = "======== Query Results ========="
        if output.find(query_sep) == -1:
            logging.error("Query separator not found in console output.")
            print("Output: \n"+output)
            #raise Exception("Query separator not found.")
            # NOTE(review): returns a bare tuple here but a LIST of tuples in
            # the success branch — callers indexing probs[i][1] would
            # misbehave on this path; verify intended error contract.
            return ("Q","error")
        else:
            txt = output.split(query_sep)[1]
            # Split in query parts
            split = txt.split("Distribution of values for ")[1:]
            l = [self.handle_single_query_prob(s.strip()) for s in split]
            return l

    def extract_queries(self, file_string, filename, engine):
        """
        Function that extracts a list of queries from a BLOG-file.
        Returns query objects (not strings).
        """
        lines = [line.strip() for line in file_string.split("\n") if "query" in line]
        queries = [GCFoveQuery(line, filename, engine) for line in lines]
        return queries

    def cleanup(self):
        # no cleanup necessary
        pass
class JTEngine(GCFoveEngine):
def __init__(self, directory, inference_engine):
extension = "blog"
inf_eng_str_dict = {"fove.LiftedVarElim": "LVE",
"ve.VarElimEngine": "VE",
"fojt.LiftedJTEngine": "LJT",
"jt.JTEngine": "JT"}
inf_eng_str = inf_eng_str_dict[inference_engine]
self.inf_eng_str = inf_eng_str
self.base_engine = "JT" if inf_eng_str in ['LJT', 'JT'] else "VE"
log_suffix = "_JT_" + inf_eng_str + ".log"
csv_suffix = '_JT_'+inf_eng_str+'_times.csv'
ov_suffix = '_JT_'+inf_eng_str+'_overview.log'
jar = 'fojt.jar'
csv_cols = ['filename', 'query', 'P(query)', '|gr|', '|G|', '|E|', '|Q|', 'width', 'size', 'mem', 'VE_ops', 't_0', 't_1', 't_2', 't_queries', 'time']
info_dict = {'filename': '', 'query': '', 'P(query)':-1, 'time': -1, '|gr|':-1, '|G|':-1, '|E|':-1, '|Q|':-1, 'width':-1, 'size':-1,
'mem': -1, 'VE_ops':'', 't_0': -1, 't_1': -1, 't_2': -1, 't_queries': '', 'time': -1}
if inf_eng_str in ['VE', 'LVE']:
del_cols = ['size', 'width', 't_0', 't_1', 't_2']
for c in del_cols:
info_dict[c] = -2
print(">>>> ", info_dict)
#super(JTEngine, self).__init__(directory=directory,
super(GCFoveEngine, self).__init__(directory=directory,
extension=extension,
inference_engine=inference_engine,
log_file_suffix=log_suffix,
csv_file_suffix=csv_suffix,
file_overview_suffix = ov_suffix,
jar=jar,
csv_columns=csv_cols,
info_dict=info_dict)
def get_start_message(self):
return "Running benchmark on framework: [JT]\nInference Engine: [{}]".format(self.inf_eng)
def check_for_error(self, std_out, std_err):
"""
Checks for errors in script execution. Returns TRUE if no error occured (all is ok).
Namely, the following conditions are met:
* `std_err` is empty
* no "mem error" in `std_out`
"""
return super(JTEngine, self).check_for_error(std_out, std_err) and ("mem error" not in std_out)
def get_error_message(self, std_out, std_err):
return " > std_out:\n{}\n\n > std_err:\n{}".format(std_out, std_err)
def extract_cq_information(self, queries, std_out):
"""
Extracts the information in the combined query modes.
Returns a list of info dicts (1 per query).
"""
prob_output, info_output, time_output = self.split_output_prob_info_time(std_out)
probs = self.extract_probabilities(prob_output)
#info_dict['query'] = " ,".join([q.queryString for q in queries])
time_dict = self.dict_from_time_output(time_output)
time_dict['time'] = time_dict['t_total']
ret = []
for i, q in enumerate(queries):
loc_dict = self.info_dict.copy()
self.extract_from_info_output(loc_dict, info_output)
loc_dict['query'] = q.queryString
loc_dict['P(query)'] = probs[i][1]
time_copy_keys = ['time', 't_0', 't_1', 't_2', 'VE_ops', 'mem'] if self.base_engine == "JT" else ['time', 'VE_ops', 'mem']
for key in time_copy_keys:
loc_dict[key] = time_dict[key]
t_index = i+2 if self.base_engine == "JT" else i
if 't_{}'.format(t_index) in time_dict:
loc_dict['t_queries'] = time_dict['t_{}'.format(t_index)]