diff --git a/verifier/compare_template.html b/verifier/compare_template.html
deleted file mode 100644
index c627c056..00000000
--- a/verifier/compare_template.html
+++ /dev/null
@@ -1,281 +0,0 @@
-
-
-
- Heatmap in JavaScript
-
-
-
-
-
-
-
-
-
-
-
-
-
- Compare test results for $test_type
-
-
-    Instances for test $test_type. Pick 2!
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/verifier/compare_template.py b/verifier/compare_template.py
deleted file mode 100644
index e08d27ab..00000000
--- a/verifier/compare_template.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Templates for creating comparison pages in verifier
-import glob
-import json
-import logging
-import logging.config
-from string import Template
-import sys
-
-class compareTemplate():
- def __init__(self):
- logging.config.fileConfig("../logging.conf")
-
- # Read the template data
- compare_template = ''
- filename = 'compare_template.html'
- try:
- template_file = open(filename, mode='r')
- compare_template = template_file.read()
- template_file.close()
- except:
- logging.error('Cannot open compare_template %s', filename)
-
- self.html_template = Template(compare_template)
-
- # Template for picking tests - will be replaced by html generated in detail_template.html
- self.checkbox_test_template = Template(
- ''
- )
-
- def reportOutline(self):
- return self.html_template
diff --git a/verifier/summary_template.html b/verifier/summary_template.html
index d27a9487..ee585314 100644
--- a/verifier/summary_template.html
+++ b/verifier/summary_template.html
@@ -239,19 +239,8 @@
let background_color;
for (const test_type of test_types) {
tr = table.insertRow();
-
- test_type_td = tr.insertCell();
-
- // Create a link to the page to compare
- const a_ref = document.createElement('a');
- const link = document.createTextNode('Compare ' + test_type);
- a_ref.title = "Compare " + test_type;
- test_type.innerHTML = test_type;
- // Assemble the data for the instances
- let compare_list = [];
- let test_names = [];
-
-
+        td = tr.insertCell();
+        td.innerHTML = test_type;
const tests = exec_summary_json[test_type];
for (const exec of executed_platforms) {
@@ -318,7 +307,6 @@
details.push(link);
}
}
-
td = tr.insertCell();
if (reports.length > 0) {
// Create the data for the reports
@@ -356,13 +344,7 @@
}
td = tr.insertCell();
td.innerHTML = details.join('');
- }
- // Set the compare link for this test type.
- const compare_link_text = "compare_" + test_type + ".html";
-        test_type_td.innerHTML = "<a href='" + compare_link_text + "'>" + test_type + "</a>";
-
-
+ }
}
}
diff --git a/verifier/testreport.py b/verifier/testreport.py
index b1045115..6cd345d5 100644
--- a/verifier/testreport.py
+++ b/verifier/testreport.py
@@ -2,7 +2,6 @@
# TODO: get templates from this module instead of local class
from report_template import reportTemplate
-from compare_template import compareTemplate
# For identifying test cases that are known problems
from check_known_issues import check_issues
@@ -120,24 +119,6 @@ def __init__(self, report_path, report_html_path):
self.unsupported_cases = []
self.known_issues = []
- # Find directories for comparison options
- # Same component in other platforms, same ICU version
- # Same component, same platform, other versions
- # Get top level with testOutput, platforms, and test type
- dir_name = os.path.dirname(report_path)
- icu_version = os.path.basename(os.path.dirname(dir_name))
- platform_path = os.path.dirname(os.path.dirname(dir_name))
- platform_name = os.path.basename(platform_path)
- top_dir_name = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(dir_name))))
- test_type = os.path.basename(dir_name)
-
- # Get same in
- path_all_platforms_same_icu = os.path.join(top_dir_name, 'testOutput', '*', icu_version, test_type + '_test_file.json')
- other_platforms_same_icu = glob.glob(path_all_platforms_same_icu)
- path_platform_all_icu = os.path.join(platform_path, '*', test_type)
- same_platform_all_icu = glob.glob(path_platform_all_icu)
- # Next, store these paths, subsituting 'testReport' for 'testData'
-
self.templates = templates = reportTemplate()
# For a simple template replacement
@@ -1265,7 +1246,6 @@ def summarize_reports(self):
'unsupported_count': len(test_json['unsupported']),
'known_issue_count': int(test_json['known_issue_count']),
'missing_verify_count': len(test_json['missing_verify_data']),
- 'result_dir_path': dir_path,
'json_file_name': filename,
'html_file_name': relative_html_path, # Relative to the report base
'version': platform,
@@ -1409,84 +1389,3 @@ def publish_results(self):
# Update summary HTML page with data on latest verification
# TODO: keep history of changes
return
-
-
-class CompareReport():
- # Set up page for comparing results for a given test type,
- # across different platforms, platform versions, and icu data versions
- def __init__(self, file_base, test_type):
- self.file_base = file_base
- self.test_type = test_type
- self.report_dir_name = 'testReports'
- self.output_name = 'compare_%s.html' % test_type
-
- self.compare_html_path = os.path.join(file_base,
- self.report_dir_name,
- self.output_name)
-
- self.templates = compareTemplate()
- self.html_map = {
- 'test_type': test_type
- }
-
- # More standard items for an instance
-
- def get_json_files(self):
- # For each executor directory in testReports,
- # Get each json report file
- report_dir_base = os.path.join(self.file_base, self.report_dir_name)
- version_join = os.path.join(report_dir_base, '*', '*')
- self.version_directories = glob.glob(version_join)
-
- test_type_raw_join = os.path.join(version_join, self.test_type)
- raw_reports = glob.glob(test_type_raw_join)
- self.raw_reports = raw_reports
- self.raw_reports.sort()
-
- logging.info('SUMMARY JSON RAW FILES = %s', self.raw_reports)
-
- # TODO: Get the values for these to add to template
- try:
- common_path = os.path.commonpath(self.raw_reports)
- except:
- logging.error('testreport.py: No raw reports for test type %s in this join: %s',
- self.test_type, test_type_raw_join)
- return None
-
- data_dirs = [x.replace(common_path, '.') for x in self.raw_reports]
- self.html_map['data_dirs'] = data_dirs
-
- # for each, get the platform, version, and icu_version
- test_names = []
- # Include the names parts [3] .. [-3]
- for dir in self.raw_reports:
- parts = dir.split('/')
- test_id = ' '.join(parts[3:-1])
- test_names.append(test_id)
- self.html_map['test_names'] = test_names
-
- # Classes of test results
- self.status_list = ['pass', 'fail', 'error', 'unsupported']
-
- return self.raw_reports
-
- def create_report(self):
- # Get the template & instantiate for the test type
- html_template = self.templates.reportOutline()
-
- # Fill in with the test info and set check boxes
- html_output = html_template.safe_substitute(self.html_map)
-
- # write the html file
- try:
- file = open(self.compare_html_path, mode='w', encoding='utf-8')
- file.write(html_output)
- file.close()
-
- except BaseException as err:
- sys.stderr.write(
- '!!!!!!! CANNOT WRITE SUMMARY_HTML REPORT at %s\n Error = %s' % (
- self.compare_html_path, err))
- return None
-
- return True
diff --git a/verifier/verifier.py b/verifier/verifier.py
index 73c3756b..d7c0e620 100644
--- a/verifier/verifier.py
+++ b/verifier/verifier.py
@@ -10,8 +10,6 @@
import sys
from testreport import SummaryReport
-from testreport import CompareReport
-
from testreport import TestReport
from verify_plan import VerifyPlan
@@ -321,16 +319,6 @@ def create_summary_reports(self):
if not result:
logger.error('!!!!!! SUMMARY HTML fails')
- # Create compare html for each test type
- for test_type in self.test_types:
- logging.info('Creating compare for %s', test_type)
- compare_report = CompareReport(self.file_base, test_type)
- # TODO!!!! Finish
-
- json_files = compare_report.get_json_files()
-
- compare_report.create_report()
-
def schema_results(self):
# Locate the files in schema, testData, and testOutput
schema_validation_name = 'schema_validation_summary.json'