Update logging with info on the source file running #531
base: main
**CI setup script**

```diff
@@ -22,6 +22,8 @@ fi
 # Install NVM if it is not install in CI
 export NVM_DIR=$HOME/.nvm
+# To clear lock
+npm install --package-lock-only
 [ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
 [ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
```

**Review comment on lines +25 to +26:** Same.
**package.json (new file)**

```diff
@@ -0,0 +1,5 @@
+{
+  "dependencies": {
+    "@js-temporal/polyfill": "^0.5.1"
+  }
+}
```

**Review comment on lines +1 to +5:** We should have a package.json in each node package directory, but probably not at the top level.
**schema/check_generated_data.py**

```diff
@@ -27,9 +27,9 @@ def main(args):
 
     logging.debug('TEST DATA PATH = %s', test_data_path)
 
-    logger = logging.Logger("Checking Test Data vs. Schemas LOGGER")
+    logger = logging.Logger("schema/check_generated_data.py: Checking Test Data vs. Schemas LOGGER")
     logger.setLevel(logging.INFO)
-    logger.info('+++ Test Generated test data vs. schemas files')
+    logger.info('schema/check_generated_data.py: +++ Test Generated test data vs. schemas')
 
     # TODO: get ICU versions
     icu_versions = []
@@ -39,8 +39,8 @@ def main(args):
     for dir_name in icu_dirs:
         icu_versions.append(os.path.basename(dir_name))
 
-    logging.debug('ICU directories = %s', icu_versions)
-    logging.debug('test types = %s', ALL_TEST_TYPES)
+    logging.debug('schema/check_generated_data.py: ICU directories = %s', icu_versions)
+    logging.debug('schema/check_generated_data.py: test types = %s', ALL_TEST_TYPES)
 
     validator = schema_validator.ConformanceSchemaValidator()
```
**Review comment on lines +42 to +43:** Hardcoding the filename prefix in log messages, as done throughout this file, is not ideal for maintainability. If the file is ever renamed, all these log strings would need to be updated manually. A better approach is to obtain the filename dynamically. You can define a constant at the top of the module:

```python
import os

FILENAME = os.path.basename(__file__)
```

And then use it in your log messages, for example:
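A minimal sketch of such usage, assuming the `FILENAME` constant above; the variable values here are illustrative stand-ins for the script's real data:

```python
import logging
import os

# Derive the prefix from the file that is actually running,
# so a rename never requires editing the log strings.
FILENAME = os.path.basename(__file__)

logging.basicConfig(level=logging.DEBUG)

icu_versions = ['icu73', 'icu74']  # illustrative values
logging.debug('%s: ICU directories = %s', FILENAME, icu_versions)
logging.info('%s: %d results for generated test data', FILENAME, len(icu_versions))
```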
```diff
@@ -52,7 +52,7 @@ def main(args):
     validator.debug = 1
 
     all_results = validator.validate_test_data_with_schema()
-    logging.info(' %d results for generated test data', len(all_results))
+    logging.info('schema/check_generated_data.py: %d results for generated test data', len(all_results))
 
     schema_errors = []
     failed_validations = []
@@ -78,7 +78,7 @@ def main(args):
     try:
         summary_data = json.dumps(summary_json)
     except BaseException as error:
-        logging.error('json.dumps Summary data problem: %s at %s', error, error)
+        logging.error('schema/check_generated_data.py: json.dumps Summary data problem: %s at %s', error, error)
         sys.exit(1)
 
     output_filename = os.path.join(test_data_path, 'test_data_validation_summary.json')
@@ -88,17 +88,17 @@ def main(args):
         file_out.close()
     except BaseException as error:
         schema_errors.append(output_filename)
-        logging.fatal('Error: %s. Cannot save validation summary in file %s', error, output_filename)
+        logging.fatal('schema/check_generated_data.py: %s. Cannot save validation summary in file %s', error, output_filename)
         sys.exit(1)
 
     if schema_errors:
-        logging.critical('Test data file files: %d fail out of %d:',
+        logging.critical('schema/check_generated_data.py: Test data file files: %d fail out of %d:',
                          len(schema_errors), schema_count)
         for failure in schema_errors:
-            logging.critical('  %s', failure)
+            logging.critical('schema/check_generated_data.py: %s', failure)
         sys.exit(1)
     else:
-        logging.info("All %d generated test data files match with schema", schema_count)
+        logging.info("schema/check_generated_data.py: All %d generated test data files match with schema", schema_count)
```
**schema/check_test_output.py**

```diff
@@ -24,11 +24,9 @@ def main(args):
     else:
         test_output_path = args[1]
 
     logging.debug('TEST OUTPUT PATH = %s', test_output_path)
 
-    logger = logging.Logger("Checking Test Data vs. Schemas LOGGER")
-    logger.setLevel(logging.INFO)
-    logger.info('+++ Test Generated test data vs. schemas files')
+    logger.info('+++ schema/check_test_output')
 
     # TODO: get ICU versions
     executor_set = set()
@@ -74,7 +72,7 @@ def main(args):
     validator.debug = 1
 
     all_results, test_validation_plans = validator.validate_test_output_with_schema()
-    logging.info(' %d results for test output', len(all_results))
+    logging.info('schema/check_test_output: %d results for test output', len(all_results))
 
     # Check if any files in the expected list were not validated.
     test_paths = set()
```
**Review comment on the `logging.info` line above:** Same issue as in `schema/check_generated_data.py`: the hardcoded filename prefix in log messages should be obtained dynamically (e.g. from a module-level `FILENAME = os.path.basename(__file__)` constant) rather than written by hand throughout the file.
```diff
@@ -83,7 +81,7 @@ def main(args):
 
     for json_file in json_files:
         if json_file not in test_paths:
-            logging.fatal('JSON file %s was not verified against a schema', json_file)
+            logging.fatal('schema/check_test_output: JSON file %s was not verified against a schema', json_file)
             # Bail out right away!
             sys.exit(1)
@@ -128,7 +126,7 @@ def main(args):
         # Don't continue after this problem.
         sys.exit(1)
 
-    logging.info("All %d test output files match with schema", schema_count)
+    logging.info("schema/check_test_output: All %d test output files match with schema", schema_count)
     return
```
**Review comment:** You should run npm only in the package directories that need them.
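A sketch of one way to do that, with a hypothetical helper (not from the PR) that visits only directories containing a `package.json` and skips `node_modules`:

```python
import os
import subprocess

def install_node_packages(repo_root='.'):
    """Hypothetical helper: run npm only where a package.json exists."""
    for root, dirs, files in os.walk(repo_root):
        # Never descend into already-installed dependency trees.
        dirs[:] = [d for d in dirs if d != 'node_modules']
        if 'package.json' in files:
            subprocess.run(['npm', 'install', '--package-lock-only'],
                           cwd=root, check=True)

if __name__ == '__main__':
    install_node_packages()
```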