 
 from darc_compare.metrics import compute_score_round1 as utiliy_metric
 
+import numpy as np
 import pandas as pd
 
 from sklearn.metrics.pairwise import cosine_similarity
@@ -85,14 +86,21 @@ def oracle_test(ground_truth, aux, file_err):
     # We want to compare whether two ways of computing the metrics change the results
     for file_path in glob.glob(f"{config.TESTING_DIR}/AT/*.csv"):
         if file_path in file_err:
+            scores_oracle.drop(file_path, inplace=True)
+            scores_new.drop(file_path, inplace=True)
             continue
-        sub = pd.read_csv(file_path, names=T_COL.values(), skiprows=1)
+        try:
+            sub = pd.read_csv(file_path, names=T_COL.values(), skiprows=1)
 
-        utility_scores, reid_scores, _, _ = utiliy_metric(ground_truth, aux, sub)
-        scores_oracle.loc[file_path] = utility_scores + reid_scores
+            utility_scores, reid_scores, _, _ = utiliy_metric(ground_truth, aux, sub)
+            scores_oracle.loc[file_path] = utility_scores + reid_scores
 
-        metrics = config.metric_class(ground_truth, sub)
-        scores_new.loc[file_path] = metrics.scores()
+            metrics = config.metric_class(ground_truth, sub)
+            scores_new.loc[file_path] = metrics.scores()
+        except Exception as err:
+            scores_oracle.drop(file_path, inplace=True)
+            scores_new.drop(file_path, inplace=True)
+            logging.info(f"{file_path} encountered the following error: {err}")
 
     return scores_oracle, scores_new
 
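The patched oracle_test drops any file that errors from both score tables, so the two frames it returns can be compared row by row. The sketch below is a hypothetical follow-up check, not part of this commit: it assumes both DataFrames are indexed by the submission file path and share the same numeric score columns, and that ground_truth, aux and file_err are prepared elsewhere in the test harness.

    # Hypothetical check (assumptions noted above): compare the oracle and new
    # scoring paths on the rows that survived error handling.
    def compare_score_paths(scores_oracle, scores_new, tol=1e-6):
        # Align on the rows present in both frames after the drop() calls.
        common = scores_oracle.index.intersection(scores_new.index)
        # Absolute per-column difference between the two scoring paths.
        diff = (scores_oracle.loc[common] - scores_new.loc[common]).abs()
        # Keep only the files whose scores differ beyond the tolerance.
        return diff[(diff > tol).any(axis=1)]

    mismatches = compare_score_paths(*oracle_test(ground_truth, aux, file_err))
    print(f"{len(mismatches)} file(s) disagree between the two metric paths")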