Skip to content

Commit 89b430a

Browse files
committed
test: catch error when computing scores
1 parent fef270c commit 89b430a

File tree

1 file changed

+13
-5
lines changed

1 file changed

+13
-5
lines changed

test.py

Lines changed: 13 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@
1919

2020
from darc_compare.metrics import compute_score_round1 as utiliy_metric
2121

22+
import numpy as np
2223
import pandas as pd
2324

2425
from sklearn.metrics.pairwise import cosine_similarity
@@ -85,14 +86,21 @@ def oracle_test(ground_truth, aux, file_err):
8586
# We want to check whether two ways of computing the metrics change the results
8687
for file_path in glob.glob(f"{config.TESTING_DIR}/AT/*.csv"):
8788
if file_path in file_err:
89+
scores_oracle.drop(file_path, inplace=True)
90+
scores_new.drop(file_path, inplace=True)
8891
continue
89-
sub = pd.read_csv(file_path, names=T_COL.values(), skiprows=1)
92+
try:
93+
sub = pd.read_csv(file_path, names=T_COL.values(), skiprows=1)
9094

91-
utility_scores, reid_scores, _, _ = utiliy_metric(ground_truth, aux, sub)
92-
scores_oracle.loc[file_path] = utility_scores + reid_scores
95+
utility_scores, reid_scores, _, _ = utiliy_metric(ground_truth, aux, sub)
96+
scores_oracle.loc[file_path] = utility_scores + reid_scores
9397

94-
metrics = config.metric_class(ground_truth, sub)
95-
scores_new.loc[file_path] = metrics.scores()
98+
metrics = config.metric_class(ground_truth, sub)
99+
scores_new.loc[file_path] = metrics.scores()
100+
except Exception as err:
101+
scores_oracle.drop(file_path, inplace=True)
102+
scores_new.drop(file_path, inplace=True)
103+
logging.info(f"{file_path} encountered the following error: {err}")
96104

97105
return scores_oracle, scores_new
98106

0 commit comments

Comments (0)