Commit 9e019c6

handle finetuning errors gracefully (#8194)
1 parent f777857 commit 9e019c6

1 file changed: +8 -2 lines changed

dspy/teleprompt/bootstrap_finetune.py (+8 -2)
@@ -116,7 +116,10 @@ def compile(
         for pred_ind, pred in enumerate(student.predictors()):
             data_pred_ind = None if self.multitask else pred_ind
             training_key = (pred.lm, data_pred_ind)
-            pred.lm = key_to_lm[training_key]
+            finetuned_lm = key_to_lm[training_key]
+            if isinstance(finetuned_lm, Exception):
+                raise RuntimeError(f"Finetuned LM for predictor {pred_ind} failed.") from finetuned_lm
+            pred.lm = finetuned_lm
         # TODO: What should the correct behavior be here? Should
         # BootstrapFinetune modify the prompt demos according to the
         # train data?
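The first hunk replaces a direct assignment with a guard: if the value stored under the predictor's training key is an Exception rather than a finetuned LM, compile() re-raises it wrapped in a RuntimeError, using exception chaining so the root cause stays visible in the traceback. A minimal standalone sketch of that pattern (the function name and the ValueError below are illustrative, not from the commit):

# Sketch of the `raise ... from` guard adopted in compile().
# Chaining keeps the original finetuning error as __cause__, so the
# traceback shows both the RuntimeError and the underlying failure.
def surface_failure(pred_ind, result):
    if isinstance(result, Exception):
        raise RuntimeError(f"Finetuned LM for predictor {pred_ind} failed.") from result
    return result

try:
    surface_failure(0, ValueError("provider rejected the training file"))
except RuntimeError as err:
    assert isinstance(err.__cause__, ValueError)  # root cause is preserved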
@@ -149,7 +152,10 @@ def finetune_lms(finetune_dict) -> Dict[Any, LM]:
 
         key_to_lm = {}
         for ind, (key, job) in enumerate(key_to_job.items()):
-            key_to_lm[key] = job.result()
+            result = job.result()
+            if isinstance(result, Exception):
+                raise result
+            key_to_lm[key] = result
             job.thread.join()
             logger.info(f"Job {ind + 1}/{num_jobs} is done")
 
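The second hunk applies the same guard where job results are first collected. For job.result() to return an Exception at all, the finetuning job presumably catches failures inside its worker thread and stores them as the result, since a thread cannot raise into its caller. A minimal sketch of a job built on that assumption (FinetuneJob and train_fn are hypothetical stand-ins, not the commit's actual job class):

import threading

class FinetuneJob:
    # Hypothetical stand-in for the jobs finetune_lms() iterates over:
    # the worker stores either the finetuned LM or the Exception it hit,
    # and result() hands back whichever one occurred.
    def __init__(self, train_fn):
        self._result = None
        self.thread = threading.Thread(target=self._run, args=(train_fn,))
        self.thread.start()

    def _run(self, train_fn):
        try:
            self._result = train_fn()  # a finetuned LM on success
        except Exception as err:
            self._result = err  # captured in the thread, re-raised by the caller

    def result(self):
        self.thread.join()  # makes the later job.thread.join() a no-op
        return self._result  # an LM or an Exception

With results funneled back this way, the caller-side isinstance(result, Exception) check turns what was previously a silent bad assignment into an immediate, traceable failure.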