Commit 44e8bf5

Merge pull request #1024 from philmcmahon/local-files-only-param
Add models_cache_only param
2 parents 36d2622 + 7b3c9ce


1 file changed: whisperx/transcribe.py (+3 -1)
@@ -26,6 +26,7 @@ def cli():
     parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
     parser.add_argument("audio", nargs="+", type=str, help="audio file(s) to transcribe")
     parser.add_argument("--model", default="small", help="name of the Whisper model to use")
+    parser.add_argument("--model_cache_only", type=str2bool, default=False, help="If True, will not attempt to download models, instead using cached models from --model_dir")
     parser.add_argument("--model_dir", type=str, default=None, help="the path to save model files; uses ~/.cache/whisper by default")
     parser.add_argument("--device", default="cuda" if torch.cuda.is_available() else "cpu", help="device to use for PyTorch inference")
     parser.add_argument("--device_index", default=0, type=int, help="device index to use for FasterWhisper inference")
@@ -90,6 +91,7 @@ def cli():
     model_name: str = args.pop("model")
     batch_size: int = args.pop("batch_size")
     model_dir: str = args.pop("model_dir")
+    model_cache_only: bool = args.pop("model_cache_only")
     output_dir: str = args.pop("output_dir")
     output_format: str = args.pop("output_format")
     device: str = args.pop("device")
@@ -177,7 +179,7 @@ def cli():
     results = []
     tmp_results = []
     # model = load_model(model_name, device=device, download_root=model_dir)
-    model = load_model(model_name, device=device, device_index=device_index, download_root=model_dir, compute_type=compute_type, language=args['language'], asr_options=asr_options, vad_method=vad_method, vad_options={"chunk_size":chunk_size, "vad_onset": vad_onset, "vad_offset": vad_offset}, task=task, threads=faster_whisper_threads)
+    model = load_model(model_name, device=device, device_index=device_index, download_root=model_dir, compute_type=compute_type, language=args['language'], asr_options=asr_options, vad_method=vad_method, vad_options={"chunk_size":chunk_size, "vad_onset": vad_onset, "vad_offset": vad_offset}, task=task, local_files_only=model_cache_only, threads=faster_whisper_threads)
 
     for audio_path in args.pop("audio"):
         audio = load_audio(audio_path)
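
With this change, a run such as

    whisperx audio.wav --model small --model_dir /data/whisper-models --model_cache_only True

(str2bool parses values like True/False) forwards the flag to load_model as local_files_only, so the model is resolved from the files already cached under --model_dir and no download is attempted. The same switch is reachable from Python after this commit; a minimal sketch, assuming whisperx is installed and the "small" model was previously downloaded into the hypothetical path /data/whisper-models:

    import whisperx

    # local_files_only=True is the value that --model_cache_only wires through
    # in this commit: load_model only uses model files already present under
    # download_root instead of trying to download them.
    model = whisperx.load_model(
        "small",
        device="cpu",
        compute_type="int8",
        download_root="/data/whisper-models",  # hypothetical cache path
        local_files_only=True,
    )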

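Downstream, whisperx builds on faster-whisper, whose WhisperModel constructor accepts the same local_files_only switch and passes it along to huggingface_hub when resolving the model name. A rough sketch of the equivalent direct call (for illustration only, not code from this diff):

    from faster_whisper import WhisperModel

    # With local_files_only=True, faster-whisper looks the model up in the
    # local cache under download_root instead of contacting the Hub.
    model = WhisperModel(
        "small",
        device="cpu",
        compute_type="int8",
        download_root="/data/whisper-models",  # hypothetical cache path
        local_files_only=True,
    )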