
Commit 370f93c

Don't skip tokenizer init
Signed-off-by: jthomson04 <[email protected]>
1 parent: 6e39110

File tree (1 file changed: 0 additions, 11 deletions)

  • components/src/dynamo/trtllm/main.py


components/src/dynamo/trtllm/main.py

Lines changed: 0 additions & 11 deletions
@@ -175,16 +175,6 @@ async def init(runtime: DistributedRuntime, config: Config):
         dynamic_batch_config=dynamic_batch_config,
     )
     modality = getattr(config, "modality", None) or "text"
-    if config.use_trtllm_tokenizer:
-        logging.info(
-            "Using TensorRT-LLM's built in tokenizer. Setting skip_tokenizer_init to False"
-        )
-        skip_tokenizer_init = False
-    else:
-        logging.info(
-            "Using dynamo's built in tokenizer. Setting skip_tokenizer_init to True"
-        )
-        skip_tokenizer_init = True
 
     arg_map = {
         "model": model_path,
@@ -193,7 +183,6 @@ async def init(runtime: DistributedRuntime, config: Config):
         "pipeline_parallel_size": config.pipeline_parallel_size,
         "moe_expert_parallel_size": config.expert_parallel_size,
         "backend": Backend.PYTORCH,
-        "skip_tokenizer_init": skip_tokenizer_init,
         "build_config": build_config,
         "kv_cache_config": kv_cache_config,
         "gpus_per_node": gpus_per_node,
