We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent f2cd562 · commit 0ec8979 — Copy full SHA for 0ec8979
src/transformers/integrations/accelerate.py
@@ -446,10 +446,6 @@ def _get_device_map(
446
if max_memory is not None and device_name in max_memory:
447
inferred_max_memory[device_name] = min(inferred_max_memory[device_name], max_memory[device_name])
448
449
- # Here we need to retie the weights before the call even if they are all on meta device, otherwise accelerate
450
- # mess up the device_map computation
451
- # TODO Cyril: replace this function to avoid re-tying uselessly (and the function is very inefficient)
452
- model.tie_weights()
453
device_map = infer_auto_device_map(
454
model,
455
max_memory=inferred_max_memory,
0 commit comments