Commit 3d1e9cd5 authored by lchu

minor code optimization

parent 41ffbcab
@@ -137,7 +137,7 @@ def main(**kwargs):
         sharding_strategy=fsdp_config.sharding_strategy,
         device_id=torch.cuda.current_device(),
         limit_all_gathers=True,
-        sync_module_states=True if train_config.low_cpu_fsdp else False,
+        sync_module_states=train_config.low_cpu_fsdp,
         param_init_fn=lambda module: module.to_empty(device=torch.device("cuda"), recurse=False)
         if train_config.low_cpu_fsdp and rank != 0 else None,
     )
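
The edited keyword simply passes the flag through: `True if train_config.low_cpu_fsdp else False` is equivalent to `train_config.low_cpu_fsdp`. The surrounding keywords belong to the low-CPU-memory FSDP initialization pattern, in which only rank 0 holds real weights and the other ranks materialize empty CUDA storage that `sync_module_states=True` then fills by broadcast. Below is a minimal sketch of that pattern, not the repository's actual code: `build_fsdp_model` and the placeholder MLP are hypothetical, and it assumes `torch.distributed` has already been initialized with one CUDA device per rank.

```python
import torch
import torch.nn as nn
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP


def build_fsdp_model(rank: int, low_cpu_fsdp: bool) -> FSDP:
    # Placeholder model standing in for the real network.
    def make_model() -> nn.Module:
        return nn.Sequential(nn.Linear(4096, 4096), nn.ReLU(), nn.Linear(4096, 4096))

    if low_cpu_fsdp and rank != 0:
        # Non-zero ranks build the module on the meta device, so no real
        # parameter storage is allocated and host RAM stays flat as the
        # world size grows.
        with torch.device("meta"):
            model = make_model()
    else:
        # Rank 0 (or every rank when the flag is off) holds real weights.
        model = make_model()

    return FSDP(
        model,
        device_id=torch.cuda.current_device(),
        limit_all_gathers=True,
        # The boolean flag can be passed directly; when True, FSDP broadcasts
        # rank 0's module states to all other ranks at wrap time.
        sync_module_states=low_cpu_fsdp,
        # On non-zero ranks, turn meta tensors into uninitialized CUDA tensors;
        # the broadcast above then overwrites them with rank 0's weights.
        param_init_fn=(lambda module: module.to_empty(device=torch.device("cuda"), recurse=False))
        if low_cpu_fsdp and rank != 0 else None,
    )
```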