From 3d1e9cd58caafe11de8fa26f68961eeb835fad7c Mon Sep 17 00:00:00 2001
From: lchu <lchu@us.ibm.com>
Date: Tue, 8 Aug 2023 10:39:50 -0400
Subject: [PATCH] simplify redundant sync_module_states ternary

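The ternary `True if train_config.low_cpu_fsdp else False` is redundant:
when the flag is already a bool, passing it through unchanged yields the
same value. A minimal sketch of the equivalence (assuming low_cpu_fsdp is
a plain bool config flag, which its usage here suggests):

    # `True if x else False` is just bool(x); for a bool it is a no-op.
    flag = True
    assert (True if flag else False) == bool(flag) == flag
    flag = False
    assert (True if flag else False) == bool(flag) == flag
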
---
 llama_finetuning.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama_finetuning.py b/llama_finetuning.py
index ad33de38..cf573b02 100644
--- a/llama_finetuning.py
+++ b/llama_finetuning.py
@@ -137,7 +137,7 @@ def main(**kwargs):
             sharding_strategy=fsdp_config.sharding_strategy,
             device_id=torch.cuda.current_device(),
             limit_all_gathers=True,
-            sync_module_states=True if train_config.low_cpu_fsdp else False,
+            sync_module_states=train_config.low_cpu_fsdp,
             param_init_fn=lambda module: module.to_empty(device=torch.device("cuda"), recurse=False)
             if train_config.low_cpu_fsdp and rank != 0 else None,
         )
-- 
GitLab