From 3f2c33e4f8d68325b754fa5250eed2ac2ee7c7d1 Mon Sep 17 00:00:00 2001
From: Less Wright <lessw@etrillium.com>
Date: Wed, 17 Jan 2024 10:58:15 -0800
Subject: [PATCH] Update finetuning.py - remove nightly check

This check is blocking AWS from running, as it enforces not just a minimum
version but also that torch is a dev release rather than a released version.
---
 src/llama_recipes/finetuning.py | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/src/llama_recipes/finetuning.py b/src/llama_recipes/finetuning.py
index 21d8f1c3..702a4aa0 100644
--- a/src/llama_recipes/finetuning.py
+++ b/src/llama_recipes/finetuning.py
@@ -76,11 +76,6 @@ def main(**kwargs):
         model alone would consume 2+TB cpu mem (70 * 4 * 8). This will add some comms
         overhead and currently requires latest nightly.
         """
-        v = packaging.version.parse(torch.__version__)
-        verify_latest_nightly = v.is_devrelease and v.dev >= 20230701
-        if not verify_latest_nightly:
-            raise Exception("latest pytorch nightly build is required to run with low_cpu_fsdp config, "
-                            "please install latest nightly.")
         if rank == 0:
             model = LlamaForCausalLM.from_pretrained(
                 train_config.model_name,
--
GitLab
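
Note (not part of the patch): the commit message implies the real requirement is a
minimum PyTorch version, not a nightly build per se, yet the removed check rejected
any non-dev release. For context, a minimal sketch of a less restrictive gate is
shown below. The minimum stable version "2.2.0" is a hypothetical placeholder (the
released version that actually supports low_cpu_fsdp is not stated in the patch);
the 20230701 nightly cutoff is taken from the removed check.

    # Sketch only: accept either a recent nightly or a released PyTorch version,
    # instead of requiring a nightly outright as the removed check did.
    from packaging import version as packaging_version
    import torch

    MIN_STABLE = packaging_version.parse("2.2.0")  # hypothetical minimum released version
    MIN_NIGHTLY_DATE = 20230701                    # cutoff used by the removed check

    def torch_supports_low_cpu_fsdp() -> bool:
        v = packaging_version.parse(torch.__version__)
        if v.is_devrelease:
            # nightly builds carry a .devYYYYMMDD segment
            return v.dev is not None and v.dev >= MIN_NIGHTLY_DATE
        return v >= MIN_STABLE

    if not torch_supports_low_cpu_fsdp():
        raise RuntimeError(
            "low_cpu_fsdp requires a sufficiently recent PyTorch "
            "(a recent nightly or a released version that includes FSDP meta-device init)."
        )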