From 26dff882e1720181c8817c4cdb1856d1e95c7854 Mon Sep 17 00:00:00 2001
From: Matthias Reso <13337103+mreso@users.noreply.github.com>
Date: Mon, 14 Oct 2024 14:41:00 -0700
Subject: [PATCH] Remove old test

---
 src/tests/test_finetuning.py | 40 ------------------------------------
 1 file changed, 40 deletions(-)

diff --git a/src/tests/test_finetuning.py b/src/tests/test_finetuning.py
index 9d9e6faa..d90859e0 100644
--- a/src/tests/test_finetuning.py
+++ b/src/tests/test_finetuning.py
@@ -107,46 +107,6 @@ def test_finetuning(
     assert model.return_value.to.call_count == 0
 
 
-# @patch("llama_recipes.finetuning.torch.cuda.is_available")
-# @patch("llama_recipes.finetuning.train")
-# @patch("llama_recipes.finetuning.LlamaForCausalLM.from_pretrained")
-# @patch("llama_recipes.finetuning.AutoTokenizer.from_pretrained")
-# @patch("llama_recipes.finetuning.get_preprocessed_dataset")
-# @patch("llama_recipes.finetuning.generate_peft_config")
-# @patch("llama_recipes.finetuning.get_peft_model")
-# @patch("llama_recipes.finetuning.optim.AdamW")
-# @patch("llama_recipes.finetuning.StepLR")
-# @pytest.mark.parametrize("cuda_is_available", [True, False])
-# def test_finetuning_peft_lora(
-#     step_lr,
-#     optimizer,
-#     get_peft_model,
-#     gen_peft_config,
-#     get_dataset,
-#     tokenizer,
-#     get_model,
-#     train,
-#     cuda,
-#     cuda_is_available,
-# ):
-#     kwargs = {"use_peft": True}
-
-#     get_dataset.return_value = get_fake_dataset()
-#     cuda.return_value = cuda_is_available
-
-#     get_model.return_value.get_input_embeddings.return_value.weight.shape = [0]
-
-#     main(**kwargs)
-
-#     if cuda_is_available:
-#         assert get_peft_model.return_value.to.call_count == 1
-#         assert get_peft_model.return_value.to.call_args.args[0] == "cuda"
-#     else:
-#         assert get_peft_model.return_value.to.call_count == 0
-
-
-
-
 @patch("llama_recipes.finetuning.get_peft_model")
 @patch("llama_recipes.finetuning.setup")
 @patch("llama_recipes.finetuning.train")
-- 
GitLab