Commit f398bc54 authored by Matthias Reso

Added basic unit test for train method

parent 5b58afc7
import torch

from llama_recipes.utils.train_utils import train


def test_gradient_accumulation(mocker):
    # Mock the model so the loss computed in the training loop is a constant tensor.
    model = mocker.MagicMock(name="model")
    model().loss.__truediv__().detach.return_value = torch.tensor(1)

    # Five identical dummy batches; validation is disabled, so no eval loader is needed.
    batch = {"input": torch.zeros(1)}
    train_dataloader = [batch, batch, batch, batch, batch]
    eval_dataloader = None

    tokenizer = mocker.MagicMock()
    optimizer = mocker.MagicMock()
    lr_scheduler = mocker.MagicMock()
    gradient_accumulation_steps = 1

    train_config = mocker.MagicMock()
    train_config.enable_fsdp = False
    train_config.use_fp16 = False
    train_config.run_validation = False

    train(
        model,
        train_dataloader,
        eval_dataloader,
        tokenizer,
        optimizer,
        lr_scheduler,
        gradient_accumulation_steps,
        train_config,
    )

    # Without accumulation the gradients are cleared once per batch: 5 batches -> 5 calls.
    assert optimizer.zero_grad.call_count == 5

    optimizer.zero_grad.reset_mock()

    # With two accumulation steps the optimizer updates on every second batch and on
    # the final batch: 5 batches -> 3 zero_grad calls.
    gradient_accumulation_steps = 2
    train(
        model,
        train_dataloader,
        eval_dataloader,
        tokenizer,
        optimizer,
        lr_scheduler,
        gradient_accumulation_steps,
        train_config,
    )

    assert optimizer.zero_grad.call_count == 3
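
For context, below is a minimal sketch of the gradient-accumulation pattern the asserts above pin down. This is not the actual llama_recipes train() implementation (which takes more arguments and handles fp16/FSDP); it is an assumed simplification showing why the expected call counts are 5 and 3: zero_grad fires every gradient_accumulation_steps batches and on the final batch.

def accumulation_sketch(model, train_dataloader, optimizer, gradient_accumulation_steps):
    # Hypothetical loop, not the library's code: scale the loss, accumulate
    # gradients, and step/clear only at accumulation boundaries or the last batch.
    for step, batch in enumerate(train_dataloader):
        loss = model(**batch).loss / gradient_accumulation_steps
        loss.backward()
        if (step + 1) % gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
            optimizer.step()
            optimizer.zero_grad()

With 5 batches, gradient_accumulation_steps=1 clears gradients at every step (5 calls), while gradient_accumulation_steps=2 clears at steps 1, 3, and the final step 4 (3 calls). The mocker fixture comes from the pytest-mock plugin, so running the test requires pytest and pytest-mock to be installed.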