test_train_utils.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.

from unittest.mock import patch

import pytest
import torch
import os
import shutil

from llama_recipes.utils.train_utils import train

TEMP_OUTPUT_DIR = os.getcwd() + "/tmp"
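
# NOTE: the `mocker` fixture used throughout these tests is provided by the
# pytest-mock plugin, which needs to be installed in the test environment.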


@pytest.fixture(scope="session")
def temp_output_dir():
    # Create the directory during the session-level setup
    temp_output_dir = "tmp"
    os.mkdir(os.path.join(os.getcwd(), temp_output_dir))

    yield temp_output_dir

    # Delete the directory during the session-level teardown
    shutil.rmtree(temp_output_dir)
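

# The stacked @patch decorators below are applied bottom-up, so the mock from
# the innermost (last) decorator becomes the first test argument: autocast,
# then scaler, nullcontext, and mem_trace.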
@patch("llama_recipes.utils.train_utils.MemoryTrace")
@patch("llama_recipes.utils.train_utils.nullcontext")
@patch("llama_recipes.utils.train_utils.torch.cuda.amp.GradScaler")
@patch("llama_recipes.utils.train_utils.torch.cuda.amp.autocast")
def test_gradient_accumulation(autocast, scaler, nullcontext, mem_trace, mocker):
model = mocker.MagicMock(name="model")
model().loss.__truediv__().detach.return_value = torch.tensor(1)
mock_tensor = mocker.MagicMock(name="tensor")
batch = {"input": mock_tensor}
train_dataloader = [batch, batch, batch, batch, batch]
eval_dataloader = None
tokenizer = mocker.MagicMock()
optimizer = mocker.MagicMock()
lr_scheduler = mocker.MagicMock()
gradient_accumulation_steps = 1
train_config = mocker.MagicMock()
train_config.enable_fsdp = False
train_config.use_fp16 = False
train_config.run_validation = False
train_config.gradient_clipping = False
train_config.save_metrics = False
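
    # With gradient_accumulation_steps=1 the optimizer steps (and zero_grad is
    # called) once per batch, i.e. 5 times for 5 batches, and since use_fp16 is
    # False every step runs under nullcontext rather than autocast.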
    train(
        model,
        train_dataloader,
        eval_dataloader,
        tokenizer,
        optimizer,
        lr_scheduler,
        gradient_accumulation_steps,
        train_config,
    )

    assert optimizer.zero_grad.call_count == 5
    optimizer.zero_grad.reset_mock()

    assert nullcontext.call_count == 5
    nullcontext.reset_mock()

    assert autocast.call_count == 0
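
    # With gradient_accumulation_steps=2 over 5 batches, the optimizer steps on
    # batches 2, 4 and 5 (the final, partial accumulation window still steps),
    # so zero_grad is called 3 times; with use_fp16 enabled, every forward pass
    # runs under autocast (5 calls) and nullcontext is never entered.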
    gradient_accumulation_steps = 2
    train_config.use_fp16 = True

    train(
        model,
        train_dataloader,
        eval_dataloader,
        tokenizer,
        optimizer,
        lr_scheduler,
        gradient_accumulation_steps,
        train_config,
    )

    assert optimizer.zero_grad.call_count == 3
    assert nullcontext.call_count == 0
    assert autocast.call_count == 5
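

# test_save_to_json exercises the save_metrics code path: train should return
# the path of the JSON metrics file it wrote into the temporary output dir.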
def test_save_to_json(temp_output_dir, mocker):
    model = mocker.MagicMock(name="model")
    model().loss.__truediv__().detach.return_value = torch.tensor(1)

    mock_tensor = mocker.MagicMock(name="tensor")
    batch = {"input": mock_tensor}
    train_dataloader = [batch, batch, batch, batch, batch]
    eval_dataloader = None
    tokenizer = mocker.MagicMock()
    optimizer = mocker.MagicMock()
    lr_scheduler = mocker.MagicMock()
    gradient_accumulation_steps = 1

    train_config = mocker.MagicMock()
    train_config.enable_fsdp = False
    train_config.use_fp16 = False
    train_config.run_validation = False
    train_config.gradient_clipping = False
    train_config.save_metrics = True
    train_config.output_dir = temp_output_dir

    results = train(
        model,
        train_dataloader,
        eval_dataloader,
        tokenizer,
        optimizer,
        lr_scheduler,
        gradient_accumulation_steps,
        train_config,
        local_rank=0,
    )

    assert results["metrics_filename"] not in ["", None]
    assert os.path.isfile(results["metrics_filename"])