# Handles training of the model over the provided dataset.
import argparse
import logging
import os
import random
from dataclasses import dataclass
from typing import Literal, Tuple, Union

import boto3
import numpy as np
import torch
from torch.optim import AdamW
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader

import flame
from dataset import GopilotDataset
from model import GopilotModel, GopilotTask, SophiaG
from tokenizer import GopilotTokenizer, HuggingFaceTokenizer
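
# Example invocation (the file paths and dataset prefix are illustrative,
# not taken from this repository):
#
#   python train.py \
#       --model-cf config/model.json \
#       --tokenizer-cf config/tokenizer.json \
#       --s3-dataset-prefix datasets/go \
#       --batch-size 12 --gradient-accumulation-steps 48 --neptune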

@dataclass
class Args:
    model_cf: str
    tokenizer: str
    tokenizer_cf: str
    from_checkpoint: str


@dataclass
class TrainingParametersArgs:
    gradient_accumulation_steps: int
    batch_size: int
    dropout: float
    weight_decay: float
    lr: float
    epsilon: float
    # Unfortunately, neptune.ai does not support recording values greater than
    # int32.max, so we have to use float instead.
    token_budget: float
    clip_gradients: float
    precision: Union[str, torch.dtype]
    warmup: int
    seed: int
    optimizer: Literal["adamw", "sophiag"]


@dataclass
class S3Args:
    s3_bucket: str
    s3_cache_dir: str
    s3_checkpoints: bool
    s3_dataset_prefix: str


@dataclass
class RunArgs:
    device: Union[str, torch.device]
    verbose: bool
    neptune: bool
    compile: bool
    checkpoints_dir: str

def decompose_s3_url(url: str) -> Tuple[str, str, str]:
    """Split an "s3://bucket/key" URL into (bucket, key, filename)."""
    url = url.replace("s3://", "")
    bucket, key = url.split("/", 1)
    return bucket, key, os.path.basename(key)
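
# For illustration (the key is arbitrary):
#   decompose_s3_url("s3://gopilot/datasets/train-0001.bin")
#   -> ("gopilot", "datasets/train-0001.bin", "train-0001.bin")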

def download_from_s3(s3_url: str, cache_dir: str):
    """Download an S3 object into the local cache, mirroring the key's path."""
    bucket, key, filename = decompose_s3_url(s3_url)
    dest = os.path.join(cache_dir, key)
    os.makedirs(os.path.dirname(dest), exist_ok=True)
    s3 = boto3.client("s3")
    logging.info(f"Downloading s3://{bucket}/{key} to {dest}")
    s3.download_file(bucket, key, dest)
    return dest

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')

    # General arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--model-cf', type=str, required=True, help='Path to the model configuration file.')
    parser.add_argument('--tokenizer-cf', type=str, required=True, help='Path to the tokenizer configuration file.')
    parser.add_argument('--tokenizer', type=str, default="gopilot", help='Name of the tokenizer to use.', choices=["gopilot", "hugging-face"])
    parser.add_argument('--from-checkpoint', type=str, default=None, help='Path to a checkpoint to load the model from.')
    args, remaining_args = parser.parse_known_args()

    # Training parameters
    tp_parser = argparse.ArgumentParser()
    tp_parser.add_argument('--gradient-accumulation-steps', type=int, default=48, help='Number of gradient accumulation steps (1 disables accumulation).')
    tp_parser.add_argument('--batch-size', type=int, default=12, help='Batch size.')
    tp_parser.add_argument('--dropout', type=float, default=0.0, help='Dropout probability.')
    tp_parser.add_argument('--weight-decay', type=float, default=0.1, help='Weight decay value.')
    tp_parser.add_argument('--lr', type=float, default=3e-4, help='Maximum learning rate.')
    tp_parser.add_argument('--epsilon', type=float, default=10e-12, help='AdamW epsilon parameter.')
    tp_parser.add_argument('--token-budget', type=float, default=1e10, help='Training budget in number of tokens to be processed.')
    tp_parser.add_argument('--clip-gradients', type=float, default=0.5, help='Gradient norm clipping value.')
    tp_parser.add_argument('--precision', type=str, default="fp16", choices=["fp32", "fp16"], help='Precision to use for training.')
    tp_parser.add_argument('--seed', type=int, default=999, help='Random seed.')
    tp_parser.add_argument('--warmup', type=int, default=1000, help='Number of warmup steps.')
    tp_parser.add_argument('--optimizer', type=str, default="adamw", choices=["adamw", "sophiag"], help='Optimizer to use.')
    tp_args, remaining_args = tp_parser.parse_known_args(remaining_args)
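
    # With the defaults above, each optimizer step consumes
    # batch_size * gradient_accumulation_steps = 12 * 48 = 576 sequences.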

    # S3 arguments
    s3_parser = argparse.ArgumentParser()
    s3_parser.add_argument('--s3-bucket', type=str, default="gopilot", help='S3 bucket name.')
    s3_parser.add_argument('--s3-cache-dir', type=str, default=".cache", help='Local cache directory.')
    s3_parser.add_argument('--s3-checkpoints', default=False, action='store_true', help='Enable remote checkpoints.')
    s3_parser.add_argument('--s3-dataset-prefix', type=str, required=True, help='Prefix of the remote dataset.')
    s3_args, remaining_args = s3_parser.parse_known_args(remaining_args)

    # Run arguments
    run_parser = argparse.ArgumentParser()
    run_parser.add_argument('--device', type=str, default='auto', help='Device to use for training.')
    # NOTE: verbose defaults to True, so passing --verbose has no additional effect.
    run_parser.add_argument('--verbose', default=True, action='store_true', help='Enable verbose logging (on by default).')
    run_parser.add_argument('--neptune', default=False, action='store_true', help='Enable Neptune integration.')
    run_parser.add_argument('--compile', default=False, action='store_true', help='Enable torch.compile().')
    run_parser.add_argument('--checkpoints-dir', type=str, default="out/checkpoints", help='Checkpoints directory.')
    run_args = run_parser.parse_args(remaining_args)

    args = Args(**vars(args))
    tp_args = TrainingParametersArgs(**vars(tp_args))
    s3_args = S3Args(**vars(s3_args))
    run_args = RunArgs(**vars(run_args))

    assert flame.s3_is_available(), "S3 is not available. Please set the relevant environment variables."
    assert flame.neptune_is_available() or not run_args.neptune, "Neptune is not available. Please set the relevant environment variables."

    # Seed for reproducibility
    torch.manual_seed(tp_args.seed)
    np.random.seed(tp_args.seed)
    random.seed(tp_args.seed)

    # Transform args
    run_args.device = flame.best_device() if run_args.device == "auto" else torch.device(run_args.device)
    tp_args.precision = torch.float32 if tp_args.precision == "fp32" else torch.float16

    # Load the model
    model = GopilotModel.from_config_file(args.model_cf, dropout=tp_args.dropout)
    flame.log_model_summary(model)
    if args.from_checkpoint is not None:
        if args.from_checkpoint.startswith("s3://"):
            args.from_checkpoint = download_from_s3(args.from_checkpoint, s3_args.s3_cache_dir)
        assert os.path.exists(args.from_checkpoint), f"Checkpoint {args.from_checkpoint} does not exist."
        checkpoint = torch.load(args.from_checkpoint, map_location=run_args.device)
        # Checkpoints saved from a torch.compile()-wrapped model prefix every
        # parameter key with "_orig_mod."; strip it so the keys match the raw model.
        for key in list(checkpoint['model'].keys()):
            if key.startswith("_orig_mod."):
                checkpoint['model'][key[len("_orig_mod."):]] = checkpoint['model'].pop(key)
        model.load_state_dict(checkpoint['model'])  # type: ignore
        del checkpoint
        logging.info(f"Loaded model from checkpoint {args.from_checkpoint}.")

    # Optionally compile the model
    if run_args.compile:
        assert run_args.device.type == "cuda", "torch.compile() with the Triton backend only runs on CUDA-compatible devices."
        model: GopilotModel = torch.compile(model, backend="inductor")  # type: ignore

    # Load the tokenizer
    if args.tokenizer == "gopilot":
        tokenizer = GopilotTokenizer.from_file(args.tokenizer_cf)
    else:
        tokenizer = HuggingFaceTokenizer.from_file(args.tokenizer_cf)

    # Configure the optimizer and learning rate scheduler
    tokens_per_batch = tp_args.batch_size * tp_args.gradient_accumulation_steps * model.get_config().context_length
    total_steps = int(tp_args.token_budget) // tokens_per_batch
    logging.info(f"Compute budget summary: {tp_args.token_budget} tokens, {tokens_per_batch} tokens per batch, {total_steps} total steps, {flame.expected_loss(flame.model_size(model), tp_args.token_budget):.2f} expected loss.")
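    # Worked example, assuming a context length of 2048 (the real value comes
    # from the model config): tokens_per_batch = 12 * 48 * 2048 = 1,179,648,
    # so the default 1e10 token budget yields 10_000_000_000 // 1_179_648 = 8477 steps.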
    if tp_args.optimizer == "adamw":
        optimizer = AdamW(model.parameters(), lr=tp_args.lr, weight_decay=tp_args.weight_decay, eps=tp_args.epsilon)
    else:
        optimizer = SophiaG(model.parameters(), lr=tp_args.lr, weight_decay=tp_args.weight_decay, rho=0.05)
    # pct_start converts the warmup step count into the fraction of training
    # spent ramping the LR up; the remainder is annealed with a cosine schedule.
    scheduler = OneCycleLR(optimizer, max_lr=tp_args.lr, total_steps=total_steps, anneal_strategy='cos', pct_start=(tp_args.warmup/total_steps), final_div_factor=25)

    # Configure the tracker
    tracker = flame.NeptuneTracker("rojasdiegopro/gopilot") if (flame.neptune_is_available() and run_args.neptune) else flame.NoopTracker()
    tracker.track_hyperparameters(vars(args))
    tracker.track_hyperparameters(vars(tp_args))
    tracker.track_hyperparameters(vars(model.get_config()))
    tracker.track_hyperparameters({"dataset": s3_args.s3_dataset_prefix, "tokens_per_batch": tokens_per_batch, "total_steps": total_steps, "model_size": flame.model_size(model)})

    # Load the dataset
    dataset = GopilotDataset(
        s3_args.s3_bucket,
        s3_args.s3_dataset_prefix,
        s3_args.s3_cache_dir,
        window_size=model.get_config().context_length+1,
        stride=model.get_config().context_length,
    )
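    # window_size is context_length + 1, presumably so each window can be split
    # into inputs and one-token-shifted targets; stride = context_length keeps
    # consecutive windows non-overlapping.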
    loader = DataLoader(
        dataset,
        batch_size=tp_args.batch_size,
        drop_last=True,
        pin_memory=run_args.device.type == "cuda",
        pin_memory_device="cuda" if run_args.device.type == "cuda" else "",
        num_workers=1,
        prefetch_factor=64,
    )

    # Configure the trainer
    trainer = flame.Trainer(
        GopilotTask(
            model,
            optimizer,
            pad_token_id=tokenizer.special_token_to_id("[PAD]"),
            scheduler=scheduler,
            clip_gradients=tp_args.clip_gradients,
            precision=tp_args.precision,
        ),
        run_args.device,
    )
    trainer.register_handlers(
        flame.CheckpointingHandler(
            run_args.checkpoints_dir,
            filename=tracker.get_run_id()+"-step={step}-loss={loss:.2f}.pt",
            max_files=3,
            max_step_interval=4096,
            max_time_interval_sec=60*60*3,
        ),
        flame.LoggingHandler(on_step=run_args.verbose, on_batch=False),
        flame.TrackingHandler(tracker),
        flame.S3RemoteCheckpointingHandler(
            s3_args.s3_bucket,
            f"checkpoints/{tracker.get_run_id()}",
            max_files=3,
        ) if s3_args.s3_checkpoints else flame.NoopHandler(),
    )

    # Run training
    trainer.train(
        num_epochs=-1,  # No fixed epoch count; the run is bounded by the token budget (total_steps).
        train_loader=loader,
        gradient_accumulation_steps=tp_args.gradient_accumulation_steps,
    )