pretrain_gpt_tiny.sh (forked from bigscience-workshop/Megatron-DeepSpeed)
#!/bin/bash
# Runs a tiny GPT model (2 layers, hidden size 128) for quick smoke testing
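# Single-process defaults kept from the upstream launch scripts; the deepspeed
# launcher below manages the distributed environment for its workers itself.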
RANK=0
WORLD_SIZE=1
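# DATA_PATH is the prefix of a preprocessed Megatron dataset (<prefix>.bin and
# <prefix>.idx); see the preprocessing sketch at the end of this file.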
DATA_PATH=GPT2/c4_en_partial_gpt2_text_document
CHECKPOINT_PATH=GPT2
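# Launch a single-GPU run of Megatron-DeepSpeed's GPT pretraining entry point.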
deepspeed --num_gpus 1 pretrain_gpt.py \
--num-layers 2 \
--hidden-size 128 \
--num-attention-heads 4 \
--micro-batch-size 4 \
--global-batch-size 8 \
--seq-length 256 \
--max-position-embeddings 256 \
--train-iters 10000 \
--lr-decay-iters 5000 \
--save "$CHECKPOINT_PATH" \
--load "$CHECKPOINT_PATH" \
--data-path "$DATA_PATH" \
--tokenizer-type PretrainedFromHF \
--tokenizer-name-or-path t5-small \
--data-impl mmap \
--split 949,50,1 \
--distributed-backend nccl \
--lr 0.00015 \
--min-lr 1.0e-5 \
--lr-decay-style cosine \
--weight-decay 1e-2 \
--clip-grad 1.0 \
--lr-warmup-fraction .01 \
--checkpoint-activations \
--log-interval 100 \
--save-interval 10000 \
--eval-interval 1000 \
--eval-iters 10 \
--fp16 \
--tensorboard-dir GPT2
# Alternative: Megatron's native GPT-2 BPE tokenizer, using the local vocab and
# merge files (pair these with --tokenizer-type GPT2BPETokenizer):
# --vocab-file GPT2/gpt2-vocab.json \
# --merge-file GPT2/gpt2-merges.txt \
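
# --- Data preprocessing sketch ----------------------------------------------
# DATA_PATH above assumes data already preprocessed with the repo's
# tools/preprocess_data.py. A minimal sketch of how such a file could be
# produced is below; the input file name (c4_en_partial.jsonl) is an
# assumption, and the flags should be checked against the repo's documentation.
# The "_text_document" suffix in DATA_PATH comes from the default json key
# ("text") and document-level indexing, so the output prefix here omits it.
#
# python tools/preprocess_data.py \
#     --input c4_en_partial.jsonl \
#     --output-prefix GPT2/c4_en_partial_gpt2 \
#     --dataset-impl mmap \
#     --tokenizer-type GPT2BPETokenizer \
#     --vocab-file GPT2/gpt2-vocab.json \
#     --merge-file GPT2/gpt2-merges.txt \
#     --append-eod \
#     --workers 8
#
# Note: the tokenizer used at training time must match the one used during
# preprocessing. The run above loads t5-small via PretrainedFromHF, while the
# dataset name suggests GPT-2 tokenization; if token ids exceed the loaded
# vocabulary, switch --tokenizer-name-or-path to gpt2 (or retokenize the data).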