#!/bin/bash
# Adapted to run with DeepSpeed on a single node.
#
# Multi-node training will require either a DeepSpeed `hostfile` or switching to `torch.distributed.launch`.
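#
# A minimal multi-node sketch, assuming DeepSpeed's `<hostname> slots=<n>`
# hostfile format (the host names below are hypothetical):
#
#   $ cat hostfile
#   node1 slots=8
#   node2 slots=8
#
# and then launch with `deepspeed --hostfile hostfile ...` in place of
# `deepspeed --num_gpus $N_GPUS ...` below.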
# Adjust N_GPUS to the number of GPUs available on this node.
N_GPUS=1
CHECKPOINT_PATH=checkpoints/gpt2
VOCAB_FILE=data/gpt2-vocab.json
MERGE_FILE=data/gpt2-merges.txt
DATA_PATH=data/meg-gpt2_text_document
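
# The vocab/merges files and the indexed dataset above are assumed to come from
# the repo's standard GPT-2 preprocessing flow; a hedged sketch (the input path
# is illustrative, flag spellings follow this repo's README):
#
#   wget https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json -P data/
#   wget https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt -P data/
#   python tools/preprocess_data.py \
#       --input data/my-corpus.json \
#       --output-prefix data/meg-gpt2 \
#       --vocab data/gpt2-vocab.json \
#       --merge-file data/gpt2-merges.txt \
#       --tokenizer-type GPT2BPETokenizer \
#       --dataset-impl mmap \
#       --append-eod
#
# With the default `--json-keys text`, that output prefix yields
# data/meg-gpt2_text_document, matching $DATA_PATH above.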
GPT_ARGS=" \
--num-layers 24 \
--hidden-size 1024 \
--num-attention-heads 16 \
--seq-length 1024 \
--max-position-embeddings 1024 \
--micro-batch-size 4 \
--global-batch-size 8 \
--lr-decay-iters 320000 \
--lr 0.00015 \
--min-lr 1.0e-5 \
--lr-decay-style cosine \
--train-iters 5000 \
--vocab-file $VOCAB_FILE \
--merge-file $MERGE_FILE \
--data-impl mmap \
--split 949,50,1 \
--distributed-backend nccl \
--weight-decay 1e-2 \
--clip-grad 1.0 \
--lr-warmup-fraction .01 \
--fp16 \
"
OUTPUT_ARGS=" \
--log-interval 10 \
--save-interval 500 \
--eval-interval 100 \
--eval-iters 10 \
--checkpoint-activations \
"
DATA_ARGS=" \
--save $CHECKPOINT_PATH \
--load $CHECKPOINT_PATH \
--data-path $DATA_PATH \
"
ALL_ARGS="$GPT_ARGS $OUTPUT_ARGS $DATA_ARGS"
LAUNCHER="deepspeed --num_gpus $N_GPUS"
CMD="$LAUNCHER pretrain_gpt.py $ALL_ARGS"
echo "$CMD"
$CMD
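
# Usage sketch (assuming the data/ files above have been prepared):
#   bash pretrain_gpt_single_node.sh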