# v1_5-mix-medium-mitch-ish.yaml (forked from allenai/OLMo)
run_name: v1_5-mix-medium-mitch-ish
seed: 6198
dry_run: false

wandb:
  name: ${run_name}
  project: olmo-medium
  group: v1_5-mix

model:
  d_model: 4096
  n_heads: 32
  n_layers: 32
  # mlp_ratio: 6
  mlp_hidden_size: 22016
  weight_tying: false
  alibi: false
  rope: true
  flash_attention: false  # not available on AMD
  attention_dropout: 0.0
  attention_layer_norm: false
  multi_query_attention: false
  include_bias: false
  block_type: sequential
  layer_norm_type: default
  layer_norm_with_affine: false
  bias_for_layer_norm: false
  attention_layer_norm_with_affine: false
  activation_type: swiglu
  residual_dropout: 0.0
  embedding_dropout: 0.0
  max_sequence_length: 2048
  vocab_size: 50280
  embedding_size: 50304
  eos_token_id: 0
  pad_token_id: 1
  init_device: meta
  init_fn: mitchell
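
# Note: d_model 4096 with 32 layers and 32 heads, SwiGLU MLPs, rotary embeddings, no biases,
# and non-affine layer norms is roughly the 7B-parameter OLMo shape; `init_fn: mitchell` is
# presumably the initialization scheme the "mitch-ish" run name refers to.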

compile: null

optimizer:
  name: adamw
  learning_rate: 3.0e-4
  weight_decay: 0.1
  betas:
  - 0.9
  - 0.95
  metrics_log_interval: 10
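
# AdamW with a peak learning rate of 3.0e-4, betas (0.9, 0.95), and weight decay 0.1;
# metrics_log_interval appears to control how often optimizer metrics (e.g. parameter and
# gradient norms) are collected, here every 10 steps.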

scheduler:
  name: linear_with_warmup
  t_warmup: 5000
  alpha_f: 0.1
  grad_clip_warmup_steps: 1000
  grad_clip_warmup_factor: 10.0
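
# Learning-rate schedule sketch (assuming linear_with_warmup means linear warmup followed by
# linear decay to alpha_f * peak):
#   step <  5000:  lr ~= 3.0e-4 * step / 5000
#   step >= 5000:  lr decays linearly from 3.0e-4 toward 3.0e-5 (0.1 * peak) at max_duration
# The gradient-clipping threshold is presumably relaxed by grad_clip_warmup_factor (10x)
# during the first 1000 steps.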

tokenizer:
  identifier: tokenizers/allenai_eleuther-ai-gpt-neox-20b-pii-special.json
  truncate_direction: right
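
# EleutherAI GPT-NeoX-20B BPE tokenizer with PII-masking special tokens. Its vocab_size
# (50280) is smaller than the model's embedding_size (50304, a multiple of 128), the extra
# rows presumably being padding for more GPU-friendly matrix shapes.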

save_folder: ${oc.env:CHECKPOINTS_PATH}/${oc.env:SLURM_JOB_ID,${run_name}}
save_overwrite: false
# Sharded checkpoints (best for restarts)
save_interval: 1000
save_num_checkpoints_to_keep: -1
# Unsharded checkpoints (for final storage)
save_interval_unsharded: null  # getting errors on LUMI right now
save_num_unsharded_checkpoints_to_keep: -1

load_path: null
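
# Checkpointing: a sharded checkpoint every 1000 steps, keeping all of them (-1 = no limit);
# interval-based unsharded checkpoints are disabled here. load_path: null means training
# starts fresh rather than resuming from an earlier checkpoint.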

max_duration: 2e12T  # 2T tokens
global_train_batch_size: 2048
device_train_microbatch_size: 2

precision: amp_bf16
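
# Rough budget: 2048 sequences/step * 2048 tokens/sequence ~= 4.2M tokens per optimizer step,
# so 2T tokens works out to roughly 480k steps. With device_train_microbatch_size: 2, each
# device runs 2 sequences per forward/backward pass; data parallelism plus gradient
# accumulation make up the global batch of 2048.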

fsdp:
  wrapping_strategy: null
  precision: mixed

max_grad_norm: 1.0
max_grad_norm_ratio: null
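
# FSDP shards the model across devices; "mixed" precision presumably keeps compute in bf16
# (matching amp_bf16 above) while gradient reductions stay in full precision. Gradients are
# clipped to a max norm of 1.0, with no ratio-based clipping (max_grad_norm_ratio: null).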

speed_monitor:
  window_size: 20

eval_interval: ${save_interval}
eval_subset_num_batches: -1
device_eval_batch_size: ${device_train_microbatch_size}
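
# In-loop evaluation runs every ${save_interval} (1000) steps; eval_subset_num_batches: -1
# evaluates each validation set in full rather than capping the number of batches.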

evaluators:
  - label: all-small-ppl-validation
    data:
      num_workers: 0
      drop_last: true
      # pin_memory: true
      # prefetch_factor: 1
      # persistent_workers: false
      # timeout: 0
      datasets:
        4chan-validation:
        - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/4chan/val.npy
        c4_100_domains-validation:
        - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/c4_100_domains/val.npy
        c4_en-validation:
        - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/c4_en/val.npy
        gab-validation:
        - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/gab/val.npy
        ice-validation:
        - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/ice/val.npy
        m2d2_s2orc-validation:
        - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/m2d2_s2orc/val.npy
        m2d2_wiki-validation:
        - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/m2d2_wiki/val.npy
        manosphere-validation:
        - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/manosphere/val.npy
        mc4_en-validation:
        - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/mc4_en/val.npy
        pile-validation:
        - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/pile/val.npy
        ptb-validation:
        - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/ptb/val.npy
        twitterAEE-validation:
        - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/twitterAEE/val.npy
        wikitext_103-validation:
        - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/wikitext_103/val.npy
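
  # The evaluator above tracks held-out perplexity on a suite of small validation sets
  # (C4, mC4, The Pile, WikiText-103, PTB, M2D2, and several web/social-media domains),
  # each pre-tokenized into .npy files with the same GPT-NeoX-20B tokenizer.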

  ##########################
  # Downstream evaluations #
  ##########################
  - label: piqa
    type: downstream

  - label: hellaswag
    type: downstream

  - label: winogrande
    type: downstream

  - label: openbook_qa
    type: downstream

  # - label: boolq  # requires implementation of the pmi_dc matrix
  #   type: downstream

  - label: sciq
    type: downstream

  - label: arc_easy
    type: downstream

  # - label: arc_challenge  # requires implementation of the pmi_dc matrix
  #   type: downstream

  - label: copa
    type: downstream

  - label: rte
    type: downstream

  - label: commitment_bank
    type: downstream

  - label: mrpc
    type: downstream

  - label: sst2
    type: downstream
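
  # The downstream evaluators are ranked-completion accuracy tasks run in-loop (PIQA,
  # HellaSwag, Winogrande, etc.); boolq and arc_challenge are commented out above pending
  # support for the pmi_dc normalization they rely on.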

data:
  paths: ${path.glob:${oc.env:DATA_PATH}/v1_5-sample/gpt-neox-20b-pii-special/*.npy}
  pad_direction: right
  num_workers: 0
  drop_last: true
  pin_memory: true
  prefetch_factor: 16
  persistent_workers: true
  timeout: 0
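
# Training data: pre-tokenized .npy token files globbed from $DATA_PATH (OLMo typically
# memory-maps these). pin_memory, prefetch_factor, and persistent_workers are standard
# PyTorch DataLoader throughput knobs (note that some only take effect when num_workers > 0).
#
# Example launch (hypothetical config path and process count; assumes the standard OLMo
# scripts/train.py entry point):
#   torchrun --nproc_per_node=8 scripts/train.py configs/v1_5-mix-medium-mitch-ish.yaml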