
Commit

add cpm
Maigee committed Aug 8, 2023
1 parent 0510ba2 commit 65c52d0
Showing 21 changed files with 88,888 additions and 13 deletions.
195 changes: 195 additions & 0 deletions configs/cpm/run_cpm_10b_finetune.yaml
@@ -0,0 +1,195 @@
seed: 0
run_mode: 'train'
output_dir: './output' # Customizing this path is not currently supported; do not modify the default value.
load_checkpoint: '/home/m30024275/cpm_model_10b.ckpt'
auto_trans_ckpt: False # If true, auto transform load_checkpoint to load in distributed model
only_save_strategy: False
resume_training: False

# ==== context config ====
context:
  mode: 0  # 0--Graph Mode; 1--Pynative Mode
  device_target: "Ascend"
  enable_graph_kernel: False
  graph_kernel_flags: "--disable_expand_ops=Softmax,Dropout --enable_parallel_fusion=true --reduce_fuse_depth=8 --enable_auto_tensor_inplace=true"
  max_call_depth: 10000
  max_device_memory: "30GB"
  save_graphs: False
  device_id: 0

# aicc
remote_save_url: "Please input obs url on AICC platform."

# ==== model config ====
model:
  model_config:
    type: CPMBeeConfig
    vocab_size: 86592
    dim_model: 4096
    dim_ff: 10240
    num_layers: 48
    num_heads: 32
    dim_head: 128
    dropout_p: 0.0
    position_bias_num_buckets: 256
    position_bias_num_segment_buckets: 256
    position_bias_max_distance: 2048
    eps: 1.e-6
    half: True
  arch:
    type: CPMForPreTraining

trainer:
  type: CausalLanguageModelingTrainer
  model_name: 'cpm_10b'
# If True, evaluate during the training process; if False, do nothing.
# Note that the task trainer should support the _evaluate_in_training function.
do_eval: False
eval_step_interval: -1  # number of steps between evals; -1 means no step-end eval.
eval_epoch_interval: 1  # number of epochs between evals; 1 means eval at every epoch end.

metric:
  type: ADGENMetric
  tokenizer_type: "glm_6b"  # use ChatGLMTokenizer

processor:
  return_tensors: ms
  tokenizer:
    type: CPMBeeTokenizer
  type: CPMProcessor

# ==== dataset config ====
train_dataset: &train_dataset
  data_loader:
    type: MindDataset
    dataset_dir: "/home/m30024275/cpm_mindrecord"
    shuffle: True
  input_columns: [ "inputs", "inputs_sub", "length", "context", "sample_ids", "num_segments", "segment",
                   "segment_rel_offset", "segment_rel", "spans", "ext_table_ids", "ext_table_sub", "label" ]
  num_parallel_workers: 8
  python_multiprocessing: False
  drop_remainder: True
  batch_size: 1
  repeat: 1
  numa_enable: False
  prefetch_size: 1
  seed: 0

train_dataset_task:
  type: CausalLanguageModelDataset
  dataset_config: *train_dataset

eval_dataset: &eval_dataset
  data_loader:
    type: MindDataset
    dataset_dir: ""
    shuffle: True
  input_columns: [ "inputs", "inputs_sub", "length", "context", "sample_ids", "num_segments", "segment_ids",
                   "segment_rel_offset", "segment_rel", "spans", "ext_ids", "ext_sub", "target" ]
  num_parallel_workers: 8
  python_multiprocessing: False
  drop_remainder: True
  batch_size: 1
  repeat: 1
  numa_enable: False
  prefetch_size: 1
  seed: 0

eval_dataset_task:
  type: CausalLanguageModelDataset
  dataset_config: *eval_dataset

# ==== runner config ====
runner_config:
  epochs: 1
  batch_size: 1
  sink_mode: False
  sink_size: -1

runner_wrapper:
  type: ScaleTrainOneStepCell
  scale_sense:
    type: DynamicLossScaleUpdateCell
    loss_scale_value: 32768
    scale_factor: 2
    scale_window: 1000
  use_clip_grad: True

# lr schedule
lr_schedule:
  type: noam
  learning_rate: 1.e-4
  warmup_iter: 1
  end_iter: 2000

# optimizer
optimizer:
  type: AdamWeightDecayWithScale
  weight_decay: 0.01
  param_group: False

# parallel config
use_parallel: False
parallel:
  parallel_mode: 2  # 0-dataset, 1-semi, 2-auto, 3-hybrid
  gradients_mean: False
  loss_repeated_mean: True
  enable_alltoall: False
  full_batch: True
  search_mode: "sharding_propagation"
  enable_parallel_optimizer: True  # optimizer shard
  strategy_ckpt_save_file: "./ckpt_strategy.ckpt"
parallel_config:
  data_parallel: 8
  model_parallel: 1
  pipeline_stage: 1
  expert_parallel: 1
  optimizer_shard: True  # optimizer shard
  micro_batch_num: 1
  vocab_emb_dp: True
  gradient_aggregation_group: 8
micro_batch_interleave_num: 1

# moe
moe_config:
  expert_num: 1
  capacity_factor: 1.05
  aux_loss_factor: 0.05
  num_experts_chosen: 1

# recompute
recompute_config:
  recompute: False
  parallel_optimizer_comm_recompute: False
  mp_comm_recompute: True
  recompute_slice_activation: False

# autotune
auto_tune: False
filepath_prefix: './autotune'
autotune_per_step: 10

# profile
profile: False
profile_start_step: 1
profile_stop_step: 10
init_start_profile: True
profile_communication: True
profile_memory: True

# callbacks
callbacks:
  - type: MFLossMonitor
  - type: SummaryMonitor
    keep_default_action: True
  - type: CheckpointMointor
    prefix: "cpm-10b"
    save_checkpoint_steps: 500
    keep_checkpoint_max: 2
    integrated_save: False
    async_save: False
  - type: ObsMonitor
    keep_last: False
eval_callbacks:
  - type: ObsMonitor
    keep_last: False
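
As a quick sanity check on the model_config above, the rough parameter count implied by the listed dimensions can be worked out directly. The sketch below is plain Python, not MindFormers code; it assumes standard Q/K/V/output attention projections and a gated feed-forward built from three dim_model x dim_ff matrices (as reported for CPM-Bee), and it ignores layer norms, biases, and the position-bias tables, so the result is only approximate.

# Approximate parameter count for the 10B config above (illustrative only).
vocab_size = 86592
dim_model = 4096
dim_ff = 10240
num_layers = 48
num_heads = 32
dim_head = 128

attn_params = 4 * dim_model * num_heads * dim_head  # Q, K, V and output projections
ffn_params = 3 * dim_model * dim_ff                 # gated FFN assumed: w_0, w_1, w_out
per_layer = attn_params + ffn_params
embedding_params = vocab_size * dim_model

total = num_layers * per_layer + embedding_params
print(f"per layer: {per_layer / 1e6:.0f} M, total: {total / 1e9:.2f} B")
# -> roughly 193 M per layer and ~9.6 B overall, consistent with the "10b" name.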
203 changes: 203 additions & 0 deletions configs/cpm/run_cpm_2b_finetune.yaml
@@ -0,0 +1,203 @@
seed: 0
run_mode: 'train'
output_dir: './output' # Customizing this path is not currently supported; do not modify the default value.
load_checkpoint: '/home/m30024275/cpm_model_2b.ckpt'
auto_trans_ckpt: False # If true, auto transform load_checkpoint to load in distributed model
only_save_strategy: False
resume_training: False

# ==== context config ====
context:
  mode: 0  # 0--Graph Mode; 1--Pynative Mode
  device_target: "Ascend"
  enable_graph_kernel: False
  graph_kernel_flags: "--disable_expand_ops=Softmax,Dropout --enable_parallel_fusion=true --reduce_fuse_depth=8 --enable_auto_tensor_inplace=true"
  max_call_depth: 10000
  max_device_memory: "30GB"
  save_graphs: False
  device_id: 0

# aicc
remote_save_url: "Please input obs url on AICC platform."

# ==== model config ====
model:
  model_config:
    type: CPMBeeConfig
    vocab_size: 86592
    dim_model: 4096
    dim_ff: 5120
    num_layers: 48
    num_heads: 32
    dim_head: 64
    dropout_p: 0.0
    position_bias_num_buckets: 256
    position_bias_num_segment_buckets: 256
    position_bias_max_distance: 2048
    eps: 1.e-6
    half: False
    mask_modules: [[False, False], [True, False], [False, False], [True, False], [True, True], [True, False],
                   [True, True], [True, True], [False, False], [False, False], [True, True], [True, False],
                   [True, False], [True, True], [False, False], [True, True], [False, False], [False, True],
                   [True, False], [True, True], [False, False], [False, True], [True, True], [True, True],
                   [False, False], [True, True], [False, False], [True, True], [True, True], [False, False],
                   [True, True], [False, False], [True, True], [False, False], [True, True], [True, False],
                   [True, True], [True, True], [True, True], [False, False], [True, True], [False, False],
                   [True, True], [True, True], [False, False], [True, True], [False, False], [False, False]]
  arch:
    type: CPMForPreTraining

trainer:
  type: CausalLanguageModelingTrainer
  model_name: 'cpm_2b'
# If True, evaluate during the training process; if False, do nothing.
# Note that the task trainer should support the _evaluate_in_training function.
do_eval: False
eval_step_interval: -1  # number of steps between evals; -1 means no step-end eval.
eval_epoch_interval: 1  # number of epochs between evals; 1 means eval at every epoch end.

metric:
  type: ADGENMetric
  tokenizer_type: "glm_6b"  # use ChatGLMTokenizer

processor:
  return_tensors: ms
  tokenizer:
    type: CPMBeeTokenizer
  type: CPMProcessor

# ==== dataset config ====
train_dataset: &train_dataset
  data_loader:
    type: MindDataset
    dataset_dir: "/home/m30024275/cpm_mindrecord"
    shuffle: True
  input_columns: [ "inputs", "inputs_sub", "length", "context", "sample_ids", "num_segments", "segment",
                   "segment_rel_offset", "segment_rel", "spans", "ext_table_ids", "ext_table_sub", "label" ]
  num_parallel_workers: 8
  python_multiprocessing: False
  drop_remainder: True
  batch_size: 1
  repeat: 1
  numa_enable: False
  prefetch_size: 1
  seed: 0

train_dataset_task:
  type: CausalLanguageModelDataset
  dataset_config: *train_dataset

eval_dataset: &eval_dataset
  data_loader:
    type: MindDataset
    dataset_dir: ""
    shuffle: True
  input_columns: [ "inputs", "inputs_sub", "length", "context", "sample_ids", "num_segments", "segment_ids",
                   "segment_rel_offset", "segment_rel", "spans", "ext_ids", "ext_sub", "target" ]
  num_parallel_workers: 8
  python_multiprocessing: False
  drop_remainder: True
  batch_size: 1
  repeat: 1
  numa_enable: False
  prefetch_size: 1
  seed: 0

eval_dataset_task:
  type: CausalLanguageModelDataset
  dataset_config: *eval_dataset

# ==== runner config ====
runner_config:
  epochs: 1
  batch_size: 1
  sink_mode: False
  sink_size: -1

runner_wrapper:
  type: ScaleTrainOneStepCell
  scale_sense:
    type: DynamicLossScaleUpdateCell
    loss_scale_value: 32768
    scale_factor: 2
    scale_window: 1000
  use_clip_grad: True

# lr schedule
lr_schedule:
  type: noam
  learning_rate: 1.e-4
  warmup_iter: 1
  end_iter: 2000

# optimizer
optimizer:
  type: AdamWeightDecayWithScale
  weight_decay: 0.01
  param_group: False

# parallel config
use_parallel: False
parallel:
  parallel_mode: 2  # 0-dataset, 1-semi, 2-auto, 3-hybrid
  gradients_mean: False
  loss_repeated_mean: True
  enable_alltoall: False
  full_batch: True
  search_mode: "sharding_propagation"
  enable_parallel_optimizer: True  # optimizer shard
  strategy_ckpt_save_file: "./ckpt_strategy.ckpt"
parallel_config:
  data_parallel: 4
  model_parallel: 1
  pipeline_stage: 1
  expert_parallel: 1
  optimizer_shard: True  # optimizer shard
  micro_batch_num: 1
  vocab_emb_dp: True
  gradient_aggregation_group: 4
micro_batch_interleave_num: 1

# moe
moe_config:
  expert_num: 1
  capacity_factor: 1.05
  aux_loss_factor: 0.05
  num_experts_chosen: 1

# recompute
recompute_config:
  recompute: False
  parallel_optimizer_comm_recompute: False
  mp_comm_recompute: True
  recompute_slice_activation: False

# autotune
auto_tune: False
filepath_prefix: './autotune'
autotune_per_step: 10

# profile
profile: False
profile_start_step: 1
profile_stop_step: 10
init_start_profile: True
profile_communication: True
profile_memory: True

# callbacks
callbacks:
  - type: MFLossMonitor
  - type: SummaryMonitor
    keep_default_action: True
  - type: CheckpointMointor
    prefix: "cpm-2b"
    save_checkpoint_steps: 500
    keep_checkpoint_max: 2
    integrated_save: False
    async_save: False
  - type: ObsMonitor
    keep_last: False
eval_callbacks:
  - type: ObsMonitor
    keep_last: False
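
Neither config shows how it is consumed, so the following is a generic sketch using plain PyYAML rather than the MindFormers loader: it reads one of the files above, reports the device count implied by parallel_config, and evaluates one common formulation of the noam learning-rate schedule. The file path and the noam formula are assumptions for illustration; the exact schedule implemented for CPM may differ.

import math
import yaml  # PyYAML

# Hypothetical path; point this at wherever the config actually lives.
with open("configs/cpm/run_cpm_2b_finetune.yaml", "r", encoding="utf-8") as f:
    cfg = yaml.safe_load(f)

# data_parallel * model_parallel * pipeline_stage should match the number of
# devices the job is launched on (only relevant once use_parallel is True).
pc = cfg["parallel_config"]
devices = pc["data_parallel"] * pc["model_parallel"] * pc["pipeline_stage"]
print(f"parallel_config implies {devices} device(s)")

# One common formulation of the noam schedule (an assumption, not the verified
# MindFormers implementation): warm up linearly, then decay as 1/sqrt(step).
lr_cfg = cfg["lr_schedule"]
base_lr, warmup = lr_cfg["learning_rate"], lr_cfg["warmup_iter"]

def noam_lr(step: int) -> float:
    step = max(step, 1)
    return base_lr * math.sqrt(warmup) * min(step ** -0.5, step * warmup ** -1.5)

for step in (1, 100, lr_cfg["end_iter"]):
    print(f"step {step:>5}: lr = {noam_lr(step):.3e}")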