run_palm_topic_title_generation.sh — executable shell script, 64 lines (56 loc), 2.18 KB
#!/usr/bin/env bash
# Fine-tune the ModelScope PALM 2.0 Chinese-base model on the topic-title
# generation task for one or more seeds, then build the submission JSON from
# the test predictions.
#
# Requirements: a conda env named "modelscope" and one CUDA-visible GPU.
# Outputs: checkpoints + logs under ./output/<task>/..., submission file
#          ttg_submit.json in each run's output directory.
source ~/.bashrc
conda activate modelscope

export CUDA_VISIBLE_DEVICES=0
NUM_GPU=1

#for seed in 42 88 100 199 666
for seed in 42
do
  # Random master port for torch.distributed.launch (only used if the
  # distributed line below is uncommented). If you encounter an
  # "address already used" error, just run again or manually set a free port.
  PORT_ID=$(( RANDOM + 1000 ))

  TASK=topic_title_generation
  OUTPUT_DIR=./output/${TASK}/palm-topicTitleGeneration-seed${seed}
  mkdir -p -- "${OUTPUT_DIR}" || { echo "cannot create ${OUTPUT_DIR}" >&2; exit 1; }

  LOGFILE=${OUTPUT_DIR}/$(date +%Y%m%d%H).log
  echo "write logs to ${LOGFILE}"

  # Allow multiple CPU threads (tokenization / data preprocessing).
  export OMP_NUM_THREADS=8

  MODEL_NAME_OR_PATH=damo/nlp_palm2.0_pretrained_chinese-base

  # Record which GPU(s) this run used at the top of the log.
  echo "${CUDA_VISIBLE_DEVICES}" >> "${LOGFILE}"
  #sleep 0.5h

  # Use distributed data parallel:
  # If you want multi-GPU training, comment out the plain "python" invocation
  # and uncomment the "torch.distributed.launch" line below.
  # python -m torch.distributed.launch --nproc_per_node "${NUM_GPU}" --master_port "${PORT_ID}" ./src/topic_title_generation/palm_subtitle_generation.py \
  python ./src/topic_title_generation/palm_subtitle_generation.py \
    --model_name_or_path "${MODEL_NAME_OR_PATH}" \
    --dataset_name ./datasets/AMC \
    --dataset_config_name "${TASK}" \
    --metric_name ./metrics/topic_seg_eval \
    --task_name topic_segment \
    --save_total_limit 2 \
    --do_train True \
    --seed "${seed}" \
    --do_eval True \
    --do_predict \
    --evaluation_strategy steps \
    --logging_steps 100 \
    --eval_steps 100 \
    --save_steps 100 \
    --load_best_model_at_end \
    --metric_for_best_model overall_f1 \
    --per_device_train_batch_size 8 \
    --gradient_accumulation_steps 1 \
    --per_device_eval_batch_size 8 \
    --overwrite_output_dir \
    --learning_rate 5e-4 \
    --num_train_epochs 2 \
    --max_seq_length 4096 \
    --max_eval_samples 4000 \
    --max_predict_samples 4000 \
    --preprocessing_num_workers 5 \
    --return_entity_level_metrics True \
    --output_dir "${OUTPUT_DIR}" >> "${LOGFILE}" 2>&1

  # Build the submission file only if the predict step actually produced output;
  # otherwise point the user at the log instead of crashing on a missing file.
  if [[ -f "${OUTPUT_DIR}/test_predict_result.txt" ]]; then
    python ./src/topic_title_generation/submit_file_generation.py \
      "${OUTPUT_DIR}/test_predict_result.txt" "${OUTPUT_DIR}/ttg_submit.json"
  else
    echo "no test_predict_result.txt in ${OUTPUT_DIR}; check ${LOGFILE}" >&2
  fi
done
# --per_device_train_batch_size 16 \