llama factory llama2-13b and mistral-7b pipeline (#93)
* added llama-factory under llm_rl

* added sft training bash

* added datasets from llama-factory; will delete later

* finished llama-2-13b train and inference

* fixed minor errors

* changed config

* added deepspeed config

* added more training config to train bash

* adding fix for wandb tags and distributed ranks

* added fastchat data to replicate training for 2k

* trying to replicate fastchat as closely as possible

* before merging

* changed finetune scripts for better performance

* added new data

* example bash

* example bash for mistral
Jasonqi146 authored and ruiyiw committed Nov 14, 2023
1 parent eb68a69 commit 07ee480
Showing 11 changed files with 52 additions and 27 deletions.

Large diffs are not rendered by default.

4 changes: 4 additions & 0 deletions llm_rl/data/dataset_info.json
@@ -1,4 +1,8 @@
{
"sotopia_no_slide_no_filter_format_sft": {
"file_name": "fastchat-ft-gp4-gpt4-easy-truncated.json",
"file_sha1": ""
},
"fastchat-sft": {
"file_name": "fastchat-ft-gp4-gpt4-easy-truncated.json",
"file_sha1": ""
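For context, each entry in dataset_info.json maps a dataset alias (the value passed to --dataset in the finetune scripts) to a JSON file under --dataset_dir, with an optional file_sha1 checksum that is left empty in this commit. Below is a minimal sketch of how such an entry can be resolved and checked, using only the standard library; the helper name is hypothetical and not part of the repository.

import hashlib
import json
import os

def resolve_dataset(dataset_dir: str, alias: str) -> str:
    """Hypothetical helper: map a --dataset alias to its data file and verify its SHA-1."""
    with open(os.path.join(dataset_dir, "dataset_info.json")) as f:
        info = json.load(f)
    entry = info[alias]  # e.g. "sotopia_no_slide_no_filter_format_sft"
    path = os.path.join(dataset_dir, entry["file_name"])
    with open(path, "rb") as f:
        sha1 = hashlib.sha1(f.read()).hexdigest()
    # An empty "file_sha1" (as in this commit) skips verification.
    if entry.get("file_sha1") and entry["file_sha1"] != sha1:
        raise ValueError(f"checksum mismatch for dataset {alias!r}")
    return path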
27 changes: 14 additions & 13 deletions llm_rl/finetune-llama-2-13b.sh
@@ -1,46 +1,47 @@
deepspeed src/train_bash.py \
--stage sft \
--model_name_or_path meta-llama/Llama-2-13b-hf \
--dataset sotopia_easy_sft \
--dataset fastchat-sft \
--dataset_dir ./data/ \
--val_size 0.1 \
--cutoff_len 4096 \
--template llama2-sotopia \
--wandb_project "llama-factory-sft" \
--wandb_tags "['llama-2-13b-hf']" \
--use_fast_tokenizer False \
--do_train \
--num_train_epochs 15.0 \
--per_device_train_batch_size 8 \
--gradient_accumulation_steps 8 \
--per_device_train_batch_size 1 \
--gradient_accumulation_steps 32 \
--finetuning_type lora \
--lora_target q_proj,v_proj \
--lora_rank 8 \
--lora_alpha 16 \
--lora_dropout 0.05 \
--qlora_compute_dtype bf16 \
--learning_rate 5e-5 \
--lr_scheduler_type cosine \
--weight_decay 0. \
--warmup_ratio 0.03 \
--quantization_bit 4 \
--quantization_type nf4 \
--double_quantization \
--double_quantization True \
--flash_attn True \
--gradient_checkpointing True \
--bf16 \
--bf16 True \
--cache_dir ./model_cache \
--overwrite_cache \
--output_dir ./llama2-13b-sft_cache \
--overwrite_output_dir \
--logging_steps 1 \
--evaluation_strategy "steps" \
--per_device_eval_batch_size 32 \
--eval_accumulation_steps 32 \
--save_strategy "epoch" \
--save_total_limit 5 \
--use_auth_token True \
--wandb_token "99caa13ec9552adf0e92e5c30021307ce3cf7fa4" \
--hf_auth_token "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" \
--deepspeed ./deepspeed_config_s2.json

# --dataset alpaca_gpt4_en \
# --dataset alpaca_gpt4_en \
# --val_size 0.1 \
# --evaluation_strategy "steps" \
# --per_device_eval_batch_size 32 \
# --eval_accumulation_steps 32 \
# --lora_rank 8 \
# --lora_alpha 16 \
# --lora_dropout 0.05 \
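For context on the revised hyperparameters (the finetune-mistral-7b.sh script below gets the same change): the per-device effective batch size moves from 8 × 8 = 64 to 1 × 32 = 32 (per_device_train_batch_size × gradient_accumulation_steps), and the global batch size is that figure multiplied by the number of DeepSpeed ranks. Combined with 4-bit NF4 quantization, double quantization, and a bf16 compute dtype, this is the standard QLoRA recipe, with LoRA adapters of rank 8, alpha 16, and dropout 0.05 on q_proj and v_proj.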
19 changes: 9 additions & 10 deletions llm_rl/finetune-mistral-7b.sh
@@ -1,18 +1,17 @@
deepspeed src/train_bash.py \
--stage sft \
--model_name_or_path mistralai/Mistral-7B-v0.1 \
--dataset dummy_convs \
--dataset sotopia_no_slide_no_filter_format_sft \
--dataset_dir ./data/ \
--val_size 0.1 \
--cutoff_len 4096 \
--template llama2-sotopia \
--wandb_project "llama-factory-sft" \
--wandb_tags "['mistral-7b']" \
--use_fast_tokenizer False \
--do_train \
--num_train_epochs 15.0 \
--per_device_train_batch_size 8 \
--gradient_accumulation_steps 8 \
--per_device_train_batch_size 1 \
--gradient_accumulation_steps 32 \
--finetuning_type lora \
--lora_target q_proj,v_proj \
--learning_rate 5e-5 \
@@ -21,19 +20,15 @@ deepspeed src/train_bash.py \
--warmup_ratio 0.03 \
--quantization_bit 4 \
--quantization_type nf4 \
--double_quantization \
--double_quantization True \
--flash_attn True \
--gradient_checkpointing True \
--bf16 \
--tf32 True \
--bf16 True \
--cache_dir ./model_cache \
--overwrite_cache \
--output_dir ./mistral-7b-sft_cache \
--overwrite_output_dir \
--logging_steps 1 \
--evaluation_strategy "steps" \
--per_device_eval_batch_size 32 \
--eval_accumulation_steps 32 \
--save_strategy "epoch" \
--save_total_limit 5 \
--use_auth_token True \
@@ -42,6 +37,10 @@ deepspeed src/train_bash.py \
--deepspeed ./deepspeed_config_s2.json

# --dataset alpaca_gpt4_en \
# --val_size 0.1 \
# --evaluation_strategy "steps" \
# --per_device_eval_batch_size 32 \
# --eval_accumulation_steps 32 \
# --lora_rank 8 \
# --lora_alpha 16 \
# --lora_dropout 0.05 \
4 changes: 2 additions & 2 deletions llm_rl/preprocess/create_sft_data.py
@@ -22,6 +22,6 @@ def join_json_files(directory_path):
joined_data.append(new_data)
return joined_data

joined_data = join_json_files("./GPT4-4_Redis_Easy_No_Slide/")
with open("../data/GPT4-4_Redis_Easy_No_Slide.json", "w") as f:
joined_data = join_json_files("./GPT4-4_Redis_Easy_No_Slide_No_Filter_Format/")
with open("../data/GPT4-4_Redis_Easy_No_Slide_No_Filter_Format.json", "w") as f:
json.dump(joined_data, f)
3 changes: 2 additions & 1 deletion llm_rl/requirements.txt
@@ -1,3 +1,5 @@
packaging
wheel
torch>=1.13.1
transformers>=4.31.0,<4.35.0
datasets>=2.12.0
@@ -17,7 +19,6 @@ uvicorn
pydantic
fastapi
sse-starlette
packaging
matplotlib
py-cpuinfo
deepspeed
4 changes: 4 additions & 0 deletions llm_rl/src/llmtuner/hparams/finetuning_args.py
@@ -52,6 +52,10 @@ class FinetuningArguments:
Phi-1.5 choices: [\"Wqkv\", \"out_proj\", \"fc1\", \"fc2\"], \
LLaMA-2, InternLM, XVERSE choices: the same as LLaMA."}
)
lora_bias: Optional[str] = field(
default="none",
metadata={"help": "The lora_bias option from bitsandbytes."}
)
additional_target: Optional[str] = field(
default=None,
metadata={"help": "Name(s) of modules apart from LoRA layers to be set as trainable and saved in the final checkpoint."}
13 changes: 12 additions & 1 deletion llm_rl/src/llmtuner/hparams/model_args.py
@@ -1,3 +1,4 @@
from torch import float16, bfloat16, float32
from typing import Literal, Optional
from dataclasses import dataclass, field

@@ -74,9 +75,12 @@ class ModelArguments:
default=None,
metadata={"help": "Path to the directory to save the exported model."}
)
qlora_compute_dtype: Optional[str] = field(
default="fp32",
metadata={"help": "The compute_dtype option from bitsandbytes."}
)

def __post_init__(self):
self.compute_dtype = None
self.model_max_length = None

if self.split_special_tokens and self.use_fast_tokenizer:
@@ -91,3 +95,10 @@ def __post_init__(self):
if self.use_auth_token == True and self.hf_auth_token is not None:
from huggingface_hub.hf_api import HfFolder # lazy load
HfFolder.save_token(self.hf_auth_token)

if self.qlora_compute_dtype == "bf16":
self.compute_dtype = bfloat16
elif self.qlora_compute_dtype == "fp16":
self.compute_dtype = float16
else:
self.compute_dtype = float32
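The new qlora_compute_dtype flag resolves a string ("bf16", "fp16", or anything else for fp32) into a torch dtype in __post_init__. As a sketch only, the resolved dtype is typically consumed through transformers' bitsandbytes integration along these lines; the exact wiring inside llmtuner's loader may differ, and the values below simply mirror the flags used in the finetune scripts above.

import torch
from transformers import BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                       # --quantization_bit 4
    bnb_4bit_quant_type="nf4",               # --quantization_type nf4
    bnb_4bit_use_double_quant=True,          # --double_quantization True
    bnb_4bit_compute_dtype=torch.bfloat16,   # resolved from --qlora_compute_dtype bf16
)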
1 change: 1 addition & 0 deletions llm_rl/src/llmtuner/tuner/core/adapter.py
@@ -89,6 +89,7 @@ def init_adapter(
lora_alpha=finetuning_args.lora_alpha,
lora_dropout=finetuning_args.lora_dropout,
target_modules=target_modules,
bias=finetuning_args.lora_bias,
modules_to_save=finetuning_args.additional_target
)
model = get_peft_model(model, lora_config)
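For reference, peft's LoraConfig accepts bias values of "none", "all", or "lora_only"; the new --lora_bias flag defaults to "none", which keeps all bias terms frozen. A minimal sketch of the resulting configuration, with values taken from the finetune scripts above:

from peft import LoraConfig, TaskType

lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    r=8,                                  # --lora_rank 8
    lora_alpha=16,                        # --lora_alpha 16
    lora_dropout=0.05,                    # --lora_dropout 0.05
    target_modules=["q_proj", "v_proj"],  # --lora_target q_proj,v_proj
    bias="none",                          # --lora_bias default
)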
1 change: 1 addition & 0 deletions llm_rl/src/llmtuner/tuner/core/loader.py
@@ -74,6 +74,7 @@ def load_model_and_tokenizer(
padding_side="right", # training with left-padded tensors in fp16 precision may cause overflow
**config_kwargs
)
tokenizer.pad_token = tokenizer.unk_token

if finetuning_args.finetuning_type != "lora" and model_args.checkpoint_dir is not None:
model_to_load = model_args.checkpoint_dir[0]
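The added line addresses the fact that LLaMA-2 and Mistral tokenizers ship without a pad token, which right-padded batched SFT requires; reusing the unk token avoids resizing the embedding matrix. A minimal standalone sketch of the same idea (loading this model assumes an authenticated Hugging Face token):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "meta-llama/Llama-2-13b-hf", use_fast=False, padding_side="right"
)
if tokenizer.pad_token is None:                # LLaMA tokenizers define no pad token
    tokenizer.pad_token = tokenizer.unk_token  # reuse <unk>; no embedding resize needed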
2 changes: 2 additions & 0 deletions llm_rl/src/llmtuner/tuner/sft/workflow.py
@@ -31,6 +31,8 @@ def run_sft(

if training_args.gradient_checkpointing:
model.enable_input_require_grads()
model.gradient_checkpointing_enable()
model.config.use_cache = False

if training_args.predict_with_generate:
tokenizer.padding_side = "left" # use left-padding in generation
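These two added lines complement the existing enable_input_require_grads() call: gradient_checkpointing_enable() switches activation checkpointing on at the model level to match the --gradient_checkpointing True flag, and setting use_cache = False turns off the key/value cache, which transformers reports as incompatible with gradient checkpointing during training.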
