Add training scripts for Baichuan & Clean BOS/EOS tokens during data cleaning (lm-sys#1940)
Trangle authored Jul 16, 2023
1 parent 47a4647 commit 70e01d3
Showing 3 changed files with 411 additions and 1 deletion.
82 changes: 82 additions & 0 deletions fastchat/data/optional_replace.py
@@ -0,0 +1,82 @@
"""
Optionally replace literal bos/eos/pad/unk special tokens in the conversation data.
Usage:
python3 -m fastchat.data.optional_replace --in-file input.json --out-file output.json --model-name-or-path <tokenizer_path>
Requirement:
pip3 install transformers tqdm
"""
import argparse
import json
import traceback

import transformers
from tqdm import tqdm


def replace_special_tokens(
tokenizer: transformers.PreTrainedTokenizer, text: str
) -> str:
if not text:
return text

    def _insert_vline(token: str) -> str:
        # Split the token with "|" so the literal string no longer matches a
        # real special token; tokens shorter than two characters become a space.
        if len(token) < 2:
return " "
elif len(token) == 2:
return f"{token[0]}|{token[1]}"
else:
return f"{token[:1]}|{token[1:-1]}|{token[-1:]}"

if tokenizer.bos_token:
text = text.replace(tokenizer.bos_token, _insert_vline(tokenizer.bos_token))
if tokenizer.eos_token:
text = text.replace(tokenizer.eos_token, _insert_vline(tokenizer.eos_token))
if tokenizer.pad_token:
text = text.replace(tokenizer.pad_token, _insert_vline(tokenizer.pad_token))
if tokenizer.unk_token:
text = text.replace(tokenizer.unk_token, _insert_vline(tokenizer.unk_token))
return text


def replace(conv, tokenizer):
# Replace bos/eos/pad/unk tokens
if tokenizer:
try:
for sentence in conv["conversations"]:
sentence["value"] = replace_special_tokens(tokenizer, sentence["value"])
        except Exception:
traceback.print_exc()


if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--in-file", type=str, required=True)
parser.add_argument("--out-file", type=str)
parser.add_argument(
"--model-name-or-path",
type=str,
help="The directory or address where the model token is stored.",
)
args = parser.parse_args()

in_file = args.in_file
out_file = args.out_file
tokenizer = None
if args.model_name_or_path:
tokenizer = transformers.AutoTokenizer.from_pretrained(
args.model_name_or_path,
trust_remote_code=True,
use_fast=False,
)

if out_file is None:
out_file = f"{in_file}_replace.json"

content = json.load(open(in_file, "r"))

for conv in tqdm(content):
replace(conv, tokenizer)

json.dump(content, open(out_file, "w"), indent=2, ensure_ascii=False)
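
For reference, below is a minimal sketch of the string-level effect of this cleaning step, assuming LLaMA-style special tokens ("<s>", "</s>", "<unk>"); the actual tokens come from whatever tokenizer --model-name-or-path loads, and pad_token may be absent:

# Hypothetical illustration only; the token strings are assumptions, not read from a tokenizer.
def _vline(token: str) -> str:
    # mirrors _insert_vline above for tokens longer than two characters
    return f"{token[:1]}|{token[1:-1]}|{token[-1:]}"

text = "reply ends with </s> and contains an <unk> marker"
for tok in ("<s>", "</s>", "<unk>"):
    text = text.replace(tok, _vline(tok))
print(text)
# -> reply ends with <|/s|> and contains an <|unk|> marker

Splitting the literal markers this way keeps the surrounding text readable while preventing them from being tokenized as real control tokens during training.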
3 changes: 2 additions & 1 deletion fastchat/data/prepare_all.py
@@ -14,7 +14,8 @@ def run_cmd(cmd):
cmd_list = [
f"python3 -m fastchat.data.clean_sharegpt --in {prefix}_html.json --out {prefix}_clean.json",
f"python3 -m fastchat.data.optional_clean --in {prefix}_clean.json --out {prefix}_clean_lang.json --skip-lang ko",
f"python3 -m fastchat.data.split_long_conversation --in {prefix}_clean_lang.json --out {prefix}_clean_lang_split.json --model-name {llama_weights}",
f"python3 -m fastchat.data.optional_replace --in {prefix}_clean_lang.json --out {prefix}_clean_lang_replace.json --model-name-or-path {llama_weights}",
f"python3 -m fastchat.data.split_long_conversation --in {prefix}_clean_lang_replace.json --out {prefix}_clean_lang_split.json --model-name {llama_weights}",
f"python3 -m fastchat.data.filter_wrong_format --in {prefix}_clean_lang_split.json --out {prefix}_clean_lang_split.json",
f"python3 -m fastchat.data.split_train_test --in {prefix}_clean_lang_split.json --ratio 0.99",
f"python3 -m fastchat.data.hardcoded_questions",
(Diff for the third changed file, the Baichuan training script, not expanded in this view.)
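
With this change, the data preparation pipeline gains one intermediate file. A rough sketch of the resulting stage order and file naming, following the {prefix}_*.json convention used in prepare_all.py:

# Sketch of the updated cleaning pipeline (stage -> output file), based on the
# cmd_list shown above; "{prefix}" is the dataset prefix used by prepare_all.py.
pipeline = [
    ("clean_sharegpt",          "{prefix}_clean.json"),
    ("optional_clean",          "{prefix}_clean_lang.json"),
    ("optional_replace",        "{prefix}_clean_lang_replace.json"),  # new step added by this commit
    ("split_long_conversation", "{prefix}_clean_lang_split.json"),
    ("filter_wrong_format",     "{prefix}_clean_lang_split.json"),
]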
