
Commit

update prepare libritts script
ZhikangNiu committed Nov 22, 2024
1 parent 837c158 commit 58f7835
Showing 2 changed files with 95 additions and 0 deletions.
3 changes: 3 additions & 0 deletions src/f5_tts/train/README.md
@@ -13,6 +13,9 @@
python src/f5_tts/train/datasets/prepare_emilia.py

# Prepare the Wenetspeech4TTS dataset
python src/f5_tts/train/datasets/prepare_wenetspeech4tts.py

# Prepare the LibriTTS dataset
python src/f5_tts/train/datasets/prepare_libritts.py
```

### 2. Create custom dataset with metadata.csv
92 changes: 92 additions & 0 deletions src/f5_tts/train/datasets/prepare_libritts.py
@@ -0,0 +1,92 @@
import os
import sys

sys.path.append(os.getcwd())  # make modules under the current working directory importable

import json
from concurrent.futures import ProcessPoolExecutor
from importlib.resources import files
from pathlib import Path

import soundfile as sf
from datasets.arrow_writer import ArrowWriter
from tqdm import tqdm


def deal_with_audio_dir(audio_dir):
    """Collect (audio_path, text, duration) entries for one speaker directory."""
    sub_result, durations = [], []
    vocab_set = set()
    audio_lists = list(audio_dir.rglob("*.wav"))

    for audio_path in audio_lists:
        text_path = audio_path.with_suffix(".normalized.txt")
        with open(text_path, "r", encoding="utf-8") as f:
            text = f.read().strip()
        duration = sf.info(audio_path).duration
        # skip clips outside the 0.4–30 s range kept for training
        if duration < 0.4 or duration > 30:
            continue
        sub_result.append({"audio_path": str(audio_path), "text": text, "duration": duration})
        durations.append(duration)
        vocab_set.update(list(text))
    return sub_result, durations, vocab_set


def main():
    result = []
    duration_list = []
    text_vocab_set = set()

    # process raw data: one worker task per speaker directory
    executor = ProcessPoolExecutor(max_workers=max_workers)
    futures = []

    for subset in tqdm(SUB_SET):
        dataset_path = Path(os.path.join(dataset_dir, subset))
        for audio_dir in dataset_path.iterdir():
            if audio_dir.is_dir():
                futures.append(executor.submit(deal_with_audio_dir, audio_dir))
    for future in tqdm(futures, total=len(futures)):
        sub_result, durations, vocab_set = future.result()
        result.extend(sub_result)
        duration_list.extend(durations)
        text_vocab_set.update(vocab_set)
    executor.shutdown()

    # save preprocessed dataset to disk
    os.makedirs(save_dir, exist_ok=True)
    print(f"\nSaving to {save_dir} ...")

    with ArrowWriter(path=f"{save_dir}/raw.arrow") as writer:
        for line in tqdm(result, desc="Writing to raw.arrow ..."):
            writer.write(line)

    # also dump durations to a separate JSON so a DynamicBatchSampler can load them
    # without reading the Arrow file (see the bucketing sketch after the listing)
    with open(f"{save_dir}/duration.json", "w", encoding="utf-8") as f:
        json.dump({"duration": duration_list}, f, ensure_ascii=False)

    # vocab map, i.e. the character-level tokenizer
    with open(f"{save_dir}/vocab.txt", "w", encoding="utf-8") as f:
        for vocab in sorted(text_vocab_set):
            f.write(vocab + "\n")

    print(f"\nFor {dataset_name}, sample count: {len(result)}")
    print(f"For {dataset_name}, vocab size is: {len(text_vocab_set)}")
    print(f"For {dataset_name}, total {sum(duration_list) / 3600:.2f} hours")


if __name__ == "__main__":
    max_workers = 36

    tokenizer = "char"  # "pinyin" | "char"

    SUB_SET = ["train-clean-100", "train-clean-360", "train-other-500"]
    dataset_dir = "<SOME_PATH>/LibriTTS"
    dataset_name = f"LibriTTS_{'_'.join(SUB_SET)}_{tokenizer}".replace("train-clean-", "").replace("train-other-", "")
    save_dir = str(files("f5_tts").joinpath("../../")) + f"/data/{dataset_name}"
    print(f"\nPrepare for {dataset_name}, will save to {save_dir}\n")
    main()

# For LibriTTS_100_360_500_char, sample count: 354218
# For LibriTTS_100_360_500_char, vocab size is: 78
# For LibriTTS_100_360_500_char, total 554.09 hours
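
As a quick sanity check after the script finishes, the three artifacts can be read back with the same libraries the script already depends on. A minimal sketch, assuming the default output location under `data/` and that `datasets.Dataset.from_file` can load the `ArrowWriter` output (the path below is illustrative, not part of this commit):

```python
import json
from pathlib import Path

from datasets import Dataset

save_dir = Path("data/LibriTTS_100_360_500_char")  # assumed output dir

# raw.arrow holds one row per clip: audio_path, text, duration
ds = Dataset.from_file(str(save_dir / "raw.arrow"))
print(ds[0])

# duration.json mirrors the duration column for cheap batch planning
with open(save_dir / "duration.json", encoding="utf-8") as f:
    durations = json.load(f)["duration"]
assert len(durations) == len(ds)

# vocab.txt lists one character per line, sorted
vocab = (save_dir / "vocab.txt").read_text(encoding="utf-8").splitlines()
print(f"{len(ds)} samples, {len(vocab)} vocab entries, {sum(durations) / 3600:.2f} h")
```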

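The separate `duration.json` exists so length-aware batching never has to touch the Arrow file. A minimal sketch of that idea, assuming batches are capped by a total-seconds budget; `batch_by_duration` and the cap value are hypothetical, not the repo's actual `DynamicBatchSampler`:

```python
import json


def batch_by_duration(durations, max_batch_seconds=320.0):
    """Greedily pack sample indices so each batch stays under a seconds budget."""
    order = sorted(range(len(durations)), key=durations.__getitem__)  # short clips first
    batches, batch, batch_seconds = [], [], 0.0
    for idx in order:
        if batch and batch_seconds + durations[idx] > max_batch_seconds:
            batches.append(batch)
            batch, batch_seconds = [], 0.0
        batch.append(idx)
        batch_seconds += durations[idx]
    if batch:
        batches.append(batch)
    return batches


with open("data/LibriTTS_100_360_500_char/duration.json", encoding="utf-8") as f:
    durations = json.load(f)["duration"]
print(f"{len(batch_by_duration(durations))} duration-bucketed batches")
```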