This repository has been archived by the owner on Oct 13, 2022. It is now read-only.

WIP: huggingface tokenizer and Neural LM training pipeline. #139

Open · wants to merge 28 commits into base: master

Changes from 13 commits

Commits (28)
f038e60
hugginface tokenizer and Neural LM training pipeline.
glynpu Mar 25, 2021
e9482d2
draft of class LMDataset
glynpu Mar 29, 2021
135bfdb
a dummy implementation of LMDataset
glynpu Mar 29, 2021
88e0d49
collate function of NNLM
glynpu Mar 30, 2021
27b1863
add scripts to process word piece lexicons.
csukuangfj Mar 30, 2021
212b79b
Merge pull request #2 from csukuangfj/fangjun-rnnlm
glynpu Mar 30, 2021
47bf358
trainer
glynpu Mar 30, 2021
d8aaabd
generate lexicon
glynpu Mar 30, 2021
c44f99d
check text length in dataset.py
glynpu Mar 30, 2021
b13954d
remove shuf/comm commands
glynpu Mar 30, 2021
775d477
beta version of training pipeline
glynpu Mar 30, 2021
3b83338
Merge pull request #1 from glynpu/lyg_dev
glynpu Mar 30, 2021
d415ed0
remove unused file
glynpu Mar 30, 2021
4937232
add dependency and fix known bugs
glynpu Apr 1, 2021
61863db
fix various bugs
glynpu Apr 2, 2021
d4dccae
compute word_ppl from token_ppl
glynpu Apr 2, 2021
a4d5f1b
add results.md
glynpu Apr 3, 2021
53e2d1e
compute word_ppl from token_ppl
glynpu Apr 3, 2021
b226a3a
support yaml configuration
glynpu Apr 9, 2021
89ece61
update results with nvocab=5000
glynpu Apr 9, 2021
c3f8811
fix reviews
glynpu Apr 9, 2021
d1b803b
fixed reviews
glynpu Apr 9, 2021
c45d31f
support multi-gpu training with ddp
glynpu Apr 10, 2021
1d38c21
n-best rescoring result with 8-layer transformer lm
glynpu Apr 14, 2021
f6914cd
Merge remote-tracking branch 'dan/master' into nnlm
glynpu Apr 20, 2021
d847b28
filter train data by length to increase batch_size
glynpu Apr 20, 2021
52300df
use Noam optimizer
glynpu Apr 20, 2021
e61a9d1
add rescore scripts
glynpu Apr 20, 2021
14 changes: 14 additions & 0 deletions .flake8
@@ -0,0 +1,14 @@
[flake8]
show-source=true
statistics=true
max-line-length=80
exclude =
.git,

ignore =
# E127 continuation line over-indented for visual indent
E127,
  # F401, imported but unused
F401,
# W504, line break after binary operator
W504,
100 changes: 100 additions & 0 deletions egs/librispeech/asr/nnlm/local/dataset.py
@@ -0,0 +1,100 @@
#!/usr/bin/env python3

# Copyright (c) 2020 Xiaomi Corporation (author: Liyong Guo)
# Apache 2.0

from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
from typing import List
from util import convert_tokens_to_ids

import numpy as np
import os
import torch


class CollateFunc(object):
'''Collate function for LMDataset
'''

def __init__(self, pad_index=0):
# pad_index should be identical to ignore_index of torch.nn.NLLLoss
self.pad_index = pad_index

def __call__(self, batch: List[List[int]]):
        '''batch contains lists of token ids.

        batch can be viewed as a ragged 2-D array, where each row is the
        token-id sequence of one tokenized text, in the format:
            <bos_id> token_id token_id token_id ... <eos_id>
        '''
data_pad = pad_sequence(
[torch.from_numpy(np.array(x)).long() for x in batch], True,
self.pad_index)
xs_pad = data_pad[:, :-1]
ys_pad = data_pad[:, 1:]
return xs_pad, ys_pad


class LMDataset(Dataset):

def __init__(self, text_file: str, lexicon):
'''Dataset to load Language Model train/dev text data

Args:
text_file: text file, text for one utt per line.
'''
self.lexicon = lexicon
        assert os.path.exists(text_file), \
            "text_file: {} does not exist, please check that.".format(
                text_file)
self.data = []
with open(text_file, 'r') as f:
# a line represent a piece of text, e.g.
# DELAWARE IS NOT AFRAID OF DOGS
for line in f:
                text = line.strip().lower().split()
if len(text) == 0:
continue
word_id = convert_tokens_to_ids(text, self.lexicon.word2id)
if len(word_id) == 0:
continue
word_id = torch.from_numpy(np.array(word_id, dtype="int32"))

token_id = self.lexicon.word_seq_to_word_piece_seq(word_id)
# token_id format:
# <bos_id> token_id token_id token_id *** <eos_id>
if len(token_id) >= 2:
self.data.append(token_id)

def __len__(self):
return len(self.data)

def __getitem__(self, idx):
return self.data[idx]

def text2id(self, text: List[str]) -> List[int]:
Collaborator:

The following two methods can be removed.

Contributor Author:

fixed

        # A dummy implementation
return [i for i in range(len(text))]

def text_id2token_id(self, text_id: List[int]) -> List[int]:
        # A dummy implementation
return [i for i in range(len(text_id))]


if __name__ == '__main__':
# train_file = "./data/nnlm/text/librispeech.txt"
dev_file = "./data/nnlm/text/dev.txt"
dataset = LMDataset(dev_file)
collate_func = CollateFunc()
data_loader = DataLoader(dataset,
batch_size=2,
shuffle=True,
num_workers=0,
collate_fn=collate_func)
for i, batch in enumerate(data_loader):
xs, ys = batch
print(xs)
print(ys)
print(batch)
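For reference, here is a minimal sketch of what CollateFunc produces for a toy batch. The token ids (1 standing in for <bos_id>, 2 for <eos_id>, 0 as the pad index) and the import of CollateFunc from this dataset.py module are assumptions made purely for illustration, not part of the PR.

#!/usr/bin/env python3
# Toy check of CollateFunc: pad a ragged batch of token-id sequences and
# split it into shifted (input, target) pairs.
from dataset import CollateFunc

# Two tokenized texts of different lengths; ids are invented:
# 1 = <bos_id>, 2 = <eos_id>, everything else a word-piece id.
batch = [
    [1, 11, 12, 13, 2],
    [1, 21, 22, 2],
]

collate = CollateFunc(pad_index=0)
xs, ys = collate(batch)

# xs drops the last column, ys drops the first, so ys[i][t] is the token
# the model should predict after seeing xs[i][:t + 1].
print(xs)  # [[1, 11, 12, 13], [1, 21, 22, 2]]
print(ys)  # [[11, 12, 13, 2], [21, 22, 2, 0]]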
85 changes: 85 additions & 0 deletions egs/librispeech/asr/nnlm/local/generate_lexicon.py
@@ -0,0 +1,85 @@
#!/usr/bin/env python3

# Copyright (c) 2020 Xiaomi Corporation (author: Liyong Guo)
# Apache 2.0

import argparse
from tokenizers import Tokenizer
from tokenizers.models import WordPiece
from tokenizers import decoders


def get_args():
parser = argparse.ArgumentParser(
description='generate words.txt tokens.txt and lexicon.txt')
parser.add_argument('--lexicon-path',
default='data/nnlm/lexicon',
type=str,
help="path to save lexicon files")
parser.add_argument('--tokenizer-path',
type=str,
default='./data/lm_train/tokenizer-librispeech.json',
help="path to load tokenizer")
parser.add_argument('--train-file',
default='data/nnlm/text/librispeech.txt',
type=str,
help="""file to be tokenized""")
args = parser.parse_args()
return args


def generate_tokens(args):
tokenizer = Tokenizer.from_file(args.tokenizer_path)
symbols = tokenizer.get_vocab()
tokens_file = '{}/tokens.txt'.format(args.lexicon_path)
tokens_f = open(tokens_file, 'w')
for idx, sym in enumerate(symbols):
tokens_f.write('{} {}\n'.format(sym.lower(), idx))

tokens_f.close()


def generate_lexicon(args, words):
special_words = [
'<eps>', '!SIL', '<SPOKEN_NOISE>', '<UNK>', '<s>', '</s>', '#0'
]
lexicon_file = '{}/lexicon.txt'.format(args.lexicon_path)
lf = open(lexicon_file, 'w')
tokenizer = Tokenizer.from_file(args.tokenizer_path)
tokenizer.decoder = decoders.WordPiece()
for word in words:
if word not in special_words:
output = tokenizer.encode(word)
tokens = ' '.join(output.tokens)
else:
tokens = '[unk]'
Collaborator:

Is there a difference between [unk] and <UNK>?
I find that you're using <UNK> in the above special_words, but [unk] here.

BTW: what are special_words for?

Contributor Author:

These special tokens are inherited from words.txt (simple_v1/data/lang_nosp/words.txt), whose head is:

<eps> 0
!SIL 1
<SPOKEN_NOISE> 2
<UNK> 3
A 4
...
#0 200004
<s> 200005
</s> 200006

I just want to make sure every word in words.txt can be tokenized. Since those special words are not "real" words, I think mapping them to [unk] is better than tokenizing them with the trained tokenizer.

In short, <UNK> together with the other special words is inherited from the upstream ASR pipeline, while [unk] is a token produced by the HuggingFace tokenizer.

lf.write("{}\t{}\n".format(word.lower(), tokens.lower()))
lf.close()


def load_words(args):
words = []
    words_file = '{}/words.txt'.format(args.lexicon_path)

    with open(words_file) as f:
        for line in f:
            arr = line.strip().split()
            words.append(arr[0])

return words


def main():
args = get_args()
generate_tokens(args)
words = load_words(args)
generate_lexicon(args, words)


if __name__ == '__main__':
main()
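To make the lexicon format concrete, here is a hedged sketch of the word-to-word-piece mapping that generate_lexicon writes. The tiny corpus, the temporary training file, the vocab size, and the example words are all invented for illustration; only the tokenizers API already used above is assumed.

#!/usr/bin/env python3
# Toy example: train a small WordPiece tokenizer on a made-up corpus and
# print lexicon.txt-style lines; special words are mapped straight to [unk].
import os
import tempfile

from tokenizers import Tokenizer, normalizers
from tokenizers.models import WordPiece
from tokenizers.normalizers import NFD, Lowercase, StripAccents
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.trainers import WordPieceTrainer

# Tokenizer.train expects file paths, so write a tiny corpus to a temp file.
with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
    f.write('delaware is not afraid of dogs\n' * 100)
    train_file = f.name

tokenizer = Tokenizer(WordPiece(unk_token='[UNK]'))
tokenizer.normalizer = normalizers.Sequence(
    [NFD(), Lowercase(), StripAccents()])
tokenizer.pre_tokenizer = Whitespace()
trainer = WordPieceTrainer(vocab_size=60, special_tokens=['[UNK]'])
tokenizer.train([train_file], trainer)
os.remove(train_file)

special_words = [
    '<eps>', '!SIL', '<SPOKEN_NOISE>', '<UNK>', '<s>', '</s>', '#0'
]
for word in ['delaware', 'dogs', '<UNK>']:
    if word in special_words:
        tokens = '[unk]'  # special words bypass the tokenizer
    else:
        tokens = ' '.join(tokenizer.encode(word).tokens)
    # Each lexicon.txt line is "<word>\t<word piece> <word piece> ...".
    print('{}\t{}'.format(word.lower(), tokens.lower()))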
98 changes: 98 additions & 0 deletions egs/librispeech/asr/nnlm/local/huggingface_tokenizer.py
@@ -0,0 +1,98 @@
#!/usr/bin/env python3

# Copyright (c) 2020 Xiaomi Corporation (author: Liyong Guo)
# Apache 2.0

# reference: https://huggingface.co/docs/tokenizers/python/latest/quicktour.html
import argparse
import logging
import os
import shutil
from pathlib import Path
from tokenizers import Tokenizer
Collaborator:

Could you add some documentation describing how the environment is set up?
I assume that you have run pip install tokenizers beforehand.

Contributor Author:

No problem. A Readme.md will be added.

from tokenizers.models import WordPiece
from tokenizers import normalizers
from tokenizers.normalizers import Lowercase, NFD, StripAccents
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.trainers import WordPieceTrainer
from tokenizers import decoders


def get_args():
parser = argparse.ArgumentParser(
description='train and tokenize with huggingface tokenizer')
parser.add_argument('--train-file',
type=str,
help="""file to train tokenizer""")
parser.add_argument('--vocab-size',
type=int,
default=10000,
help="""number of tokens of the tokenizer""")
parser.add_argument('--tokenizer-path',
type=str,
help="path to save or load tokenizer")
parser.add_argument('--test-file',
type=str,
help="""file to be tokenized""")
args = parser.parse_args()
return args


def train_tokenizer(train_files, save_path, vocab_size):
if os.path.exists(save_path):
logging.warning(
"{} already exists. Please check that.".format(save_path))
return
else:
Path(os.path.dirname(save_path)).mkdir(parents=True, exist_ok=True)

tokenizer = Tokenizer(WordPiece(unk_token='[UNK]'))
tokenizer.normalizer = normalizers.Sequence(
[NFD(), Lowercase(), StripAccents()])
tokenizer.pre_tokenizer = Whitespace()

    # The WordPieceTrainer default vocab_size is 30000; a smaller
    # --vocab-size speeds up training.
    trainer = WordPieceTrainer(vocab_size=vocab_size,
                               special_tokens=['[UNK]'])
tokenizer.train(train_files, trainer)
tokenizer.save(save_path)


def tokenize_text(test_file, tokenizer_path):
if not os.path.exists(tokenizer_path):
logging.warning(
"Tokenizer {} does not exist. Please check that.".format(
tokenizer_path))
return
tokenizer = Tokenizer.from_file(tokenizer_path)
tokenizer.decoder = decoders.WordPiece()
tokenized_file = "{}.tokens".format(test_file)
# tokenized_ids = "{}.ids".format(test_file)
if os.path.exists(tokenized_file):
        logging.warning(
            "The input file seems already tokenized. "
            "Backing up the previous result.")
shutil.copyfile(tokenized_file, "{}.bk".format(tokenized_file))
logging.warning("Tokenizing {}.".format(test_file))
fout = open(tokenized_file, 'w')
with open(test_file) as f:
for line in f:
line = line.strip()
output = tokenizer.encode(line)
fout.write(" ".join(output.tokens) + '\n')

fout.close()


def main():
args = get_args()
if args.train_file is not None:
train_files = [args.train_file]
train_tokenizer(train_files, args.tokenizer_path, args.vocab_size)
Collaborator:

methods like these (train_tokenizer, tokenize_text) would be good candidates to put into the "library" part of snowfall so anybody can import them easily for all the recipes.

Candidate for future work in snowfall: actually this whole script could be easily re-used across recipes had we added a mechanism for auto-registering scripts in PATH (can be done via setup.py)


if args.test_file is not None:
tokenize_text(args.test_file, args.tokenizer_path)


if __name__ == '__main__':
main()
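Following up on the library-reuse suggestion above, here is a minimal sketch of calling train_tokenizer and tokenize_text directly from Python instead of through argparse. The module name huggingface_tokenizer (i.e. importing from this local/ directory) and the concrete file paths are assumptions for illustration; they mirror the defaults used elsewhere in this PR.

#!/usr/bin/env python3
# Toy example: reuse train_tokenizer / tokenize_text as library functions.
from huggingface_tokenizer import train_tokenizer, tokenize_text

tokenizer_path = './data/lm_train/tokenizer-librispeech.json'

# Train a WordPiece tokenizer; the function is a no-op (with a warning)
# if tokenizer_path already exists.
train_tokenizer(train_files=['data/nnlm/text/librispeech.txt'],
                save_path=tokenizer_path,
                vocab_size=5000)

# Tokenize a held-out text file; this writes data/nnlm/text/dev.txt.tokens.
tokenize_text('data/nnlm/text/dev.txt', tokenizer_path)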