-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathword2vec.py
67 lines (55 loc) · 1.94 KB
/
word2vec.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
import argparse
import pandas as pd
from gensim.models import Word2Vec
from tqdm import tqdm
from config import TQDM, EMBEDDING_SIZE, Yelp, Yahoo, Amazon, Synthetic
from utils import sent_word_tokenize
class SentenceIterator:
    """Re-iterable stream of tokenized sentences drawn from a dataframe.

    gensim's Word2Vec consumes its corpus more than once (vocabulary
    build + training passes), so the corpus must be an object whose
    ``__iter__`` can be restarted — a plain generator would be exhausted
    after the first pass.
    """

    def __init__(self, dataset):
        # assumes a two-column dataframe whose second column holds the
        # raw document text — TODO confirm against the loading code
        self.dataset = dataset

    def __iter__(self):
        rows = self.dataset.itertuples(index=False)
        progress = tqdm(rows, total=len(self.dataset), disable=(not TQDM))
        for _, text in progress:
            # sent_word_tokenize presumably splits a document into
            # sentences, each a list of word tokens; emit one sentence
            # (list of tokens) at a time, as gensim expects.
            yield from sent_word_tokenize(text)
def train_word2vec_model(dataset, dim_embedding, min_count=5):
    """Fit a Word2Vec model on every sentence of *dataset*.

    Parameters
    ----------
    dataset : pandas.DataFrame
        Frame whose rows carry the raw documents (see SentenceIterator).
    dim_embedding : int
        Dimensionality of the learned word vectors.
    min_count : int, optional
        Words occurring fewer than this many times are dropped from the
        vocabulary (default 5).

    Returns
    -------
    gensim.models.Word2Vec
        The trained model.
    """
    # NOTE(review): `size=` is the gensim < 4.0 keyword (renamed to
    # `vector_size` in 4.0) — this file appears pinned to gensim 3.x;
    # confirm before upgrading.
    model = Word2Vec(min_count=min_count, size=dim_embedding)

    # Seed the vocabulary with the PAD and UNK sentinel tokens before
    # scanning the corpus. Repeating each token min_count times keeps it
    # above the frequency cutoff; the first call initializes the vocab,
    # subsequent calls extend it.
    for sentinel in ("PAD", "UNK"):
        model.build_vocab([[sentinel] * min_count], update=(sentinel != "PAD"))

    corpus = SentenceIterator(dataset)
    model.build_vocab(corpus, update=True)
    model.train(corpus, total_examples=model.corpus_count, epochs=model.epochs)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Create a word2vec model")
    parser.add_argument(
        "dataset",
        choices=["yelp", "yahoo", "amazon", "synthetic"],
        help="Choose the dataset",
    )
    args = parser.parse_args()

    # argparse's `choices` already rejects any value outside this table,
    # so the lookup cannot fail — this replaces the original if/elif
    # chain and its unreachable bare `exit()` fallback.
    dataset_config = {
        "yelp": Yelp,
        "yahoo": Yahoo,
        "amazon": Amazon,
        "synthetic": Synthetic,
    }[args.dataset]

    # Train the embeddings on train + validation splits combined.
    # fillna("") guards tokenization against NaN cells from empty CSV
    # fields; sample(frac=1) shuffles the document order.
    dataset = pd.concat(
        [
            pd.read_csv(dataset_config.TRAIN_DATASET).fillna(""),
            pd.read_csv(dataset_config.VAL_DATASET).fillna(""),
        ]
    ).sample(frac=1)

    model = train_word2vec_model(dataset, EMBEDDING_SIZE)
    # Persist only the KeyedVectors (word -> vector table); that is the
    # part downstream embedding lookups load.
    model.wv.save(dataset_config.EMBEDDING_FILE)