prepro.py
# -*- coding: utf-8 -*-
"""
Preprocessing utilities: scan the data for the maximum sequence length,
load pre-trained word embeddings, convert token lists to index sequences,
load the training data, and serve it in mini-batches.

Created on Sat Aug 18 17:57:15 2018
@author: Peter
"""
import json
import re

import numpy as np


def get_max_length(filename):
    """Return the longest question or evidence length (in tokens) in the file."""
    max_question_len = 0
    max_evidence_len = 0
    with open(filename) as f:
        for line in f:
            data = json.loads(line)
            que_len = len(data['question_tokens'])
            evi_len = len(data['evidence_tokens'])
            max_question_len = max(max_question_len, que_len)
            max_evidence_len = max(max_evidence_len, evi_len)
    return max(max_question_len, max_evidence_len)
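
# Usage sketch (hypothetical path): the data file is expected to hold one
# JSON object per line with "question_tokens" and "evidence_tokens" lists.
#
#   max_len = get_max_length("data/training.json")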


def load_embedding(filename):
    """Load a plain-text embedding file (one "word v1 v2 ... vn" per line).

    Returns the embedding matrix as a list of lists and a word -> row-index map.
    """
    embeddings = []
    word2idx = {}  # plain dict: values are integer row indices
    embedding_size = 0
    print("Start loading word embedding")
    with open(filename, mode='r', encoding='utf-8') as f:
        for line in f:
            arr = line.split(" ")
            embedding = [float(val) for val in arr[1:]]
            word2idx[arr[0]] = len(word2idx)
            embeddings.append(embedding)
            embedding_size = len(arr) - 1
    # Two extra rows: "UNKNOWN" for out-of-vocabulary words and "NUM" for
    # numeric tokens, both initialized to zero vectors.
    word2idx["UNKNOWN"] = len(word2idx)
    embeddings.append([0] * embedding_size)
    word2idx["NUM"] = len(word2idx)
    embeddings.append([0] * embedding_size)
    print("Word embedding loaded")
    return embeddings, word2idx
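
# Usage sketch (hypothetical path): the embedding file is plain text with
# one "word v1 v2 ... vn" entry per line (e.g. word2vec/GloVe text format).
#
#   embeddings, word2idx = load_embedding("data/embedding.txt")
#   # len(embeddings) == vocabulary size + 2 (the "UNKNOWN" and "NUM" rows)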


def sentence2index(sentence, word2idx, max_len):
    """Map a token list to a fixed-length list of vocabulary indices.

    Out-of-vocabulary words map to "UNKNOWN", purely numeric tokens to "NUM",
    and positions past the end of the sentence are padded with "UNKNOWN".
    """
    unknown = word2idx.get("UNKNOWN")
    num = word2idx.get("NUM")
    index = [unknown] * max_len
    for i, word in enumerate(sentence[:max_len]):
        if word in word2idx:
            index[i] = word2idx[word]
        elif re.match(r"\d+", word):
            index[i] = num
        else:
            index[i] = unknown
    return index
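
# Usage sketch (illustrative tokens): known words get their vocabulary
# index, "42" maps to NUM, and the remaining slots are padded with UNKNOWN.
#
#   idx = sentence2index(["How", "many", "42"], word2idx, max_len=5)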


def load_data(filename, word2idx, max_len):
    """Load question/evidence index matrices and one-hot answer-span labels."""
    questions, evidences, y1, y2 = [], [], [], []
    print("Start loading data")
    with open(filename, 'r') as f:
        for line in f:
            data = json.loads(line)
            question = data['question_tokens']
            questionIdx = sentence2index(question, word2idx, max_len)
            evidence = data['evidence_tokens']
            evidenceIdx = sentence2index(evidence, word2idx, max_len)
            start_index = data['answer_start']
            # end_index = data['answer_start'] + len(data['golden_answers']) - 1
            end_index = data['answer_end']
            # One-hot vectors marking the answer span's start and end positions.
            as_temp = np.zeros(max_len)
            ae_temp = np.zeros(max_len)
            as_temp[start_index] = 1
            ae_temp[end_index] = 1
            questions.append(questionIdx)
            evidences.append(evidenceIdx)
            y1.append(as_temp)
            y2.append(ae_temp)
    print("Data loaded")
    return np.array(questions), np.array(evidences), np.array(y1), np.array(y2)
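
# Usage sketch (hypothetical path): the four arrays share their first
# dimension; questions/evidences hold word indices, y1/y2 hold one-hot
# start/end labels, each row of width max_len.
#
#   questions, evidences, y1, y2 = load_data("data/training.json",
#                                            word2idx, max_len)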


def next_batch(questions, evidences, y1, y2, batch_size):
    """Yield mini-batches as numpy arrays; the last batch may be smaller."""
    data_len = len(questions)
    batch_num = int(np.ceil(data_len / batch_size))
    for batch in range(batch_num):
        start = batch * batch_size
        end = min(start + batch_size, data_len)
        yield (np.array(questions[start:end]),
               np.array(evidences[start:end]),
               np.array(y1[start:end]),
               np.array(y2[start:end]))
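
# Usage sketch (illustrative batch size): one full pass over the data.
#
#   for q_batch, e_batch, y1_batch, y2_batch in next_batch(
#           questions, evidences, y1, y2, batch_size=64):
#       ...  # feed the batch to the model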