-
Notifications
You must be signed in to change notification settings - Fork 1
/
mining-carigold.py
105 lines (82 loc) · 2.63 KB
/
mining-carigold.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
from glob import glob
from tqdm import tqdm
from concurrent.futures import ThreadPoolExecutor, as_completed
from scipy.spatial import KDTree
import orjson as json
import random
import numpy as np
import pickle
import os
import mp
class Pointer:
    """Persist a resumable integer cursor to disk so an interrupted run can continue.

    The cursor starts at -1 (nothing processed). Each save pickles the current
    index to `filename`; `load` restores it when the checkpoint file exists.
    """

    def __init__(self, filename):
        self.filename = filename
        self.index = -1

    def _save(self):
        # Overwrite the checkpoint with the current cursor position.
        with open(self.filename, 'wb') as handle:
            pickle.dump(self.index, handle)

    def increment(self):
        # Advance the cursor and checkpoint immediately.
        self.index += 1
        self._save()

    def load(self):
        # Restore the previous checkpoint, if any; otherwise keep the default -1.
        if not os.path.exists(self.filename):
            return
        with open(self.filename, 'rb') as handle:
            self.index = pickle.load(handle)
def dedup(strings):
    """Return `strings` with case-insensitive duplicates dropped.

    The first occurrence (with its original casing) is kept; later items whose
    lowercase form was already seen are discarded. Order is preserved.
    """
    seen = set()
    kept = []
    for item in strings:
        key = item.lower()
        if key in seen:
            continue
        seen.add(key)
        kept.append(item)
    return kept
# Load every scraped shard; each JSONL record carries an embedding ('v') and
# its source text ('text').
data = []
for f in glob('carigold-*.jsonl'):
    with open(f) as fopen:
        for x in tqdm(fopen):
            try:
                data.append(json.loads(x))
            except ValueError:
                # orjson raises JSONDecodeError (a ValueError subclass) on
                # malformed lines; skip them rather than aborting the load.
                # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
                pass
vectors, texts = [], []
for d in data:
    vectors.append(d['v'])
    texts.append(d['text'])
# KD-tree over all embeddings, used by the workers for near/far neighbour mining.
concat = np.array(vectors)
kd_tree = KDTree(concat, leafsize = 40)
# exist_ok makes re-runs quiet; os.system('mkdir ...') printed an error when the
# directory already existed (and ignored it).
os.makedirs('carigold-hard', exist_ok = True)
# Distance thresholds: below lower_bound => positive pair,
# above upper_bound => hard negative.
lower_bound = 0.6
upper_bound = 1.2
def loop(data):
    """Worker body for mp.multiprocessing: mine hard positive/negative pairs.

    `data` is a `(chunk, worker_index)` pair — a slice of row indices plus this
    worker's index. For each row, the embedding is queried against the global
    KD-tree; texts closer than `lower_bound` become positives and texts farther
    than `upper_bound` become hard negatives. Results are appended as
    {'query', 'pos', 'neg'} records to a per-worker JSONL file, with a
    per-worker Pointer checkpoint so interrupted runs resume.

    Relies on module-level globals: kd_tree, concat, texts, lower_bound,
    upper_bound, dedup, Pointer.
    """
    data, index = data
    filename = f'carigold-hard/{index}.jsonl'
    # FIX: checkpoint is now per-worker. Previously every worker opened the
    # same pickle file, so 30 concurrent processes clobbered each other's
    # resume state.
    pointer = Pointer(f'carigold-hard/{index}.pickle')
    pointer.load()
    # FIX: context manager guarantees the output handle is closed (it was
    # opened with a bare open() and never closed).
    with open(filename, 'a') as out:
        for n in tqdm(range(len(data))):
            if n <= pointer.index:
                # Already handled by a previous run — skip.
                continue
            x = data[n]
            # Distances from this row to every point, nearest first.
            dist, ind = kd_tree.query(concat[x], k=len(concat))
            query = texts[x]
            pos_indices = [k for k in ind[dist < lower_bound]]
            neg_indices = [k for k in ind[dist > upper_bound]]
            # Cap the candidate pools to keep records small.
            if len(pos_indices) > 6:
                pos_indices = random.sample(pos_indices, 6)
            if len(neg_indices) > 5:
                neg_indices = random.sample(neg_indices, 5)
            # Drop the query itself, empty/1-char texts, and case-duplicates.
            pos = [texts[i] for i in pos_indices if texts[i] != query and len(texts[i]) > 1]
            pos = dedup(pos)
            if len(pos) == 0:
                continue
            neg = [texts[i] for i in neg_indices if texts[i] != query and len(texts[i]) > 1]
            neg = dedup(neg)
            if len(neg) == 0:
                continue
            d = {'query': query, 'pos': pos, 'neg': neg}
            # orjson.dumps returns bytes, hence the .decode().
            out.write(f'{json.dumps(d).decode()}\n')
            out.flush()
            # Checkpoint only after a record is written; skipped rows are
            # re-examined on resume (cheap, and preserves original behavior).
            pointer.index = n
            pointer._save()
mp.multiprocessing(range(len(data)), loop, cores = 30, returned = False)