-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path: inferral_engine.py
41 lines (28 loc) · 972 Bytes
/
inferral_engine.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
"""
Accepts a string (corresponding to a fragment) and returns
a vector inferred based on an instance of a doc2vec model.
"""
#Import required packages and modules
from flask import Flask, json, request
import os
#import configparser
import numpy as np
import gensim
import import_ipynb
import filepaths as fp
import tokenizer
import corpus_reinferral
# Module-level slot for the doc2vec model. The empty list is only a sentinel;
# load_model() replaces it with the real gensim model on the first request.
model=[] # this is a placeholder for the global 'model' variable
# Flask application object; routes below are registered against it.
app = Flask(__name__)
# NOTE(review): `before_first_request` is deprecated and removed in Flask 2.3+;
# confirm the deployed Flask version, or migrate to loading at startup.
@app.before_first_request
def load_model():
    """Deserialize the doc2vec model from disk into the module-level `model`.

    Runs once, before the first request is served, so that every call to
    the /infer_vector endpoint sees a fully loaded model.
    """
    global model
    loaded = gensim.utils.SaveLoad.load(fp.model_output_file_path)
    model = loaded
    print('loaded model')
@app.route('/infer_vector',methods=['POST'])
def infer_vector():
    """Infer an embedding vector for a posted text fragment.

    Expects a JSON body with a 'text' field; returns a JSON object of the
    form {"vector": [...]} holding the inferred vector as a plain list.
    """
    # `model` is only read here, so no `global` declaration is needed.
    raw_text = request.json['text']
    # Tokenize the fragment the same way the training corpus was tokenized.
    tokens = tokenizer.preprocess_fragment(raw_text).split(' ')
    vector = corpus_reinferral.reinfer_fragment_vector(tokens, model)
    # .tolist() converts the numpy array into a JSON-serializable list.
    return json.dumps({"vector": vector.tolist()})