import os
import re

import fitz  # PyMuPDF, used to extract text from uploaded PDFs
import google.generativeai as genai
import markdown
import spacy
import speech_recognition as sr
from flask import Flask, jsonify, redirect, render_template, request, url_for
from gtts import gTTS
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen import canvas
from sentence_transformers import SentenceTransformer, util  # used only by the commented-out embedding scorer below
from spacy.matcher import PhraseMatcher
from werkzeug.utils import secure_filename

app = Flask(__name__)
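
# Flow overview: an applicant uploads a resume PDF (/upload), the anonymized
# text is displayed (/display_text), a spoken interview answer is recorded and
# anonymized (/interview), and the answer is scored against the job
# description in static/job.txt via the Gemini API (/result).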
@app.route('/')
def index():
    return render_template('index.html')
@app.route('/interview', methods=['GET', 'POST'])
def speech_to_text():
    """Record an answer from the microphone, transcribe it with Google Speech
    Recognition, anonymize it, and save both the text and a TTS reading of it.

    Note: sr.Microphone() captures audio on the machine running Flask, so this
    route only works when the server and the candidate share a machine.
    """
    if request.method == 'POST':
        recognizer = sr.Recognizer()
        with sr.Microphone() as source:
            recognizer.adjust_for_ambient_noise(source, duration=0.2)
            print("Speak something...")
            audio = recognizer.listen(source, timeout=10)
        try:
            text = recognizer.recognize_google(audio)
            print(text)
            clean_text = sanitize(text)
            # Read the anonymized answer back as speech for playback on the page.
            gTTS(text=clean_text, lang='en', slow=False).save("static/Answer1.mp3")
            audio_url = url_for('static', filename='Answer1.mp3')
            save_text_to_file(clean_text, "static/answer.txt")
            return clean_text
        except sr.UnknownValueError:
            return "Could not understand audio"
        except sr.RequestError as e:
            return f"Error: {str(e)}"
    return render_template('interview.html')
def save_text_to_file(text, filename):
    with open(filename, 'w') as file:
        file.write(text)
UPLOAD_FOLDER = 'uploads'
SANITIZED_FOLDER = 'sanitized'
ALLOWED_EXTENSIONS = {'pdf'}
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['SANITIZED_FOLDER'] = SANITIZED_FOLDER
# Create the working directories up front so file saves don't fail.
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
os.makedirs(SANITIZED_FOLDER, exist_ok=True)

nlp = spacy.load('en_core_web_sm')
# Demographic and location phrases to redact from resumes and answers.
# The country list is intentionally partial; extend as needed.
PHRASES = [
    "male", "female", "non-binary", "transgender",
    "United States", "USA", "America", "China", "India",  # Add more countries as needed
    "New York", "California", "Texas", "London", "Paris", "Mumbai",  # Add more cities or regions as needed
    "Mr.", "Ms.", "Mrs.", "Miss", "Dr.",  # Honorific titles
    "male", "female", "other", "prefer not to say",  # Gender identities
    "African", "Asian", "Caucasian", "Hispanic", "Latino", "Middle Eastern", "Native American", "Pacific Islander",  # Ethnicities
    "Christian", "Muslim", "Jewish", "Buddhist", "Hindu", "Atheist", "Agnostic", "other religion",  # Religious affiliations
    "married", "single", "divorced", "widowed", "separated", "partnered",  # Marital status
    "heterosexual", "homosexual", "bisexual", "pansexual", "asexual",  # Sexual orientations
    "USA", "UK", "United Kingdom", "Australia", "Canada", "Germany", "France", "Italy", "Spain", "Japan", "Brazil",  # Additional countries
    "NY", "LA", "SF", "UK", "JP", "CA", "TX", "FL", "WA",  # Abbreviations for states, cities, and countries
    "North", "South", "East", "West", "Northeast", "Northwest", "Southeast", "Southwest",  # Directions
    "the United States", "the USA", "the UK", "the United Kingdom",  # Country prefixes
    "Europe", "Asia", "Africa", "North America", "South America", "Australia", "Antarctica",  # Continents
    "Afghanistan", "Albania", "Algeria", "Andorra", "Angola", "Antigua and Barbuda", "Argentina", "Armenia", "Australia", "Austria",  # Countries
    "Azerbaijan", "Bahamas", "Bahrain", "Bangladesh", "Barbados", "Belarus", "Belgium", "Belize", "Benin", "Bhutan",
    "Bolivia", "Bosnia and Herzegovina", "Botswana", "Brazil", "Brunei", "Bulgaria", "Burkina Faso", "Burundi", "Cabo Verde",
    "Cambodia", "Cameroon", "Canada", "Central African Republic", "Chad", "Chile", "China", "Colombia", "Comoros", "Congo, Democratic Republic of the",
    "Congo, Republic of the", "Costa Rica", "Cote d'Ivoire", "Croatia", "Cuba", "Cyprus", "Czech Republic", "Denmark", "Djibouti",
    "Dominica", "Dominican Republic", "East Timor (Timor-Leste)", "Ecuador", "Egypt", "El Salvador", "Equatorial Guinea", "Eritrea", "Estonia",
    "Eswatini", "Ethiopia", "Fiji", "Finland", "France", "Gabon", "Gambia", "Georgia", "Germany", "Ghana", "Greece", "Grenada",
    "Guatemala", "Guinea", "Guinea-Bissau", "Guyana", "Haiti", "Honduras", "Hungary", "Iceland", "India", "Indonesia", "Iran", "Iraq",
    "Ireland", "Israel", "Italy", "Jamaica", "Japan", "Jordan", "Kazakhstan", "Kenya", "Kiribati", "Korea, North", "Korea, South",
    "Kosovo", "Kuwait", "Kyrgyzstan", "Laos", "Latvia", "Lebanon", "Lesotho", "Liberia", "Libya", "Liechtenstein", "Lithuania",
]
matcher = PhraseMatcher(nlp.vocab)
# spaCy 3.x API: patterns are passed as a list; the old
# matcher.add(key, None, *docs) signature raises a TypeError.
matcher.add("PHRASES", [nlp.make_doc(text) for text in PHRASES])
def sanitize_text(text):
    """Replace any occurrence of a phrase from PHRASES with 'REDACTED'."""
    doc = nlp(text)
    spans = []
    for match_id, start, end in matcher(doc):
        spans.append(doc[start:end])
    for span in spans:
        text = text.replace(span.text, "REDACTED")
    return text
def remove_propn(text):
    """Redact contact details with regexes, then PERSON entities via spaCy NER."""
    # Remove email addresses
    text = re.sub(r'\S+@\S+', 'REDACTED_EMAIL', text)
    # Remove phone numbers (supports common formats; intentionally broad, so
    # it may occasionally redact unrelated digit sequences)
    text = re.sub(r'(\+?(\d{1,2})?[ -]?\(?\d{3}\)?[ -]?\d{3}[ -]?\d{4}\b)', 'REDACTED_PHONE', text)
    # Remove LinkedIn profile URLs
    text = re.sub(r'https?://(?:www\.)?linkedin\.com/(?:in|pub|profile)/\S+', 'REDACTED_URL', text)
    # Remove any remaining .com addresses
    text = re.sub(r'\b\S*\.com\S*\b', 'REDACTED', text)
    doc = nlp(text)
    sanitized_text = []
    for token in doc:
        if token.ent_type_ == 'PERSON':
            sanitized_text.append("REDACTED_NAME")
        else:
            sanitized_text.append(token.text)
    return ' '.join(sanitized_text)
def allowed_file(filename):
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/login', methods=['GET', 'POST'])
def login():
    if request.method == 'POST':
        # Handle login form submission
        # Validate credentials (you will implement this)
        # Redirect to appropriate page
        return render_template('index.html')
    return render_template('login.html')
@app.route('/register', methods=['GET', 'POST'])
def register():
    if request.method == 'POST':
        # Handle register form submission
        # Register user (you will implement this)
        # Redirect to appropriate page
        return render_template('index.html')
    return render_template('register.html')
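
# NOTE: login/register above are stubs: the POST branch accepts any submission
# and renders index.html without checking credentials. A minimal sketch of the
# kind of check the TODO comments point at (hypothetical `users` store, using
# werkzeug's check_password_hash):
#
#     from werkzeug.security import check_password_hash
#     username = request.form.get('username')
#     password = request.form.get('password')
#     if username in users and check_password_hash(users[username], password):
#         return redirect(url_for('index'))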
@app.route('/form')
def upload_form():
    return render_template('apply.html')

@app.route('/job_listing.html')
def job_listing():
    return render_template('job-list.html')

@app.route('/job_details.html')
def job_details():
    return render_template('job-detail.html')
@app.route('/result')
def result():
    interview_relevance = compare_texts('static/job.txt', 'static/answer.txt')
    return render_template('Result.html', interview_relevance=interview_relevance)
@app.route('/display_text')
def display_text():
    sanitized_file_path = os.path.join(app.config['SANITIZED_FOLDER'], 'sanitized_resume.txt')
    with open(sanitized_file_path, 'r', encoding='utf-8') as f:
        sanitized_resume_content = f.read()
    return render_template('display.html', resume_content=sanitized_resume_content)
def sanitize(text):
    """Run both anonymization passes: phrase redaction, then regex/NER redaction."""
    sanitized_text = sanitize_text(text)
    return remove_propn(sanitized_text)
@app.route('/upload', methods=['POST'])
def upload_file():
    if 'file' not in request.files:
        return 'No file part'
    file = request.files['file']
    if file.filename == '':
        return 'No selected file'
    if file and allowed_file(file.filename):
        filename = secure_filename(file.filename)
        file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        file.save(file_path)
        pdf_text = extract_text_from_pdf(file_path)
        clean_txt = sanitize(pdf_text)
        sanitized_file_path = os.path.join(app.config['SANITIZED_FOLDER'], 'sanitized_resume.txt')
        with open(sanitized_file_path, 'w', encoding='utf-8') as f:
            f.write(clean_txt)
        # txt_to_pdf('sanitized/sanitized_resume.txt', 'sanitized/sanitized_resume.pdf')
        return redirect(url_for('display_text'))
    return 'Invalid file type'
def extract_text_from_pdf(pdf_path):
    """Extract plain text from every page of a PDF with PyMuPDF (fitz)."""
    text = ''
    with fitz.open(pdf_path) as doc:
        for page in doc:
            text += page.get_text()
    return text
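
# Note: page.get_text() only returns text embedded in the PDF; scanned,
# image-only resumes come back empty and would need OCR first.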
def txt_to_pdf(input_txt, output_pdf):
    """Render a text file as a simple PDF (currently only referenced by the
    commented-out call in upload_file)."""
    c = canvas.Canvas(output_pdf, pagesize=letter)
    with open(input_txt, 'r') as f:
        text = f.read()
    c.setFont("Helvetica", 12)
    y = 750
    for line in text.split('\n'):
        # Start a new page once we run out of vertical space.
        if y < 50:
            c.showPage()
            c.setFont("Helvetica", 12)
            y = 750
        c.drawString(50, y, line)
        y -= 15
    c.save()
def display_pdf_text(pdf_text):
    return render_template('display.html', pdf_text=pdf_text)
# Earlier embedding-based scorer, kept for reference: it returned a raw
# cosine-similarity score rather than an LLM evaluation.
# def compare_texts(job_description_file, answer_1_file):
#     def read_text_from_file(file_path):
#         with open(file_path, 'r', encoding='utf-8') as file:
#             return file.read().strip()
#     model = SentenceTransformer('all-MiniLM-L6-v2')
#     job_description = read_text_from_file(job_description_file)
#     answer_1 = read_text_from_file(answer_1_file)
#     # Encode the texts to get their embeddings
#     job_description_embedding = model.encode(job_description)
#     answer_1_embedding = model.encode(answer_1)
#     similarity_1 = util.cos_sim(job_description_embedding, answer_1_embedding)
#     print(similarity_1)
#     return similarity_1.item()
# Requires the GEMINI_API_KEY environment variable; this raises a KeyError at
# startup if it is not set.
genai.configure(api_key=os.environ["GEMINI_API_KEY"])
def compare_texts(job_description_file, answer_1_file):
    """Ask Gemini to score how relevant the recorded answer is to the job
    description, and return its evaluation as HTML."""
    def read_text_from_file(file_path):
        with open(file_path, 'r', encoding='utf-8') as file:
            return file.read().strip()

    # Read job description and answer from files
    job_description = read_text_from_file(job_description_file)
    answer_1 = read_text_from_file(answer_1_file)

    # Prompt for relevance evaluation
    prompt = f"""
Job Description: {job_description}
Interview Question: Tell us about a time when you had to handle a difficult client.
Candidate's Answer: {answer_1}
Instructions:
1. Evaluate how well the candidate's answer aligns with the job description.
2. Assess the relevance of the candidate's answer to the interview question.
3. Consider the following criteria:
- How effectively did the candidate handle the difficult client?
- Does the answer demonstrate skills relevant to a sales role, such as problem-solving, empathy, effective communication, and the ability to maintain a positive relationship?
- How well does the candidate's answer reflect the specific needs of the sales position described in the job description?
4. Provide a relevance score from 1 to 10, where 10 means the answer is highly relevant and well-suited for the role.
5. Provide a brief explanation of why you gave this score, touching on specific strengths or areas for improvement in the answer.
"""

    # Configure the model
    generation_config = {
        "temperature": 1,
        "top_p": 0.95,
        "top_k": 40,
        "max_output_tokens": 8192,
    }
    model = genai.GenerativeModel(
        model_name="gemini-1.5-flash",
        generation_config=generation_config,
    )

    # Start a chat session and send the prompt
    chat_session = model.start_chat(history=[])
    response = chat_session.send_message(prompt)
    relevance_response = response.text
    # Gemini replies in Markdown; convert it to HTML for the template.
    html_response = markdown.markdown(relevance_response)
    return html_response
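
# The HTML string returned by compare_texts is passed to Result.html as
# interview_relevance (see the /result route above).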
if __name__ == '__main__':
    app.run(debug=True)
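
# To run locally (assumes the spaCy model and a Gemini API key are available):
#   python -m spacy download en_core_web_sm
#   export GEMINI_API_KEY=...
#   python app.py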