# define tools
import os
import asyncio

from dotenv import load_dotenv
from langchain.pydantic_v1 import Field
from langchain.tools import BaseTool, tool, StructuredTool

from data_loader import load_resume, write_cover_letter_to_doc
from schemas import JobSearchInput
from search import get_job_ids, fetch_all_jobs
from utils import FireCrawlClient, SerperClient

load_dotenv()


# Job search tools
def linkedin_job_search(
    keywords: str,
    location_name: str = None,
    job_type: str = None,
    limit: int = 5,
    employment_type: str = None,
    listed_at=None,
    experience=None,
    distance=None,
) -> dict:  # type: ignore
    """
    Search LinkedIn for job postings based on the specified criteria. Returns detailed job listings.
    """
    job_ids = get_job_ids(
        keywords=keywords,
        location_name=location_name,
        employment_type=employment_type,
        limit=limit,
        job_type=job_type,
        listed_at=listed_at,
        experience=experience,
        distance=distance,
    )
    job_desc = asyncio.run(fetch_all_jobs(job_ids))
    return job_desc


def get_job_search_tool():
    """
    Wrap linkedin_job_search in a StructuredTool.

    Returns:
        StructuredTool: A structured tool that runs the LinkedIn job search pipeline.
    """
    job_pipeline_tool = StructuredTool.from_function(
        func=linkedin_job_search,
        name="JobSearchTool",
        description="Search LinkedIn for job postings based on specified criteria. Returns detailed job listings",
        args_schema=JobSearchInput,
    )
    return job_pipeline_tool
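

# Illustrative usage sketch (an assumption, not part of the original module): a
# StructuredTool is invoked with a dict whose keys match its args_schema. The field
# names below mirror linkedin_job_search's parameters and are assumed to match
# JobSearchInput; the query values are placeholders.
def _example_job_search_usage() -> dict:
    job_search_tool = get_job_search_tool()
    # Placeholder search criteria for illustration only.
    return job_search_tool.invoke(
        {"keywords": "machine learning engineer", "location_name": "Berlin", "limit": 3}
    )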


# Resume Extraction Tool
class ResumeExtractorTool(BaseTool):
    """
    Extract the content of a resume from a PDF file.

    Returns:
        str: The extracted content of the resume.
    """

    name: str = "ResumeExtractor"
    description: str = "Extract the content of the uploaded resume from a PDF file."

    def extract_resume(self) -> str:
        """
        Extract and structure job-relevant information from the uploaded resume PDF.

        Returns:
            str: The resume content, highlighting the skills, experience, and qualifications
            relevant to job applications while omitting personal information.
        """
        text = load_resume("temp/resume.pdf")
        return text

    def _run(self) -> str:
        return self.extract_resume()
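

# Illustrative usage sketch (an assumption, not part of the original module):
# BaseTool subclasses expose the same Runnable interface, and an empty dict input
# is assumed to work here because _run takes no arguments.
def _example_resume_extraction() -> str:
    resume_tool = ResumeExtractorTool()
    return resume_tool.invoke({})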


# Cover Letter Generation Tool
@tool
def generate_letter_for_specific_job(resume_details: str, job_details: str) -> dict:
    """
    Generate a tailored cover letter using the provided CV and job details. This function constructs the letter as plain text.

    Returns:
        dict: A dictionary containing the job and resume details used to generate the cover letter.
    """
    return {"job_details": job_details, "resume_details": resume_details}


@tool
def save_cover_letter_for_specific_job(
    cover_letter_content: str, company_name: str
) -> str:
    """
    Save the generated cover letter to a document and return a download link.

    Params:
        cover_letter_content: The combined resume and job details tailored into the cover letter.
        company_name: The company name used to name the saved file.
    """
    filename = f"temp/{company_name}_cover_letter.docx"
    file = write_cover_letter_to_doc(cover_letter_content, filename)
    abs_path = os.path.abspath(file)
    return f"Here is the download link: {abs_path}"


# Web Search Tools
@tool("google_search")
def get_google_search_results(
    query: str = Field(..., description="Search query for web")
) -> str:
    """
    Search the web for the given query and return the search results.
    """
    response = SerperClient().search(query)
    items = response.get("items", [])
    string = []
    for result in items:
        try:
            string.append(
                "\n".join(
                    [
                        f"Title: {result['title']}",
                        f"Link: {result['link']}",
                        f"Snippet: {result['snippet']}",
                        "---",
                    ]
                )
            )
        except KeyError:
            continue
    content = "\n".join(string)
    return content
@tool("scrape_website")
def scrape_website(url: str = Field(..., description="Url to be scraped")) -> str:
"""
Scrape the content of a website and return the text.
"""
try:
content = FireCrawlClient().scrape(url)
except Exception as exc:
return f"Failed to scrape {url}"
return content
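

# Illustrative sketch (an assumption, not part of the original module): one way the
# tools defined above might be collected into the list an agent executor expects.
# The @tool-decorated functions are already tool objects and are passed as-is.
def _example_tool_list() -> list:
    return [
        get_job_search_tool(),
        ResumeExtractorTool(),
        generate_letter_for_specific_job,
        save_cover_letter_for_specific_job,
        get_google_search_results,
        scrape_website,
    ]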