# Ollama.py
#
# This script interacts with the Ollama API to generate summaries from text content.
# It reads a text file, sends the content to Ollama for summarization, and saves the
# result. The script includes error handling, retries, and a status check for the
# Ollama service.
#
# Required packages:
#   pip install requests python-dotenv
#
# Note: On Windows, if Ollama cannot bind to its port because the port falls in
# a reserved range, restarting the NAT service can free it:
#   net stop winnat
#   net start winnat
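#
# Configuration is read from a .env file in the working directory. A minimal
# example follows; the values shown (model name, temperature, token limit) are
# illustrative assumptions, not defaults shipped with this script:
#
#   OLLAMA_API_URL=http://localhost:11434/api/generate
#   DEFAULT_MODEL=llama3
#   DEFAULT_TEMPERATURE=0.7
#   DEFAULT_MAX_TOKENS=1024
#   DEFAULT_SUMMARY_LANGUAGE=English
#   MAX_RETRIES=3
#   RETRY_DELAY=2
#   OUTPUT_FILE=output.txt
#   PROMPT_TEMPLATE="Summarize the following text in {language}: {content}"
#
# PROMPT_TEMPLATE must contain the {language} and {content} placeholders, which
# are filled in by generate_summary() below.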

import os
import time

import requests
from dotenv import load_dotenv

# Load environment variables from the .env file.
load_dotenv()

def read_txt_file(file_path='pdf_to_text_temp.txt'):
    """
    Read the content of a text file.

    Args:
        file_path (str): Path to the text file. Defaults to 'pdf_to_text_temp.txt'.

    Returns:
        str: Content of the text file.
    """
    with open(file_path, 'r', encoding='utf-8') as f:
        return f.read()

def get_ollama_response(prompt, model, temperature, max_tokens):
    """
    Send a request to the Ollama API and get the response.

    Args:
        prompt (str): The input prompt for the model.
        model (str): The name of the model to use.
        temperature (float): The temperature setting for text generation.
        max_tokens (int): The maximum number of tokens to generate.

    Returns:
        str: The generated response from Ollama, or None if the request fails.
    """
    url = os.getenv('OLLAMA_API_URL', 'http://localhost:11434/api/generate')
    data = {
        "model": model,
        "prompt": prompt,
        "stream": False,
        "options": {
            "temperature": temperature,
            "num_predict": max_tokens
        }
    }
    max_retries = int(os.getenv('MAX_RETRIES', 3))
    retry_delay = int(os.getenv('RETRY_DELAY', 2))
    for attempt in range(max_retries):
        try:
            # A generous timeout keeps a stalled connection from hanging the
            # script forever; long summaries can legitimately take minutes.
            response = requests.post(url, json=data, timeout=600)
            response.raise_for_status()
            return response.json()['response']
        except requests.exceptions.RequestException as e:
            print(f"Request error (attempt {attempt + 1}/{max_retries}): {e}")
            if attempt < max_retries - 1:
                time.sleep(retry_delay)
    print("Maximum retry attempts reached. Abandoning request.")
    return None
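
# Example call, as a quick sanity check (illustrative: the model name "llama3"
# is an assumption and must already be available locally, e.g. via
# `ollama pull llama3`):
#
#   reply = get_ollama_response("Say hello.", "llama3", temperature=0.7, max_tokens=128)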

def generate_summary(content, model, temperature, max_tokens, language):
    """
    Generate a summary using the Ollama API.

    Args:
        content (str): The text content to summarize.
        model (str): The name of the model to use.
        temperature (float): The temperature setting for text generation.
        max_tokens (int): The maximum number of tokens to generate.
        language (str): The language for the summary.

    Returns:
        str: The generated summary, or None if the request fails.
    """
    prompt_template = os.getenv('PROMPT_TEMPLATE')
    if not prompt_template:
        raise ValueError("PROMPT_TEMPLATE is not set in the .env file")
    prompt = prompt_template.format(language=language, content=content)
    return get_ollama_response(prompt, model, temperature, max_tokens)

def check_ollama_status():
    """
    Check whether the Ollama service is running.

    Returns:
        bool: True if Ollama is reachable, False otherwise.
    """
    url = os.getenv('OLLAMA_API_URL', 'http://localhost:11434/api/generate')
    try:
        # A 404 still means the server answered (the dummy model simply does
        # not exist), so it also counts as "running".
        response = requests.post(url, json={"model": "dummy", "prompt": "test"}, timeout=5)
        return response.status_code in (200, 404)
    except requests.exceptions.RequestException:
        return False
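
# A lighter-weight health check, if preferred, is a GET against Ollama's
# version endpoint instead of a dummy generate call (assumes a reasonably
# recent Ollama build that exposes /api/version):
#
#   requests.get('http://localhost:11434/api/version', timeout=5).ok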

def main():
    """
    Main function to orchestrate the summarization process.
    """
    if not check_ollama_status():
        print("Warning: Ollama doesn't seem to be running. Please ensure the Ollama service is started.")
        return

    # Validate before converting: float(None)/int(None) would raise a confusing
    # TypeError if a variable is missing from the .env file.
    model = os.getenv('DEFAULT_MODEL')
    temperature = os.getenv('DEFAULT_TEMPERATURE')
    max_tokens = os.getenv('DEFAULT_MAX_TOKENS')
    language = os.getenv('DEFAULT_SUMMARY_LANGUAGE')
    if not all([model, temperature, max_tokens, language]):
        raise ValueError("Missing required environment variable settings. Please check the .env file.")
    temperature = float(temperature)
    max_tokens = int(max_tokens)

    try:
        content = read_txt_file()
    except FileNotFoundError:
        print("Input file not found: pdf_to_text_temp.txt")
        return
    if not content:
        print("No content found or file is empty")
        return

    print("Generating summary...")
    summary = generate_summary(content, model, temperature, max_tokens, language)
    if summary:
        output_file = os.getenv('OUTPUT_FILE', 'output.txt')
        with open(output_file, 'w', encoding='utf-8') as f:
            f.write(summary)
        print(f"Summary has been saved to {output_file}")
    else:
        print("Failed to generate summary")


if __name__ == "__main__":
    main()