From 3a3fe15c009a281281bb6802aa36dfaf1974515d Mon Sep 17 00:00:00 2001
From: RCW2000 <70165226+RCW2000@users.noreply.github.com>
Date: Fri, 1 Mar 2024 23:54:31 -0500
Subject: [PATCH] Update client.py

---
 ollama/client.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/ollama/client.py b/ollama/client.py
index 9f4e3363..ea082e98 100644
--- a/ollama/client.py
+++ b/ollama/client.py
@@ -2,14 +2,14 @@
 import json
 import requests
 
-BASE_URL = os.environ.get('OLLAMA_HOST', 'http://localhost:11434')
+#BASE_URL = os.environ.get('OLLAMA_HOST', 'http://localhost:11434')
 
 # Generate a response for a given prompt with a provided model. This is a streaming endpoint, so will be a series of responses.
 # The final response object will include statistics and additional data from the request. Use the callback function to override
 # the default handler.
 def generate(model_name, prompt, system=None, template=None, context=None, options=None, callback=None):
     try:
-        url = f"{BASE_URL}/api/generate"
+        url = "http://localhost:11434/api/generate"
         payload = {
             "model": model_name,
             "prompt": prompt,
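
Note on the change above: the patch hardcodes the /api/generate URL instead of reading
OLLAMA_HOST, so the client only ever talks to a local server on the default port. Below
is a minimal sketch of what the patched code path does at runtime against that endpoint,
which streams newline-delimited JSON objects where the final object carries done=True
and request statistics. The model name "llama2" and the prompt are placeholder
assumptions for illustration, not part of the patch.

    import json
    import requests

    # Hardcoded endpoint, matching the patched generate() (no OLLAMA_HOST override).
    url = "http://localhost:11434/api/generate"
    payload = {"model": "llama2", "prompt": "Why is the sky blue?"}  # placeholders

    with requests.post(url, json=payload, stream=True) as response:
        response.raise_for_status()
        for line in response.iter_lines():
            if not line:
                continue
            chunk = json.loads(line)
            # Intermediate objects each carry a fragment of the reply;
            # the final object has done=True plus request statistics.
            if not chunk.get("done"):
                print(chunk.get("response", ""), end="", flush=True)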