diff --git a/sample.config.toml b/sample.config.toml
index f6c69436b..895edee16 100644
--- a/sample.config.toml
+++ b/sample.config.toml
@@ -9,4 +9,8 @@ ANTHROPIC = "" # Anthropic API key - sk-ant-1234567890abcdef1234567890abcdef
 
 [API_ENDPOINTS]
 SEARXNG = "http://localhost:32768" # SearxNG API URL
-OLLAMA = "" # Ollama API URL - http://host.docker.internal:11434
\ No newline at end of file
+OLLAMA = "" # Ollama API URL - http://host.docker.internal:11434
+
+[OLLAMA_PARAMS]
+TEMPERATURE = 0.7 # Ollama chat model temperature - Ollama's default is 0.8
+NUM_CTX = 2048 # Ollama context window size - Ollama's default num_ctx is 2048
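
For context, a minimal sketch of the object shape the new [OLLAMA_PARAMS] table is expected to yield once the config file is parsed; the `ParsedOllamaParams` name and the standalone snippet are illustrative only, not part of this patch. Note that a config.toml copied from an older sample will not contain this table until it is added by hand.

```ts
// Illustrative sketch: the parsed shape of the [OLLAMA_PARAMS] table above.
interface ParsedOllamaParams {
  TEMPERATURE: number; // sampling temperature forwarded to ChatOllama
  NUM_CTX: number; // context window size forwarded to ChatOllama
}

const exampleOllamaParams: ParsedOllamaParams = {
  TEMPERATURE: 0.7,
  NUM_CTX: 2048,
};
```
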
diff --git a/src/config.ts b/src/config.ts
index bb6933595..c3f0dfbc7 100644
--- a/src/config.ts
+++ b/src/config.ts
@@ -18,6 +18,10 @@ interface Config {
     SEARXNG: string;
     OLLAMA: string;
   };
+  OLLAMA_PARAMS: {
+    TEMPERATURE: number;
+    NUM_CTX: number;
+  };
 }
 
 type RecursivePartial<T> = {
@@ -45,6 +49,10 @@ export const getSearxngApiEndpoint = () =>
 
 export const getOllamaApiEndpoint = () => loadConfig().API_ENDPOINTS.OLLAMA;
 
+export const getModelTemperature = () => loadConfig().OLLAMA_PARAMS.TEMPERATURE;
+
+export const getModelNumCtx = () => loadConfig().OLLAMA_PARAMS.NUM_CTX;
+
 export const updateConfig = (config: RecursivePartial<Config>) => {
   const currentConfig = loadConfig();
 
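
One caveat with the new getters: a config.toml written before this change has no [OLLAMA_PARAMS] table, so `loadConfig().OLLAMA_PARAMS.TEMPERATURE` would throw a TypeError at runtime. Below is a hedged sketch of a more defensive variant; the `DEFAULT_*` constants are illustrative, and the snippet assumes it sits inside src/config.ts where `loadConfig` is in scope.

```ts
// Sketch only, not part of the patch: tolerate config files that predate
// [OLLAMA_PARAMS] by falling back to Ollama's own defaults.
const DEFAULT_TEMPERATURE = 0.8; // Ollama's default temperature
const DEFAULT_NUM_CTX = 2048; // Ollama's default num_ctx

export const getModelTemperature = () =>
  loadConfig().OLLAMA_PARAMS?.TEMPERATURE ?? DEFAULT_TEMPERATURE;

export const getModelNumCtx = () =>
  loadConfig().OLLAMA_PARAMS?.NUM_CTX ?? DEFAULT_NUM_CTX;
```

If this route is taken, `OLLAMA_PARAMS` should also be marked optional in the `Config` interface (`OLLAMA_PARAMS?: { ... }`) so the optional chaining matches the declared type.
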
diff --git a/src/lib/providers/ollama.ts b/src/lib/providers/ollama.ts
index ed68bfafa..b3753eb31 100644
--- a/src/lib/providers/ollama.ts
+++ b/src/lib/providers/ollama.ts
@@ -1,10 +1,10 @@
 import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
-import { getOllamaApiEndpoint } from '../../config';
+import { getModelNumCtx, getModelTemperature, getOllamaApiEndpoint } from '../../config';
 import logger from '../../utils/logger';
 import { ChatOllama } from '@langchain/community/chat_models/ollama';
 
 export const loadOllamaChatModels = async () => {
   const ollamaEndpoint = getOllamaApiEndpoint();
 
   if (!ollamaEndpoint) return {};
 
@@ -16,20 +16,23 @@ export const loadOllamaChatModels = async () => {
     });
 
     const { models: ollamaModels } = (await response.json()) as any;
 
+    const modelTemperature = getModelTemperature();
+    const modelNumCtx = getModelNumCtx();
     const chatModels = ollamaModels.reduce((acc, model) => {
       acc[model.model] = {
         displayName: model.name,
         model: new ChatOllama({
           baseUrl: ollamaEndpoint,
           model: model.model,
-          temperature: 0.7,
+          temperature: modelTemperature,
+          numCtx: modelNumCtx,
         }),
       };
 
       return acc;
     }, {});
 
     return chatModels;
   } catch (err) {
     logger.error(`Error loading Ollama models: ${err}`);
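
For illustration, a minimal usage sketch of the provider after this change: every entry returned by `loadOllamaChatModels()` is a `ChatOllama` instance that already carries the configured temperature and numCtx. The import path and top-level await are assumptions of the sketch (run as an ES module from the repo root), not code in the repository.

```ts
// Sketch only: load the providers and exercise the first available model.
import { loadOllamaChatModels } from './src/lib/providers/ollama';

const chatModels = await loadOllamaChatModels();
const first = Object.values(chatModels)[0];

if (first) {
  console.log(`Using ${first.displayName}`);
  const reply = await first.model.invoke('Hello from Perplexica');
  console.log(reply.content);
}
```
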
diff --git a/ui/components/SettingsDialog.tsx b/ui/components/SettingsDialog.tsx
index 716dd7d49..abe7c76f2 100644
--- a/ui/components/SettingsDialog.tsx
+++ b/ui/components/SettingsDialog.tsx
@@ -281,7 +281,7 @@ const SettingsDialog = ({
                                   ? chatModelProvider.map((model) => ({
                                       value: model.name,
                                       label: model.displayName,
-                                    }))
+                                    })).sort((a, b) => a.label.localeCompare(b.label))
                                   : [
                                       {
                                         value: '',
@@ -392,7 +392,7 @@ const SettingsDialog = ({
                                 ? embeddingModelProvider.map((model) => ({
                                     label: model.displayName,
                                     value: model.name,
-                                  }))
+                                  })).sort((a, b) => a.label.localeCompare(b.label))
                                 : [
                                     {
                                       label: 'No embedding models available',
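
Since the same comparator is now duplicated for the chat-model and embedding-model selects, a small shared helper would keep the two option lists in sync. A sketch is below; `sortByLabel` is an illustrative name, not an existing export of SettingsDialog.tsx.

```ts
// Sketch only: shared, non-mutating comparator for the two <Select> option lists.
type SelectOption = { value: string; label: string };

const sortByLabel = (options: SelectOption[]): SelectOption[] =>
  [...options].sort((a, b) => a.label.localeCompare(b.label));

// e.g. sortByLabel(chatModelProvider.map((m) => ({ value: m.name, label: m.displayName })))
```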