Skip to content

Commit

Permalink
Feature/Add multi modal to chat ollama (#3499)
Browse files Browse the repository at this point in the history
* add multimodal (image upload) support to ChatOllama

* update JSON mode description
  • Loading branch information
HenryHengZJ authored Nov 10, 2024
1 parent 1e2dc03 commit 51e5591
Show file tree
Hide file tree
Showing 6 changed files with 60 additions and 1,001 deletions.
36 changes: 32 additions & 4 deletions packages/components/nodes/chatmodels/ChatOllama/ChatOllama.ts
Original file line number Diff line number Diff line change
@@ -1,8 +1,9 @@
import { ChatOllama, ChatOllamaInput } from '@langchain/ollama'
import { ChatOllamaInput } from '@langchain/ollama'
import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
import { BaseCache } from '@langchain/core/caches'
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { IMultiModalOption, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { ChatOllama } from './FlowiseChatOllama'

class ChatOllama_ChatModels implements INode {
label: string
Expand All @@ -19,7 +20,7 @@ class ChatOllama_ChatModels implements INode {
constructor() {
this.label = 'ChatOllama'
this.name = 'chatOllama'
this.version = 3.0
this.version = 4.0
this.type = 'ChatOllama'
this.icon = 'Ollama.svg'
this.category = 'Chat Models'
Expand Down Expand Up @@ -54,6 +55,23 @@ class ChatOllama_ChatModels implements INode {
default: 0.9,
optional: true
},
{
label: 'Allow Image Uploads',
name: 'allowImageUploads',
type: 'boolean',
description: 'Allow image uploads for multimodal models. e.g. llama3.2-vision',
default: false,
optional: true
},
{
label: 'JSON Mode',
name: 'jsonMode',
type: 'boolean',
description:
'Coerces model outputs to only return JSON. Specify in the system prompt to return JSON. Ex: Format all responses as JSON object',
optional: true,
additionalParams: true
},
{
label: 'Keep Alive',
name: 'keepAlive',
Expand Down Expand Up @@ -203,6 +221,8 @@ class ChatOllama_ChatModels implements INode {
const repeatLastN = nodeData.inputs?.repeatLastN as string
const repeatPenalty = nodeData.inputs?.repeatPenalty as string
const tfsZ = nodeData.inputs?.tfsZ as string
const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
const jsonMode = nodeData.inputs?.jsonMode as boolean

const cache = nodeData.inputs?.cache as BaseCache

Expand All @@ -225,8 +245,16 @@ class ChatOllama_ChatModels implements INode {
if (tfsZ) obj.tfsZ = parseFloat(tfsZ)
if (keepAlive) obj.keepAlive = keepAlive
if (cache) obj.cache = cache
if (jsonMode) obj.format = 'json'

const multiModalOption: IMultiModalOption = {
image: {
allowImageUploads: allowImageUploads ?? false
}
}

const model = new ChatOllama(obj)
const model = new ChatOllama(nodeData.id, obj)
model.setMultiModalOption(multiModalOption)
return model
}
}
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
import { ChatOllama as LCChatOllama, ChatOllamaInput } from '@langchain/ollama'
import { IMultiModalOption, IVisionChatModal } from '../../../src'

/**
 * Flowise wrapper around the LangChain `ChatOllama` chat model that
 * implements the `IVisionChatModal` contract so the node can accept
 * image uploads (multimodal input).
 */
export class ChatOllama extends LCChatOllama implements IVisionChatModal {
    /** Flowise node instance id this model instance belongs to. */
    id: string

    /** Model name captured at construction time, used to revert later. */
    configuredModel: string

    /** Originally configured max-token setting; not populated here. */
    configuredMaxToken?: number

    /** Multimodal (image upload) options, set after construction. */
    multiModalOption: IMultiModalOption

    constructor(id: string, fields?: ChatOllamaInput) {
        super(fields)
        const initialModel = fields?.model ?? ''
        this.id = id
        this.configuredModel = initialModel
    }

    /** Restore the model name that was configured when this instance was built. */
    revertToOriginalModel(): void {
        this.model = this.configuredModel
    }

    /** Record the multimodal options for this model instance. */
    setMultiModalOption(multiModalOption: IMultiModalOption): void {
        this.multiModalOption = multiModalOption
    }

    /** Intentionally a no-op — no separate vision model is swapped in here. */
    setVisionModel(): void {
        // pass
    }
}
Loading

0 comments on commit 51e5591

Please sign in to comment.