Commit 4fcbc6e

changes according to comments and feedback

Intex32 committed Sep 20, 2023
1 parent 0a27174 commit 4fcbc6e
Showing 4 changed files with 3 additions and 36 deletions.
@@ -17,7 +17,6 @@ sealed interface LLM : AutoCloseable {
 ): Int { // TODO: naive implementation with magic numbers
   fun Encoding.countTokensFromMessages(tokensPerMessage: Int, tokensPerName: Int): Int =
     messages.sumOf { message ->
-      message.role.name.length
       countTokens(message.role.name) +
       countTokens(message.content) +
       tokensPerMessage +
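Note on the deletion above: `message.role.name.length` sat on its own line inside the sumOf lambda, so it was evaluated and discarded without contributing to the returned sum — dead code, since `countTokens(message.role.name)` on the next line already accounts for the role. A minimal, self-contained sketch of the counting shape after this change (Message and countTokens here are illustrative stand-ins, not the real xef types; the real countTokens delegates to a tokenizer Encoding):

// Sketch only: stand-in types, not the xef implementation.
data class Message(val role: String, val content: String)

// Crude whitespace split as a stand-in for a real BPE tokenizer count.
fun countTokens(text: String): Int =
    text.split(Regex("\\s+")).count { it.isNotEmpty() }

fun countTokensFromMessages(
    messages: List<Message>,
    tokensPerMessage: Int,
    tokensPerName: Int,
): Int =
    messages.sumOf { message ->
        countTokens(message.role) +
            countTokens(message.content) +
            tokensPerMessage +
            tokensPerName
    }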
@@ -117,40 +117,6 @@ class ConversationSpec :
messagesSizePlusMessageResponse shouldBe memories.size
}

-// considered obsolete since Chat and ChatWithFunctions was separated
-// "functionCall should be null when the model doesn't support functions" {
-// val messages = generateRandomMessages(1, 40, 60)
-// val conversationId = ConversationId(UUID.generateUUID().toString())
-// val scope = Conversation(LocalVectorStore(TestEmbeddings()), conversationId =
-// conversationId)
-//
-// val model = TestModel(modelType = ModelType.ADA, name = "fake-model")
-//
-// model.promptMessage(prompt = Prompt(messages.keys.first()), scope = scope)
-//
-// val lastRequest = model.requests.last()
-//
-// lastRequest.functionCall shouldBe null
-// }
-
-// considered obsolete since Chat and ChatWithFunctions was separated
-// "functionCall should be null when the model support functions and the prompt doesn't
-// contain a function" {
-// val messages = generateRandomMessages(1, 40, 60)
-// val conversationId = ConversationId(UUID.generateUUID().toString())
-// val scope = Conversation(LocalVectorStore(TestEmbeddings()), conversationId =
-// conversationId)
-//
-// val model =
-//   TestFunctionsModel(modelType = ModelType.GPT_3_5_TURBO_FUNCTIONS, name = "fake-model")
-//
-// model.createChatCompletion(prompt = Prompt(messages.keys.first()), scope = scope)
-//
-// val lastRequest = model.requests.last()
-//
-// lastRequest.functionCall shouldBe null
-// }
-
"functionCall shouldn't be null when the model support functions and the prompt contain a function" {
val question = "fake-question"
val answer = Answer("fake-answer")
@@ -69,7 +69,7 @@ class OpenAI(internal var token: String? = null, internal var host: String? = nu
val GPT_4 by lazy { autoClose(OpenAIChat(ModelType.GPT_4, defaultClient)) }

val GPT_4_0314 by lazy {
-    autoClose(OpenAIFunChat(ModelType.GPT_4, defaultClient)) // legacy
+    autoClose(OpenAIFunChat(ModelType.GPT_4_0314, defaultClient)) // legacy
}

val GPT_4_32K by lazy { autoClose(OpenAIChat(ModelType.GPT_4_32K, defaultClient)) }
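The one-line change above fixes a copy-paste slip: the GPT_4_0314 property built its client with ModelType.GPT_4, so the "pinned" accessor silently targeted the floating gpt-4 alias; it now uses the new ModelType.GPT_4_0314 object added in the next file. For reference, a self-contained sketch of the by-lazy-plus-autoClose pattern these properties follow (the stub client and Clients wrapper are mine for illustration, not xef's):

// Sketch only: OpenAIChatStub stands in for the real OpenAIFunChat client.
class OpenAIChatStub(val modelName: String) : AutoCloseable {
    override fun close() = println("closing $modelName")
}

class Clients : AutoCloseable {
    private val toClose = mutableListOf<AutoCloseable>()
    private fun <T : AutoCloseable> autoClose(client: T): T = client.also { toClose += it }

    // Built on first access, reused afterwards, closed together with the owner.
    val GPT_4_0314 by lazy { autoClose(OpenAIChatStub("gpt-4-0314")) }

    override fun close() = toClose.asReversed().forEach { it.close() }
}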
@@ -39,6 +39,8 @@ sealed class ModelType(
data class LocalModel(override val name: String, override val encodingType: EncodingType, override val maxContextLength: Int) : ModelType(name, encodingType, maxContextLength)
// chat
object GPT_4 : ModelType("gpt-4", CL100K_BASE, 8192, tokensPerMessage = 3, tokensPerName = 2, tokenPadding = 5)
+
+object GPT_4_0314 : ModelType("gpt-4-0314", CL100K_BASE, 8192, tokensPerMessage = 3, tokensPerName = 2, tokenPadding = 5)
object GPT_4_32K : ModelType("gpt-4-32k", CL100K_BASE, 32768, tokensPerMessage = 3, tokensPerName = 2, tokenPadding = 5)
object GPT_3_5_TURBO : ModelType("gpt-3.5-turbo", CL100K_BASE, 4097, tokensPerMessage = 4, tokensPerName = 0, tokenPadding = 5)
object GPT_3_5_TURBO_16_K : ModelType("gpt-3.5-turbo-16k", CL100K_BASE, 4097 * 4)
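The constructor arguments in this file are the per-model token-accounting constants that countTokensFromMessages (first file above) consumes: a hard context ceiling plus fixed per-message and per-name overheads and padding. A hypothetical helper under stand-in types (ModelSpec mirrors the diff's fields; remainingForCompletion is mine, not part of xef):

// Stand-in mirroring ModelType's token-budget fields from this diff.
data class ModelSpec(
    val name: String,
    val maxContextLength: Int,
    val tokensPerMessage: Int,
    val tokensPerName: Int,
    val tokenPadding: Int,
)

val gpt4_0314 = ModelSpec("gpt-4-0314", 8192, tokensPerMessage = 3, tokensPerName = 2, tokenPadding = 5)

// Hypothetical: tokens left for the completion once the prompt and the
// model's fixed overheads are subtracted from the context window.
fun remainingForCompletion(spec: ModelSpec, promptTokens: Int, messageCount: Int): Int =
    spec.maxContextLength -
        promptTokens -
        messageCount * (spec.tokensPerMessage + spec.tokensPerName) -
        spec.tokenPadding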
