Merge pull request #57 from animalnots/dev
1.8.3
animalnots authored Sep 8, 2024
2 parents 8c4f606 + 547acfe commit c9b6490
Showing 4 changed files with 64 additions and 32 deletions.
package.json (2 changes: 1 addition & 1 deletion)
@@ -1,7 +1,7 @@
 {
   "name": "better-chatgpt",
   "private": true,
-  "version": "1.8.2",
+  "version": "1.8.3",
   "type": "module",
   "homepage": "./",
   "main": "electron/index.cjs",
src/components/Chat/ChatContent/Message/View/ContentView.tsx (5 changes: 4 additions & 1 deletion)
@@ -38,6 +38,7 @@ import MarkdownModeButton from './Button/MarkdownModeButton';

 import CodeBlock from '../CodeBlock';
 import PopupModal from '@components/PopupModal';
+import { preprocessLaTeX } from '@utils/chat';

 const ContentView = memo(
   ({
@@ -144,7 +145,9 @@ const ContentView = memo(
           p,
         }}
       >
-        {(content[0] as TextContentInterface).text}
+        {inlineLatex
+          ? preprocessLaTeX((content[0] as TextContentInterface).text)
+          : (content[0] as TextContentInterface).text}
       </ReactMarkdown>
     ) : (
       <span className='whitespace-pre-wrap'>
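For context: the new preprocessing is gated on the inlineLatex flag (presumably the user-facing inline-LaTeX setting), so Markdown rendering is untouched when the option is off. A minimal standalone sketch of that pattern, with a simplified text-content shape standing in for the real TextContentInterface from @type/chat:

import { preprocessLaTeX } from '@utils/chat';

// Sketch only: choose the string handed to ReactMarkdown as its child.
type TextContent = { type: 'text'; text: string };

const renderSource = (content: TextContent[], inlineLatex: boolean): string => {
  const text = content[0].text;
  return inlineLatex ? preprocessLaTeX(text) : text;
};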
src/utils/chat.ts (15 changes: 15 additions & 0 deletions)
@@ -51,3 +51,18 @@ export const downloadMarkdown = (markdown: string, fileName: string) => {
   link.click();
   link.remove();
 };
+
+export const preprocessLaTeX = (content: string) => {
+  // Replace block-level LaTeX delimiters \[ \] with $$ $$
+
+  const blockProcessedContent = content.replace(
+    /\\\[(.*?)\\\]/gs,
+    (_, equation) => `$$${equation}$$`
+  );
+  // Replace inline LaTeX delimiters \( \) with $ $
+  const inlineProcessedContent = blockProcessedContent.replace(
+    /\\\((.*?)\\\)/gs,
+    (_, equation) => `$${equation}$`
+  );
+  return inlineProcessedContent;
+};
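A quick usage sketch of the new helper (the input string here is hypothetical). Because both regexes are non-greedy and carry the s flag, each match stops at the first closing delimiter and may span newlines:

import { preprocessLaTeX } from '@utils/chat';

// '\(...\)' becomes '$...$' and '\[...\]' becomes '$$...$$',
// the delimiters a math-aware Markdown pipeline expects.
const raw = 'Inline \\(e^{i\\pi} + 1 = 0\\) and block \\[\\int_0^1 x \\, dx\\]';
console.log(preprocessLaTeX(raw));
// => 'Inline $e^{i\pi} + 1 = 0$ and block $$\int_0^1 x \, dx$$'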
src/utils/messageUtils.ts (74 changes: 44 additions & 30 deletions)
@@ -1,8 +1,13 @@
-
 import useStore from '@store/store';

 import { Tiktoken } from '@dqbd/tiktoken/lite';
-import { isImageContent, isTextContent, MessageInterface, TextContentInterface, TotalTokenUsed } from '@type/chat';
+import {
+  isImageContent,
+  isTextContent,
+  MessageInterface,
+  TextContentInterface,
+  TotalTokenUsed,
+} from '@type/chat';
 import { ModelOptions } from './modelReader';
 const cl100k_base = await import('@dqbd/tiktoken/encoders/cl100k_base.json');

@@ -30,7 +35,9 @@ export const getChatGPTEncoding = (
   const serialized = [
     messages
       .map(({ role, content }) => {
-        return `<|im_start|>${role}${roleSep}${(content[0] as TextContentInterface).text}<|im_end|>`;
+        return `<|im_start|>${role}${roleSep}${
+          (content[0] as TextContentInterface).text
+        }<|im_end|>`;
       })
       .join(msgSep),
     `<|im_start|>assistant${roleSep}`,
@@ -52,38 +59,38 @@ export const limitMessageTokens = (
   const limitedMessages: MessageInterface[] = [];
   let tokenCount = 0;

-  const isSystemFirstMessage = messages[0]?.role === 'system';
-  let retainSystemMessage = false;
+  // const isSystemFirstMessage = messages[0]?.role === 'system';
+  // let retainSystemMessage = false;

-  // Check if the first message is a system message and if it fits within the token limit
-  if (isSystemFirstMessage) {
-    const systemTokenCount = countTokens([messages[0]], model);
-    if (systemTokenCount < limit) {
-      tokenCount += systemTokenCount;
-      retainSystemMessage = true;
-    }
-  }
+  // // Check if the first message is a system message and if it fits within the token limit
+  // if (isSystemFirstMessage) {
+  //   const systemTokenCount = countTokens([messages[0]], model);
+  //   if (systemTokenCount < limit) {
+  //     tokenCount += systemTokenCount;
+  //     retainSystemMessage = true;
+  //   }
+  // }

   // Iterate through messages in reverse order, adding them to the limitedMessages array
   // until the token limit is reached (excludes first message)
-  for (let i = messages.length - 1; i >= 1; i--) {
+  for (let i = messages.length - 1; i >= 0; i--) {
     const count = countTokens([messages[i]], model);
     if (count + tokenCount > limit) break;
     tokenCount += count;
     limitedMessages.unshift({ ...messages[i] });
   }

-  // Process first message
-  if (retainSystemMessage) {
-    // Insert the system message in the third position from the end
-    limitedMessages.splice(-3, 0, { ...messages[0] });
-  } else if (!isSystemFirstMessage && messages.length > 0) {
-    // Check if the first message (non-system) can fit within the limit
-    const firstMessageTokenCount = countTokens([messages[0]], model);
-    if (firstMessageTokenCount + tokenCount < limit) {
-      limitedMessages.unshift({ ...messages[0] });
-    }
-  }
+  // // Process first message
+  // if (retainSystemMessage) {
+  //   // Insert the system message in the third position from the end
+  //   limitedMessages.splice(-3, 0, { ...messages[0] });
+  // } else if (!isSystemFirstMessage && messages.length > 0) {
+  //   // Check if the first message (non-system) can fit within the limit
+  //   const firstMessageTokenCount = countTokens([messages[0]], model);
+  //   if (firstMessageTokenCount + tokenCount < limit) {
+  //     limitedMessages.unshift({ ...messages[0] });
+  //   }
+  // }

   return limitedMessages;
 };
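The behavioral change in this hunk: the loop bound moves from i >= 1 to i >= 0 and the system-message special-casing is commented out, so the function now keeps the longest suffix of the conversation that fits the token budget; the message at index 0 (typically the system prompt) competes like any other message and is the first to be dropped. (The retained comment "excludes first message" is now stale, since the loop includes index 0.) An illustrative sketch, where the message literals, content shape, and model name are assumptions for the example, not values from the repo:

// Hypothetical usage; the token limit is made up for the example.
const history: MessageInterface[] = [
  { role: 'system', content: [{ type: 'text', text: 'Be concise.' }] },
  { role: 'user', content: [{ type: 'text', text: 'Explain tokenizers.' }] },
  { role: 'assistant', content: [{ type: 'text', text: 'A tokenizer splits text into tokens.' }] },
];

// If only the last two messages fit the limit, the system prompt is dropped:
const kept = limitMessageTokens(history, 64, 'gpt-4o-mini' as ModelOptions);
// kept is always a contiguous suffix of history, e.g. [history[1], history[2]]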
@@ -99,23 +106,30 @@ export const updateTotalTokenUsed = (
   );

   // Filter text and image prompts
-  const textPrompts = promptMessages.filter(e => e.content.some(isTextContent));
-  const imgPrompts = promptMessages.filter(e => e.content.some(isImageContent));
+  const textPrompts = promptMessages.filter((e) =>
+    e.content.some(isTextContent)
+  );
+  const imgPrompts = promptMessages.filter((e) =>
+    e.content.some(isImageContent)
+  );

   // Count tokens
   const newPromptTokens = countTokens(textPrompts, model);
   const newImageTokens = countTokens(imgPrompts, model);
   const newCompletionTokens = countTokens([completionMessage], model);

   // Destructure existing token counts or default to 0
-  const { promptTokens = 0, completionTokens = 0, imageTokens = 0 } =
-    updatedTotalTokenUsed[model] ?? {};
+  const {
+    promptTokens = 0,
+    completionTokens = 0,
+    imageTokens = 0,
+  } = updatedTotalTokenUsed[model] ?? {};

   // Update token counts
   updatedTotalTokenUsed[model] = {
     promptTokens: promptTokens + newPromptTokens,
     completionTokens: completionTokens + newCompletionTokens,
-    imageTokens: imageTokens + newImageTokens
+    imageTokens: imageTokens + newImageTokens,
   };

   // Set the updated token counts in the store
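Beyond the Prettier reformatting, the load-bearing piece in this hunk is the destructure with defaults over updatedTotalTokenUsed[model] ?? {}: a model with no prior entry starts all three counters at 0 rather than yielding NaN on the first addition. The same pattern in isolation (the counter shape is inferred from the diff; names and numbers are illustrative only):

// Standalone sketch of the accumulation step.
type Counters = { promptTokens: number; completionTokens: number; imageTokens: number };
const totals: Partial<Record<string, Counters>> = {};

const { promptTokens = 0, completionTokens = 0, imageTokens = 0 } =
  totals['gpt-4o'] ?? {}; // first use of this model: counters default to 0
totals['gpt-4o'] = {
  promptTokens: promptTokens + 12,
  completionTokens: completionTokens + 34,
  imageTokens: imageTokens + 0,
};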
