@@ -124,7 +124,7 @@ export async function generateOpenAICompletions(
   markdown?: string,
   previousWarning?: string,
   isExtractEndpoint?: boolean,
-  model: TiktokenModel = (process.env.MODEL_NAME as TiktokenModel) ??
+  model: TiktokenModel = (process.env.MODEL_NAME as TiktokenModel) ||
     "gpt-4o-mini",
 ): Promise<{
   extract: any;
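A quick illustration of the `??` → `||` change, not taken from the PR itself: `??` only falls back when `MODEL_NAME` is `null` or `undefined`, so an empty string set in the environment would be passed through as the model name, while `||` also falls back on the empty string.

```ts
// Minimal sketch: how ?? and || differ when MODEL_NAME is set to an empty string.
const modelName = process.env.MODEL_NAME; // string | undefined
const withNullish = modelName ?? "gpt-4o-mini"; // keeps "" if MODEL_NAME=""
const withLogicalOr = modelName || "gpt-4o-mini"; // falls back to "gpt-4o-mini" for ""
console.log({ withNullish, withLogicalOr });
```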
@@ -151,15 +151,21 @@ export async function generateOpenAICompletions(

   // count number of tokens
   let numTokens = 0;
-  const encoder = encoding_for_model(model as TiktokenModel);
   try {
     // Encode the message into tokens
-    const tokens = encoder.encode(markdown);
-
-    // Return the number of tokens
-    numTokens = tokens.length;
+    const encoder = encoding_for_model(model as TiktokenModel);
+
+    try {
+      const tokens = encoder.encode(markdown);
+      numTokens = tokens.length;
+    } catch (e) {
+      throw e;
+    } finally {
+      // Free the encoder resources after use
+      encoder.free();
+    }
   } catch (error) {
-    logger.warn("Calculating num tokens of string failed", { error, markdown });
+    logger.warn("Calculating num tokens of string failed", { error });

     markdown = markdown.slice(0, maxTokensSafe * modifier);
@@ -168,9 +174,6 @@ export async function generateOpenAICompletions(
       maxTokensSafe +
       ") we support.";
     warning = previousWarning === undefined ? w : w + " " + previousWarning;
-  } finally {
-    // Free the encoder resources after use
-    encoder.free();
   }

   if (numTokens > maxTokensSafe) {
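The two hunks above move the encoder lifecycle inside the outer `try`. A minimal sketch of the resulting pattern, assuming the `tiktoken` npm package (which exposes `encoding_for_model` and `Tiktoken.free()` as used in the diff):

```ts
import { encoding_for_model, type TiktokenModel } from "tiktoken"; // assumed package name

// Create the encoder inside the outer try so a failure in encoding_for_model
// (e.g. an unrecognized model name) takes the same fallback path as an
// encode() failure, and free() only runs when an encoder was actually created.
function countTokens(text: string, model: TiktokenModel = "gpt-4o-mini"): number {
  try {
    const encoder = encoding_for_model(model);
    try {
      return encoder.encode(text).length;
    } finally {
      // Free the encoder resources after use
      encoder.free();
    }
  } catch {
    // Counting failed; the caller falls back to a character-based limit,
    // as the surrounding code does with markdown.slice().
    return 0;
  }
}
```

The inner `catch (e) { throw e; }` from the hunk is omitted in this sketch, since `finally` runs regardless of whether the inner block throws.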
@@ -247,7 +250,7 @@ export async function generateOpenAICompletions(
         : { type: "json_object" },
   });

-  if (jsonCompletion.choices[0].message.refusal !== null) {
+  if (jsonCompletion.choices[0].message.refusal !== null && jsonCompletion.choices[0].message.refusal !== undefined) {
     throw new LLMRefusalError(jsonCompletion.choices[0].message.refusal);
   }
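A style note on the refusal guard, not what the PR uses: loose equality against `null` covers both `null` and `undefined` in a single comparison, and can narrow the type at the same time. A minimal sketch:

```ts
// refusal != null is false for both null and undefined, so one loose
// comparison can replace the two strict checks; the type guard also narrows
// string | null | undefined down to string for the throw site.
function isRefusal(refusal: string | null | undefined): refusal is string {
  return refusal != null;
}
```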
@@ -351,7 +354,7 @@ export async function generateSchemaFromPrompt(prompt: string): Promise<any> {
   for (const temp of temperatures) {
     try {
       const result = await openai.beta.chat.completions.parse({
-        model: "gpt-4o",
+        model: process.env.MODEL_NAME || "gpt-4o",
         temperature: temp,
         messages: [
           {
@@ -392,7 +395,7 @@ Return a valid JSON schema object with properties that would capture the informa
       },
     });

-    if (result.choices[0].message.refusal !== null) {
+    if (result.choices[0].message.refusal !== null && result.choices[0].message.refusal !== undefined) {
      throw new Error("LLM refused to generate schema");
     }