settings.example.js
export default {
    // Options for the Keyv cache, see https://www.npmjs.com/package/keyv.
    // This is used for storing conversations, and supports additional drivers (conversations are stored in memory by default).
    // Only necessary when using `ChatGPTClient`, or `BingAIClient` in jailbreak mode.
    cacheOptions: {},
    // If set, `ChatGPTClient` and `BingAIClient` will use `keyv-file` to store conversations to this JSON file instead of in memory.
    // However, `cacheOptions.store` will override this if set.
    storageFilePath: process.env.STORAGE_FILE_PATH || './cache.json',
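    // Illustrative sketch (not required): if you prefer to construct the Keyv store yourself instead of relying on
    // `storageFilePath`, you could pass one built with `keyv-file` via `cacheOptions.store`. The import (which would
    // go at the top of this file) and the filename below are examples only.
    // import { KeyvFile } from 'keyv-file';
    // cacheOptions: { store: new KeyvFile({ filename: './cache.json' }) },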
    chatGptClient: {
        // Your OpenAI API key (for `ChatGPTClient`)
        openaiApiKey: process.env.OPENAI_API_KEY || '',
        // (Optional) Support for a reverse proxy for the completions endpoint (private API server).
        // Warning: This will expose your `openaiApiKey` to a third party. Consider the risks before using this.
        // reverseProxyUrl: 'https://chatgpt.hato.ai/completions',
        // (Optional) Parameters as described in https://platform.openai.com/docs/api-reference/completions
        modelOptions: {
            // You can override the model name and any other parameters here.
            // The default model is `gpt-3.5-turbo`.
            model: 'gpt-3.5-turbo',
            // Set max_tokens here to override the default max_tokens of 1000 for the completion.
            // max_tokens: 1000,
        },
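        // A fuller `modelOptions` example (illustrative sketch; the extra parameter names follow the OpenAI API
        // documentation linked above, and the values shown are arbitrary, not recommendations):
        // modelOptions: {
        //     model: 'gpt-3.5-turbo',
        //     temperature: 0.8,
        //     top_p: 1,
        //     presence_penalty: 0,
        //     frequency_penalty: 0,
        //     max_tokens: 1000,
        // },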
        // (Optional) Davinci models have a max context length of 4097 tokens, but you may need to change this for other models.
        // maxContextTokens: 4097,
        // (Optional) You might want to lower this to save money if using a paid model like `text-davinci-003`.
        // Earlier messages will be dropped until the prompt is within the limit.
        // maxPromptTokens: 3097,
        // (Optional) Set custom instructions instead of "You are ChatGPT...".
        // promptPrefix: 'You are Bob, a cowboy in Western times...',
        // (Optional) Set a custom name for the user
        // userLabel: 'User',
        // (Optional) Set a custom name for ChatGPT ("ChatGPT" by default)
        // chatGptLabel: 'Bob',
        // A proxy string like "http://<ip>:<port>"
        proxy: '',
        // (Optional) Set to true to enable `console.debug()` logging
        debug: false,
    },
    // Options for the Bing client
    bingAiClient: {
        // Necessary for some people in different countries, e.g. China (https://cn.bing.com)
        host: '',
        // The "_U" cookie value from bing.com
        userToken: '',
        // If the above doesn't work, provide all your cookies as a string instead
        cookies: '',
        // A proxy string like "http://<ip>:<port>"
        proxy: '',
        // (Optional) Set 'x-forwarded-for' for the request. You can use a fixed IPv4 address or specify a range using CIDR notation,
        // and the program will randomly select an address within that range. The 'x-forwarded-for' header is not set by default.
        // xForwardedFor: '13.104.0.0/14',
        // (Optional) Set 'genImage' to true to enable Bing to create images for you. It's disabled by default.
        // features: {
        //     genImage: true,
        // },
        // (Optional) Set to true to enable `console.debug()` logging
        debug: false,
    },
    chatGptBrowserClient: {
        // (Optional) Support for a reverse proxy for the conversation endpoint (private API server).
        // Warning: This will expose your access token to a third party. Consider the risks before using this.
        reverseProxyUrl: 'https://bypass.churchless.tech/api/conversation',
        // Access token from https://chat.openai.com/api/auth/session
        accessToken: '',
        // Cookies from chat.openai.com (likely not required if using reverse proxy server).
        cookies: '',
        // A proxy string like "http://<ip>:<port>"
        proxy: '',
        // (Optional) Set to true to enable `console.debug()` logging
        debug: false,
    },
    // Options for the API server
    apiOptions: {
        port: process.env.API_PORT || 3000,
        host: process.env.API_HOST || 'localhost',
        // (Optional) Set to true to enable `console.debug()` logging
        debug: false,
        // (Optional) Possible options: "chatgpt", "chatgpt-browser", "bing". (Default: "chatgpt")
        clientToUse: 'chatgpt',
        // (Optional) Generate titles for each conversation for clients that support it (only ChatGPTClient for now).
        // This will be returned as a `title` property in the first response of the conversation.
        generateTitles: false,
        // (Optional) Set this to allow changing the client or client options in POST /conversation.
        // To disable, set to `null`.
        perMessageClientOptionsWhitelist: {
            // The ability to switch clients using `clientOptions.clientToUse` will be disabled if `validClientsToUse` is not set.
            // To allow switching clients per message, you must set `validClientsToUse` to a non-empty array.
            validClientsToUse: ['bing', 'chatgpt', 'chatgpt-browser'], // values from possible `clientToUse` options above
            // The Object key, e.g. "chatgpt", is a value from `validClientsToUse`.
            // If not set, ALL options will be ALLOWED to be changed. For example, `bing` is not defined in `perMessageClientOptionsWhitelist` above,
            // so all options for `bingAiClient` will be allowed to be changed.
            // If set, ONLY the options listed here will be allowed to be changed.
            // In this example, each array element is a string representing a property in `chatGptClient` above.
            chatgpt: [
                'promptPrefix',
                'userLabel',
                'chatGptLabel',
                // Setting `modelOptions.temperature` here will allow changing ONLY the temperature.
                // Other options like `modelOptions.model` will not be allowed to be changed.
                // If you want to allow changing all `modelOptions`, define `modelOptions` here instead of `modelOptions.temperature`.
                'modelOptions.temperature',
            ],
        },
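        // Illustrative sketch of a per-message request against the whitelist above. Only `clientOptions.clientToUse`
        // is documented in this file; the other field names ("message", etc.) are assumptions shown for illustration,
        // not a definitive request schema.
        // POST /conversation
        // {
        //     "message": "Hello!",
        //     "clientOptions": {
        //         "clientToUse": "chatgpt",
        //         "promptPrefix": "You are Bob, a cowboy in Western times...",
        //         "modelOptions": { "temperature": 0.7 }
        //     }
        // }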
    },
    // Options for the CLI app
    cliOptions: {
        // (Optional) Possible options: "chatgpt", "bing".
        // clientToUse: 'bing',
    },
};