🌿 Fern Regeneration -- March 12, 2024 (#141)
* SDK regeneration

* Add tool use test

* Drop test

---------

Co-authored-by: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Co-authored-by: billytrend-cohere <[email protected]>
fern-api[bot] and billytrend-cohere authored Mar 12, 2024
1 parent 48fd526 commit fb2fe57
Showing 70 changed files with 1,261 additions and 240 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/ci.yml
@@ -39,4 +39,4 @@ jobs:
npm config set //registry.npmjs.org/:_authToken ${NPM_TOKEN}
npm publish --access public
env:
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
2 changes: 1 addition & 1 deletion package.json
@@ -1,6 +1,6 @@
{
"name": "cohere-ai",
"version": "7.7.7",
"version": "7.8.0",
"private": false,
"repository": "https://github.com/cohere-ai/cohere-typescript",
"main": "./index.js",
50 changes: 31 additions & 19 deletions src/Client.ts
@@ -12,11 +12,12 @@ import * as errors from "./errors";
import { EmbedJobs } from "./api/resources/embedJobs/client/Client";
import { Datasets } from "./api/resources/datasets/client/Client";
import { Connectors } from "./api/resources/connectors/client/Client";
import { Models } from "./api/resources/models/client/Client";

export declare namespace CohereClient {
interface Options {
environment?: core.Supplier<environments.CohereEnvironment | string>;
token: core.Supplier<core.BearerToken>;
token?: core.Supplier<core.BearerToken | undefined>;
clientName?: core.Supplier<string | undefined>;
}

@@ -27,12 +28,11 @@ export declare namespace CohereClient {
}

export class CohereClient {
constructor(protected readonly _options: CohereClient.Options) {}
constructor(protected readonly _options: CohereClient.Options = {}) {}

/**
* The `chat` endpoint allows users to have conversations with a Large Language Model (LLM) from Cohere. Users can send messages as part of a persisted conversation using the `conversation_id` parameter, or they can pass in their own conversation history using the `chat_history` parameter.
*
* The endpoint features additional parameters such as [connectors](https://docs.cohere.com/docs/connectors) and `documents` that enable conversations enriched by external knowledge. We call this ["Retrieval Augmented Generation"](https://docs.cohere.com/docs/retrieval-augmented-generation-rag), or "RAG". For a full breakdown of the Chat API endpoint, document and connector modes, and streaming (with code samples), see [this guide](https://docs.cohere.com/docs/cochat-beta).
* Generates a text response to a user message.
* To learn how to use Chat with Streaming and RAG follow [this guide](https://docs.cohere.com/docs/cochat-beta#various-ways-of-using-the-chat-endpoint).
*/
public async chatStream(
request: Cohere.ChatStreamRequest,
@@ -52,7 +52,7 @@ export class CohereClient {
: undefined,
"X-Fern-Language": "JavaScript",
"X-Fern-SDK-Name": "cohere-ai",
"X-Fern-SDK-Version": "7.7.7",
"X-Fern-SDK-Version": "7.8.0",
"X-Fern-Runtime": core.RUNTIME.type,
"X-Fern-Runtime-Version": core.RUNTIME.version,
},
@@ -109,9 +109,8 @@ export class CohereClient {
}

/**
* The `chat` endpoint allows users to have conversations with a Large Language Model (LLM) from Cohere. Users can send messages as part of a persisted conversation using the `conversation_id` parameter, or they can pass in their own conversation history using the `chat_history` parameter.
*
* The endpoint features additional parameters such as [connectors](https://docs.cohere.com/docs/connectors) and `documents` that enable conversations enriched by external knowledge. We call this ["Retrieval Augmented Generation"](https://docs.cohere.com/docs/retrieval-augmented-generation-rag), or "RAG". For a full breakdown of the Chat API endpoint, document and connector modes, and streaming (with code samples), see [this guide](https://docs.cohere.com/docs/cochat-beta).
* Generates a text response to a user message.
* To learn how to use Chat with Streaming and RAG follow [this guide](https://docs.cohere.com/docs/cochat-beta#various-ways-of-using-the-chat-endpoint).
* @throws {@link Cohere.TooManyRequestsError}
*
* @example
@@ -150,7 +149,7 @@ export class CohereClient {
: undefined,
"X-Fern-Language": "JavaScript",
"X-Fern-SDK-Name": "cohere-ai",
"X-Fern-SDK-Version": "7.7.7",
"X-Fern-SDK-Version": "7.8.0",
"X-Fern-Runtime": core.RUNTIME.type,
"X-Fern-Runtime-Version": core.RUNTIME.version,
},
@@ -220,7 +219,7 @@ export class CohereClient {
: undefined,
"X-Fern-Language": "JavaScript",
"X-Fern-SDK-Name": "cohere-ai",
"X-Fern-SDK-Version": "7.7.7",
"X-Fern-SDK-Version": "7.8.0",
"X-Fern-Runtime": core.RUNTIME.type,
"X-Fern-Runtime-Version": core.RUNTIME.version,
},
@@ -311,7 +310,7 @@ export class CohereClient {
: undefined,
"X-Fern-Language": "JavaScript",
"X-Fern-SDK-Name": "cohere-ai",
"X-Fern-SDK-Version": "7.7.7",
"X-Fern-SDK-Version": "7.8.0",
"X-Fern-Runtime": core.RUNTIME.type,
"X-Fern-Runtime-Version": core.RUNTIME.version,
},
@@ -392,7 +391,7 @@ export class CohereClient {
: undefined,
"X-Fern-Language": "JavaScript",
"X-Fern-SDK-Name": "cohere-ai",
"X-Fern-SDK-Version": "7.7.7",
"X-Fern-SDK-Version": "7.8.0",
"X-Fern-Runtime": core.RUNTIME.type,
"X-Fern-Runtime-Version": core.RUNTIME.version,
},
@@ -471,7 +470,7 @@ export class CohereClient {
: undefined,
"X-Fern-Language": "JavaScript",
"X-Fern-SDK-Name": "cohere-ai",
"X-Fern-SDK-Version": "7.7.7",
"X-Fern-SDK-Version": "7.8.0",
"X-Fern-Runtime": core.RUNTIME.type,
"X-Fern-Runtime-Version": core.RUNTIME.version,
},
@@ -579,7 +578,7 @@ export class CohereClient {
: undefined,
"X-Fern-Language": "JavaScript",
"X-Fern-SDK-Name": "cohere-ai",
"X-Fern-SDK-Version": "7.7.7",
"X-Fern-SDK-Version": "7.8.0",
"X-Fern-Runtime": core.RUNTIME.type,
"X-Fern-Runtime-Version": core.RUNTIME.version,
},
@@ -656,7 +655,7 @@ export class CohereClient {
: undefined,
"X-Fern-Language": "JavaScript",
"X-Fern-SDK-Name": "cohere-ai",
"X-Fern-SDK-Version": "7.7.7",
"X-Fern-SDK-Version": "7.8.0",
"X-Fern-Runtime": core.RUNTIME.type,
"X-Fern-Runtime-Version": core.RUNTIME.version,
},
@@ -732,7 +731,7 @@ export class CohereClient {
: undefined,
"X-Fern-Language": "JavaScript",
"X-Fern-SDK-Name": "cohere-ai",
"X-Fern-SDK-Version": "7.7.7",
"X-Fern-SDK-Version": "7.8.0",
"X-Fern-Runtime": core.RUNTIME.type,
"X-Fern-Runtime-Version": core.RUNTIME.version,
},
@@ -809,7 +808,7 @@ export class CohereClient {
: undefined,
"X-Fern-Language": "JavaScript",
"X-Fern-SDK-Name": "cohere-ai",
"X-Fern-SDK-Version": "7.7.7",
"X-Fern-SDK-Version": "7.8.0",
"X-Fern-Runtime": core.RUNTIME.type,
"X-Fern-Runtime-Version": core.RUNTIME.version,
},
@@ -873,7 +872,20 @@ export class CohereClient {
return (this._connectors ??= new Connectors(this._options));
}

protected _models: Models | undefined;

public get models(): Models {
return (this._models ??= new Models(this._options));
}

protected async _getAuthorizationHeader() {
return `Bearer ${await core.Supplier.get(this._options.token)}`;
const bearer = (await core.Supplier.get(this._options.token)) ?? process.env["CO_API_KEY"];
if (bearer == null) {
throw new errors.CohereError({
message: "Please specify CO_API_KEY when instantiating the client.",
});
}

return `Bearer ${bearer}`;
}
}
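The `src/Client.ts` changes above make the `token` option optional, add a `CO_API_KEY` environment-variable fallback in `_getAuthorizationHeader`, and expose a new lazily constructed `models` resource. A minimal usage sketch, assuming the `chat`/`chatStream` methods shown in this diff and the async-iterable streaming pattern from the SDK README (the `eventType` value is an assumption, not part of this diff):

```typescript
import { CohereClient } from "cohere-ai";

// The token may now be omitted: _getAuthorizationHeader() falls back to the
// CO_API_KEY environment variable and throws a CohereError if neither is set.
const cohere = new CohereClient({ token: process.env.CO_API_KEY });

async function main() {
    // Non-streamed chat, using the chat() method regenerated in this commit.
    const reply = await cohere.chat({ message: "What can the Chat API do?" });
    console.log(reply.text);

    // Streamed chat; assumes the returned stream is async-iterable and emits
    // "text-generation" events, as documented in the SDK README.
    const stream = await cohere.chatStream({ message: "Tell me a short joke." });
    for await (const event of stream) {
        if (event.eventType === "text-generation") {
            process.stdout.write(event.text);
        }
    }

    // The new models resource added in this diff is reached like the other
    // lazily constructed resources, e.g. cohere.models.
}

main().catch(console.error);
```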
84 changes: 66 additions & 18 deletions src/api/client/requests/ChatRequest.ts
@@ -25,34 +25,31 @@ import * as Cohere from "../..";
*/
export interface ChatRequest {
/**
* Accepts a string.
* The chat message from the user to the model.
* Text input for the model to respond to.
*
*/
message: string;
/**
* Defaults to `command`.
*
* The identifier of the model, which can be one of the existing Cohere models or the full ID for a [fine-tuned custom model](https://docs.cohere.com/docs/chat-fine-tuning).
*
* Compatible Cohere models are `command` and `command-light` as well as the experimental `command-nightly` and `command-light-nightly` variants. Read more about [Cohere models](https://docs.cohere.com/docs/models).
* The name of a compatible [Cohere model](https://docs.cohere.com/docs/models) or the ID of a [fine-tuned](https://docs.cohere.com/docs/chat-fine-tuning) model.
*
*/
model?: string;
/**
* When specified, the default Cohere preamble will be replaced with the provided one.
* When specified, the default Cohere preamble will be replaced with the provided one. Preambles are a part of the prompt used to adjust the model's overall behavior and conversation style.
*
*/
preambleOverride?: string;
preamble?: string;
/**
* A list of previous messages between the user and the model, meant to give the model conversational context for responding to the user's `message`.
*
*/
chatHistory?: Cohere.ChatMessage[];
/**
* An alternative to `chat_history`. Previous conversations can be resumed by providing the conversation's identifier. The contents of `message` and the model's response will be stored as part of this conversation.
* An alternative to `chat_history`.
*
* If a conversation with this id does not already exist, a new conversation will be created.
* Providing a `conversation_id` creates or resumes a persisted conversation with the specified ID. The ID can be any non empty string.
*
*/
conversationId?: string;
@@ -61,7 +58,9 @@ export interface ChatRequest {
*
* Dictates how the prompt will be constructed.
*
* With `prompt_truncation` set to "AUTO", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit.
* With `prompt_truncation` set to "AUTO", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be changed and ranked by relevance.
*
* With `prompt_truncation` set to "AUTO_PRESERVE_ORDER", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be preserved as they are inputted into the API.
*
* With `prompt_truncation` set to "OFF", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned.
*
@@ -82,17 +81,26 @@ export interface ChatRequest {
*/
searchQueriesOnly?: boolean;
/**
* A list of relevant documents that the model can use to enrich its reply. See ['Document Mode'](https://docs.cohere.com/docs/retrieval-augmented-generation-rag#document-mode) in the guide for more information.
* A list of relevant documents that the model can cite to generate a more accurate reply. Each document is a string-string dictionary.
*
*/
documents?: Cohere.ChatDocument[];
/**
* Defaults to `"accurate"`.
* Example:
* `[
* { "title": "Tall penguins", "text": "Emperor penguins are the tallest." },
* { "title": "Penguin habitats", "text": "Emperor penguins only live in Antarctica." },
* ]`
*
* Keys and values from each document will be serialized to a string and passed to the model. The resulting generation will include citations that reference some of these documents.
*
* Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results or `"fast"` results.
* Some suggested keys are "text", "author", and "date". For better generation quality, it is recommended to keep the total word count of the strings in the dictionary to under 300 words.
*
* An `id` field (string) can be optionally supplied to identify the document in the citations. This field will not be passed to the model.
*
* An `_excludes` field (array of strings) can be optionally supplied to omit some key-value pairs from being shown to the model. The omitted fields will still show up in the citation object. The "_excludes" field will not be passed to the model.
*
* See ['Document Mode'](https://docs.cohere.com/docs/retrieval-augmented-generation-rag#document-mode) in the guide for more information.
*
*/
citationQuality?: Cohere.ChatRequestCitationQuality;
documents?: Cohere.ChatDocument[];
/**
* Defaults to `0.3`.
*
@@ -120,10 +128,50 @@ export interface ChatRequest {
*/
p?: number;
/**
* Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
*
* Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
*
*/
frequencyPenalty?: number;
/** Defaults to `0.0`, min value of `0.0`, max value of `1.0`. Can be used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies. */
/**
* Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
*
* Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.
*
*/
presencePenalty?: number;
/** When enabled, the user's prompt will be sent to the model without any pre-processing. */
rawPrompting?: boolean;
/**
* A list of available tools (functions) that the model may suggest invoking before producing a text response.
*
* When `tools` is passed, The `text` field in the response will be `""` and the `tool_calls` field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made
* the `tool_calls` array will be empty.
*
*/
tools?: Cohere.Tool[];
/**
* A list of results from invoking tools. Results are used to generate text and will be referenced in citations. When using `tool_results`, `tools` must be passed as well.
* Each tool_result contains information about how it was invoked, as well as a list of outputs in the form of dictionaries.
*
* ```
* tool_results = [
* {
* "call": {
* "name": <tool name>,
* "parameters": {
* <param name>: <param value>
* }
* },
* "outputs": [{
* <key>: <value>
* }]
* },
* ...
* ]
* ```
*
*/
toolResults?: Cohere.ChatRequestToolResultsItem[];
}
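The regenerated `ChatRequest` renames `preambleOverride` to `preamble`, expands the `documents` documentation, adds the `AUTO_PRESERVE_ORDER` truncation mode, and introduces `tools`/`toolResults`. A hedged sketch of requests exercising these fields, based on the doc comments above; the camelCase `promptTruncation` field, the `Cohere.Tool` shape (`parameterDefinitions`), and the `toolCalls`/`citations` response fields are assumptions not visible in this diff:

```typescript
import { CohereClient } from "cohere-ai";

const cohere = new CohereClient(); // relies on the CO_API_KEY fallback added in src/Client.ts

// RAG-style request: documents are string-to-string dictionaries with the
// suggested "title"/"text" keys and an optional "id" used in citations.
async function ragExample() {
    const response = await cohere.chat({
        message: "Which penguins are the tallest, and where do they live?",
        preamble: "Answer concisely and cite the provided documents.",
        promptTruncation: "AUTO_PRESERVE_ORDER", // keeps document/history order when truncating
        documents: [
            { id: "tall-penguins", title: "Tall penguins", text: "Emperor penguins are the tallest." },
            { id: "penguin-habitats", title: "Penguin habitats", text: "Emperor penguins only live in Antarctica." },
        ],
    });
    console.log(response.text);
    console.log(response.citations); // citation objects refer back to the documents above (assumed field name)
}

// Tool-use round trip: when `tools` is passed, the first response carries the
// suggested calls; the second request feeds back `toolResults` in the
// documented { call, outputs } shape alongside the same `tools`.
async function toolUseExample() {
    const tools = [
        {
            name: "query_daily_sales_report",
            description: "Retrieves the sales report for a given day.",
            // parameterDefinitions is an assumed detail of Cohere.Tool, which
            // is not visible in this diff.
            parameterDefinitions: {
                day: { description: "The day in YYYY-MM-DD format.", type: "str", required: true },
            },
        },
    ];

    const plan = await cohere.chat({ message: "How did sales go on 2023-09-29?", tools });

    const answer = await cohere.chat({
        message: "How did sales go on 2023-09-29?",
        tools,
        toolResults: (plan.toolCalls ?? []).map((call) => ({
            call,
            outputs: [{ date: "2023-09-29", summary: "Total sales were $10,000." }],
        })),
    });
    console.log(answer.text);
}

ragExample().then(toolUseExample).catch(console.error);
```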