From b4370ba840c0ae1b2ac2f1b4b2911b94a8100ee0 Mon Sep 17 00:00:00 2001 From: boxmoji Date: Wed, 30 Oct 2024 12:34:04 -0700 Subject: [PATCH] Moji: Update translations --- guides/api-calls/api-versioning-strategy.md | 4 + .../box-ai/ai-agents/ai-agent-versioning.md | 233 ++++++++++ .../ai-agents/get-agent-default-config.md | 10 +- guides/box-ai/ai-agents/index.md | 10 +- guides/box-ai/ai-agents/overrides-tutorial.md | 10 +- .../aws-claude-3-5-sonnet-model-card.md | 50 +++ .../aws-claude-3-haiku-model-card.md | 50 +++ .../aws-claude-3-sonnet-model-card.md | 48 +++ .../aws-titan-text-lite-model-card.md | 48 +++ ...ure-openai-gpt-4o-2024-05-13-model-card.md | 48 +++ .../azure-openai-gpt-4o-mini-model-card.md | 65 +++ ...azure-text-embedding-ada-002-model-card.md | 48 +++ .../google-gemini-1-5-flash-001-model-card.md | 48 +++ .../google-gemini-1-5-pro-001-model-card.md | 48 +++ .../google-text-bison-32-model-card.md | 48 +++ .../ai-models/google-text-bison-model-card.md | 48 +++ .../google-text-unicorn-model-card.md | 48 +++ ...ogle-textembedding-gecko-002-model-card.md | 48 +++ ...ogle-textembedding-gecko-003-model-card.md | 48 +++ .../google-textembedding-gecko-model-card.md | 48 +++ guides/box-ai/ai-models/index.md | 408 ++++++++++++++++++ guides/box-ai/ask-questions.md | 4 +- guides/box-ai/extract-metadata-structured.md | 6 +- guides/box-ai/extract-metadata.md | 4 +- guides/box-ai/generate-text.md | 4 +- guides/box-ai/index.md | 14 +- guides/box-ai/prerequisites.md | 4 +- guides/box-ai/supported-models.md | 85 ---- guides/embed/box-embed.md | 60 ++- .../ui-elements/images/box-ai-ui-element.jpg | Bin 0 -> 71915 bytes guides/embed/ui-elements/preview.md | 377 +++++++++------- pages/ai-dev-zone/index.md | 18 +- 32 files changed, 1704 insertions(+), 288 deletions(-) create mode 100644 guides/box-ai/ai-agents/ai-agent-versioning.md create mode 100644 guides/box-ai/ai-models/aws-claude-3-5-sonnet-model-card.md create mode 100644 guides/box-ai/ai-models/aws-claude-3-haiku-model-card.md create mode 100644 guides/box-ai/ai-models/aws-claude-3-sonnet-model-card.md create mode 100644 guides/box-ai/ai-models/aws-titan-text-lite-model-card.md create mode 100644 guides/box-ai/ai-models/azure-openai-gpt-4o-2024-05-13-model-card.md create mode 100644 guides/box-ai/ai-models/azure-openai-gpt-4o-mini-model-card.md create mode 100644 guides/box-ai/ai-models/azure-text-embedding-ada-002-model-card.md create mode 100644 guides/box-ai/ai-models/google-gemini-1-5-flash-001-model-card.md create mode 100644 guides/box-ai/ai-models/google-gemini-1-5-pro-001-model-card.md create mode 100644 guides/box-ai/ai-models/google-text-bison-32-model-card.md create mode 100644 guides/box-ai/ai-models/google-text-bison-model-card.md create mode 100644 guides/box-ai/ai-models/google-text-unicorn-model-card.md create mode 100644 guides/box-ai/ai-models/google-textembedding-gecko-002-model-card.md create mode 100644 guides/box-ai/ai-models/google-textembedding-gecko-003-model-card.md create mode 100644 guides/box-ai/ai-models/google-textembedding-gecko-model-card.md create mode 100644 guides/box-ai/ai-models/index.md delete mode 100644 guides/box-ai/supported-models.md create mode 100644 guides/embed/ui-elements/images/box-ai-ui-element.jpg diff --git a/guides/api-calls/api-versioning-strategy.md b/guides/api-calls/api-versioning-strategy.md index 09ba36e45..5870ee76c 100644 --- a/guides/api-calls/api-versioning-strategy.md +++ b/guides/api-calls/api-versioning-strategy.md @@ -220,6 +220,10 @@ 
Boxでは、[oasdiff](https://github.com/Tufin/oasdiff/blob/main/BREAKING-CHANGE
+## AI agent configuration versioning
+
+[AI agent](g://box-ai/ai-agents) versioning gives developers more control over model version management and ensures consistent responses. For details, see the [AI agent configuration versioning guide](g://box-ai/ai-agents/ai-agent-versioning).
+
## Support policy and deprecation information

When a new version of the Box API or a Box SDK is released, earlier versions are retired. Box marks a version as `deprecated` at least 24 months before retiring it, so official support for a deprecated version never ends in less than 24 months. Likewise, for individual APIs that are generally available (GA), Box declares the API `deprecated` at least 24 months before removing it from the GA version.
diff --git a/guides/box-ai/ai-agents/ai-agent-versioning.md b/guides/box-ai/ai-agents/ai-agent-versioning.md
new file mode 100644
index 000000000..2d94256d9
--- /dev/null
+++ b/guides/box-ai/ai-agents/ai-agent-versioning.md
@@ -0,0 +1,233 @@
+---
+rank: 4
+related_endpoints:
+ - get_ai_agent_default
+ - post_ai_text_gen
+ - post_ai_ask
+related_guides:
+ - box-ai/prerequisites
+ - box-ai/ask-questions
+ - box-ai/generate-text
+ - box-ai/extract-metadata
+ - box-ai/extract-metadata-structured
+category_id: box-ai
+subcategory_id: box-ai/ai-agents
+is_index: false
+id: box-ai/ai-agents/ai-agent-versioning
+type: guide
+total_steps: 3
+sibling_id: box-ai/ai-agents
+parent_id: box-ai/ai-agents
+next_page_id: ''
+previous_page_id: box-ai/ai-agents/get-agent-default-config
+source_url: >-
+ https://github.com/box/developer.box.com/blob/main/content/guides/box-ai/ai-agents/ai-agent-versioning.md
+fullyTranslated: true
+---
+# AI agent configuration versioning
+
+
+
+Box updates the default models across the endpoints on a regular basis to stay up to date with the most advanced options. Any update to a default model is announced in the developer changelog.
+
+
+
+AI agent configuration versioning gives developers more control over model version management and ensures consistent responses.
+
+AI agent configuration versioning adopts the following principles:
+
+* Each AI agent snapshot is supported for at least 12 months, unless there are factors outside of Box's control. For example, a Large Language Model (LLM) may get deprecated.
+* An AI agent snapshot remains available until a new, stable agent version is released.
+* A 6-month window is provided to test and transition to the new snapshot.
+
+## Historical AI agent configuration
+
+The values in the [default agent configuration][default-config] used by the LLM gateway often change to achieve the best possible answer quality.
+
+To make sure these changes do not negatively affect your results, you can use the historical AI agent configuration provided below to [override the default one][overrides].
+
+``````json
+{
+  "ask": {
+    "type": "ai_agent_ask",
+    "longText": {
+      "model": "azure__openai__gpt_4o_mini",
+      "systemMessage": "",
+      "promptTemplate": "Reply as if it's {current_date}.\nI will ask you for help and provide subsections of one document delimited by five backticks (`````) at the beginning and at the end.\nIf I make a reference to \"this\", I am referring to the document I provided between the five backticks. I may ask you a question where the answer is contained within the document.
In that case, do your best to answer using only the document, but if you cannot, feel free to mention that you couldn't find an answer in the document, but you have some answer from your general knowledge.\nI may ask you to perform some kind of computation or symbol manipulation such as filtering a list, counting something, summing, averaging, and other aggregation/grouping functions or some combination of them. In these cases, first list the plan of how you plan to perform such a computation, then follow that plan step by step, keeping track of intermediate results, and at the end tell me the final answer.\nI may ask you to enumerate or somehow list people, places, characters, or other important things from the document, if I do so, please only use the document provided to list them.\nTEXT FROM DOCUMENT STARTS\n`````\n{content}\n + +`````\nTEXT FROM DOCUMENT ENDS\nNever mention five backticks in your response. Unless you are told otherwise, a one paragraph response is sufficient for any requested summarization tasks.\nHere is how I need help from you: {user_question}", +"numTokensForCompletion": 6000, +"llmEndpointParams": { +"type": "openai_params", +"temperature": 0.0, +"topP": 1.0, +"frequencyPenalty": 0.0, +"presencePenalty": 1.5, +"stop": "<|im_end|> + +" +}, +"embeddings": { +"model": "azure__openai__text_embedding_ada_002", +"strategy": { +"id": "basic", +"numTokensPerChunk": 64 +} +} +}, +"basicText": { +"model": "azure__openai__gpt_4o_mini", +"systemMessage": "", +"promptTemplate": "Reply as if it's {current_date}.\nI will ask you for help and provide the entire text of one document delimited by five backticks (`````) at the beginning and at the end.\nIf I make a reference to \"this\", I am referring to the document I provided between the five backticks. I may ask you a question where the answer is contained within the document. In that case, do your best to answer using only the document, but if you cannot, feel free to mention that you couldn't find an answer in the document, but you have some answer from your general knowledge.\nI may ask you to perform some kind of computation or symbol manipulation such as filtering a list, counting something, summing, averaging, and other aggregation/grouping functions or some combination of them. In these cases, first list the plan of how you plan to perform such a computation, then follow that plan step by step, keeping track of intermediate results, and at the end tell me the final answer.\nI may ask you to enumerate or somehow list people, places, characters, or other important things from the document, if I do so, please only use the document provided to list them.\nTEXT FROM DOCUMENT STARTS\n + +`````\n{content}\n`````\nTEXT FROM DOCUMENT ENDS\nNever mention five backticks in your response. Unless you are told otherwise, a one paragraph response is sufficient for any requested summarization tasks.\nHere is how I need help from you: {user_question}", + "numTokensForCompletion": 6000, + "llmEndpointParams": { + "type": "openai_params", + "temperature": 0.0, + "topP": 1.0, + "frequencyPenalty": 0.0, + "presencePenalty": 1.5, + "stop": "<|im_end|>" + } + }, + "longTextMulti": { + "model": "azure__openai__gpt_4o_mini", + "systemMessage": "Role and Goal: You are an assistant designed to analyze and answer a question based on provided snippets from multiple documents, which can include business-oriented documents like docs, presentations, PDFs, etc. 
The assistant will respond concisely, using only the information from the provided documents.\n\nConstraints: The assistant should avoid engaging in chatty or extensive conversational interactions and focus on providing direct answers. It should also avoid making assumptions or inferences not supported by the provided document snippets.\n\nGuidelines: When answering, the assistant should consider the file's name and path to assess relevance to the question. In cases of conflicting information from multiple documents, it should list the different answers with citations. For summarization or comparison tasks, it should concisely answer with the key points. It should also consider the current date to be the date given.\n\nPersonalization: The assistant's tone should be formal and to-the-point, suitable for handling business-related documents and queries.\n", + "promptTemplate": "Current date: {current_date}\n\nTEXT FROM DOCUMENTS STARTS\n{content}\nTEXT FROM DOCUMENTS ENDS\n\nHere is how I need help from you: {user_question}\n.", + "numTokensForCompletion": 6000, + "llmEndpointParams": { + "type": "openai_params", + "temperature": 0.0, + "topP": 1.0, + "frequencyPenalty": 0.0, + "presencePenalty": 1.5, + "stop": "<|im_end|>" + }, + "embeddings": { + "model": "azure__openai__text_embedding_ada_002", + "strategy": { + "id": "basic", + "numTokensPerChunk": 64 + } + } + }, + "basicTextMulti": { + "model": "azure__openai__gpt_4o_mini", + "systemMessage": "", + "promptTemplate": "Current date: {current_date}\n\nTEXT FROM DOCUMENTS STARTS\n{content}\nTEXT FROM DOCUMENTS ENDS\n\nHere is how I need help from you: {user_question}\n.", + "numTokensForCompletion": 6000, + "llmEndpointParams": { + "type": "openai_params", + "temperature": 0.0, + "topP": 1.0, + "frequencyPenalty": 0.0, + "presencePenalty": 1.5, + "stop": "<|im_end|>" + } + }, +}, +"extract": { + "type": "ai_agent_extract", + "longText": { + "model": "google__gemini_1_5_flash_001", + "systemMessage": "Respond only in valid json. You are extracting metadata that is name, value pairs from a document. Only output the metadata in valid json form, as {\"name1\":\"value1\",\"name2\":\"value2\"} and nothing else. You will be given the document data and the schema for the metadata, that defines the name, description and type of each of the fields you will be extracting. Schema is of the form {\"fields\": [{\"key\": \"key_name\", \"displayName\": \"key display name\", \"type\": \"string\", \"description\": \"key description\"}]}. Leverage key description and key display name to identify where the key and value pairs are in the document. In certain cases, key description can also indicate the instructions to perform on the document to obtain the value. Prompt will be in the form of Schema is ``schema`` \n document is````document````", +"promptTemplate": "If you need to know today's date to respond, it is {current_date}. Schema is ``{user_question}`` \n document is````{content}````", + "numTokensForCompletion": 4096, + "llmEndpointParams": { + "type": "google_params", + "temperature": 0.0, + "topP": 1.0, + "frequencyPenalty": 0.0, + "presencePenalty": 0.0 + }, + "embeddings": { + "model": "azure__openai__text_embedding_ada_002", + "strategy": { + "id": "basic", + "numTokensPerChunk": 64 + } + } +}, +"basicText": { + "model": "google__gemini_1_5_flash_001", + "systemMessage": "Respond only in valid json. You are extracting metadata that is name, value pairs from a document. 
Only output the metadata in valid json form, as {\"name1\":\"value1\",\"name2\":\"value2\"} and nothing else. You will be given the document data and the schema for the metadata, that defines the name, description and type of each of the fields you will be extracting. Schema is of the form {\"fields\": [{\"key\": \"key_name\", \"displayName\": \"key display name\", \"type\": \"string\", \"description\": \"key description\"}]}. Leverage key description and key display name to identify where the key and value pairs are in the document. In certain cases, key description can also indicate the instructions to perform on the document to obtain the value. Prompt will be in the form of Schema is ``schema`` \n document is````document````", +"promptTemplate": "If you need to know today's date to respond, it is {current_date}. Schema is ``{user_question}`` \n document is````{content}````", + "numTokensForCompletion": 4096, + "llmEndpointParams": { + "type": "google_params", + "temperature": 0.0, + "topP": 1.0, + "frequencyPenalty": 0.0, + "presencePenalty": 0.0 + } + } +}, +"textGen": { + "type": "ai_agent_text_gen", + "basicGen": { + "model": "azure__openai__gpt_3_5_turbo_16k", + "systemMessage": "\nIf you need to know today's date to respond, it is {current_date}.\nThe user is working in a collaborative document creation editor called Box Notes.\nAssume that you are helping a business user create documents or to help the user revise existing text.\nYou can help the user in creating templates to be reused or update existing documents, you can respond with text that the user can use to place in the document that the user is editing.\nIf the user simply asks to \"improve\" the text, then simplify the language and remove jargon, unless the user specifies otherwise.\nDo not open with a preamble to the response, just respond.\n", + "promptTemplate": "{user_question}", + "numTokensForCompletion": 12000, + "llmEndpointParams": { + "type": "openai_params", + "temperature": 0.1, + "topP": 1.0, + "frequencyPenalty": 0.75, + "presencePenalty": 0.75, + "stop": "<|im_end|>" + }, + "embeddings": { + "model": "azure__openai__text_embedding_ada_002", + "strategy": { + "id": "basic", + "numTokensPerChunk": 64 + } + }, + "contentTemplate": "`````{content}`````" + } +}, +"extractStructured": { + "type": "ai_agent_extract_structured", + "longText": { + "model": "google__gemini_1_5_flash_001", + "systemMessage": "Respond only in valid json. You are extracting metadata that is name, value pairs from a document. Only output the metadata in valid json form, as {\"name1\":\"value1\",\"name2\":\"value2\"} and nothing else. You will be given the document data and the schema for the metadata, that defines the name, description and type of each of the fields you will be extracting. Schema is of the form {\"fields\": [{\"key\": \"key_name\", \"prompt\": \"prompt to extract the value\", \"type\": \"date\"}]}. Leverage prompt for each key to identify where the key and value pairs are in the document. In certain cases, prompt can also indicate the instructions to perform on the document to obtain the value. Prompt will be in the form of Schema is ``schema`` \n document is````document````", +"promptTemplate": "If you need to know today's date to respond, it is {current_date}. 
Schema is ``{user_question}`` \n document is````{content}````", + "numTokensForCompletion": 4096, + "llmEndpointParams": { + "type": "google_params", + "temperature": 0.0, + "topP": 1.0, + "frequencyPenalty": 0.0, + "presencePenalty": 0.0 + }, + "embeddings": { + "model": "google__textembedding_gecko_003", + "strategy": { + "id": "basic", + "numTokensPerChunk": 64 + } + } +}, +"basicText": { + "model": "google__gemini_1_5_flash_001", + "systemMessage": "Respond only in valid json. You are extracting metadata that is name, value pairs from a document. Only output the metadata in valid json form, as {\"name1\":\"value1\",\"name2\":\"value2\"} and nothing else. You will be given the document data and the schema for the metadata, that defines the name, description and type of each of the fields you will be extracting. Schema is of the form {\"fields\": [{\"key\": \"key_name\", \"prompt\": \"prompt to extract the value\", \"type\": \"date\"}]}. Leverage prompt for each key to identify where the key and value pairs are in the document. In certain cases, prompt can also indicate the instructions to perform on the document to obtain the value. Prompt will be in the form of Schema is ``schema`` \n document is````document````", +"promptTemplate": "If you need to know today's date to respond, it is {current_date}. Schema is ``{user_question}`` \n document is````{content}````", + "numTokensForCompletion": 4096, + "llmEndpointParams": { + "type": "google_params", + "temperature": 0.0, + "topP": 1.0, + "frequencyPenalty": 0.0, + "presencePenalty": 0.0 + } + } + } +} + +`````` + +[default-config]: g://box-ai/ai-agents/get-agent-default-config + +[overrides]: g://box-ai/ai-agents/overrides-tutorial diff --git a/guides/box-ai/ai-agents/get-agent-default-config.md b/guides/box-ai/ai-agents/get-agent-default-config.md index 2a568041d..b4f716f8f 100644 --- a/guides/box-ai/ai-agents/get-agent-default-config.md +++ b/guides/box-ai/ai-agents/get-agent-default-config.md @@ -8,15 +8,17 @@ related_guides: - box-ai/prerequisites - box-ai/ask-questions - box-ai/generate-text + - box-ai/extract-metadata + - box-ai/extract-metadata-structured category_id: box-ai subcategory_id: box-ai/ai-agents is_index: false id: box-ai/ai-agents/get-agent-default-config type: guide -total_steps: 2 +total_steps: 3 sibling_id: box-ai/ai-agents parent_id: box-ai/ai-agents -next_page_id: box-ai/ai-agents/overrides-tutorial +next_page_id: box-ai/ai-agents/ai-agent-versioning previous_page_id: box-ai/ai-agents source_url: >- https://github.com/box/developer.box.com/blob/main/content/guides/box-ai/ai-agents/get-agent-default-config.md @@ -26,7 +28,7 @@ fullyTranslated: true -Box AI APIは、現在、BoxのMain Beta Agreementに従い提供されるベータ機能のため、利用可能な機能は変更される可能性があります。Box AI APIは、Enterprise Plusをご利用のすべてのお客様が利用できます。 +Endpoints related to metadata extraction are currently a beta feature offered subject to Box’s Main Beta Agreement, and the available capabilities may change. Box AI API is available to all Enterprise Plus customers. 
@@ -270,7 +272,7 @@ Box AI APIは、現在、BoxのMain Beta Agreementに従い提供されるベー
[prereq]: g://box-ai/prerequisites
-[models]: g://box-ai/supported-models
+[models]: g://box-ai/ai-models
[ai-agent-config]: g://box-ai/ai-agents/overrides-tutorial
diff --git a/guides/box-ai/ai-agents/index.md b/guides/box-ai/ai-agents/index.md
index cf1b95f0a..fa4edafbb 100644
--- a/guides/box-ai/ai-agents/index.md
+++ b/guides/box-ai/ai-agents/index.md
@@ -14,7 +14,7 @@ subcategory_id: box-ai/ai-agents
is_index: true
id: box-ai/ai-agents
type: guide
-total_steps: 2
+total_steps: 3
sibling_id: box-ai
parent_id: box-ai
next_page_id: box-ai/ai-agents/get-agent-default-config
@@ -25,6 +25,12 @@ fullyTranslated: true
---
# AI model overrides
+
+
+Endpoints related to metadata extraction are currently a beta feature offered subject to Box’s Main Beta Agreement, and the available capabilities may change. Box AI API is available to all Enterprise Plus customers.
+
+
+
Box regularly updates the default models across the endpoints to keep up with the most advanced options.

If your implementation is built on Box AI, a new default model may change the results in a way that disrupts or alters your downstream processes. Switching to a specific version can help you prevent such issues.
@@ -43,4 +49,4 @@ Box AIに基づいた実装の場合、新しいデフォルトモデルによ
[overrides]: g://box-ai/ai-agents/overrides-tutorial
-[models]: g://box-ai/supported-models
+[models]: g://box-ai/ai-models
diff --git a/guides/box-ai/ai-agents/overrides-tutorial.md b/guides/box-ai/ai-agents/overrides-tutorial.md
index 15398c374..a668485a0 100644
--- a/guides/box-ai/ai-agents/overrides-tutorial.md
+++ b/guides/box-ai/ai-agents/overrides-tutorial.md
@@ -13,7 +13,7 @@ subcategory_id: box-ai/ai-agents
is_index: false
id: box-ai/ai-agents/overrides-tutorial
type: guide
-total_steps: 2
+total_steps: 3
sibling_id: box-ai/ai-agents
parent_id: box-ai/ai-agents
next_page_id: ''
@@ -26,7 +26,7 @@ fullyTranslated: true
-Box AI APIは、現在、BoxのMain Beta Agreementに従い提供されるベータ機能のため、利用可能な機能は変更される可能性があります。Box AI APIは、Enterprise Plusをご利用のすべてのお客様が利用できます。
+Endpoints related to metadata extraction are currently a beta feature offered subject to Box’s Main Beta Agreement, and the available capabilities may change. Box AI API is available to all Enterprise Plus customers.
@@ -140,11 +140,11 @@ Box AI APIは、現在、BoxのMain Beta Agreementに従い提供されるベー
### LLM endpoint parameters
-`llm_endpoint_params`構成のオプションは、全体的なAIモデルが[Google][google-params]ベースか[OpenAI][openai-params]ベースかによって異なります。
+The available `llm_endpoint_params` configuration options differ depending on whether the AI model is [Google][google-params]-based, [OpenAI][openai-params]-based, or [AWS][aws-params]-based.

For example, both `llm_endpoint_params` objects accept a `temperature` parameter, but the results differ depending on the model.

-Googleモデルの場合、[`temperature`][google-temp]はレスポンス生成時のサンプリングに使用されます。レスポンス生成は`top-P`と`top-K`が適用された場合に発生します。temperatureは、トークン選択におけるランダム性の程度を制御します。
+For Google and AWS models, [`temperature`][google-temp] is used for sampling during response generation, which occurs when `top-P` and `top-K` are applied. The temperature controls the degree of randomness in token selection, as illustrated in the sketch below.
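+
+As a rough sketch (the file ID is a placeholder and the parameter values are illustrative, not an official sample), an ask request that pins a Google-based model could pass `google_params`, while an OpenAI-based model would use `openai_params` with its own fields instead:
+
+```json
+{
+  "mode": "single_item_qa",
+  "prompt": "Summarize this document.",
+  "items": [{ "id": "1233039227512", "type": "file" }],
+  "ai_agent": {
+    "type": "ai_agent_ask",
+    "basic_text": {
+      "model": "google__gemini_1_5_flash_001",
+      "llm_endpoint_params": {
+        "type": "google_params",
+        "temperature": 0.0,
+        "top_p": 1.0,
+        "top_k": 40
+      }
+    }
+  }
+}
+```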
OpenAIモデルの場合、[`temperature`][openai-temp]は、値が0~2の間のサンプリングtemperatureとなります。0.8のような高い値を指定すると、出力がよりランダムになるのに対し、0.2のような低い値を指定すると、出力はより焦点を絞った、決定的なものになります。独自の構成を導入する場合は、`temperature`と`top_p`の両方ではなく、いずれかを使用してください。 @@ -384,3 +384,5 @@ curl -i -L 'https://api.box.com/2.0/ai/extract' \ [google-temp]: https://ai.google.dev/gemini-api/docs/models/generative-models#model-parameters [openai-temp]: https://community.openai.com/t/temperature-top-p-and-top-k-for-chatbot-responses/295542 + +[aws-params]: r://ai-llm-endpoint-params-aws diff --git a/guides/box-ai/ai-models/aws-claude-3-5-sonnet-model-card.md b/guides/box-ai/ai-models/aws-claude-3-5-sonnet-model-card.md new file mode 100644 index 000000000..97ecdc64b --- /dev/null +++ b/guides/box-ai/ai-models/aws-claude-3-5-sonnet-model-card.md @@ -0,0 +1,50 @@ +--- +rank: 14 +related_guides: + - box-ai/ask-questions + - box-ai/generate-text + - box-ai/extract-metadata + - box-ai/extract-metadata-structured + - box-ai/ai-agents/get-agent-default-config +category_id: box-ai +subcategory_id: box-ai/ai-models +is_index: false +id: box-ai/ai-models/aws-claude-3-5-sonnet-model-card +type: guide +total_steps: 15 +sibling_id: box-ai/ai-models +parent_id: box-ai/ai-models +next_page_id: box-ai/ai-models/aws-claude-3-haiku-model-card +previous_page_id: box-ai/ai-models/google-textembedding-gecko-003-model-card +source_url: >- + https://github.com/box/developer.box.com/blob/main/content/guides/box-ai/ai-models/aws-claude-3-5-sonnet-model-card.md +fullyTranslated: true +--- +# AWS Claude 3.5 Sonnet + +## 概要 + +**AWS Claude 3.5 Sonnet** model is designed to enhance language understanding and generation tasks. + +## Model details + +| 項目 | Value | 説明 | +| --------------------- | ----------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | +| Model name | **AWS Claude 3.5 Sonnet** | The name of the model. | +| API model name | `aws__claude_3_5_sonnet` | The name of the model that is used in the [Box AI API for model overrides][overrides]. The user must provide this exact name for the API to work. | +| Hosting layer | **Amazon Web Services (AWS)** | The trusted organization that securely hosts LLM. | +| Model provider | **AWS Bedrock** | The organization that provides this model. | +| Release date | **June 20th, 2024** | The release date for the model. | +| Knowledge cutoff date | **April 2024** | The date after which the model does not get any information updates. | +| Input context window | **200k tokens** | The number of tokens supported by the input context window. | +| Maximum output tokens | **4k tokens** | The number of tokens that can be generated by the model in a single request. | +| Empirical throughput | **Not specified** | The number of tokens the model can generate per second. | +| Open source | **いいえ** | Specifies if the model's code is available for public use. | + +## Additional documentation + +For additional information, see [official AWS Claude 3.5 Sonnet documentation][aws-claude]. 
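+
+As a usage sketch (not part of the official model card, with a placeholder file ID), the API model name above can be supplied in an `ai_agent` override when calling the `POST /2.0/ai/ask` endpoint:
+
+```json
+{
+  "mode": "single_item_qa",
+  "prompt": "What are the key obligations in this contract?",
+  "items": [{ "id": "1233039227512", "type": "file" }],
+  "ai_agent": {
+    "type": "ai_agent_ask",
+    "basic_text": { "model": "aws__claude_3_5_sonnet" },
+    "long_text": { "model": "aws__claude_3_5_sonnet" }
+  }
+}
+```
+
+Pinning the model this way keeps answers on Claude 3.5 Sonnet even if the default ask model changes.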
+ +[aws-claude]: https://aws.amazon.com/bedrock/claude/ + +[overrides]: g://box-ai/ai-agents/overrides-tutorial diff --git a/guides/box-ai/ai-models/aws-claude-3-haiku-model-card.md b/guides/box-ai/ai-models/aws-claude-3-haiku-model-card.md new file mode 100644 index 000000000..d578b774e --- /dev/null +++ b/guides/box-ai/ai-models/aws-claude-3-haiku-model-card.md @@ -0,0 +1,50 @@ +--- +rank: 15 +related_guides: + - box-ai/ask-questions + - box-ai/generate-text + - box-ai/extract-metadata + - box-ai/extract-metadata-structured + - box-ai/ai-agents/get-agent-default-config +category_id: box-ai +subcategory_id: box-ai/ai-models +is_index: false +id: box-ai/ai-models/aws-claude-3-haiku-model-card +type: guide +total_steps: 15 +sibling_id: box-ai/ai-models +parent_id: box-ai/ai-models +next_page_id: box-ai/ai-models/aws-claude-3-sonnet-model-card +previous_page_id: box-ai/ai-models/aws-claude-3-5-sonnet-model-card +source_url: >- + https://github.com/box/developer.box.com/blob/main/content/guides/box-ai/ai-models/aws-claude-3-haiku-model-card.md +fullyTranslated: true +--- +# AWS Claude 3 Haiku + +## 概要 + +**AWS Claude 3 Haiku** model is tailored for various language tasks, including creative writing and conversational AI. + +## Model details + +| 項目 | Value | 説明 | +| --------------------- | ----------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | +| Model name | **AWS Claude 3 Haiku** | The name of the model. | +| API model name | `aws__claude_3_haiku` | The name of the model that is used in the [Box AI API for model overrides][overrides]. The user must provide this exact name for the API to work. | +| Hosting layer | **Amazon Web Services (AWS)** | The trusted organization that securely hosts LLM. | +| Model provider | **Anthropic** | The organization that provides this model. | +| Release date | **March 13th, 2024** | The release date for the model. | +| Knowledge cutoff date | **August 2023** | The date after which the model does not get any information updates. | +| Input context window | **200k tokens** | The number of tokens supported by the input context window. | +| Maximum output tokens | **4k tokens** | The number of tokens that can be generated by the model in a single request. | +| Empirical throughput | **117** | The number of tokens the model can generate per second. | +| Open source | **いいえ** | Specifies if the model's code is available for public use. | + +## Additional documentation + +For additional information, see [official AWS Claude 3 Haiku documentation][aws-claude]. 
+ +[aws-claude]: https://aws.amazon.com/bedrock/claude/ + +[overrides]: g://box-ai/ai-agents/overrides-tutorial diff --git a/guides/box-ai/ai-models/aws-claude-3-sonnet-model-card.md b/guides/box-ai/ai-models/aws-claude-3-sonnet-model-card.md new file mode 100644 index 000000000..9a95a2e3f --- /dev/null +++ b/guides/box-ai/ai-models/aws-claude-3-sonnet-model-card.md @@ -0,0 +1,48 @@ +--- +rank: 16 +related_guides: + - box-ai/ask-questions + - box-ai/generate-text + - box-ai/extract-metadata + - box-ai/extract-metadata-structured + - box-ai/ai-agents/get-agent-default-config +category_id: box-ai +subcategory_id: box-ai/ai-models +is_index: false +id: box-ai/ai-models/aws-claude-3-sonnet-model-card +type: guide +total_steps: 15 +sibling_id: box-ai/ai-models +parent_id: box-ai/ai-models +next_page_id: box-ai/ai-models/aws-titan-text-lite-model-card +previous_page_id: box-ai/ai-models/aws-claude-3-haiku-model-card +source_url: >- + https://github.com/box/developer.box.com/blob/main/content/guides/box-ai/ai-models/aws-claude-3-sonnet-model-card.md +fullyTranslated: true +--- +# AWS Claude 3 Sonnet + +**AWS Claude 3 Sonnet** model is designed for advanced language tasks, focusing on comprehension and context handling. + +## Model details + +| 項目 | Value | 説明 | +| --------------------- | ----------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | +| Model name | **AWS Claude 3 Sonnet** | The name of the model. | +| API model name | `aws__claude_3_sonnet` | The name of the model that is used in the [Box AI API for model overrides][overrides]. The user must provide this exact name for the API to work. | +| Hosting layer | **Amazon Web Services (AWS)** | The trusted organization that securely hosts LLM. | +| Model provider | **Anthropic** | The organization that provides this model. | +| Release date | **March 4th 2024** | The release date for the model. | +| Knowledge cutoff date | **August 2023** | The date after which the model does not get any information updates. | +| Input context window | **200k tokens** | The number of tokens supported by the input context window. | +| Maximum output tokens | **4k tokens** | The number of tokens that can be generated by the model in a single request. | +| Empirical throughput | **49.8** | The number of tokens the model can generate per second. | +| Open source | **いいえ** | Specifies if the model's code is available for public use. | + +## Additional documentation + +For additional information, see [official AWS Claude 3 Sonnet documentation][aws-claude]. 
+
+[aws-claude]: https://aws.amazon.com/bedrock/claude/
+
+[overrides]: g://box-ai/ai-agents/overrides-tutorial
diff --git a/guides/box-ai/ai-models/aws-titan-text-lite-model-card.md b/guides/box-ai/ai-models/aws-titan-text-lite-model-card.md
new file mode 100644
index 000000000..4e4691e91
--- /dev/null
+++ b/guides/box-ai/ai-models/aws-titan-text-lite-model-card.md
@@ -0,0 +1,48 @@
+---
+rank: 17
+related_guides:
+ - box-ai/ask-questions
+ - box-ai/generate-text
+ - box-ai/extract-metadata
+ - box-ai/extract-metadata-structured
+ - box-ai/ai-agents/get-agent-default-config
+category_id: box-ai
+subcategory_id: box-ai/ai-models
+is_index: false
+id: box-ai/ai-models/aws-titan-text-lite-model-card
+type: guide
+total_steps: 15
+sibling_id: box-ai/ai-models
+parent_id: box-ai/ai-models
+next_page_id: ''
+previous_page_id: box-ai/ai-models/aws-claude-3-sonnet-model-card
+source_url: >-
+ https://github.com/box/developer.box.com/blob/main/content/guides/box-ai/ai-models/aws-titan-text-lite-model-card.md
+fullyTranslated: true
+---
+# AWS Titan Text Lite
+
+The **AWS Titan Text Lite** model is a lightweight model designed for advanced language processing. It can handle extensive contexts, which makes it suitable for complex tasks.
+
+## Model details
+
+| Item | Value | Description |
+| --- | --- | --- |
+| Model name | **AWS Titan Text Lite** | The name of the model. |
+| API model name | `aws__titan_text_lite` | The name of the model that is used in the [Box AI API for model overrides][overrides]. The user must provide this exact name for the API to work. |
+| Hosting layer | **Amazon Web Services (AWS)** | The trusted organization that securely hosts the LLM. |
+| Model provider | **Amazon** | The organization that provides this model. |
+| Release date | **September 2024** | The release date for the model. |
+| Knowledge cutoff date | **Not provided** | The date after which the model does not get any information updates. |
+| Input context window | **128k tokens** | The number of tokens supported by the input context window. |
+| Maximum output tokens | **4k tokens** | The number of tokens that can be generated by the model in a single request. |
+| Empirical throughput | **Not specified** | The number of tokens the model can generate per second. |
+| Open source | **No** | Specifies if the model's code is available for public use. |
+
+## Additional documentation
+
+For additional information, see [official AWS Titan documentation][aws-titan].
+ +[aws-titan]: https://aws.amazon.com/bedrock/titan/ + +[overrides]: g://box-ai/ai-agents/overrides-tutorial diff --git a/guides/box-ai/ai-models/azure-openai-gpt-4o-2024-05-13-model-card.md b/guides/box-ai/ai-models/azure-openai-gpt-4o-2024-05-13-model-card.md new file mode 100644 index 000000000..7265d7255 --- /dev/null +++ b/guides/box-ai/ai-models/azure-openai-gpt-4o-2024-05-13-model-card.md @@ -0,0 +1,48 @@ +--- +rank: 3 +related_guides: + - box-ai/ask-questions + - box-ai/generate-text + - box-ai/extract-metadata + - box-ai/extract-metadata-structured + - box-ai/ai-agents/get-agent-default-config +category_id: box-ai +subcategory_id: box-ai/ai-models +is_index: false +id: box-ai/ai-models/azure-openai-gpt-4o-2024-05-13-model-card +type: guide +total_steps: 15 +sibling_id: box-ai/ai-models +parent_id: box-ai/ai-models +next_page_id: box-ai/ai-models/azure-openai-gpt-4o-mini-model-card +previous_page_id: box-ai/ai-models +source_url: >- + https://github.com/box/developer.box.com/blob/main/content/guides/box-ai/ai-models/azure-openai-gpt-4o-2024-05-13-model-card.md +fullyTranslated: true +--- +# Azure OpenAI GPT-4o-2024-05-13 + +**Azure OpenAI GPT-4o-2024-05-13** is a multimodal model designed to handle lightweight tasks. + +## Model details + +| 項目 | Value | 説明 | +| --------------------- | ---------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | +| Model name | **GPT-4o 2024-05-13** | The name of the model. | +| API model name | `azure__openai__gpt_4o_2024-05-13` | The name of the model that is used in the [Box AI API for model overrides][overrides]. The user must provide this exact name for the API to work. | +| Hosting layer | **Microsoft Azure** | The trusted organization that securely hosts LLM. | +| Model provider | **OpenAI** | The organization that provides this model. | +| Release date | **May 13th, 2024** | The release date for the model. | +| Knowledge cutoff date | **September 2023** | The date after which the model does not get any information updates. | +| Input context window | **128k tokens** | The number of tokens supported by the input context window. | +| Maximum output tokens | **2k tokens** | The number of tokens that can be generated by the model in a single request. | +| Empirical throughput | **87.5** | The number of tokens the model can generate per second. | +| Open source | **いいえ** | Specifies if the model's code is available for public use. | + +## Additional documentation + +For additional information, see [official Azure OpenAI GPT-4o-2024-05-13 documentation][azure-ai-mini-4o-model]. 
+ +[azure-ai-mini-4o-model]: https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=python-secure#gpt-4o-and-gpt-4-turbo + +[overrides]: g://box-ai/ai-agents/overrides-tutorial diff --git a/guides/box-ai/ai-models/azure-openai-gpt-4o-mini-model-card.md b/guides/box-ai/ai-models/azure-openai-gpt-4o-mini-model-card.md new file mode 100644 index 000000000..98ef44a59 --- /dev/null +++ b/guides/box-ai/ai-models/azure-openai-gpt-4o-mini-model-card.md @@ -0,0 +1,65 @@ +--- +rank: 4 +related_guides: + - box-ai/ask-questions + - box-ai/generate-text + - box-ai/extract-metadata + - box-ai/extract-metadata-structured + - box-ai/ai-agents/get-agent-default-config +category_id: box-ai +subcategory_id: box-ai/ai-models +is_index: false +id: box-ai/ai-models/azure-openai-gpt-4o-mini-model-card +type: guide +total_steps: 15 +sibling_id: box-ai/ai-models +parent_id: box-ai/ai-models +next_page_id: box-ai/ai-models/azure-text-embedding-ada-002-model-card +previous_page_id: box-ai/ai-models/azure-openai-gpt-4o-2024-05-13-model-card +source_url: >- + https://github.com/box/developer.box.com/blob/main/content/guides/box-ai/ai-models/azure-openai-gpt-4o-mini-model-card.md +fullyTranslated: true +--- +# Azure OpenAI GPT-4o Mini + +**Azure OpenAI GPT-4o Mini** is a multimodal model designed to handle lightweight tasks. + +## Model details + +| 項目 | Value | 説明 | +| --------------------- | ---------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | +| Model name | **GPT-4o Mini** | The name of the model. | +| API model name | `azure__openai__gpt_4o_mini` | The name of the model that is used in the [Box AI API for model overrides][overrides]. The user must provide this exact name for the API to work. | +| Hosting layer | **Microsoft Azure** | The trusted organization that securely hosts LLM. | +| Model provider | **OpenAI** | The organization that provides this model. | +| Release date | **July 18th, 2024** | The release date for the model. | +| Knowledge cutoff date | **October 2023** | The date after which the model does not get any information updates. | +| Input context window | **128k tokens** | The number of tokens supported by the input context window. | +| Maximum output tokens | **16k tokens** | The number of tokens that can be generated by the model in a single request. | +| Empirical throughput | **85.4** | The number of tokens the model can generate per second. | +| Open source | **いいえ** | Specifies if the model's code is available for public use. | + +## 使用方法 + +Box Webapp uses this model to cover the following use cases: + +* Creating content +* Editing content +* Creating summaries +* Single doc Q&A on text + +Box AI API uses this model to cover the following use cases: + +* Creating content +* Editing content +* Creating summaries +* Single doc Q&A on text +* Extracting metadata + +## Additional documentation + +For additional information, see [official Azure OpenAI GPT-4o Mini documentation][azure-ai-mini-4o-model]. 
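+
+As a hedged sketch (placeholder file ID and prompt, not an official sample), pinning this model for a content-creation call on the `POST /2.0/ai/text_gen` endpoint could look like this:
+
+```json
+{
+  "prompt": "Draft a short status update based on this document.",
+  "items": [{ "id": "1233039227512", "type": "file" }],
+  "ai_agent": {
+    "type": "ai_agent_text_gen",
+    "basic_gen": { "model": "azure__openai__gpt_4o_mini" }
+  }
+}
+```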
+ +[azure-ai-mini-4o-model]: https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=python-secure#gpt-4o-and-gpt-4-turbo + +[overrides]: g://box-ai/ai-agents/overrides-tutorial diff --git a/guides/box-ai/ai-models/azure-text-embedding-ada-002-model-card.md b/guides/box-ai/ai-models/azure-text-embedding-ada-002-model-card.md new file mode 100644 index 000000000..9b0456dd0 --- /dev/null +++ b/guides/box-ai/ai-models/azure-text-embedding-ada-002-model-card.md @@ -0,0 +1,48 @@ +--- +rank: 5 +related_guides: + - box-ai/ask-questions + - box-ai/generate-text + - box-ai/extract-metadata + - box-ai/extract-metadata-structured + - box-ai/ai-agents/get-agent-default-config +category_id: box-ai +subcategory_id: box-ai/ai-models +is_index: false +id: box-ai/ai-models/azure-text-embedding-ada-002-model-card +type: guide +total_steps: 15 +sibling_id: box-ai/ai-models +parent_id: box-ai/ai-models +next_page_id: box-ai/ai-models/google-gemini-1-5-flash-001-model-card +previous_page_id: box-ai/ai-models/azure-openai-gpt-4o-mini-model-card +source_url: >- + https://github.com/box/developer.box.com/blob/main/content/guides/box-ai/ai-models/azure-text-embedding-ada-002-model-card.md +fullyTranslated: true +--- +# Azure text-embedding-ada-002 + +**Azure text-embedding-ada-002** is a multimodal model designed to handle lightweight tasks. + +## Model details + +| 項目 | Value | 説明 | +| --------------------- | --------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | +| Model name | **text-embedding-ada-002** | The name of the model. | +| API model name | `azure__openai__text_embedding_ada_002` | The name of the model that is used in the [Box AI API for model overrides][overrides]. The user must provide this exact name for the API to work. | +| Hosting layer | **Microsoft Azure** | The trusted organization that securely hosts LLM. | +| Model provider | **OpenAI** | The organization that provides this model. | +| Release date | **December 2022** | The release date for the model. | +| Knowledge cutoff date | **September 2021** | The date after which the model does not get any information updates. | +| Input context window | **8k tokens** | The number of tokens supported by the input context window. | +| Maximum output tokens | **Not applicable** | The number of tokens that can be generated by the model in a single request. | +| Empirical throughput | **1000** | The number of tokens the model can generate per second. | +| Open source | **いいえ** | Specifies if the model's code is available for public use. | + +## Additional documentation + +For additional information, see [official Azure Embeddings models documentation][azure-ai-embeddings]. 
+ +[azure-ai-embeddings]: https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models#embeddings + +[overrides]: g://box-ai/ai-agents/overrides-tutorial diff --git a/guides/box-ai/ai-models/google-gemini-1-5-flash-001-model-card.md b/guides/box-ai/ai-models/google-gemini-1-5-flash-001-model-card.md new file mode 100644 index 000000000..63156990e --- /dev/null +++ b/guides/box-ai/ai-models/google-gemini-1-5-flash-001-model-card.md @@ -0,0 +1,48 @@ +--- +rank: 6 +related_guides: + - box-ai/ask-questions + - box-ai/generate-text + - box-ai/extract-metadata + - box-ai/extract-metadata-structured + - box-ai/ai-agents/get-agent-default-config +category_id: box-ai +subcategory_id: box-ai/ai-models +is_index: false +id: box-ai/ai-models/google-gemini-1-5-flash-001-model-card +type: guide +total_steps: 15 +sibling_id: box-ai/ai-models +parent_id: box-ai/ai-models +next_page_id: box-ai/ai-models/google-gemini-1-5-pro-001-model-card +previous_page_id: box-ai/ai-models/azure-text-embedding-ada-002-model-card +source_url: >- + https://github.com/box/developer.box.com/blob/main/content/guides/box-ai/ai-models/google-gemini-1-5-flash-001-model-card.md +fullyTranslated: true +--- +# Google Gemini 1.5 Flash + +**Google Gemini 1.5 Flash** is a multimodal model designed to handle lightweight tasks. It is designed for high-volume, low-latency tasks, making it highly efficient for large-scale use cases like summarization, multimodal processing, and categorization + +## Model details + +| 項目 | Value | 説明 | +| --------------------- | ------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------- | +| Model name | **Google Gemini 1.5 Flash** | The name of the model. | +| API model name | `google__gemini_1_5_flash_001` | The name of the model that is used in the [Box AI API for model overrides][overrides]. The user must provide this exact name for the API to work. | +| Hosting layer | **Google** | The trusted organization that securely hosts LLM. | +| Model provider | **Google** | The organization that provides this model. | +| Release date | **May 14th 2024** | The release date for the model. | +| Knowledge cutoff date | **November 2023** | The date after which the model does not get any information updates. | +| Input context window | **1m tokens** | The number of tokens supported by the input context window. | +| Maximum output tokens | **8k tokens** | The number of tokens that can be generated by the model in a single request. | +| Empirical throughput | **176** | The number of tokens the model can generate per second. | +| Open source | **いいえ** | Specifies if the model's code is available for public use. | + +## Additional documentation + +For additional information, see [official Google Gemini 1.5 Flash documentation][vertex-ai-gemini-models]. 
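+
+Because this model appears as the extraction model in the historical agent configuration shown earlier, a rough sketch (placeholder file ID, illustrative prompt) of pinning it explicitly on the `POST /2.0/ai/extract` endpoint could look like this:
+
+```json
+{
+  "prompt": "Extract the invoice number and the total amount.",
+  "items": [{ "id": "1233039227512", "type": "file" }],
+  "ai_agent": {
+    "type": "ai_agent_extract",
+    "basic_text": { "model": "google__gemini_1_5_flash_001" },
+    "long_text": { "model": "google__gemini_1_5_flash_001" }
+  }
+}
+```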
+
+[vertex-ai-gemini-models]: https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-models
+
+[overrides]: g://box-ai/ai-agents/overrides-tutorial
diff --git a/guides/box-ai/ai-models/google-gemini-1-5-pro-001-model-card.md b/guides/box-ai/ai-models/google-gemini-1-5-pro-001-model-card.md
new file mode 100644
index 000000000..9138f9ec1
--- /dev/null
+++ b/guides/box-ai/ai-models/google-gemini-1-5-pro-001-model-card.md
@@ -0,0 +1,48 @@
+---
+rank: 7
+related_guides:
+ - box-ai/ask-questions
+ - box-ai/generate-text
+ - box-ai/extract-metadata
+ - box-ai/extract-metadata-structured
+ - box-ai/ai-agents/get-agent-default-config
+category_id: box-ai
+subcategory_id: box-ai/ai-models
+is_index: false
+id: box-ai/ai-models/google-gemini-1-5-pro-001-model-card
+type: guide
+total_steps: 15
+sibling_id: box-ai/ai-models
+parent_id: box-ai/ai-models
+next_page_id: box-ai/ai-models/google-text-bison-32-model-card
+previous_page_id: box-ai/ai-models/google-gemini-1-5-flash-001-model-card
+source_url: >-
+ https://github.com/box/developer.box.com/blob/main/content/guides/box-ai/ai-models/google-gemini-1-5-pro-001-model-card.md
+fullyTranslated: true
+---
+# Google Gemini 1.5 Pro 001
+
+**Google Gemini 1.5 Pro 001** is a mid-size multimodal model capable of handling extensive inputs, such as long videos, hours of audio, large data sets, and complex reasoning tasks.
+
+## Model details
+
+| Item | Value | Description |
+| --- | --- | --- |
+| Model name | **Google Gemini 1.5 Pro 001** | The name of the model. |
+| API model name | `google__gemini_1_5_pro_001` | The name of the model that is used in the [Box AI API for model overrides][overrides]. The user must provide this exact name for the API to work. |
+| Hosting layer | **Google** | The trusted organization that securely hosts the LLM. |
+| Model provider | **Google** | The organization that provides this model. |
+| Release date | **February 15th, 2024** | The release date for the model. |
+| Knowledge cutoff date | **November 2023** | The date after which the model does not get any information updates. |
+| Input context window | **1m tokens** | The number of tokens supported by the input context window. |
+| Maximum output tokens | **8k tokens** | The number of tokens that can be generated by the model in a single request. |
+| Empirical throughput | **45.5** | The number of tokens the model can generate per second. |
+| Open source | **No** | Specifies if the model's code is available for public use. |
+
+## Additional documentation
+
+For additional information, see [official Google Gemini 1.5 Pro documentation][vertex-ai-gemini-models].
+ +[vertex-ai-gemini-models]: https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-models + +[overrides]: g://box-ai/ai-agents/overrides-tutorial diff --git a/guides/box-ai/ai-models/google-text-bison-32-model-card.md b/guides/box-ai/ai-models/google-text-bison-32-model-card.md new file mode 100644 index 000000000..70b3691ef --- /dev/null +++ b/guides/box-ai/ai-models/google-text-bison-32-model-card.md @@ -0,0 +1,48 @@ +--- +rank: 8 +related_guides: + - box-ai/ask-questions + - box-ai/generate-text + - box-ai/extract-metadata + - box-ai/extract-metadata-structured + - box-ai/ai-agents/get-agent-default-config +category_id: box-ai +subcategory_id: box-ai/ai-models +is_index: false +id: box-ai/ai-models/google-text-bison-32-model-card +type: guide +total_steps: 15 +sibling_id: box-ai/ai-models +parent_id: box-ai/ai-models +next_page_id: box-ai/ai-models/google-text-bison-model-card +previous_page_id: box-ai/ai-models/google-gemini-1-5-pro-001-model-card +source_url: >- + https://github.com/box/developer.box.com/blob/main/content/guides/box-ai/ai-models/google-text-bison-32-model-card.md +fullyTranslated: true +--- +# Google Text Bison 32k + +**Google Text Bison 32k** is a multimodal model designed to handle lightweight tasks. This model supports a long context window, making it highly suitable for tasks involving large text input, such as summarization or code analysis. + +## Model details + +| 項目 | Value | 説明 | +| --------------------- | ------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | +| Model name | **Google Text Bison 32k** | The name of the model. | +| API model name | `google__text_bison_32k` | The name of the model that is used in the [Box AI API for model overrides][overrides]. The user must provide this exact name for the API to work. | +| Hosting layer | **Google** | The trusted organization that securely hosts LLM. | +| Model provider | **Google** | The organization that provides this model. | +| Release date | **September 2023** | The release date for the model. | +| Knowledge cutoff date | **2023** | The date after which the model does not get any information updates. | +| Input context window | **32k tokens** | The number of tokens supported by the input context window. | +| Maximum output tokens | **8k tokens** | The number of tokens that can be generated by the model in a single request. | +| Empirical throughput | **Not specified** | The number of tokens the model can generate per second. | +| Open source | **いいえ** | Specifies if the model's code is available for public use. | + +## Additional documentation + +For additional information, see [official Google Text Bison documentation][vertex-text-models]. 
+ +[vertex-text-models]: https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/text + +[overrides]: g://box-ai/ai-agents/overrides-tutorial diff --git a/guides/box-ai/ai-models/google-text-bison-model-card.md b/guides/box-ai/ai-models/google-text-bison-model-card.md new file mode 100644 index 000000000..ae75b4a0d --- /dev/null +++ b/guides/box-ai/ai-models/google-text-bison-model-card.md @@ -0,0 +1,48 @@ +--- +rank: 9 +related_guides: + - box-ai/ask-questions + - box-ai/generate-text + - box-ai/extract-metadata + - box-ai/extract-metadata-structured + - box-ai/ai-agents/get-agent-default-config +category_id: box-ai +subcategory_id: box-ai/ai-models +is_index: false +id: box-ai/ai-models/google-text-bison-model-card +type: guide +total_steps: 15 +sibling_id: box-ai/ai-models +parent_id: box-ai/ai-models +next_page_id: box-ai/ai-models/google-text-unicorn-model-card +previous_page_id: box-ai/ai-models/google-text-bison-32-model-card +source_url: >- + https://github.com/box/developer.box.com/blob/main/content/guides/box-ai/ai-models/google-text-bison-model-card.md +fullyTranslated: true +--- +# Google Text Bison + +**Google Text Bison** is a multimodal model designed to handle lightweight tasks. + +## Model details + +| 項目 | Value | 説明 | +| --------------------- | --------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | +| Model name | **Google Text Bison** | The name of the model. | +| API model name | `google__text_bison` | The name of the model that is used in the [Box AI API for model overrides][overrides]. The user must provide this exact name for the API to work. | +| Hosting layer | **Google** | The trusted organization that securely hosts LLM. | +| Model provider | **Google** | The organization that provides this model. | +| Release date | **May 2023** | The release date for the model. | +| Knowledge cutoff date | **February 2023** | The date after which the model does not get any information updates. | +| Input context window | **8k tokens** | The number of tokens supported by the input context window. | +| Maximum output tokens | **1k tokens** | The number of tokens that can be generated by the model in a single request. | +| Empirical throughput | **Not specified** | The number of tokens the model can generate per second. | +| Open source | **いいえ** | Specifies if the model's code is available for public use. | + +## Additional documentation + +For additional information, see [official Google Text Bison documentation][vertex-text-models]. 
+ +[vertex-text-models]: https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/text + +[overrides]: g://box-ai/ai-agents/overrides-tutorial diff --git a/guides/box-ai/ai-models/google-text-unicorn-model-card.md b/guides/box-ai/ai-models/google-text-unicorn-model-card.md new file mode 100644 index 000000000..a349fb214 --- /dev/null +++ b/guides/box-ai/ai-models/google-text-unicorn-model-card.md @@ -0,0 +1,48 @@ +--- +rank: 10 +related_guides: + - box-ai/ask-questions + - box-ai/generate-text + - box-ai/extract-metadata + - box-ai/extract-metadata-structured + - box-ai/ai-agents/get-agent-default-config +category_id: box-ai +subcategory_id: box-ai/ai-models +is_index: false +id: box-ai/ai-models/google-text-unicorn-model-card +type: guide +total_steps: 15 +sibling_id: box-ai/ai-models +parent_id: box-ai/ai-models +next_page_id: box-ai/ai-models/google-textembedding-gecko-model-card +previous_page_id: box-ai/ai-models/google-text-bison-model-card +source_url: >- + https://github.com/box/developer.box.com/blob/main/content/guides/box-ai/ai-models/google-text-unicorn-model-card.md +fullyTranslated: true +--- +# Google Text Unicorn + +**Google Text Unicorn** is a multimodal model designed to handle lightweight tasks. + +## Model details + +| 項目 | Value | 説明 | +| --------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | +| Model name | **Google Text Unicorn** | The specific name of the model. | +| API model name | `google__text_unicorn` | The name of the model that is used in the [Box AI API for model overrides][overrides]. The user must provide this exact name for the API to work. | +| Hosting layer | **Google** | The trusted organization that securely hosts LLM. | +| Model provider | **Google** | The organization that provides this model. | +| Release date | **June 2023** | The release date for the model. | +| Knowledge cutoff date | **middle of 2021** | The date after which the model does not get any information updates. | +| Input context window | **8k tokens** | The number of tokens supported by the input context window. | +| Maximum output tokens | **1k tokens** | The number of tokens that can be generated by the model in a single request. | +| Empirical throughput | **Not specified, similar models offer up to 100 tokens per second** | The number of tokens the model can generate per second. | +| Open source | **いいえ** | Specifies if the model's code is available for public use. | + +## Additional documentation + +For additional information, see [official Google Text Unicorn documentation][vertex-text-models]. 
+ +[vertex-text-models]: https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/text + +[overrides]: g://box-ai/ai-agents/overrides-tutorial diff --git a/guides/box-ai/ai-models/google-textembedding-gecko-002-model-card.md b/guides/box-ai/ai-models/google-textembedding-gecko-002-model-card.md new file mode 100644 index 000000000..f754c4319 --- /dev/null +++ b/guides/box-ai/ai-models/google-textembedding-gecko-002-model-card.md @@ -0,0 +1,48 @@ +--- +rank: 12 +related_guides: + - box-ai/ask-questions + - box-ai/generate-text + - box-ai/extract-metadata + - box-ai/extract-metadata-structured + - box-ai/ai-agents/get-agent-default-config +category_id: box-ai +subcategory_id: box-ai/ai-models +is_index: false +id: box-ai/ai-models/google-textembedding-gecko-002-model-card +type: guide +total_steps: 15 +sibling_id: box-ai/ai-models +parent_id: box-ai/ai-models +next_page_id: box-ai/ai-models/google-textembedding-gecko-003-model-card +previous_page_id: box-ai/ai-models/google-textembedding-gecko-model-card +source_url: >- + https://github.com/box/developer.box.com/blob/main/content/guides/box-ai/ai-models/google-textembedding-gecko-002-model-card.md +fullyTranslated: true +--- +# Google textembedding-gecko@002 + +Text embedding models convert textual data into numerical vectors that can be processed by machine learning algorithms. The **Google textembedding-gecko@002** model features enhanced AI quality, optimized for embedding tasks, converting text into vector representations, useful in various AI applications. + +## Model details + +| 項目 | Value | 説明 | +| --------------------- | ---------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | +| Model name | **Google textembedding-gecko@002** | The name of the model. | +| API model name | `google__textembedding_gecko_002` | The name of the model that is used in the [Box AI API for model overrides][overrides]. The user must provide this exact name for the API to work. | +| Hosting layer | **Google** | The trusted organization that securely hosts LLM. | +| Model provider | **Google** | The organization that provides this model. | +| Release date | **2023** | The release date for the model. | +| Knowledge cutoff date | **March 2024** | The date after which the model does not get any information updates. | +| Input context window | **8k tokens** | The number of tokens supported by the input context window. | +| Maximum output tokens | **Not applicable** | The number of tokens that can be generated by the model in a single request. | +| Empirical throughput | **Not specified** | The number of tokens the model can generate per second. | +| Open source | **いいえ** | Specifies if the model's code is available for public use. | + +## Additional documentation + +For additional information, see [official Google textembedding models documentation][vertex-ai-model]. 
+
+[vertex-ai-model]: https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#models
+
+[overrides]: g://box-ai/ai-agents/overrides-tutorial
diff --git a/guides/box-ai/ai-models/google-textembedding-gecko-003-model-card.md b/guides/box-ai/ai-models/google-textembedding-gecko-003-model-card.md
new file mode 100644
index 000000000..e94a5c051
--- /dev/null
+++ b/guides/box-ai/ai-models/google-textembedding-gecko-003-model-card.md
@@ -0,0 +1,48 @@
+---
+rank: 13
+related_guides:
+  - box-ai/ask-questions
+  - box-ai/generate-text
+  - box-ai/extract-metadata
+  - box-ai/extract-metadata-structured
+  - box-ai/ai-agents/get-agent-default-config
+category_id: box-ai
+subcategory_id: box-ai/ai-models
+is_index: false
+id: box-ai/ai-models/google-textembedding-gecko-003-model-card
+type: guide
+total_steps: 15
+sibling_id: box-ai/ai-models
+parent_id: box-ai/ai-models
+next_page_id: box-ai/ai-models/aws-claude-3-5-sonnet-model-card
+previous_page_id: box-ai/ai-models/google-textembedding-gecko-002-model-card
+source_url: >-
+  https://github.com/box/developer.box.com/blob/main/content/guides/box-ai/ai-models/google-textembedding-gecko-003-model-card.md
+fullyTranslated: true
+---
+# Google textembedding-gecko@003
+
+Text embedding models convert textual data into numerical vectors that can be processed by machine learning algorithms. The **Google textembedding-gecko@003** model features enhanced AI quality and is designed for advanced embedding tasks, enhancing the performance of text-related applications.
+
+## Model details
+
+| 項目 | Value | 説明 |
+| --------------------- | ---------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- |
+| Model name | **Google textembedding-gecko@003** | The name of the model. |
+| API model name | `google__textembedding_gecko_003` | The name of the model that is used in the [Box AI API for model overrides][overrides]. The user must provide this exact name for the API to work. |
+| Hosting layer | **Google** | The trusted organization that securely hosts the LLM. |
+| Model provider | **Google** | The organization that provides this model. |
+| Release date | **March 29, 2024** | The release date for the model. |
+| Knowledge cutoff date | **March 2024** | The date after which the model does not get any information updates. |
+| Input context window | | The number of tokens supported by the input context window. |
+| Maximum output tokens | **Not applicable** | The number of tokens that can be generated by the model in a single request. |
+| Empirical throughput | **Not specified** | The number of tokens the model can generate per second. |
+| Open source | **いいえ** | Specifies if the model's code is available for public use. |
+
+## Additional documentation
+
+For additional information, see [official Google textembedding models documentation][vertex-ai-model].
+ +[vertex-ai-model]: https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#models + +[overrides]: g://box-ai/ai-agents/overrides-tutorial diff --git a/guides/box-ai/ai-models/google-textembedding-gecko-model-card.md b/guides/box-ai/ai-models/google-textembedding-gecko-model-card.md new file mode 100644 index 000000000..222df4268 --- /dev/null +++ b/guides/box-ai/ai-models/google-textembedding-gecko-model-card.md @@ -0,0 +1,48 @@ +--- +rank: 11 +related_guides: + - box-ai/ask-questions + - box-ai/generate-text + - box-ai/extract-metadata + - box-ai/extract-metadata-structured + - box-ai/ai-agents/get-agent-default-config +category_id: box-ai +subcategory_id: box-ai/ai-models +is_index: false +id: box-ai/ai-models/google-textembedding-gecko-model-card +type: guide +total_steps: 15 +sibling_id: box-ai/ai-models +parent_id: box-ai/ai-models +next_page_id: box-ai/ai-models/google-textembedding-gecko-002-model-card +previous_page_id: box-ai/ai-models/google-text-unicorn-model-card +source_url: >- + https://github.com/box/developer.box.com/blob/main/content/guides/box-ai/ai-models/google-textembedding-gecko-model-card.md +fullyTranslated: true +--- +# Google textembedding-gecko + +Text embedding models convert textual data into numerical vectors that can be processed by machine learning algorithms. The **Google textembedding-gecko** model features enhanced AI quality, excels in text embeddings, offering strong retrieval performance while being compact. + +## Model details + +| 項目 | Value | 説明 | +| --------------------- | ------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------- | +| Model name | **Google textembedding-gecko** | The name of the model. | +| API model name | `google__textembedding_gecko` | The name of the model that is used in the [Box AI API for model overrides][overrides]. The user must provide this exact name for the API to work. | +| Hosting layer | **Google** | The trusted organization that securely hosts LLM. | +| Model provider | **Google** | The organization that provides this model. | +| Release date | **August 2024** | The release date for the model. | +| Knowledge cutoff date | **Not applicable** | The date after which the model does not get any information updates. | +| Input context window | **8k tokens** | The number of tokens supported by the input context window. | +| Maximum output tokens | **Not applicable** | The number of tokens that can be generated by the model in a single request. | +| Empirical throughput | **Not specified** | The number of tokens the model can generate per second. | +| Open source | **いいえ** | Specifies if the model's code is available for public use. | + +## Additional documentation + +For additional information, see [official Google textembedding models documentation][vertex-ai-model]. 
+
+[vertex-ai-model]: https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#models
+
+[overrides]: g://box-ai/ai-agents/overrides-tutorial
diff --git a/guides/box-ai/ai-models/index.md b/guides/box-ai/ai-models/index.md
new file mode 100644
index 000000000..9fdc51423
--- /dev/null
+++ b/guides/box-ai/ai-models/index.md
@@ -0,0 +1,408 @@
+---
+rank: 1
+related_guides:
+  - box-ai/ask-questions
+  - box-ai/generate-text
+  - box-ai/ai-agents/get-agent-default-config
+alias_paths:
+  - guides/box-ai/supported-models
+category_id: box-ai
+subcategory_id: box-ai/ai-models
+is_index: true
+id: box-ai/ai-models
+type: guide
+total_steps: 15
+sibling_id: box-ai
+parent_id: box-ai
+next_page_id: box-ai/ai-models/azure-openai-gpt-4o-2024-05-13-model-card
+previous_page_id: ''
+source_url: >-
+  https://github.com/box/developer.box.com/blob/main/content/guides/box-ai/ai-models/index.md
+fullyTranslated: true
+---
+# サポートされているAIモデル
+
+
+
+Endpoints related to metadata extraction are currently a beta feature offered subject to Box’s Main Beta Agreement, and the available capabilities may change. Box AI API is available to all Enterprise Plus customers.
+
+
+
+## Using models
+
+You can use the supported AI models to:
+
+* get the [AI agent default configuration][agent]
+* override the AI agent configuration used in the [`POST 2.0/ai/ask`][ask], [`POST 2.0/ai/text_gen`][text-gen], [`POST 2.0/ai/extract`][extract], and [`POST 2.0/ai/extract_structured`][extract-structured] endpoints.
+
+When using the `model` parameter in your API calls, use the **API Name** visible on each tile and model card.
+
+For example, to get the AI agent configuration for a specific model, use the [model][ai-model] parameter and provide the `azure__openai__gpt_4o_mini` API name. Make sure you use **two underscores** after the provider name. For a request example, see the snippet after the model list below.
+
+
+
+The list may change depending on model availability. Models offered in **Preview** mode have not been fully performance-tested at scale and are made available on an as-is basis. You may experience variability in model/output quality, availability, and accuracy.
+
+
+
+
+
+
+
+
+A multimodal model designed to handle lightweight tasks.
+
+
+ + + +Default for Box AI for Docs + + + + + +Available + + + +
+ +
+ + + +A multimodal model, highly efficient in handling complex, multi-step tasks. + +
+ + + +Default for Box AI for Box Hubs + + + + + +Available + + + +
+ +
+ + + +The fastest Gemini multimodal model, built for high volume tasks and latency-sensitive applications. + +
+ + + +Default for Box AI Extract + + + + + +プレビュー + + + +
+ +
+ + + +A foundation model that performs well at a variety of multimodal tasks. + +
+ + + +チャット + + + + + +プレビュー + + + +
+ +
+
+
+
+A highly capable second-generation text embedding model, skilled in text search, code search, and sentence similarity.
+
+
+ + + +埋め込み + + + + + +Available + + + +
+ +
+
+
+
+A text embedding model converting textual data into numerical vectors that machine learning algorithms can process.
+
+
+ + + +埋め込み + + + + + +Available + + + +
+ +
+ + + +A text embedding model converting textual data into numerical vectors that machine learning algorithms can process. + +
+ + + +埋め込み + + + + + +Available + + + +
+ +
+ + + +A text embedding model converting textual data into numerical vectors that machine learning algorithms can process. + +
+ + + +埋め込み + + + + + +Available + + + +
+ +
+
+
+
+A model that can handle complex tasks, such as coding, due to its extensive embedded knowledge.
+
+
+ + + +チャット + + + + + +Available + + + +
+ +
+ + + +A model capable of creating document summaries, answers to questions, and content classification labels. + +
+ + + +チャット + + + + + +Available + + + +
+ +
+ + + +An enhanced **text-bison** model capable of creating document summaries, answers to questions, and content classification labels. + +
+ + + +チャット + + + + + +Available + + + +
+ +
+ + + +A model tailored for various language tasks, including creative writing and conversational AI. + +
+ + + +チャット + + + + + +プレビュー + + + +
+ +
+ + + +A model designed for advanced language tasks, focusing on comprehension and context handling. + +
+ + + +チャット + + + + + +プレビュー + + + +
+ +
+ + + +A model designed to enhance language understanding and generation tasks. + +
+ + + +チャット + + + + + +プレビュー + + + +
+ +
+
+
+
+A model capable of advanced language processing that handles extensive contexts, making it suitable for complex tasks.
+
+
+ + + +チャット + + + + + +プレビュー + + + +
+ +
+ +
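+The following example shows how an **API Name** from the list above can be used to override the default model in a [`POST 2.0/ai/ask`][ask] request. It is a minimal, illustrative body: the file ID is a placeholder and the `ai_agent` object supports more fields than shown here, so check the endpoint reference for the full schema.
+
+```json
+{
+  "mode": "single_item_qa",
+  "prompt": "What is this document about?",
+  "items": [
+    {
+      "id": "1234567890",
+      "type": "file"
+    }
+  ],
+  "ai_agent": {
+    "type": "ai_agent_ask",
+    "basic_text": {
+      "model": "azure__openai__gpt_4o_mini"
+    },
+    "long_text": {
+      "model": "azure__openai__gpt_4o_mini"
+    }
+  }
+}
+```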
+ +[ask]: e://post_ai_ask + +[text-gen]: e://post_ai_text_gen + +[extract]: e://post_ai_extract + +[extract-structured]: e://post_ai_extract_structured + +[agent]: e://get_ai_agent_default + +[azure-ai-gpt-3-5-model]: https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models#gpt-35 + +[azure-ai-mini-4o-model]: https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=python-secure#gpt-4o-and-gpt-4-turbo + +[vertex-ai-model]: https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#models + +[vertex-ai-gemini-models]: https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-models + +[vertex-text-models]: https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/text + +[azure-ai-embeddings]: https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models#embeddings + +[ai-model]: e://get-ai-agent-default#param-model + +[aws-claude]: https://aws.amazon.com/bedrock/claude/ + +[aws-titan]: https://aws.amazon.com/bedrock/titan/ diff --git a/guides/box-ai/ask-questions.md b/guides/box-ai/ask-questions.md index 5427d887a..bcaedd773 100644 --- a/guides/box-ai/ask-questions.md +++ b/guides/box-ai/ask-questions.md @@ -12,7 +12,7 @@ subcategory_id: null is_index: false id: box-ai/ask-questions type: guide -total_steps: 6 +total_steps: 5 sibling_id: box-ai parent_id: box-ai next_page_id: box-ai/generate-text @@ -25,7 +25,7 @@ fullyTranslated: true -Box AI APIは、現在、BoxのMain Beta Agreementに従い提供されるベータ機能のため、利用可能な機能は変更される可能性があります。Box AI APIは、Enterprise Plusをご利用のすべてのお客様が利用できます。 +Box AI API is available to all Enterprise Plus customers. diff --git a/guides/box-ai/extract-metadata-structured.md b/guides/box-ai/extract-metadata-structured.md index 8062ebf39..fb7fced09 100644 --- a/guides/box-ai/extract-metadata-structured.md +++ b/guides/box-ai/extract-metadata-structured.md @@ -12,10 +12,10 @@ subcategory_id: null is_index: false id: box-ai/extract-metadata-structured type: guide -total_steps: 6 +total_steps: 5 sibling_id: box-ai parent_id: box-ai -next_page_id: box-ai/supported-models +next_page_id: '' previous_page_id: box-ai/extract-metadata source_url: >- https://github.com/box/developer.box.com/blob/main/content/guides/box-ai/extract-metadata-structured.md @@ -25,7 +25,7 @@ fullyTranslated: true -Box AI APIは、現在、BoxのMain Beta Agreementに従い提供されるベータ機能のため、利用可能な機能は変更される可能性があります。Box AI APIは、Enterprise Plusをご利用のすべてのお客様が利用できます。 +Endpoints related to metadata extraction are currently a beta feature offered subject to Box’s Main Beta Agreement, and the available capabilities may change. Box AI API is available to all Enterprise Plus customers. diff --git a/guides/box-ai/extract-metadata.md b/guides/box-ai/extract-metadata.md index 5fc6f0051..9951b6002 100644 --- a/guides/box-ai/extract-metadata.md +++ b/guides/box-ai/extract-metadata.md @@ -10,7 +10,7 @@ subcategory_id: null is_index: false id: box-ai/extract-metadata type: guide -total_steps: 6 +total_steps: 5 sibling_id: box-ai parent_id: box-ai next_page_id: box-ai/extract-metadata-structured @@ -23,7 +23,7 @@ fullyTranslated: true -Box AI APIは、現在、BoxのMain Beta Agreementに従い提供されるベータ機能のため、利用可能な機能は変更される可能性があります。Box AI APIは、Enterprise Plusをご利用のすべてのお客様が利用できます。 +Endpoints related to metadata extraction are currently a beta feature offered subject to Box’s Main Beta Agreement, and the available capabilities may change. Box AI API is available to all Enterprise Plus customers. 
diff --git a/guides/box-ai/generate-text.md b/guides/box-ai/generate-text.md index bcd82e6ee..1c2d45151 100644 --- a/guides/box-ai/generate-text.md +++ b/guides/box-ai/generate-text.md @@ -11,7 +11,7 @@ subcategory_id: null is_index: false id: box-ai/generate-text type: guide -total_steps: 6 +total_steps: 5 sibling_id: box-ai parent_id: box-ai next_page_id: box-ai/extract-metadata @@ -24,7 +24,7 @@ fullyTranslated: true -Box AI APIは、現在、BoxのMain Beta Agreementに従い提供されるベータ機能のため、利用可能な機能は変更される可能性があります。Box AI APIは、Enterprise Plusをご利用のすべてのお客様が利用できます。 +Box AI API is available to all Enterprise Plus customers. diff --git a/guides/box-ai/index.md b/guides/box-ai/index.md index 125436937..24d181b3a 100644 --- a/guides/box-ai/index.md +++ b/guides/box-ai/index.md @@ -14,7 +14,7 @@ subcategory_id: null is_index: true id: box-ai type: guide -total_steps: 6 +total_steps: 5 sibling_id: guides parent_id: guides next_page_id: box-ai/prerequisites @@ -27,7 +27,7 @@ fullyTranslated: true -Box AI APIは、現在、BoxのMain Beta Agreementに従い提供されるベータ機能のため、利用可能な機能は変更される可能性があります。Box AI APIは、Enterprise Plusをご利用のすべてのお客様が利用できます。 +Endpoints related to metadata extraction are currently a beta feature offered subject to Box’s Main Beta Agreement, and the available capabilities may change. Box AI API is available to all Enterprise Plus customers. @@ -68,7 +68,7 @@ Box AI APIを使用すると、テキストをゼロから生成したり、Box ### 構成の上書き -デフォルトのエージェント構成を上書きし、独自のカスタム設定を導入するには、[`POST /2.0/ai/ask`][ask]リクエストおよび[`POST /2.0/ai/text_gen`][text-gen]リクエストで利用可能な`ai_agent`パラメータを使用できます。 +You can use the `ai_agent` parameter available in the Box AI API requests to override the default agent configuration and introduce your own custom settings. 詳細については、[AIエージェントのデフォルト構成][agent-default]を参照してください。 @@ -92,8 +92,8 @@ Box AIは、英語、日本語、フランス語、スペイン語など、多 [\[ユーザーアクティビティ\] レポート][uar]には、ユーザーがBoxで実行している操作の概要が示されます。Box管理者は、このレポートを使用して、所定の期間内にユーザーが行った操作を確認できますが、これにはBox AIに対する操作も含まれます。レポートには、Box管理者がBox AIの詳細を取得するために選択できる以下の操作が用意されています。 -* `AI query`: ユーザーがBox AIに対してクエリを実行し、レスポンスを受け取りました。 -* `Failed AI query`: ユーザーがBox AIに対してクエリを実行しましたが、レスポンスがありませんでした。 +* **AI query**: The user queried Box AI and received a response. +* **Failed AI query**: The user queried Box AI but did not receive a response. [boxainotes]: https://support.box.com/hc/en-us/articles/22198577315347-Box-AI-for-Notes @@ -105,10 +105,6 @@ Box AIは、英語、日本語、フランス語、スペイン語など、多 [agent-default]: g://box-ai/ai-agents/get-agent-default-config -[ask]: e://post_ai_ask#param_ai_agent - -[text-gen]: e://post_ai_text_gen#param_ai_agent - [extract]: e://post_ai_extract [extract-structured]: e://post_ai_extract_structured diff --git a/guides/box-ai/prerequisites.md b/guides/box-ai/prerequisites.md index ad9631c61..a5a56899f 100644 --- a/guides/box-ai/prerequisites.md +++ b/guides/box-ai/prerequisites.md @@ -10,7 +10,7 @@ subcategory_id: null is_index: false id: box-ai/prerequisites type: guide -total_steps: 6 +total_steps: 5 sibling_id: box-ai parent_id: box-ai next_page_id: box-ai/ask-questions @@ -23,7 +23,7 @@ fullyTranslated: true -Box AI APIは、現在、BoxのMain Beta Agreementに従い提供されるベータ機能のため、利用可能な機能は変更される可能性があります。Box AI APIは、Enterprise Plusをご利用のすべてのお客様が利用できます。 +Endpoints related to metadata extraction are currently a beta feature offered subject to Box’s Main Beta Agreement, and the available capabilities may change. Box AI API is available to all Enterprise Plus customers. 
diff --git a/guides/box-ai/supported-models.md b/guides/box-ai/supported-models.md deleted file mode 100644 index cb0659549..000000000 --- a/guides/box-ai/supported-models.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -rank: 12 -related_guides: - - box-ai/ask-questions - - box-ai/generate-text - - box-ai/ai-agents/get-agent-default-config -category_id: box-ai -subcategory_id: null -is_index: false -id: box-ai/supported-models -type: guide -total_steps: 6 -sibling_id: box-ai -parent_id: box-ai -next_page_id: '' -previous_page_id: box-ai/extract-metadata-structured -source_url: >- - https://github.com/box/developer.box.com/blob/main/content/guides/box-ai/supported-models.md -fullyTranslated: true ---- -# サポートされているAIモデル - - - -Box AI APIは、現在、BoxのMain Beta Agreementに従い提供されるベータ機能のため、利用可能な機能は変更される可能性があります。Box AI APIは、Enterprise Plusをご利用のすべてのお客様が利用できます。 - - - -次の操作に使用できる、サポートされているAIモデルのリストを表に示します。 - -* [AIエージェントのデフォルト構成][agent]を取得する -* [`POST 2.0/ai/ask`][ask]エンドポイントおよび[`POST 2.0/ai/text_gen`][text-gen]エンドポイントで使用されるAIエージェントの構成を上書きする - -APIコールで`model`パラメータを使用する際は、表に示されている**API名**を使用します。たとえば、特定のモデルのAIエージェントの構成を取得するには、[model][ai-model]パラメータを使用して、API名`openai__gpt_3_5_turbo_16k`を指定します。プロバイダ名の後に**2つのアンダースコア**を使用していることを確認してください。 - - - -このリストはモデルの提供状況により変更される可能性があります。**プレビュー**の場合、そのモデルを使用することはできるものの、一部の機能へのアクセスが制限される可能性があります。 - - - -| プロバイダ | ファミリ | 提供状況 | API名 | 外部のドキュメント | 機能 | -| --------------- | ------ | ----------- | --------------------------------------- | --------------------------------------------------------------- | ---- | -| Microsoft Azure | GPT | 利用可能 | `azure__openai__gpt_3_5_turbo_16k` | [Azure OpenAI GPT-3.5モデルに関するドキュメント][azure-ai-model-gpt35] | チャット | -| Microsoft Azure | GPT | 利用可能 | `azure__openai__gpt_4o_mini` | [Azure OpenAI GPT-3.5モデルに関するドキュメント][azure-ai-model-gpt40] | チャット | -| Microsoft Azure | GPT | 利用可能 | `azure__openai__text_embedding_ada_002` | [Azure OpenAIの埋め込みモデルに関するドキュメント][azure-ai-embeddings] | 埋め込み | -| GCP Vertex | Gecko | 利用可能 | `google__textembedding_gecko` | [Google Vertex AIの埋め込みモデルに関するドキュメント][vertex-ai-model] | 埋め込み | -| GCP Vertex | Gecko | 利用可能 | `google__textembedding_gecko_002` | [Google Vertex AIの埋め込みモデルに関するドキュメント][vertex-ai-model] | 埋め込み | -| GCP Vertex | Gecko | 利用可能 | `google__textembedding_gecko_003` | [Google Vertex AIの埋め込みモデルに関するドキュメント][vertex-ai-model] | 埋め込み | -| GCP Vertex | Gemini | プレビュー | `google__gemini_1_5_pro_001` | [Google Vertex AIのGeminiモデルに関するドキュメント][vertex-ai-gemini-models] | チャット | -| GCP Vertex | Gemini | プレビュー | `google__gemini_1_5_flash_001` | [Google Vertex AIのGeminiモデルに関するドキュメント][vertex-ai-gemini-models] | チャット | -| GCP Vertex | PaLM | 利用可能 | `google__text_unicorn` | [Googleのテキスト用PaLM 2モデルに関するドキュメント][vertex-text-models] | チャット | -| GCP Vertex | PaLM | 利用可能 | `google__text_bison` | [Googleのテキスト用PaLM 2モデルに関するドキュメント][vertex-text-models] | チャット | -| GCP Vertex | PaLM | 利用可能 | `google__text_bison_32k` | [Googleのテキスト用PaLM 2モデルに関するドキュメント][vertex-text-models] | チャット | -| OpenAI | GPT | ベータ版でのみ利用可能 | `openai__gpt_3_5_turbo_16k` | [OpenAI GPT-3.5モデルに関するドキュメント][openai-gpt-3-5-model] | チャット | -| OpenAI | GPT | ベータ版でのみ利用可能 | `openai__gpt_4_1106_preview` | [OpenAI GPT-4モデルに関するドキュメント][openai-gpt-4-models] | チャット | -| OpenAI | GPT | ベータ版でのみ利用可能 | `openai__gpt_4_turbo_preview` | [OpenAI GPT-4モデルに関するドキュメント][openai-gpt-4-models] | チャット | -| OpenAI | GPT | ベータ版でのみ利用可能 | `openai__gpt_4o_2024_05_13` | [OpenAI GPT-4モデルに関するドキュメント][openai-gpt-4-models] | チャット | -| OpenAI | GPT | ベータ版でのみ利用可能 | `openai__text_embedding_ada_002` | [Azure 
OpenAIの埋め込みモデルに関するドキュメント][openai-embeddings] | 埋め込み | - -[ask]: e://post_ai_ask - -[text-gen]: e://post_ai_text_gen - -[agent]: e://get_ai_agent_default - -[openai-gpt-3-5-model]: https://platform.openai.com/docs/models/gpt-3-5-turbo - -[azure-ai-model-gpt35]: https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models#gpt-35 - -[azure-ai-model-gpt40]: https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models#gpt-4o-and-gpt-4-turbo - -[vertex-ai-model]: https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#models - -[vertex-ai-gemini-models]: https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-models - -[vertex-text-models]: https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/text - -[openai-gpt-4-models]: https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo - -[azure-ai-embeddings]: https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models#embeddings - -[openai-embeddings]: https://platform.openai.com/docs/models/embeddings - -[ai-model]: e://get-ai-agent-default#param-model diff --git a/guides/embed/box-embed.md b/guides/embed/box-embed.md index b81215cda..ba45884fb 100644 --- a/guides/embed/box-embed.md +++ b/guides/embed/box-embed.md @@ -22,20 +22,30 @@ fullyTranslated: true --- # Box Embed -Box EmbedはHTMLベースのフレームワークで、Boxウェブアプリの機能全体を埋め込み、場所を問わずに使えるようにします。Box Embedを使用すると、ファイルのアップロード、検索、コメント付け、共有、タグ付けに加え、Box Editを使用したファイルの編集も可能になります。 +Box Embed is a HTML-based framework that allows embedding the entire Box Web App experience in a custom-made application. Box Embed provides the ability to upload, search, comment, share, tag, and edit files using Box Edit. -## 構成 +## Before you start -ウィジェットを作成するには、共有用のフォルダを設定する必要があるほか、**ビューアー**以上の[権限][5]が必要です。 +To create a widget, you need to: -### ウェブから構成 +* Set an embeddable element, such as a **folder**, **file**, or **Hub** for sharing. +* Have at least **Viewer** [permissions][5]. -BoxウェブアプリからBox Embedのコードを取得するには: +## Using web app -* 任意のフォルダに移動します。 -* そのフォルダの横にある省略記号をクリックします。 -* \[**その他の操作**] に移動します。 -* \[**埋め込みウィジェット**] をクリックします。 +To fetch the Box Embed widget code from the Box web app, perform the following steps. + +### ファイルとフォルダ + +1. Navigate to the chosen file or folder. +2. Click on the ellipsis next to the folder. +3. Go to **More Actions** > **Embed Widget**. + +### Hubs + +1. Navigate to the chosen Hub. +2. Click on the ellipsis menu in the top right corner. +3. Click **Embed Widget**. @@ -43,7 +53,11 @@ BoxウェブアプリからBox Embedのコードを取得するには: -次の画面で、ウィジェットのサイズ、並べ替え、表示を設定できます。また、\[フォルダパスを非表示] や \[ナビゲーションとサイドバーを展開する] をデフォルトでオンにするよう選択することもできます。 +In the next step, configure the parameters of an embeddable element. + +| ファイル | フォルダ | Hubs | +| ------------------- | -------------------------------------------------------------------------------------------- | ------------------------------------------------------------------ | +| Size of the widget. | Size of the widget, sorting of the files in a folder, hiding the navigation path and sidebar | Size of the widget, hiding the parent navigation path and sidebar. | @@ -51,11 +65,11 @@ BoxウェブアプリからBox Embedのコードを取得するには: -埋め込みウィジェットのカスタマイズが完了したら、埋め込みコードをコピーしてサイトまたはウェブアプリケーションに貼り付ける必要があります。 +When you are done customizing the embed widget, copy and paste the embed code into your site or web application. ## プログラムを使用して構成 -Box Embedをさらにカスタマイズする場合は、プログラムを使用してカスタマイズできます。埋め込みのスニペットの形式は次のとおりです。 +If you want to customize Box Embed even further, you can do it programmatically. 
The format for an embed snippet is as follows: ```html