Auto-generated code for main #3029

Open · wants to merge 1 commit into base: main
11 changes: 9 additions & 2 deletions elasticsearch/_async/client/esql.py
@@ -111,7 +111,12 @@ async def async_query(
which has the name of all the columns.
:param filter: Specify a Query DSL query in the filter parameter to filter the
set of documents that an ES|QL query runs on.
:param format: A short version of the Accept header, for example `json` or `yaml`.
:param format: A short version of the Accept header, e.g. json, yaml. `csv`,
`tsv`, and `txt` formats will return results in a tabular format, excluding
other metadata fields from the response. For async requests, nothing will
be returned if the async query doesn't finish within the timeout. The query
ID and running status are available in the `X-Elasticsearch-Async-Id` and
`X-Elasticsearch-Async-Is-Running` HTTP headers of the response, respectively.
:param include_ccs_metadata: When set to `true` and performing a cross-cluster
query, the response will include an extra `_clusters` object with information
about the clusters that participated in the search along with info such as
@@ -539,7 +544,9 @@ async def query(
`all_columns` which has the name of all columns.
:param filter: Specify a Query DSL query in the filter parameter to filter the
set of documents that an ES|QL query runs on.
:param format: A short version of the Accept header, e.g. json, yaml.
:param format: A short version of the Accept header, e.g. json, yaml. `csv`,
`tsv`, and `txt` formats will return results in a tabular format, excluding
other metadata fields from the response.
:param include_ccs_metadata: When set to `true` and performing a cross-cluster
query, the response will include an extra `_clusters` object with information
about the clusters that participated in the search along with info such as
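A minimal sketch of how the expanded `format` documentation above might be exercised with the async client. The cluster URL and `logs-*` index pattern are hypothetical, the `wait_for_completion_timeout` argument is an assumption about the `async_query` signature, and the header names come from the docstring in this diff.

import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # hypothetical cluster

    # csv/tsv/txt return only the tabular data, without the metadata fields
    # carried by the default JSON body.
    rows = await client.esql.query(
        query="FROM logs-* | LIMIT 5",  # hypothetical index pattern
        format="csv",
    )
    print(rows)

    # For async queries that do not finish within the timeout, the body is
    # empty and the query ID / running status are exposed via the response
    # headers named in the docstring above.
    resp = await client.esql.async_query(
        query="FROM logs-* | STATS hits = COUNT(*)",
        format="json",
        wait_for_completion_timeout="1ms",  # assumed parameter name
    )
    print(resp.meta.headers.get("X-Elasticsearch-Async-Id"))
    print(resp.meta.headers.get("X-Elasticsearch-Async-Is-Running"))

    await client.close()


asyncio.run(main())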
107 changes: 107 additions & 0 deletions elasticsearch/_async/client/inference.py
@@ -391,6 +391,7 @@ async def put(
<ul>
<li>AlibabaCloud AI Search (<code>completion</code>, <code>rerank</code>, <code>sparse_embedding</code>, <code>text_embedding</code>)</li>
<li>Amazon Bedrock (<code>completion</code>, <code>text_embedding</code>)</li>
<li>Amazon SageMaker (<code>chat_completion</code>, <code>completion</code>, <code>rerank</code>, <code>sparse_embedding</code>, <code>text_embedding</code>)</li>
<li>Anthropic (<code>completion</code>)</li>
<li>Azure AI Studio (<code>completion</code>, 'rerank', <code>text_embedding</code>)</li>
<li>Azure OpenAI (<code>completion</code>, <code>text_embedding</code>)</li>
@@ -659,6 +660,112 @@ async def put_amazonbedrock(
path_parts=__path_parts,
)

@_rewrite_parameters(
body_fields=(
"service",
"service_settings",
"chunking_settings",
"task_settings",
),
)
async def put_amazonsagemaker(
self,
*,
task_type: t.Union[
str,
t.Literal[
"chat_completion",
"completion",
"rerank",
"sparse_embedding",
"text_embedding",
],
],
amazonsagemaker_inference_id: str,
service: t.Optional[t.Union[str, t.Literal["amazon_sagemaker"]]] = None,
service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
human: t.Optional[bool] = None,
pretty: t.Optional[bool] = None,
task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
body: t.Optional[t.Dict[str, t.Any]] = None,
) -> ObjectApiResponse[t.Any]:
"""
.. raw:: html

<p>Create an Amazon SageMaker inference endpoint.</p>
<p>Create an inference endpoint to perform an inference task with the <code>amazon_sagemaker</code> service.</p>


`<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonsagemaker>`_

:param task_type: The type of the inference task that the model will perform.
:param amazonsagemaker_inference_id: The unique identifier of the inference endpoint.
:param service: The type of service supported for the specified task type. In
this case, `amazon_sagemaker`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `amazon_sagemaker` service and `service_settings.api`
you specified.
:param chunking_settings: The chunking configuration object.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type and `service_settings.api` you specified.
:param timeout: Specifies the amount of time to wait for the inference endpoint
to be created.
"""
if task_type in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'task_type'")
if amazonsagemaker_inference_id in SKIP_IN_PATH:
raise ValueError(
"Empty value passed for parameter 'amazonsagemaker_inference_id'"
)
if service is None and body is None:
raise ValueError("Empty value passed for parameter 'service'")
if service_settings is None and body is None:
raise ValueError("Empty value passed for parameter 'service_settings'")
__path_parts: t.Dict[str, str] = {
"task_type": _quote(task_type),
"amazonsagemaker_inference_id": _quote(amazonsagemaker_inference_id),
}
__path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["amazonsagemaker_inference_id"]}'
__query: t.Dict[str, t.Any] = {}
__body: t.Dict[str, t.Any] = body if body is not None else {}
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if pretty is not None:
__query["pretty"] = pretty
if timeout is not None:
__query["timeout"] = timeout
if not __body:
if service is not None:
__body["service"] = service
if service_settings is not None:
__body["service_settings"] = service_settings
if chunking_settings is not None:
__body["chunking_settings"] = chunking_settings
if task_settings is not None:
__body["task_settings"] = task_settings
if not __body:
__body = None # type: ignore[assignment]
__headers = {"accept": "application/json"}
if __body is not None:
__headers["content-type"] = "application/json"
return await self.perform_request( # type: ignore[return-value]
"PUT",
__path,
params=__query,
headers=__headers,
body=__body,
endpoint_id="inference.put_amazonsagemaker",
path_parts=__path_parts,
)

@_rewrite_parameters(
body_fields=(
"service",
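A hedged usage sketch for the new `put_amazonsagemaker` method. Only the keyword names visible in the signature above are taken from the diff; the `service_settings` keys and identifiers are illustrative placeholders, so the linked API documentation is the authority on the real schema.

import asyncio

from elasticsearch import AsyncElasticsearch

client = AsyncElasticsearch("http://localhost:9200")  # hypothetical cluster


async def create_sagemaker_embeddings() -> None:
    # The method forwards service/service_settings/chunking_settings/
    # task_settings as the request body and `timeout` as a query parameter.
    await client.inference.put_amazonsagemaker(
        task_type="text_embedding",
        amazonsagemaker_inference_id="my-sagemaker-embeddings",  # hypothetical ID
        service="amazon_sagemaker",
        service_settings={
            # Placeholder fields, not a confirmed schema.
            "access_key": "<aws-access-key>",
            "secret_key": "<aws-secret-key>",
            "region": "us-east-1",
            "endpoint_name": "my-sagemaker-endpoint",
            "api": "elastic",  # referenced as `service_settings.api` in the docstring
        },
        timeout="30s",
    )
    await client.close()


asyncio.run(create_sagemaker_embeddings())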
2 changes: 1 addition & 1 deletion elasticsearch/_async/client/sql.py
@@ -283,7 +283,7 @@ async def query(
keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
keep_on_completion: t.Optional[bool] = None,
page_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
params: t.Optional[t.Mapping[str, t.Any]] = None,
params: t.Optional[t.Sequence[t.Any]] = None,
pretty: t.Optional[bool] = None,
query: t.Optional[str] = None,
request_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
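The `params` annotation on `sql.query` is narrowed from a mapping to a sequence, which matches the positional `?` placeholders of the SQL search API. A minimal sketch, assuming a hypothetical `library` index:

from elasticsearch import AsyncElasticsearch

client = AsyncElasticsearch("http://localhost:9200")  # hypothetical cluster


async def search_library():
    # Values are bound to the `?` placeholders in order, so a list rather
    # than a dict is what the new annotation expects.
    return await client.sql.query(
        query=(
            "SELECT author, name FROM library "
            "WHERE page_count > ? AND release_date > ?"
        ),
        params=[500, "2000-01-01"],
    )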
11 changes: 9 additions & 2 deletions elasticsearch/_sync/client/esql.py
@@ -111,7 +111,12 @@ def async_query(
which has the name of all the columns.
:param filter: Specify a Query DSL query in the filter parameter to filter the
set of documents that an ES|QL query runs on.
:param format: A short version of the Accept header, for example `json` or `yaml`.
:param format: A short version of the Accept header, e.g. json, yaml. `csv`,
`tsv`, and `txt` formats will return results in a tabular format, excluding
other metadata fields from the response. For async requests, nothing will
be returned if the async query doesn't finish within the timeout. The query
ID and running status are available in the `X-Elasticsearch-Async-Id` and
`X-Elasticsearch-Async-Is-Running` HTTP headers of the response, respectively.
:param include_ccs_metadata: When set to `true` and performing a cross-cluster
query, the response will include an extra `_clusters` object with information
about the clusters that participated in the search along with info such as
@@ -539,7 +544,9 @@ def query(
`all_columns` which has the name of all columns.
:param filter: Specify a Query DSL query in the filter parameter to filter the
set of documents that an ES|QL query runs on.
:param format: A short version of the Accept header, e.g. json, yaml.
:param format: A short version of the Accept header, e.g. json, yaml. `csv`,
`tsv`, and `txt` formats will return results in a tabular format, excluding
other metadata fields from the response.
:param include_ccs_metadata: When set to `true` and performing a cross-cluster
query, the response will include an extra `_clusters` object with information
about the clusters that participated in the search along with info such as
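The synchronous client mirrors the async `format` change above; a short sketch contrasting the default JSON body with the metadata-free `txt` output, again using a hypothetical cluster and index pattern:

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # hypothetical cluster

# Default JSON body: columns/values plus metadata fields.
as_json = client.esql.query(query="FROM logs-* | LIMIT 5")

# txt (like csv/tsv) returns only the tabular data described in the docstring.
as_text = client.esql.query(query="FROM logs-* | LIMIT 5", format="txt")
print(as_text)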
107 changes: 107 additions & 0 deletions elasticsearch/_sync/client/inference.py
@@ -391,6 +391,7 @@ def put(
<ul>
<li>AlibabaCloud AI Search (<code>completion</code>, <code>rerank</code>, <code>sparse_embedding</code>, <code>text_embedding</code>)</li>
<li>Amazon Bedrock (<code>completion</code>, <code>text_embedding</code>)</li>
<li>Amazon SageMaker (<code>chat_completion</code>, <code>completion</code>, <code>rerank</code>, <code>sparse_embedding</code>, <code>text_embedding</code>)</li>
<li>Anthropic (<code>completion</code>)</li>
<li>Azure AI Studio (<code>completion</code>, 'rerank', <code>text_embedding</code>)</li>
<li>Azure OpenAI (<code>completion</code>, <code>text_embedding</code>)</li>
@@ -659,6 +660,112 @@ def put_amazonbedrock(
path_parts=__path_parts,
)

@_rewrite_parameters(
body_fields=(
"service",
"service_settings",
"chunking_settings",
"task_settings",
),
)
def put_amazonsagemaker(
self,
*,
task_type: t.Union[
str,
t.Literal[
"chat_completion",
"completion",
"rerank",
"sparse_embedding",
"text_embedding",
],
],
amazonsagemaker_inference_id: str,
service: t.Optional[t.Union[str, t.Literal["amazon_sagemaker"]]] = None,
service_settings: t.Optional[t.Mapping[str, t.Any]] = None,
chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None,
error_trace: t.Optional[bool] = None,
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
human: t.Optional[bool] = None,
pretty: t.Optional[bool] = None,
task_settings: t.Optional[t.Mapping[str, t.Any]] = None,
timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
body: t.Optional[t.Dict[str, t.Any]] = None,
) -> ObjectApiResponse[t.Any]:
"""
.. raw:: html

<p>Create an Amazon SageMaker inference endpoint.</p>
<p>Create an inference endpoint to perform an inference task with the <code>amazon_sagemaker</code> service.</p>


`<https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonsagemaker>`_

:param task_type: The type of the inference task that the model will perform.
:param amazonsagemaker_inference_id: The unique identifier of the inference endpoint.
:param service: The type of service supported for the specified task type. In
this case, `amazon_sagemaker`.
:param service_settings: Settings used to install the inference model. These
settings are specific to the `amazon_sagemaker` service and `service_settings.api`
you specified.
:param chunking_settings: The chunking configuration object.
:param task_settings: Settings to configure the inference task. These settings
are specific to the task type and `service_settings.api` you specified.
:param timeout: Specifies the amount of time to wait for the inference endpoint
to be created.
"""
if task_type in SKIP_IN_PATH:
raise ValueError("Empty value passed for parameter 'task_type'")
if amazonsagemaker_inference_id in SKIP_IN_PATH:
raise ValueError(
"Empty value passed for parameter 'amazonsagemaker_inference_id'"
)
if service is None and body is None:
raise ValueError("Empty value passed for parameter 'service'")
if service_settings is None and body is None:
raise ValueError("Empty value passed for parameter 'service_settings'")
__path_parts: t.Dict[str, str] = {
"task_type": _quote(task_type),
"amazonsagemaker_inference_id": _quote(amazonsagemaker_inference_id),
}
__path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["amazonsagemaker_inference_id"]}'
__query: t.Dict[str, t.Any] = {}
__body: t.Dict[str, t.Any] = body if body is not None else {}
if error_trace is not None:
__query["error_trace"] = error_trace
if filter_path is not None:
__query["filter_path"] = filter_path
if human is not None:
__query["human"] = human
if pretty is not None:
__query["pretty"] = pretty
if timeout is not None:
__query["timeout"] = timeout
if not __body:
if service is not None:
__body["service"] = service
if service_settings is not None:
__body["service_settings"] = service_settings
if chunking_settings is not None:
__body["chunking_settings"] = chunking_settings
if task_settings is not None:
__body["task_settings"] = task_settings
if not __body:
__body = None # type: ignore[assignment]
__headers = {"accept": "application/json"}
if __body is not None:
__headers["content-type"] = "application/json"
return self.perform_request( # type: ignore[return-value]
"PUT",
__path,
params=__query,
headers=__headers,
body=__body,
endpoint_id="inference.put_amazonsagemaker",
path_parts=__path_parts,
)

@_rewrite_parameters(
body_fields=(
"service",
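The same method is added to the synchronous client; a brief sketch using the `chat_completion` task type, with the same caveat that the `service_settings` keys are placeholders rather than a confirmed schema:

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # hypothetical cluster

client.inference.put_amazonsagemaker(
    task_type="chat_completion",
    amazonsagemaker_inference_id="my-sagemaker-chat",  # hypothetical ID
    service="amazon_sagemaker",
    service_settings={
        # Placeholder fields only.
        "access_key": "<aws-access-key>",
        "secret_key": "<aws-secret-key>",
        "region": "us-east-1",
        "endpoint_name": "my-chat-endpoint",
    },
)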
2 changes: 1 addition & 1 deletion elasticsearch/_sync/client/sql.py
@@ -283,7 +283,7 @@ def query(
keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
keep_on_completion: t.Optional[bool] = None,
page_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
params: t.Optional[t.Mapping[str, t.Any]] = None,
params: t.Optional[t.Sequence[t.Any]] = None,
pretty: t.Optional[bool] = None,
query: t.Optional[str] = None,
request_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
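The synchronous `sql.query` gets the same annotation change; under the new `Sequence` type a positional list is what a type checker accepts for `params`, as in this small sketch against a hypothetical `library` index:

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # hypothetical cluster

resp = client.sql.query(
    query="SELECT name FROM library WHERE release_date > ?",
    params=["2010-01-01"],  # a list, matching the new Sequence annotation
)
print(resp["rows"])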