diff --git a/VERSION b/VERSION index 660f3a232aa..94dc9891dd4 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.11.442 \ No newline at end of file +1.11.443 \ No newline at end of file diff --git a/generated/src/aws-cpp-sdk-batch/include/aws/batch/model/LaunchTemplateSpecification.h b/generated/src/aws-cpp-sdk-batch/include/aws/batch/model/LaunchTemplateSpecification.h index 65959c119b7..36a6ac0784f 100644 --- a/generated/src/aws-cpp-sdk-batch/include/aws/batch/model/LaunchTemplateSpecification.h +++ b/generated/src/aws-cpp-sdk-batch/include/aws/batch/model/LaunchTemplateSpecification.h @@ -6,6 +6,8 @@ #pragma once #include #include +#include +#include #include namespace Aws @@ -74,15 +76,15 @@ namespace Model ///@{ /** - *

The version number of the launch template, $Latest, or - * $Default.

If the value is $Latest, the latest - * version of the launch template is used. If the value is $Default, - * the default version of the launch template is used.

If the + *

The version number of the launch template, $Default, or + * $Latest.

If the value is $Default, the default + * version of the launch template is used. If the value is $Latest, + * the latest version of the launch template is used.

If the * AMI ID that's used in a compute environment is from the launch template, the AMI * isn't changed when the compute environment is updated. It's only changed if the * updateToLatestImageVersion parameter for the compute environment is * set to true. During an infrastructure update, if either - * $Latest or $Default is specified, Batch re-evaluates + * $Default or $Latest is specified, Batch re-evaluates * the launch template version, and it might use a different version of the launch * template. This is the case even if the launch template isn't specified in the * update. When updating a compute environment, changing the launch template @@ -90,7 +92,7 @@ namespace Model * information, see Updating * compute environments in the Batch User Guide.

- *

Default: $Default.

+ *

Default: $Default

Latest: $Latest

*/ inline const Aws::String& GetVersion() const{ return m_version; } inline bool VersionHasBeenSet() const { return m_versionHasBeenSet; } @@ -101,6 +103,28 @@ namespace Model inline LaunchTemplateSpecification& WithVersion(Aws::String&& value) { SetVersion(std::move(value)); return *this;} inline LaunchTemplateSpecification& WithVersion(const char* value) { SetVersion(value); return *this;} ///@} + + ///@{ + /** + *

A launch template to use in place of the default launch template. You must + * specify either the launch template ID or launch template name in the request, + * but not both.

You can specify up to ten (10) launch template overrides + * that are associated to unique instance types or families for each compute + * environment.

To unset all override templates for a compute + * environment, you can pass an empty array to the UpdateComputeEnvironment.overrides + * parameter, or not include the overrides parameter when submitting + * the UpdateComputeEnvironment API operation.

+ */ + inline const Aws::Vector& GetOverrides() const{ return m_overrides; } + inline bool OverridesHasBeenSet() const { return m_overridesHasBeenSet; } + inline void SetOverrides(const Aws::Vector& value) { m_overridesHasBeenSet = true; m_overrides = value; } + inline void SetOverrides(Aws::Vector&& value) { m_overridesHasBeenSet = true; m_overrides = std::move(value); } + inline LaunchTemplateSpecification& WithOverrides(const Aws::Vector& value) { SetOverrides(value); return *this;} + inline LaunchTemplateSpecification& WithOverrides(Aws::Vector&& value) { SetOverrides(std::move(value)); return *this;} + inline LaunchTemplateSpecification& AddOverrides(const LaunchTemplateSpecificationOverride& value) { m_overridesHasBeenSet = true; m_overrides.push_back(value); return *this; } + inline LaunchTemplateSpecification& AddOverrides(LaunchTemplateSpecificationOverride&& value) { m_overridesHasBeenSet = true; m_overrides.push_back(std::move(value)); return *this; } + ///@} private: Aws::String m_launchTemplateId; @@ -111,6 +135,9 @@ namespace Model Aws::String m_version; bool m_versionHasBeenSet = false; + + Aws::Vector m_overrides; + bool m_overridesHasBeenSet = false; }; } // namespace Model diff --git a/generated/src/aws-cpp-sdk-batch/include/aws/batch/model/LaunchTemplateSpecificationOverride.h b/generated/src/aws-cpp-sdk-batch/include/aws/batch/model/LaunchTemplateSpecificationOverride.h new file mode 100644 index 00000000000..f3aa836e489 --- /dev/null +++ b/generated/src/aws-cpp-sdk-batch/include/aws/batch/model/LaunchTemplateSpecificationOverride.h @@ -0,0 +1,165 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include +#include +#include +#include + +namespace Aws +{ +namespace Utils +{ +namespace Json +{ + class JsonValue; + class JsonView; +} // namespace Json +} // namespace Utils +namespace Batch +{ +namespace Model +{ + + /** + *

An object that represents a launch template to use in place of the default + * launch template. You must specify either the launch template ID or launch + * template name in the request, but not both.

If security groups are + * specified using both the securityGroupIds parameter of + * CreateComputeEnvironment and the launch template, the values in the + * securityGroupIds parameter of CreateComputeEnvironment + * will be used.

You can define up to ten (10) overrides for each compute + * environment.

This object isn't applicable to jobs that are running + * on Fargate resources.

To unset all override templates for + * a compute environment, you can pass an empty array to the UpdateComputeEnvironment.overrides + * parameter, or not include the overrides parameter when submitting + * the UpdateComputeEnvironment API operation.

See + * Also:

AWS + * API Reference

+ */ + class LaunchTemplateSpecificationOverride + { + public: + AWS_BATCH_API LaunchTemplateSpecificationOverride(); + AWS_BATCH_API LaunchTemplateSpecificationOverride(Aws::Utils::Json::JsonView jsonValue); + AWS_BATCH_API LaunchTemplateSpecificationOverride& operator=(Aws::Utils::Json::JsonView jsonValue); + AWS_BATCH_API Aws::Utils::Json::JsonValue Jsonize() const; + + + ///@{ + /** + *

The ID of the launch template.

Note: If you specify the + * launchTemplateId you can't specify the + * launchTemplateName as well.

+ */ + inline const Aws::String& GetLaunchTemplateId() const{ return m_launchTemplateId; } + inline bool LaunchTemplateIdHasBeenSet() const { return m_launchTemplateIdHasBeenSet; } + inline void SetLaunchTemplateId(const Aws::String& value) { m_launchTemplateIdHasBeenSet = true; m_launchTemplateId = value; } + inline void SetLaunchTemplateId(Aws::String&& value) { m_launchTemplateIdHasBeenSet = true; m_launchTemplateId = std::move(value); } + inline void SetLaunchTemplateId(const char* value) { m_launchTemplateIdHasBeenSet = true; m_launchTemplateId.assign(value); } + inline LaunchTemplateSpecificationOverride& WithLaunchTemplateId(const Aws::String& value) { SetLaunchTemplateId(value); return *this;} + inline LaunchTemplateSpecificationOverride& WithLaunchTemplateId(Aws::String&& value) { SetLaunchTemplateId(std::move(value)); return *this;} + inline LaunchTemplateSpecificationOverride& WithLaunchTemplateId(const char* value) { SetLaunchTemplateId(value); return *this;} + ///@} + + ///@{ + /** + *

The name of the launch template.

Note: If you specify the + * launchTemplateName you can't specify the + * launchTemplateId as well.

+ */ + inline const Aws::String& GetLaunchTemplateName() const{ return m_launchTemplateName; } + inline bool LaunchTemplateNameHasBeenSet() const { return m_launchTemplateNameHasBeenSet; } + inline void SetLaunchTemplateName(const Aws::String& value) { m_launchTemplateNameHasBeenSet = true; m_launchTemplateName = value; } + inline void SetLaunchTemplateName(Aws::String&& value) { m_launchTemplateNameHasBeenSet = true; m_launchTemplateName = std::move(value); } + inline void SetLaunchTemplateName(const char* value) { m_launchTemplateNameHasBeenSet = true; m_launchTemplateName.assign(value); } + inline LaunchTemplateSpecificationOverride& WithLaunchTemplateName(const Aws::String& value) { SetLaunchTemplateName(value); return *this;} + inline LaunchTemplateSpecificationOverride& WithLaunchTemplateName(Aws::String&& value) { SetLaunchTemplateName(std::move(value)); return *this;} + inline LaunchTemplateSpecificationOverride& WithLaunchTemplateName(const char* value) { SetLaunchTemplateName(value); return *this;} + ///@} + + ///@{ + /** + *

The version number of the launch template, $Default, or + * $Latest.

If the value is $Default, the default + * version of the launch template is used. If the value is $Latest, + * the latest version of the launch template is used.

If the + * AMI ID that's used in a compute environment is from the launch template, the AMI + * isn't changed when the compute environment is updated. It's only changed if the + * updateToLatestImageVersion parameter for the compute environment is + * set to true. During an infrastructure update, if either + * $Default or $Latest is specified, Batch re-evaluates + * the launch template version, and it might use a different version of the launch + * template. This is the case even if the launch template isn't specified in the + * update. When updating a compute environment, changing the launch template + * requires an infrastructure update of the compute environment. For more + * information, see Updating + * compute environments in the Batch User Guide.

+ *

Default: $Default

Latest: $Latest

+ */ + inline const Aws::String& GetVersion() const{ return m_version; } + inline bool VersionHasBeenSet() const { return m_versionHasBeenSet; } + inline void SetVersion(const Aws::String& value) { m_versionHasBeenSet = true; m_version = value; } + inline void SetVersion(Aws::String&& value) { m_versionHasBeenSet = true; m_version = std::move(value); } + inline void SetVersion(const char* value) { m_versionHasBeenSet = true; m_version.assign(value); } + inline LaunchTemplateSpecificationOverride& WithVersion(const Aws::String& value) { SetVersion(value); return *this;} + inline LaunchTemplateSpecificationOverride& WithVersion(Aws::String&& value) { SetVersion(std::move(value)); return *this;} + inline LaunchTemplateSpecificationOverride& WithVersion(const char* value) { SetVersion(value); return *this;} + ///@} + + ///@{ + /** + *

The instance type or family that this override launch template should be + * applied to.

This parameter is required when defining a launch template + * override.

Information included in this parameter must meet the following + * requirements:

  • Must be a valid Amazon EC2 instance type or + * family.

  • optimal isn't allowed.

  • + *

    targetInstanceTypes can target only instance types and families + * that are included within the + * ComputeResource.instanceTypes set. + * targetInstanceTypes doesn't need to include all of the instances + * from the instanceType set, but at least a subset. For example, if + * ComputeResource.instanceTypes includes [m5, g5], + * targetInstanceTypes can include [m5.2xlarge] and + * [m5.large] but not [c5.large].

  • + * targetInstanceTypes included within the same launch template + * override or across launch template overrides can't overlap for the same compute + * environment. For example, you can't define one launch template override to + * target an instance family and another define an instance type within this same + * family.

+ */ + inline const Aws::Vector& GetTargetInstanceTypes() const{ return m_targetInstanceTypes; } + inline bool TargetInstanceTypesHasBeenSet() const { return m_targetInstanceTypesHasBeenSet; } + inline void SetTargetInstanceTypes(const Aws::Vector& value) { m_targetInstanceTypesHasBeenSet = true; m_targetInstanceTypes = value; } + inline void SetTargetInstanceTypes(Aws::Vector&& value) { m_targetInstanceTypesHasBeenSet = true; m_targetInstanceTypes = std::move(value); } + inline LaunchTemplateSpecificationOverride& WithTargetInstanceTypes(const Aws::Vector& value) { SetTargetInstanceTypes(value); return *this;} + inline LaunchTemplateSpecificationOverride& WithTargetInstanceTypes(Aws::Vector&& value) { SetTargetInstanceTypes(std::move(value)); return *this;} + inline LaunchTemplateSpecificationOverride& AddTargetInstanceTypes(const Aws::String& value) { m_targetInstanceTypesHasBeenSet = true; m_targetInstanceTypes.push_back(value); return *this; } + inline LaunchTemplateSpecificationOverride& AddTargetInstanceTypes(Aws::String&& value) { m_targetInstanceTypesHasBeenSet = true; m_targetInstanceTypes.push_back(std::move(value)); return *this; } + inline LaunchTemplateSpecificationOverride& AddTargetInstanceTypes(const char* value) { m_targetInstanceTypesHasBeenSet = true; m_targetInstanceTypes.push_back(value); return *this; } + ///@} + private: + + Aws::String m_launchTemplateId; + bool m_launchTemplateIdHasBeenSet = false; + + Aws::String m_launchTemplateName; + bool m_launchTemplateNameHasBeenSet = false; + + Aws::String m_version; + bool m_versionHasBeenSet = false; + + Aws::Vector m_targetInstanceTypes; + bool m_targetInstanceTypesHasBeenSet = false; + }; + +} // namespace Model +} // namespace Batch +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-batch/source/model/LaunchTemplateSpecification.cpp b/generated/src/aws-cpp-sdk-batch/source/model/LaunchTemplateSpecification.cpp index dc9219a6fb8..db04f4f904c 100644 --- 
a/generated/src/aws-cpp-sdk-batch/source/model/LaunchTemplateSpecification.cpp +++ b/generated/src/aws-cpp-sdk-batch/source/model/LaunchTemplateSpecification.cpp @@ -21,7 +21,8 @@ namespace Model LaunchTemplateSpecification::LaunchTemplateSpecification() : m_launchTemplateIdHasBeenSet(false), m_launchTemplateNameHasBeenSet(false), - m_versionHasBeenSet(false) + m_versionHasBeenSet(false), + m_overridesHasBeenSet(false) { } @@ -54,6 +55,16 @@ LaunchTemplateSpecification& LaunchTemplateSpecification::operator =(JsonView js m_versionHasBeenSet = true; } + if(jsonValue.ValueExists("overrides")) + { + Aws::Utils::Array overridesJsonList = jsonValue.GetArray("overrides"); + for(unsigned overridesIndex = 0; overridesIndex < overridesJsonList.GetLength(); ++overridesIndex) + { + m_overrides.push_back(overridesJsonList[overridesIndex].AsObject()); + } + m_overridesHasBeenSet = true; + } + return *this; } @@ -79,6 +90,17 @@ JsonValue LaunchTemplateSpecification::Jsonize() const } + if(m_overridesHasBeenSet) + { + Aws::Utils::Array overridesJsonList(m_overrides.size()); + for(unsigned overridesIndex = 0; overridesIndex < overridesJsonList.GetLength(); ++overridesIndex) + { + overridesJsonList[overridesIndex].AsObject(m_overrides[overridesIndex].Jsonize()); + } + payload.WithArray("overrides", std::move(overridesJsonList)); + + } + return payload; } diff --git a/generated/src/aws-cpp-sdk-batch/source/model/LaunchTemplateSpecificationOverride.cpp b/generated/src/aws-cpp-sdk-batch/source/model/LaunchTemplateSpecificationOverride.cpp new file mode 100644 index 00000000000..0e0b21f470e --- /dev/null +++ b/generated/src/aws-cpp-sdk-batch/source/model/LaunchTemplateSpecificationOverride.cpp @@ -0,0 +1,109 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +#include +#include + +#include + +using namespace Aws::Utils::Json; +using namespace Aws::Utils; + +namespace Aws +{ +namespace Batch +{ +namespace Model +{ + +LaunchTemplateSpecificationOverride::LaunchTemplateSpecificationOverride() : + m_launchTemplateIdHasBeenSet(false), + m_launchTemplateNameHasBeenSet(false), + m_versionHasBeenSet(false), + m_targetInstanceTypesHasBeenSet(false) +{ +} + +LaunchTemplateSpecificationOverride::LaunchTemplateSpecificationOverride(JsonView jsonValue) + : LaunchTemplateSpecificationOverride() +{ + *this = jsonValue; +} + +LaunchTemplateSpecificationOverride& LaunchTemplateSpecificationOverride::operator =(JsonView jsonValue) +{ + if(jsonValue.ValueExists("launchTemplateId")) + { + m_launchTemplateId = jsonValue.GetString("launchTemplateId"); + + m_launchTemplateIdHasBeenSet = true; + } + + if(jsonValue.ValueExists("launchTemplateName")) + { + m_launchTemplateName = jsonValue.GetString("launchTemplateName"); + + m_launchTemplateNameHasBeenSet = true; + } + + if(jsonValue.ValueExists("version")) + { + m_version = jsonValue.GetString("version"); + + m_versionHasBeenSet = true; + } + + if(jsonValue.ValueExists("targetInstanceTypes")) + { + Aws::Utils::Array targetInstanceTypesJsonList = jsonValue.GetArray("targetInstanceTypes"); + for(unsigned targetInstanceTypesIndex = 0; targetInstanceTypesIndex < targetInstanceTypesJsonList.GetLength(); ++targetInstanceTypesIndex) + { + m_targetInstanceTypes.push_back(targetInstanceTypesJsonList[targetInstanceTypesIndex].AsString()); + } + m_targetInstanceTypesHasBeenSet = true; + } + + return *this; +} + +JsonValue LaunchTemplateSpecificationOverride::Jsonize() const +{ + JsonValue payload; + + if(m_launchTemplateIdHasBeenSet) + { + payload.WithString("launchTemplateId", m_launchTemplateId); + + } + + if(m_launchTemplateNameHasBeenSet) + { + payload.WithString("launchTemplateName", m_launchTemplateName); + + } + + if(m_versionHasBeenSet) + { + payload.WithString("version", m_version); + + } 
+ + if(m_targetInstanceTypesHasBeenSet) + { + Aws::Utils::Array targetInstanceTypesJsonList(m_targetInstanceTypes.size()); + for(unsigned targetInstanceTypesIndex = 0; targetInstanceTypesIndex < targetInstanceTypesJsonList.GetLength(); ++targetInstanceTypesIndex) + { + targetInstanceTypesJsonList[targetInstanceTypesIndex].AsString(m_targetInstanceTypes[targetInstanceTypesIndex]); + } + payload.WithArray("targetInstanceTypes", std::move(targetInstanceTypesJsonList)); + + } + + return payload; +} + +} // namespace Model +} // namespace Batch +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowCompletionEvent.h b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowCompletionEvent.h index 8f83c61b888..0f1d98d3613 100644 --- a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowCompletionEvent.h +++ b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowCompletionEvent.h @@ -24,10 +24,8 @@ namespace Model { /** - *

Contains information about why a flow completed.

This data type is - * used in the following API operations:

See Also:

Contains information about why a flow completed.

See Also:

+ *
AWS * API Reference

*/ diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowInput.h b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowInput.h index aac57ad65a7..3e8d72bc4c0 100644 --- a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowInput.h +++ b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowInput.h @@ -26,10 +26,7 @@ namespace Model /** *

Contains information about an input into the prompt flow and where to send - * it.

This data type is used in the following API operations:

See Also:

See Also:

AWS * API Reference

*/ diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowInputContent.h b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowInputContent.h index c6c5ec9ec2a..94942bda13e 100644 --- a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowInputContent.h +++ b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowInputContent.h @@ -24,10 +24,8 @@ namespace Model { /** - *

Contains information about an input into the flow.

This data type is - * used in the following API operations:

See Also:

Contains information about an input into the flow.

See Also:

+ *
AWS * API Reference

*/ diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowOutputContent.h b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowOutputContent.h index 85c4ed445e9..b21c807207a 100644 --- a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowOutputContent.h +++ b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowOutputContent.h @@ -25,10 +25,7 @@ namespace Model /** *

Contains information about the content in an output from prompt flow - * invocation.

This data type is used in the following API operations:

- *

See Also:

See Also:

AWS * API Reference

*/ diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowOutputEvent.h b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowOutputEvent.h index bed86141e6d..67491a0bbd8 100644 --- a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowOutputEvent.h +++ b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowOutputEvent.h @@ -26,10 +26,8 @@ namespace Model { /** - *

Contains information about an output from prompt flow invoction.

This - * data type is used in the following API operations:

See Also:

Contains information about an output from prompt flow + * invocation.

See Also:

AWS * API Reference

*/ diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowResponseStream.h b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowResponseStream.h index d397399dd07..53c8a5833b0 100644 --- a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowResponseStream.h +++ b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowResponseStream.h @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -34,10 +35,7 @@ namespace Model { /** - *

The output of the flow.

This data type is used in the following API - * operations:

See Also:

The output of the flow.

See Also:

AWS * API Reference

*/ @@ -126,6 +124,19 @@ namespace Model inline FlowResponseStream& WithFlowOutputEvent(FlowOutputEvent&& value) { SetFlowOutputEvent(std::move(value)); return *this;} ///@} + ///@{ + /** + *

Contains information about a trace, which tracks an input or output for a + * node in the flow.

+ */ + inline const FlowTraceEvent& GetFlowTraceEvent() const{ return m_flowTraceEvent; } + inline bool FlowTraceEventHasBeenSet() const { return m_flowTraceEventHasBeenSet; } + inline void SetFlowTraceEvent(const FlowTraceEvent& value) { m_flowTraceEventHasBeenSet = true; m_flowTraceEvent = value; } + inline void SetFlowTraceEvent(FlowTraceEvent&& value) { m_flowTraceEventHasBeenSet = true; m_flowTraceEvent = std::move(value); } + inline FlowResponseStream& WithFlowTraceEvent(const FlowTraceEvent& value) { SetFlowTraceEvent(value); return *this;} + inline FlowResponseStream& WithFlowTraceEvent(FlowTraceEvent&& value) { SetFlowTraceEvent(std::move(value)); return *this;} + ///@} + ///@{ /** *

An internal server error occurred. Retry your request.

@@ -208,6 +219,9 @@ namespace Model FlowOutputEvent m_flowOutputEvent; bool m_flowOutputEventHasBeenSet = false; + FlowTraceEvent m_flowTraceEvent; + bool m_flowTraceEventHasBeenSet = false; + InternalServerException m_internalServerException; bool m_internalServerExceptionHasBeenSet = false; diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowTrace.h b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowTrace.h new file mode 100644 index 00000000000..2fdaa762ad4 --- /dev/null +++ b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowTrace.h @@ -0,0 +1,95 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include +#include +#include +#include +#include + +namespace Aws +{ +namespace Utils +{ +namespace Json +{ + class JsonValue; + class JsonView; +} // namespace Json +} // namespace Utils +namespace BedrockAgentRuntime +{ +namespace Model +{ + + /** + *

Contains information about an input or output for a node in the flow. For + * more information, see Track + * each step in your prompt flow by viewing its trace in Amazon + * Bedrock.

See Also:

AWS + * API Reference

+ */ + class FlowTrace + { + public: + AWS_BEDROCKAGENTRUNTIME_API FlowTrace(); + AWS_BEDROCKAGENTRUNTIME_API FlowTrace(Aws::Utils::Json::JsonView jsonValue); + AWS_BEDROCKAGENTRUNTIME_API FlowTrace& operator=(Aws::Utils::Json::JsonView jsonValue); + AWS_BEDROCKAGENTRUNTIME_API Aws::Utils::Json::JsonValue Jsonize() const; + + + ///@{ + /** + *

Contains information about an output from a condition node.

+ */ + inline const FlowTraceConditionNodeResultEvent& GetConditionNodeResultTrace() const{ return m_conditionNodeResultTrace; } + inline bool ConditionNodeResultTraceHasBeenSet() const { return m_conditionNodeResultTraceHasBeenSet; } + inline void SetConditionNodeResultTrace(const FlowTraceConditionNodeResultEvent& value) { m_conditionNodeResultTraceHasBeenSet = true; m_conditionNodeResultTrace = value; } + inline void SetConditionNodeResultTrace(FlowTraceConditionNodeResultEvent&& value) { m_conditionNodeResultTraceHasBeenSet = true; m_conditionNodeResultTrace = std::move(value); } + inline FlowTrace& WithConditionNodeResultTrace(const FlowTraceConditionNodeResultEvent& value) { SetConditionNodeResultTrace(value); return *this;} + inline FlowTrace& WithConditionNodeResultTrace(FlowTraceConditionNodeResultEvent&& value) { SetConditionNodeResultTrace(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *

Contains information about the input into a node.

+ */ + inline const FlowTraceNodeInputEvent& GetNodeInputTrace() const{ return m_nodeInputTrace; } + inline bool NodeInputTraceHasBeenSet() const { return m_nodeInputTraceHasBeenSet; } + inline void SetNodeInputTrace(const FlowTraceNodeInputEvent& value) { m_nodeInputTraceHasBeenSet = true; m_nodeInputTrace = value; } + inline void SetNodeInputTrace(FlowTraceNodeInputEvent&& value) { m_nodeInputTraceHasBeenSet = true; m_nodeInputTrace = std::move(value); } + inline FlowTrace& WithNodeInputTrace(const FlowTraceNodeInputEvent& value) { SetNodeInputTrace(value); return *this;} + inline FlowTrace& WithNodeInputTrace(FlowTraceNodeInputEvent&& value) { SetNodeInputTrace(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *

Contains information about the output from a node.

+ */ + inline const FlowTraceNodeOutputEvent& GetNodeOutputTrace() const{ return m_nodeOutputTrace; } + inline bool NodeOutputTraceHasBeenSet() const { return m_nodeOutputTraceHasBeenSet; } + inline void SetNodeOutputTrace(const FlowTraceNodeOutputEvent& value) { m_nodeOutputTraceHasBeenSet = true; m_nodeOutputTrace = value; } + inline void SetNodeOutputTrace(FlowTraceNodeOutputEvent&& value) { m_nodeOutputTraceHasBeenSet = true; m_nodeOutputTrace = std::move(value); } + inline FlowTrace& WithNodeOutputTrace(const FlowTraceNodeOutputEvent& value) { SetNodeOutputTrace(value); return *this;} + inline FlowTrace& WithNodeOutputTrace(FlowTraceNodeOutputEvent&& value) { SetNodeOutputTrace(std::move(value)); return *this;} + ///@} + private: + + FlowTraceConditionNodeResultEvent m_conditionNodeResultTrace; + bool m_conditionNodeResultTraceHasBeenSet = false; + + FlowTraceNodeInputEvent m_nodeInputTrace; + bool m_nodeInputTraceHasBeenSet = false; + + FlowTraceNodeOutputEvent m_nodeOutputTrace; + bool m_nodeOutputTraceHasBeenSet = false; + }; + +} // namespace Model +} // namespace BedrockAgentRuntime +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowTraceCondition.h b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowTraceCondition.h new file mode 100644 index 00000000000..9c6d825c9b5 --- /dev/null +++ b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowTraceCondition.h @@ -0,0 +1,65 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include +#include +#include + +namespace Aws +{ +namespace Utils +{ +namespace Json +{ + class JsonValue; + class JsonView; +} // namespace Json +} // namespace Utils +namespace BedrockAgentRuntime +{ +namespace Model +{ + + /** + *

Contains information about a condition that was satisfied. For more + * information, see Track + * each step in your prompt flow by viewing its trace in Amazon + * Bedrock.

See Also:

AWS + * API Reference

+ */ + class FlowTraceCondition + { + public: + AWS_BEDROCKAGENTRUNTIME_API FlowTraceCondition(); + AWS_BEDROCKAGENTRUNTIME_API FlowTraceCondition(Aws::Utils::Json::JsonView jsonValue); + AWS_BEDROCKAGENTRUNTIME_API FlowTraceCondition& operator=(Aws::Utils::Json::JsonView jsonValue); + AWS_BEDROCKAGENTRUNTIME_API Aws::Utils::Json::JsonValue Jsonize() const; + + + ///@{ + /** + *

The name of the condition.

+ */ + inline const Aws::String& GetConditionName() const{ return m_conditionName; } + inline bool ConditionNameHasBeenSet() const { return m_conditionNameHasBeenSet; } + inline void SetConditionName(const Aws::String& value) { m_conditionNameHasBeenSet = true; m_conditionName = value; } + inline void SetConditionName(Aws::String&& value) { m_conditionNameHasBeenSet = true; m_conditionName = std::move(value); } + inline void SetConditionName(const char* value) { m_conditionNameHasBeenSet = true; m_conditionName.assign(value); } + inline FlowTraceCondition& WithConditionName(const Aws::String& value) { SetConditionName(value); return *this;} + inline FlowTraceCondition& WithConditionName(Aws::String&& value) { SetConditionName(std::move(value)); return *this;} + inline FlowTraceCondition& WithConditionName(const char* value) { SetConditionName(value); return *this;} + ///@} + private: + + Aws::String m_conditionName; + bool m_conditionNameHasBeenSet = false; + }; + +} // namespace Model +} // namespace BedrockAgentRuntime +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowTraceConditionNodeResultEvent.h b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowTraceConditionNodeResultEvent.h new file mode 100644 index 00000000000..2f2b7b20226 --- /dev/null +++ b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowTraceConditionNodeResultEvent.h @@ -0,0 +1,101 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include +#include +#include +#include +#include +#include + +namespace Aws +{ +namespace Utils +{ +namespace Json +{ + class JsonValue; + class JsonView; +} // namespace Json +} // namespace Utils +namespace BedrockAgentRuntime +{ +namespace Model +{ + + /** + *

Contains information about an output from a condition node. For more + * information, see Track + * each step in your prompt flow by viewing its trace in Amazon + * Bedrock.

See Also:

AWS + * API Reference

+ */ + class FlowTraceConditionNodeResultEvent + { + public: + AWS_BEDROCKAGENTRUNTIME_API FlowTraceConditionNodeResultEvent(); + AWS_BEDROCKAGENTRUNTIME_API FlowTraceConditionNodeResultEvent(Aws::Utils::Json::JsonView jsonValue); + AWS_BEDROCKAGENTRUNTIME_API FlowTraceConditionNodeResultEvent& operator=(Aws::Utils::Json::JsonView jsonValue); + AWS_BEDROCKAGENTRUNTIME_API Aws::Utils::Json::JsonValue Jsonize() const; + + + ///@{ + /** + *

The name of the condition node.

+ */ + inline const Aws::String& GetNodeName() const{ return m_nodeName; } + inline bool NodeNameHasBeenSet() const { return m_nodeNameHasBeenSet; } + inline void SetNodeName(const Aws::String& value) { m_nodeNameHasBeenSet = true; m_nodeName = value; } + inline void SetNodeName(Aws::String&& value) { m_nodeNameHasBeenSet = true; m_nodeName = std::move(value); } + inline void SetNodeName(const char* value) { m_nodeNameHasBeenSet = true; m_nodeName.assign(value); } + inline FlowTraceConditionNodeResultEvent& WithNodeName(const Aws::String& value) { SetNodeName(value); return *this;} + inline FlowTraceConditionNodeResultEvent& WithNodeName(Aws::String&& value) { SetNodeName(std::move(value)); return *this;} + inline FlowTraceConditionNodeResultEvent& WithNodeName(const char* value) { SetNodeName(value); return *this;} + ///@} + + ///@{ + /** + *

An array of objects containing information about the conditions that were + * satisfied.

+ */ + inline const Aws::Vector& GetSatisfiedConditions() const{ return m_satisfiedConditions; } + inline bool SatisfiedConditionsHasBeenSet() const { return m_satisfiedConditionsHasBeenSet; } + inline void SetSatisfiedConditions(const Aws::Vector& value) { m_satisfiedConditionsHasBeenSet = true; m_satisfiedConditions = value; } + inline void SetSatisfiedConditions(Aws::Vector&& value) { m_satisfiedConditionsHasBeenSet = true; m_satisfiedConditions = std::move(value); } + inline FlowTraceConditionNodeResultEvent& WithSatisfiedConditions(const Aws::Vector& value) { SetSatisfiedConditions(value); return *this;} + inline FlowTraceConditionNodeResultEvent& WithSatisfiedConditions(Aws::Vector&& value) { SetSatisfiedConditions(std::move(value)); return *this;} + inline FlowTraceConditionNodeResultEvent& AddSatisfiedConditions(const FlowTraceCondition& value) { m_satisfiedConditionsHasBeenSet = true; m_satisfiedConditions.push_back(value); return *this; } + inline FlowTraceConditionNodeResultEvent& AddSatisfiedConditions(FlowTraceCondition&& value) { m_satisfiedConditionsHasBeenSet = true; m_satisfiedConditions.push_back(std::move(value)); return *this; } + ///@} + + ///@{ + /** + *

The date and time that the trace was returned.

+ */ + inline const Aws::Utils::DateTime& GetTimestamp() const{ return m_timestamp; } + inline bool TimestampHasBeenSet() const { return m_timestampHasBeenSet; } + inline void SetTimestamp(const Aws::Utils::DateTime& value) { m_timestampHasBeenSet = true; m_timestamp = value; } + inline void SetTimestamp(Aws::Utils::DateTime&& value) { m_timestampHasBeenSet = true; m_timestamp = std::move(value); } + inline FlowTraceConditionNodeResultEvent& WithTimestamp(const Aws::Utils::DateTime& value) { SetTimestamp(value); return *this;} + inline FlowTraceConditionNodeResultEvent& WithTimestamp(Aws::Utils::DateTime&& value) { SetTimestamp(std::move(value)); return *this;} + ///@} + private: + + Aws::String m_nodeName; + bool m_nodeNameHasBeenSet = false; + + Aws::Vector m_satisfiedConditions; + bool m_satisfiedConditionsHasBeenSet = false; + + Aws::Utils::DateTime m_timestamp; + bool m_timestampHasBeenSet = false; + }; + +} // namespace Model +} // namespace BedrockAgentRuntime +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowTraceEvent.h b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowTraceEvent.h new file mode 100644 index 00000000000..5422ea7d987 --- /dev/null +++ b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowTraceEvent.h @@ -0,0 +1,64 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include +#include +#include + +namespace Aws +{ +namespace Utils +{ +namespace Json +{ + class JsonValue; + class JsonView; +} // namespace Json +} // namespace Utils +namespace BedrockAgentRuntime +{ +namespace Model +{ + + /** + *

Contains information about a trace, which tracks an input or output for a + * node in the flow. For more information, see Track + * each step in your prompt flow by viewing its trace in Amazon + * Bedrock.

See Also:

AWS + * API Reference

+ */ + class FlowTraceEvent + { + public: + AWS_BEDROCKAGENTRUNTIME_API FlowTraceEvent(); + AWS_BEDROCKAGENTRUNTIME_API FlowTraceEvent(Aws::Utils::Json::JsonView jsonValue); + AWS_BEDROCKAGENTRUNTIME_API FlowTraceEvent& operator=(Aws::Utils::Json::JsonView jsonValue); + AWS_BEDROCKAGENTRUNTIME_API Aws::Utils::Json::JsonValue Jsonize() const; + + + ///@{ + /** + *

The trace object containing information about an input or output for a node + * in the flow.

+ */ + inline const FlowTrace& GetTrace() const{ return m_trace; } + inline bool TraceHasBeenSet() const { return m_traceHasBeenSet; } + inline void SetTrace(const FlowTrace& value) { m_traceHasBeenSet = true; m_trace = value; } + inline void SetTrace(FlowTrace&& value) { m_traceHasBeenSet = true; m_trace = std::move(value); } + inline FlowTraceEvent& WithTrace(const FlowTrace& value) { SetTrace(value); return *this;} + inline FlowTraceEvent& WithTrace(FlowTrace&& value) { SetTrace(std::move(value)); return *this;} + ///@} + private: + + FlowTrace m_trace; + bool m_traceHasBeenSet = false; + }; + +} // namespace Model +} // namespace BedrockAgentRuntime +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowTraceNodeInputContent.h b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowTraceNodeInputContent.h new file mode 100644 index 00000000000..e0c103837dd --- /dev/null +++ b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowTraceNodeInputContent.h @@ -0,0 +1,62 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include +#include +#include + +namespace Aws +{ +namespace Utils +{ +namespace Json +{ + class JsonValue; + class JsonView; +} // namespace Json +} // namespace Utils +namespace BedrockAgentRuntime +{ +namespace Model +{ + + /** + *

Contains the content of the node input. For more information, see Track + * each step in your prompt flow by viewing its trace in Amazon + * Bedrock.

See Also:

AWS + * API Reference

+ */ + class FlowTraceNodeInputContent + { + public: + AWS_BEDROCKAGENTRUNTIME_API FlowTraceNodeInputContent(); + AWS_BEDROCKAGENTRUNTIME_API FlowTraceNodeInputContent(Aws::Utils::Json::JsonView jsonValue); + AWS_BEDROCKAGENTRUNTIME_API FlowTraceNodeInputContent& operator=(Aws::Utils::Json::JsonView jsonValue); + AWS_BEDROCKAGENTRUNTIME_API Aws::Utils::Json::JsonValue Jsonize() const; + + + ///@{ + /** + *

The content of the node input.

+ */ + inline Aws::Utils::DocumentView GetDocument() const{ return m_document; } + inline bool DocumentHasBeenSet() const { return m_documentHasBeenSet; } + inline void SetDocument(const Aws::Utils::Document& value) { m_documentHasBeenSet = true; m_document = value; } + inline void SetDocument(Aws::Utils::Document&& value) { m_documentHasBeenSet = true; m_document = std::move(value); } + inline FlowTraceNodeInputContent& WithDocument(const Aws::Utils::Document& value) { SetDocument(value); return *this;} + inline FlowTraceNodeInputContent& WithDocument(Aws::Utils::Document&& value) { SetDocument(std::move(value)); return *this;} + ///@} + private: + + Aws::Utils::Document m_document; + bool m_documentHasBeenSet = false; + }; + +} // namespace Model +} // namespace BedrockAgentRuntime +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowTraceNodeInputEvent.h b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowTraceNodeInputEvent.h new file mode 100644 index 00000000000..1cbe9d640d0 --- /dev/null +++ b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowTraceNodeInputEvent.h @@ -0,0 +1,100 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include +#include +#include +#include +#include +#include + +namespace Aws +{ +namespace Utils +{ +namespace Json +{ + class JsonValue; + class JsonView; +} // namespace Json +} // namespace Utils +namespace BedrockAgentRuntime +{ +namespace Model +{ + + /** + *

Contains information about the input into a node. For more information, see + * Track + * each step in your prompt flow by viewing its trace in Amazon + * Bedrock.

See Also:

AWS + * API Reference

+ */ + class FlowTraceNodeInputEvent + { + public: + AWS_BEDROCKAGENTRUNTIME_API FlowTraceNodeInputEvent(); + AWS_BEDROCKAGENTRUNTIME_API FlowTraceNodeInputEvent(Aws::Utils::Json::JsonView jsonValue); + AWS_BEDROCKAGENTRUNTIME_API FlowTraceNodeInputEvent& operator=(Aws::Utils::Json::JsonView jsonValue); + AWS_BEDROCKAGENTRUNTIME_API Aws::Utils::Json::JsonValue Jsonize() const; + + + ///@{ + /** + *

An array of objects containing information about each field in the input.

+ */ + inline const Aws::Vector& GetFields() const{ return m_fields; } + inline bool FieldsHasBeenSet() const { return m_fieldsHasBeenSet; } + inline void SetFields(const Aws::Vector& value) { m_fieldsHasBeenSet = true; m_fields = value; } + inline void SetFields(Aws::Vector&& value) { m_fieldsHasBeenSet = true; m_fields = std::move(value); } + inline FlowTraceNodeInputEvent& WithFields(const Aws::Vector& value) { SetFields(value); return *this;} + inline FlowTraceNodeInputEvent& WithFields(Aws::Vector&& value) { SetFields(std::move(value)); return *this;} + inline FlowTraceNodeInputEvent& AddFields(const FlowTraceNodeInputField& value) { m_fieldsHasBeenSet = true; m_fields.push_back(value); return *this; } + inline FlowTraceNodeInputEvent& AddFields(FlowTraceNodeInputField&& value) { m_fieldsHasBeenSet = true; m_fields.push_back(std::move(value)); return *this; } + ///@} + + ///@{ + /** + *

The name of the node that received the input.

+ */ + inline const Aws::String& GetNodeName() const{ return m_nodeName; } + inline bool NodeNameHasBeenSet() const { return m_nodeNameHasBeenSet; } + inline void SetNodeName(const Aws::String& value) { m_nodeNameHasBeenSet = true; m_nodeName = value; } + inline void SetNodeName(Aws::String&& value) { m_nodeNameHasBeenSet = true; m_nodeName = std::move(value); } + inline void SetNodeName(const char* value) { m_nodeNameHasBeenSet = true; m_nodeName.assign(value); } + inline FlowTraceNodeInputEvent& WithNodeName(const Aws::String& value) { SetNodeName(value); return *this;} + inline FlowTraceNodeInputEvent& WithNodeName(Aws::String&& value) { SetNodeName(std::move(value)); return *this;} + inline FlowTraceNodeInputEvent& WithNodeName(const char* value) { SetNodeName(value); return *this;} + ///@} + + ///@{ + /** + *

The date and time that the trace was returned.

+ */ + inline const Aws::Utils::DateTime& GetTimestamp() const{ return m_timestamp; } + inline bool TimestampHasBeenSet() const { return m_timestampHasBeenSet; } + inline void SetTimestamp(const Aws::Utils::DateTime& value) { m_timestampHasBeenSet = true; m_timestamp = value; } + inline void SetTimestamp(Aws::Utils::DateTime&& value) { m_timestampHasBeenSet = true; m_timestamp = std::move(value); } + inline FlowTraceNodeInputEvent& WithTimestamp(const Aws::Utils::DateTime& value) { SetTimestamp(value); return *this;} + inline FlowTraceNodeInputEvent& WithTimestamp(Aws::Utils::DateTime&& value) { SetTimestamp(std::move(value)); return *this;} + ///@} + private: + + Aws::Vector m_fields; + bool m_fieldsHasBeenSet = false; + + Aws::String m_nodeName; + bool m_nodeNameHasBeenSet = false; + + Aws::Utils::DateTime m_timestamp; + bool m_timestampHasBeenSet = false; + }; + +} // namespace Model +} // namespace BedrockAgentRuntime +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowTraceNodeInputField.h b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowTraceNodeInputField.h new file mode 100644 index 00000000000..5464d96878b --- /dev/null +++ b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowTraceNodeInputField.h @@ -0,0 +1,81 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include +#include +#include +#include + +namespace Aws +{ +namespace Utils +{ +namespace Json +{ + class JsonValue; + class JsonView; +} // namespace Json +} // namespace Utils +namespace BedrockAgentRuntime +{ +namespace Model +{ + + /** + *

Contains information about a field in the input into a node. For more + * information, see Track + * each step in your prompt flow by viewing its trace in Amazon + * Bedrock.

See Also:

AWS + * API Reference

+ */ + class FlowTraceNodeInputField + { + public: + AWS_BEDROCKAGENTRUNTIME_API FlowTraceNodeInputField(); + AWS_BEDROCKAGENTRUNTIME_API FlowTraceNodeInputField(Aws::Utils::Json::JsonView jsonValue); + AWS_BEDROCKAGENTRUNTIME_API FlowTraceNodeInputField& operator=(Aws::Utils::Json::JsonView jsonValue); + AWS_BEDROCKAGENTRUNTIME_API Aws::Utils::Json::JsonValue Jsonize() const; + + + ///@{ + /** + *

The content of the node input.

+ */ + inline const FlowTraceNodeInputContent& GetContent() const{ return m_content; } + inline bool ContentHasBeenSet() const { return m_contentHasBeenSet; } + inline void SetContent(const FlowTraceNodeInputContent& value) { m_contentHasBeenSet = true; m_content = value; } + inline void SetContent(FlowTraceNodeInputContent&& value) { m_contentHasBeenSet = true; m_content = std::move(value); } + inline FlowTraceNodeInputField& WithContent(const FlowTraceNodeInputContent& value) { SetContent(value); return *this;} + inline FlowTraceNodeInputField& WithContent(FlowTraceNodeInputContent&& value) { SetContent(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *

The name of the node input.

+ */ + inline const Aws::String& GetNodeInputName() const{ return m_nodeInputName; } + inline bool NodeInputNameHasBeenSet() const { return m_nodeInputNameHasBeenSet; } + inline void SetNodeInputName(const Aws::String& value) { m_nodeInputNameHasBeenSet = true; m_nodeInputName = value; } + inline void SetNodeInputName(Aws::String&& value) { m_nodeInputNameHasBeenSet = true; m_nodeInputName = std::move(value); } + inline void SetNodeInputName(const char* value) { m_nodeInputNameHasBeenSet = true; m_nodeInputName.assign(value); } + inline FlowTraceNodeInputField& WithNodeInputName(const Aws::String& value) { SetNodeInputName(value); return *this;} + inline FlowTraceNodeInputField& WithNodeInputName(Aws::String&& value) { SetNodeInputName(std::move(value)); return *this;} + inline FlowTraceNodeInputField& WithNodeInputName(const char* value) { SetNodeInputName(value); return *this;} + ///@} + private: + + FlowTraceNodeInputContent m_content; + bool m_contentHasBeenSet = false; + + Aws::String m_nodeInputName; + bool m_nodeInputNameHasBeenSet = false; + }; + +} // namespace Model +} // namespace BedrockAgentRuntime +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowTraceNodeOutputContent.h b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowTraceNodeOutputContent.h new file mode 100644 index 00000000000..aae69254452 --- /dev/null +++ b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowTraceNodeOutputContent.h @@ -0,0 +1,62 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include +#include +#include + +namespace Aws +{ +namespace Utils +{ +namespace Json +{ + class JsonValue; + class JsonView; +} // namespace Json +} // namespace Utils +namespace BedrockAgentRuntime +{ +namespace Model +{ + + /** + *

Contains the content of the node output. For more information, see Track + * each step in your prompt flow by viewing its trace in Amazon + * Bedrock.

See Also:

AWS + * API Reference

+ */ + class FlowTraceNodeOutputContent + { + public: + AWS_BEDROCKAGENTRUNTIME_API FlowTraceNodeOutputContent(); + AWS_BEDROCKAGENTRUNTIME_API FlowTraceNodeOutputContent(Aws::Utils::Json::JsonView jsonValue); + AWS_BEDROCKAGENTRUNTIME_API FlowTraceNodeOutputContent& operator=(Aws::Utils::Json::JsonView jsonValue); + AWS_BEDROCKAGENTRUNTIME_API Aws::Utils::Json::JsonValue Jsonize() const; + + + ///@{ + /** + *

The content of the node output.

+ */ + inline Aws::Utils::DocumentView GetDocument() const{ return m_document; } + inline bool DocumentHasBeenSet() const { return m_documentHasBeenSet; } + inline void SetDocument(const Aws::Utils::Document& value) { m_documentHasBeenSet = true; m_document = value; } + inline void SetDocument(Aws::Utils::Document&& value) { m_documentHasBeenSet = true; m_document = std::move(value); } + inline FlowTraceNodeOutputContent& WithDocument(const Aws::Utils::Document& value) { SetDocument(value); return *this;} + inline FlowTraceNodeOutputContent& WithDocument(Aws::Utils::Document&& value) { SetDocument(std::move(value)); return *this;} + ///@} + private: + + Aws::Utils::Document m_document; + bool m_documentHasBeenSet = false; + }; + +} // namespace Model +} // namespace BedrockAgentRuntime +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowTraceNodeOutputEvent.h b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowTraceNodeOutputEvent.h new file mode 100644 index 00000000000..d11b877c36a --- /dev/null +++ b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowTraceNodeOutputEvent.h @@ -0,0 +1,101 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include +#include +#include +#include +#include +#include + +namespace Aws +{ +namespace Utils +{ +namespace Json +{ + class JsonValue; + class JsonView; +} // namespace Json +} // namespace Utils +namespace BedrockAgentRuntime +{ +namespace Model +{ + + /** + *

Contains information about the output from a node. For more information, see + * Track + * each step in your prompt flow by viewing its trace in Amazon + * Bedrock.

See Also:

AWS + * API Reference

+ */ + class FlowTraceNodeOutputEvent + { + public: + AWS_BEDROCKAGENTRUNTIME_API FlowTraceNodeOutputEvent(); + AWS_BEDROCKAGENTRUNTIME_API FlowTraceNodeOutputEvent(Aws::Utils::Json::JsonView jsonValue); + AWS_BEDROCKAGENTRUNTIME_API FlowTraceNodeOutputEvent& operator=(Aws::Utils::Json::JsonView jsonValue); + AWS_BEDROCKAGENTRUNTIME_API Aws::Utils::Json::JsonValue Jsonize() const; + + + ///@{ + /** + *

An array of objects containing information about each field in the + * output.

+ */ + inline const Aws::Vector& GetFields() const{ return m_fields; } + inline bool FieldsHasBeenSet() const { return m_fieldsHasBeenSet; } + inline void SetFields(const Aws::Vector& value) { m_fieldsHasBeenSet = true; m_fields = value; } + inline void SetFields(Aws::Vector&& value) { m_fieldsHasBeenSet = true; m_fields = std::move(value); } + inline FlowTraceNodeOutputEvent& WithFields(const Aws::Vector& value) { SetFields(value); return *this;} + inline FlowTraceNodeOutputEvent& WithFields(Aws::Vector&& value) { SetFields(std::move(value)); return *this;} + inline FlowTraceNodeOutputEvent& AddFields(const FlowTraceNodeOutputField& value) { m_fieldsHasBeenSet = true; m_fields.push_back(value); return *this; } + inline FlowTraceNodeOutputEvent& AddFields(FlowTraceNodeOutputField&& value) { m_fieldsHasBeenSet = true; m_fields.push_back(std::move(value)); return *this; } + ///@} + + ///@{ + /** + *

The name of the node that yielded the output.

+ */ + inline const Aws::String& GetNodeName() const{ return m_nodeName; } + inline bool NodeNameHasBeenSet() const { return m_nodeNameHasBeenSet; } + inline void SetNodeName(const Aws::String& value) { m_nodeNameHasBeenSet = true; m_nodeName = value; } + inline void SetNodeName(Aws::String&& value) { m_nodeNameHasBeenSet = true; m_nodeName = std::move(value); } + inline void SetNodeName(const char* value) { m_nodeNameHasBeenSet = true; m_nodeName.assign(value); } + inline FlowTraceNodeOutputEvent& WithNodeName(const Aws::String& value) { SetNodeName(value); return *this;} + inline FlowTraceNodeOutputEvent& WithNodeName(Aws::String&& value) { SetNodeName(std::move(value)); return *this;} + inline FlowTraceNodeOutputEvent& WithNodeName(const char* value) { SetNodeName(value); return *this;} + ///@} + + ///@{ + /** + *

The date and time that the trace was returned.

+ */ + inline const Aws::Utils::DateTime& GetTimestamp() const{ return m_timestamp; } + inline bool TimestampHasBeenSet() const { return m_timestampHasBeenSet; } + inline void SetTimestamp(const Aws::Utils::DateTime& value) { m_timestampHasBeenSet = true; m_timestamp = value; } + inline void SetTimestamp(Aws::Utils::DateTime&& value) { m_timestampHasBeenSet = true; m_timestamp = std::move(value); } + inline FlowTraceNodeOutputEvent& WithTimestamp(const Aws::Utils::DateTime& value) { SetTimestamp(value); return *this;} + inline FlowTraceNodeOutputEvent& WithTimestamp(Aws::Utils::DateTime&& value) { SetTimestamp(std::move(value)); return *this;} + ///@} + private: + + Aws::Vector m_fields; + bool m_fieldsHasBeenSet = false; + + Aws::String m_nodeName; + bool m_nodeNameHasBeenSet = false; + + Aws::Utils::DateTime m_timestamp; + bool m_timestampHasBeenSet = false; + }; + +} // namespace Model +} // namespace BedrockAgentRuntime +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowTraceNodeOutputField.h b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowTraceNodeOutputField.h new file mode 100644 index 00000000000..c0a052215e2 --- /dev/null +++ b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/FlowTraceNodeOutputField.h @@ -0,0 +1,81 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include +#include +#include +#include + +namespace Aws +{ +namespace Utils +{ +namespace Json +{ + class JsonValue; + class JsonView; +} // namespace Json +} // namespace Utils +namespace BedrockAgentRuntime +{ +namespace Model +{ + + /** + *

Contains information about a field in the output from a node. For more + * information, see Track + * each step in your prompt flow by viewing its trace in Amazon + * Bedrock.

See Also:

AWS + * API Reference

+ */ + class FlowTraceNodeOutputField + { + public: + AWS_BEDROCKAGENTRUNTIME_API FlowTraceNodeOutputField(); + AWS_BEDROCKAGENTRUNTIME_API FlowTraceNodeOutputField(Aws::Utils::Json::JsonView jsonValue); + AWS_BEDROCKAGENTRUNTIME_API FlowTraceNodeOutputField& operator=(Aws::Utils::Json::JsonView jsonValue); + AWS_BEDROCKAGENTRUNTIME_API Aws::Utils::Json::JsonValue Jsonize() const; + + + ///@{ + /** + *

The content of the node output.

+ */ + inline const FlowTraceNodeOutputContent& GetContent() const{ return m_content; } + inline bool ContentHasBeenSet() const { return m_contentHasBeenSet; } + inline void SetContent(const FlowTraceNodeOutputContent& value) { m_contentHasBeenSet = true; m_content = value; } + inline void SetContent(FlowTraceNodeOutputContent&& value) { m_contentHasBeenSet = true; m_content = std::move(value); } + inline FlowTraceNodeOutputField& WithContent(const FlowTraceNodeOutputContent& value) { SetContent(value); return *this;} + inline FlowTraceNodeOutputField& WithContent(FlowTraceNodeOutputContent&& value) { SetContent(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *

The name of the node output.

+ */ + inline const Aws::String& GetNodeOutputName() const{ return m_nodeOutputName; } + inline bool NodeOutputNameHasBeenSet() const { return m_nodeOutputNameHasBeenSet; } + inline void SetNodeOutputName(const Aws::String& value) { m_nodeOutputNameHasBeenSet = true; m_nodeOutputName = value; } + inline void SetNodeOutputName(Aws::String&& value) { m_nodeOutputNameHasBeenSet = true; m_nodeOutputName = std::move(value); } + inline void SetNodeOutputName(const char* value) { m_nodeOutputNameHasBeenSet = true; m_nodeOutputName.assign(value); } + inline FlowTraceNodeOutputField& WithNodeOutputName(const Aws::String& value) { SetNodeOutputName(value); return *this;} + inline FlowTraceNodeOutputField& WithNodeOutputName(Aws::String&& value) { SetNodeOutputName(std::move(value)); return *this;} + inline FlowTraceNodeOutputField& WithNodeOutputName(const char* value) { SetNodeOutputName(value); return *this;} + ///@} + private: + + FlowTraceNodeOutputContent m_content; + bool m_contentHasBeenSet = false; + + Aws::String m_nodeOutputName; + bool m_nodeOutputNameHasBeenSet = false; + }; + +} // namespace Model +} // namespace BedrockAgentRuntime +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/GenerationConfiguration.h b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/GenerationConfiguration.h index 01e7db7db0f..9a54c3a521e 100644 --- a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/GenerationConfiguration.h +++ b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/GenerationConfiguration.h @@ -94,7 +94,10 @@ namespace Model ///@{ /** *

Contains the template for the prompt that's sent to the model for response - * generation.

+ * generation. Generation prompts must include the $search_results$ + * variable. For more information, see Use + * placeholder variables in the user guide.

*/ inline const PromptTemplate& GetPromptTemplate() const{ return m_promptTemplate; } inline bool PromptTemplateHasBeenSet() const { return m_promptTemplateHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/InvokeFlowHandler.h b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/InvokeFlowHandler.h index 13219a28ba0..e56e12206ee 100644 --- a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/InvokeFlowHandler.h +++ b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/InvokeFlowHandler.h @@ -13,6 +13,7 @@ #include #include #include +#include namespace Aws { @@ -25,6 +26,7 @@ namespace Model INITIAL_RESPONSE, FLOWCOMPLETIONEVENT, FLOWOUTPUTEVENT, + FLOWTRACEEVENT, UNKNOWN }; @@ -34,6 +36,7 @@ namespace Model typedef std::function InvokeFlowInitialResponseCallbackEx; typedef std::function FlowCompletionEventCallback; typedef std::function FlowOutputEventCallback; + typedef std::function FlowTraceEventCallback; typedef std::function& error)> ErrorCallback; public: @@ -60,6 +63,7 @@ namespace Model ///@} inline void SetFlowCompletionEventCallback(const FlowCompletionEventCallback& callback) { m_onFlowCompletionEvent = callback; } inline void SetFlowOutputEventCallback(const FlowOutputEventCallback& callback) { m_onFlowOutputEvent = callback; } + inline void SetFlowTraceEventCallback(const FlowTraceEventCallback& callback) { m_onFlowTraceEvent = callback; } inline void SetOnErrorCallback(const ErrorCallback& callback) { m_onError = callback; } inline InvokeFlowInitialResponseCallbackEx& GetInitialResponseCallbackEx() { return m_onInitialResponse; } @@ -72,6 +76,7 @@ namespace Model InvokeFlowInitialResponseCallbackEx m_onInitialResponse; FlowCompletionEventCallback m_onFlowCompletionEvent; FlowOutputEventCallback m_onFlowOutputEvent; + FlowTraceEventCallback m_onFlowTraceEvent; ErrorCallback 
m_onError; }; diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/InvokeFlowRequest.h b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/InvokeFlowRequest.h index 1c0dfcb5625..107bc6ff73e 100644 --- a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/InvokeFlowRequest.h +++ b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/InvokeFlowRequest.h @@ -56,6 +56,19 @@ namespace Model inline InvokeFlowRequest& WithEventStreamHandler(const InvokeFlowHandler& value) { SetEventStreamHandler(value); return *this; } + ///@{ + /** + *

Specifies whether to return the trace for the flow or not. Traces track + * inputs and outputs for nodes in the flow. For more information, see Track + * each step in your prompt flow by viewing its trace in Amazon Bedrock.

+ */ + inline bool GetEnableTrace() const{ return m_enableTrace; } + inline bool EnableTraceHasBeenSet() const { return m_enableTraceHasBeenSet; } + inline void SetEnableTrace(bool value) { m_enableTraceHasBeenSet = true; m_enableTrace = value; } + inline InvokeFlowRequest& WithEnableTrace(bool value) { SetEnableTrace(value); return *this;} + ///@} + ///@{ /** *

The unique identifier of the flow alias.

@@ -100,6 +113,9 @@ namespace Model ///@} private: + bool m_enableTrace; + bool m_enableTraceHasBeenSet = false; + Aws::String m_flowAliasIdentifier; bool m_flowAliasIdentifierHasBeenSet = false; diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/OrchestrationConfiguration.h b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/OrchestrationConfiguration.h index 4153e06dc54..5a5af1bfe02 100644 --- a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/OrchestrationConfiguration.h +++ b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/include/aws/bedrock-agent-runtime/model/OrchestrationConfiguration.h @@ -78,8 +78,12 @@ namespace Model ///@{ /** - *

Contains the template for the prompt that's sent to the model for response - * generation.

+ *

Contains the template for the prompt that's sent to the model. Orchestration + * prompts must include the $conversation_history$ and + * $output_format_instructions$ variables. For more information, see + * Use + * placeholder variables in the user guide.

*/ inline const PromptTemplate& GetPromptTemplate() const{ return m_promptTemplate; } inline bool PromptTemplateHasBeenSet() const { return m_promptTemplateHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/FlowTrace.cpp b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/FlowTrace.cpp new file mode 100644 index 00000000000..c3c7cc3f2cc --- /dev/null +++ b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/FlowTrace.cpp @@ -0,0 +1,87 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#include +#include + +#include + +using namespace Aws::Utils::Json; +using namespace Aws::Utils; + +namespace Aws +{ +namespace BedrockAgentRuntime +{ +namespace Model +{ + +FlowTrace::FlowTrace() : + m_conditionNodeResultTraceHasBeenSet(false), + m_nodeInputTraceHasBeenSet(false), + m_nodeOutputTraceHasBeenSet(false) +{ +} + +FlowTrace::FlowTrace(JsonView jsonValue) + : FlowTrace() +{ + *this = jsonValue; +} + +FlowTrace& FlowTrace::operator =(JsonView jsonValue) +{ + if(jsonValue.ValueExists("conditionNodeResultTrace")) + { + m_conditionNodeResultTrace = jsonValue.GetObject("conditionNodeResultTrace"); + + m_conditionNodeResultTraceHasBeenSet = true; + } + + if(jsonValue.ValueExists("nodeInputTrace")) + { + m_nodeInputTrace = jsonValue.GetObject("nodeInputTrace"); + + m_nodeInputTraceHasBeenSet = true; + } + + if(jsonValue.ValueExists("nodeOutputTrace")) + { + m_nodeOutputTrace = jsonValue.GetObject("nodeOutputTrace"); + + m_nodeOutputTraceHasBeenSet = true; + } + + return *this; +} + +JsonValue FlowTrace::Jsonize() const +{ + JsonValue payload; + + if(m_conditionNodeResultTraceHasBeenSet) + { + payload.WithObject("conditionNodeResultTrace", m_conditionNodeResultTrace.Jsonize()); + + } + + if(m_nodeInputTraceHasBeenSet) + { + payload.WithObject("nodeInputTrace", m_nodeInputTrace.Jsonize()); + + } + + if(m_nodeOutputTraceHasBeenSet) + { + 
payload.WithObject("nodeOutputTrace", m_nodeOutputTrace.Jsonize()); + + } + + return payload; +} + +} // namespace Model +} // namespace BedrockAgentRuntime +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/FlowTraceCondition.cpp b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/FlowTraceCondition.cpp new file mode 100644 index 00000000000..afc766be97a --- /dev/null +++ b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/FlowTraceCondition.cpp @@ -0,0 +1,59 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#include +#include + +#include + +using namespace Aws::Utils::Json; +using namespace Aws::Utils; + +namespace Aws +{ +namespace BedrockAgentRuntime +{ +namespace Model +{ + +FlowTraceCondition::FlowTraceCondition() : + m_conditionNameHasBeenSet(false) +{ +} + +FlowTraceCondition::FlowTraceCondition(JsonView jsonValue) + : FlowTraceCondition() +{ + *this = jsonValue; +} + +FlowTraceCondition& FlowTraceCondition::operator =(JsonView jsonValue) +{ + if(jsonValue.ValueExists("conditionName")) + { + m_conditionName = jsonValue.GetString("conditionName"); + + m_conditionNameHasBeenSet = true; + } + + return *this; +} + +JsonValue FlowTraceCondition::Jsonize() const +{ + JsonValue payload; + + if(m_conditionNameHasBeenSet) + { + payload.WithString("conditionName", m_conditionName); + + } + + return payload; +} + +} // namespace Model +} // namespace BedrockAgentRuntime +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/FlowTraceConditionNodeResultEvent.cpp b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/FlowTraceConditionNodeResultEvent.cpp new file mode 100644 index 00000000000..ea337e17b52 --- /dev/null +++ b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/FlowTraceConditionNodeResultEvent.cpp @@ -0,0 +1,94 @@ +/** + * Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#include +#include + +#include + +using namespace Aws::Utils::Json; +using namespace Aws::Utils; + +namespace Aws +{ +namespace BedrockAgentRuntime +{ +namespace Model +{ + +FlowTraceConditionNodeResultEvent::FlowTraceConditionNodeResultEvent() : + m_nodeNameHasBeenSet(false), + m_satisfiedConditionsHasBeenSet(false), + m_timestampHasBeenSet(false) +{ +} + +FlowTraceConditionNodeResultEvent::FlowTraceConditionNodeResultEvent(JsonView jsonValue) + : FlowTraceConditionNodeResultEvent() +{ + *this = jsonValue; +} + +FlowTraceConditionNodeResultEvent& FlowTraceConditionNodeResultEvent::operator =(JsonView jsonValue) +{ + if(jsonValue.ValueExists("nodeName")) + { + m_nodeName = jsonValue.GetString("nodeName"); + + m_nodeNameHasBeenSet = true; + } + + if(jsonValue.ValueExists("satisfiedConditions")) + { + Aws::Utils::Array satisfiedConditionsJsonList = jsonValue.GetArray("satisfiedConditions"); + for(unsigned satisfiedConditionsIndex = 0; satisfiedConditionsIndex < satisfiedConditionsJsonList.GetLength(); ++satisfiedConditionsIndex) + { + m_satisfiedConditions.push_back(satisfiedConditionsJsonList[satisfiedConditionsIndex].AsObject()); + } + m_satisfiedConditionsHasBeenSet = true; + } + + if(jsonValue.ValueExists("timestamp")) + { + m_timestamp = jsonValue.GetString("timestamp"); + + m_timestampHasBeenSet = true; + } + + return *this; +} + +JsonValue FlowTraceConditionNodeResultEvent::Jsonize() const +{ + JsonValue payload; + + if(m_nodeNameHasBeenSet) + { + payload.WithString("nodeName", m_nodeName); + + } + + if(m_satisfiedConditionsHasBeenSet) + { + Aws::Utils::Array satisfiedConditionsJsonList(m_satisfiedConditions.size()); + for(unsigned satisfiedConditionsIndex = 0; satisfiedConditionsIndex < satisfiedConditionsJsonList.GetLength(); ++satisfiedConditionsIndex) + { + 
satisfiedConditionsJsonList[satisfiedConditionsIndex].AsObject(m_satisfiedConditions[satisfiedConditionsIndex].Jsonize()); + } + payload.WithArray("satisfiedConditions", std::move(satisfiedConditionsJsonList)); + + } + + if(m_timestampHasBeenSet) + { + payload.WithString("timestamp", m_timestamp.ToGmtString(Aws::Utils::DateFormat::ISO_8601)); + } + + return payload; +} + +} // namespace Model +} // namespace BedrockAgentRuntime +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/FlowTraceEvent.cpp b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/FlowTraceEvent.cpp new file mode 100644 index 00000000000..c30a7d1024a --- /dev/null +++ b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/FlowTraceEvent.cpp @@ -0,0 +1,59 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#include +#include + +#include + +using namespace Aws::Utils::Json; +using namespace Aws::Utils; + +namespace Aws +{ +namespace BedrockAgentRuntime +{ +namespace Model +{ + +FlowTraceEvent::FlowTraceEvent() : + m_traceHasBeenSet(false) +{ +} + +FlowTraceEvent::FlowTraceEvent(JsonView jsonValue) + : FlowTraceEvent() +{ + *this = jsonValue; +} + +FlowTraceEvent& FlowTraceEvent::operator =(JsonView jsonValue) +{ + if(jsonValue.ValueExists("trace")) + { + m_trace = jsonValue.GetObject("trace"); + + m_traceHasBeenSet = true; + } + + return *this; +} + +JsonValue FlowTraceEvent::Jsonize() const +{ + JsonValue payload; + + if(m_traceHasBeenSet) + { + payload.WithObject("trace", m_trace.Jsonize()); + + } + + return payload; +} + +} // namespace Model +} // namespace BedrockAgentRuntime +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/FlowTraceNodeInputContent.cpp b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/FlowTraceNodeInputContent.cpp new file mode 100644 index 00000000000..78a2f1489aa --- /dev/null +++ 
b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/FlowTraceNodeInputContent.cpp @@ -0,0 +1,61 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#include +#include + +#include + +using namespace Aws::Utils::Json; +using namespace Aws::Utils; + +namespace Aws +{ +namespace BedrockAgentRuntime +{ +namespace Model +{ + +FlowTraceNodeInputContent::FlowTraceNodeInputContent() : + m_documentHasBeenSet(false) +{ +} + +FlowTraceNodeInputContent::FlowTraceNodeInputContent(JsonView jsonValue) + : FlowTraceNodeInputContent() +{ + *this = jsonValue; +} + +FlowTraceNodeInputContent& FlowTraceNodeInputContent::operator =(JsonView jsonValue) +{ + if(jsonValue.ValueExists("document")) + { + m_document = jsonValue.GetObject("document"); + + m_documentHasBeenSet = true; + } + + return *this; +} + +JsonValue FlowTraceNodeInputContent::Jsonize() const +{ + JsonValue payload; + + if(m_documentHasBeenSet) + { + if(!m_document.View().IsNull()) + { + payload.WithObject("document", JsonValue(m_document.View())); + } + } + + return payload; +} + +} // namespace Model +} // namespace BedrockAgentRuntime +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/FlowTraceNodeInputEvent.cpp b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/FlowTraceNodeInputEvent.cpp new file mode 100644 index 00000000000..69938f0ee55 --- /dev/null +++ b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/FlowTraceNodeInputEvent.cpp @@ -0,0 +1,94 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +#include +#include + +#include + +using namespace Aws::Utils::Json; +using namespace Aws::Utils; + +namespace Aws +{ +namespace BedrockAgentRuntime +{ +namespace Model +{ + +FlowTraceNodeInputEvent::FlowTraceNodeInputEvent() : + m_fieldsHasBeenSet(false), + m_nodeNameHasBeenSet(false), + m_timestampHasBeenSet(false) +{ +} + +FlowTraceNodeInputEvent::FlowTraceNodeInputEvent(JsonView jsonValue) + : FlowTraceNodeInputEvent() +{ + *this = jsonValue; +} + +FlowTraceNodeInputEvent& FlowTraceNodeInputEvent::operator =(JsonView jsonValue) +{ + if(jsonValue.ValueExists("fields")) + { + Aws::Utils::Array fieldsJsonList = jsonValue.GetArray("fields"); + for(unsigned fieldsIndex = 0; fieldsIndex < fieldsJsonList.GetLength(); ++fieldsIndex) + { + m_fields.push_back(fieldsJsonList[fieldsIndex].AsObject()); + } + m_fieldsHasBeenSet = true; + } + + if(jsonValue.ValueExists("nodeName")) + { + m_nodeName = jsonValue.GetString("nodeName"); + + m_nodeNameHasBeenSet = true; + } + + if(jsonValue.ValueExists("timestamp")) + { + m_timestamp = jsonValue.GetString("timestamp"); + + m_timestampHasBeenSet = true; + } + + return *this; +} + +JsonValue FlowTraceNodeInputEvent::Jsonize() const +{ + JsonValue payload; + + if(m_fieldsHasBeenSet) + { + Aws::Utils::Array fieldsJsonList(m_fields.size()); + for(unsigned fieldsIndex = 0; fieldsIndex < fieldsJsonList.GetLength(); ++fieldsIndex) + { + fieldsJsonList[fieldsIndex].AsObject(m_fields[fieldsIndex].Jsonize()); + } + payload.WithArray("fields", std::move(fieldsJsonList)); + + } + + if(m_nodeNameHasBeenSet) + { + payload.WithString("nodeName", m_nodeName); + + } + + if(m_timestampHasBeenSet) + { + payload.WithString("timestamp", m_timestamp.ToGmtString(Aws::Utils::DateFormat::ISO_8601)); + } + + return payload; +} + +} // namespace Model +} // namespace BedrockAgentRuntime +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/FlowTraceNodeInputField.cpp 
b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/FlowTraceNodeInputField.cpp new file mode 100644 index 00000000000..7e929d1f9b1 --- /dev/null +++ b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/FlowTraceNodeInputField.cpp @@ -0,0 +1,73 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#include +#include + +#include + +using namespace Aws::Utils::Json; +using namespace Aws::Utils; + +namespace Aws +{ +namespace BedrockAgentRuntime +{ +namespace Model +{ + +FlowTraceNodeInputField::FlowTraceNodeInputField() : + m_contentHasBeenSet(false), + m_nodeInputNameHasBeenSet(false) +{ +} + +FlowTraceNodeInputField::FlowTraceNodeInputField(JsonView jsonValue) + : FlowTraceNodeInputField() +{ + *this = jsonValue; +} + +FlowTraceNodeInputField& FlowTraceNodeInputField::operator =(JsonView jsonValue) +{ + if(jsonValue.ValueExists("content")) + { + m_content = jsonValue.GetObject("content"); + + m_contentHasBeenSet = true; + } + + if(jsonValue.ValueExists("nodeInputName")) + { + m_nodeInputName = jsonValue.GetString("nodeInputName"); + + m_nodeInputNameHasBeenSet = true; + } + + return *this; +} + +JsonValue FlowTraceNodeInputField::Jsonize() const +{ + JsonValue payload; + + if(m_contentHasBeenSet) + { + payload.WithObject("content", m_content.Jsonize()); + + } + + if(m_nodeInputNameHasBeenSet) + { + payload.WithString("nodeInputName", m_nodeInputName); + + } + + return payload; +} + +} // namespace Model +} // namespace BedrockAgentRuntime +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/FlowTraceNodeOutputContent.cpp b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/FlowTraceNodeOutputContent.cpp new file mode 100644 index 00000000000..9e84c571fd9 --- /dev/null +++ b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/FlowTraceNodeOutputContent.cpp @@ -0,0 +1,61 @@ +/** + * Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#include +#include + +#include + +using namespace Aws::Utils::Json; +using namespace Aws::Utils; + +namespace Aws +{ +namespace BedrockAgentRuntime +{ +namespace Model +{ + +FlowTraceNodeOutputContent::FlowTraceNodeOutputContent() : + m_documentHasBeenSet(false) +{ +} + +FlowTraceNodeOutputContent::FlowTraceNodeOutputContent(JsonView jsonValue) + : FlowTraceNodeOutputContent() +{ + *this = jsonValue; +} + +FlowTraceNodeOutputContent& FlowTraceNodeOutputContent::operator =(JsonView jsonValue) +{ + if(jsonValue.ValueExists("document")) + { + m_document = jsonValue.GetObject("document"); + + m_documentHasBeenSet = true; + } + + return *this; +} + +JsonValue FlowTraceNodeOutputContent::Jsonize() const +{ + JsonValue payload; + + if(m_documentHasBeenSet) + { + if(!m_document.View().IsNull()) + { + payload.WithObject("document", JsonValue(m_document.View())); + } + } + + return payload; +} + +} // namespace Model +} // namespace BedrockAgentRuntime +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/FlowTraceNodeOutputEvent.cpp b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/FlowTraceNodeOutputEvent.cpp new file mode 100644 index 00000000000..899abf2d2f8 --- /dev/null +++ b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/FlowTraceNodeOutputEvent.cpp @@ -0,0 +1,94 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +#include +#include + +#include + +using namespace Aws::Utils::Json; +using namespace Aws::Utils; + +namespace Aws +{ +namespace BedrockAgentRuntime +{ +namespace Model +{ + +FlowTraceNodeOutputEvent::FlowTraceNodeOutputEvent() : + m_fieldsHasBeenSet(false), + m_nodeNameHasBeenSet(false), + m_timestampHasBeenSet(false) +{ +} + +FlowTraceNodeOutputEvent::FlowTraceNodeOutputEvent(JsonView jsonValue) + : FlowTraceNodeOutputEvent() +{ + *this = jsonValue; +} + +FlowTraceNodeOutputEvent& FlowTraceNodeOutputEvent::operator =(JsonView jsonValue) +{ + if(jsonValue.ValueExists("fields")) + { + Aws::Utils::Array fieldsJsonList = jsonValue.GetArray("fields"); + for(unsigned fieldsIndex = 0; fieldsIndex < fieldsJsonList.GetLength(); ++fieldsIndex) + { + m_fields.push_back(fieldsJsonList[fieldsIndex].AsObject()); + } + m_fieldsHasBeenSet = true; + } + + if(jsonValue.ValueExists("nodeName")) + { + m_nodeName = jsonValue.GetString("nodeName"); + + m_nodeNameHasBeenSet = true; + } + + if(jsonValue.ValueExists("timestamp")) + { + m_timestamp = jsonValue.GetString("timestamp"); + + m_timestampHasBeenSet = true; + } + + return *this; +} + +JsonValue FlowTraceNodeOutputEvent::Jsonize() const +{ + JsonValue payload; + + if(m_fieldsHasBeenSet) + { + Aws::Utils::Array fieldsJsonList(m_fields.size()); + for(unsigned fieldsIndex = 0; fieldsIndex < fieldsJsonList.GetLength(); ++fieldsIndex) + { + fieldsJsonList[fieldsIndex].AsObject(m_fields[fieldsIndex].Jsonize()); + } + payload.WithArray("fields", std::move(fieldsJsonList)); + + } + + if(m_nodeNameHasBeenSet) + { + payload.WithString("nodeName", m_nodeName); + + } + + if(m_timestampHasBeenSet) + { + payload.WithString("timestamp", m_timestamp.ToGmtString(Aws::Utils::DateFormat::ISO_8601)); + } + + return payload; +} + +} // namespace Model +} // namespace BedrockAgentRuntime +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/FlowTraceNodeOutputField.cpp 
b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/FlowTraceNodeOutputField.cpp new file mode 100644 index 00000000000..abff3eec6bc --- /dev/null +++ b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/FlowTraceNodeOutputField.cpp @@ -0,0 +1,73 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#include +#include + +#include + +using namespace Aws::Utils::Json; +using namespace Aws::Utils; + +namespace Aws +{ +namespace BedrockAgentRuntime +{ +namespace Model +{ + +FlowTraceNodeOutputField::FlowTraceNodeOutputField() : + m_contentHasBeenSet(false), + m_nodeOutputNameHasBeenSet(false) +{ +} + +FlowTraceNodeOutputField::FlowTraceNodeOutputField(JsonView jsonValue) + : FlowTraceNodeOutputField() +{ + *this = jsonValue; +} + +FlowTraceNodeOutputField& FlowTraceNodeOutputField::operator =(JsonView jsonValue) +{ + if(jsonValue.ValueExists("content")) + { + m_content = jsonValue.GetObject("content"); + + m_contentHasBeenSet = true; + } + + if(jsonValue.ValueExists("nodeOutputName")) + { + m_nodeOutputName = jsonValue.GetString("nodeOutputName"); + + m_nodeOutputNameHasBeenSet = true; + } + + return *this; +} + +JsonValue FlowTraceNodeOutputField::Jsonize() const +{ + JsonValue payload; + + if(m_contentHasBeenSet) + { + payload.WithObject("content", m_content.Jsonize()); + + } + + if(m_nodeOutputNameHasBeenSet) + { + payload.WithString("nodeOutputName", m_nodeOutputName); + + } + + return payload; +} + +} // namespace Model +} // namespace BedrockAgentRuntime +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/InvokeFlowHandler.cpp b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/InvokeFlowHandler.cpp index dac510c57a2..7cf0b6beba7 100644 --- a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/InvokeFlowHandler.cpp +++ b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/InvokeFlowHandler.cpp @@ 
-46,6 +46,11 @@ namespace Model AWS_LOGSTREAM_TRACE(INVOKEFLOW_HANDLER_CLASS_TAG, "FlowOutputEvent received."); }; + m_onFlowTraceEvent = [&](const FlowTraceEvent&) + { + AWS_LOGSTREAM_TRACE(INVOKEFLOW_HANDLER_CLASS_TAG, "FlowTraceEvent received."); + }; + m_onError = [&](const AWSError& error) { AWS_LOGSTREAM_TRACE(INVOKEFLOW_HANDLER_CLASS_TAG, "BedrockAgentRuntime Errors received, " << error); @@ -132,6 +137,18 @@ namespace Model m_onFlowOutputEvent(FlowOutputEvent{json.View()}); break; } + case InvokeFlowEventType::FLOWTRACEEVENT: + { + JsonValue json(GetEventPayloadAsString()); + if (!json.WasParseSuccessful()) + { + AWS_LOGSTREAM_WARN(INVOKEFLOW_HANDLER_CLASS_TAG, "Unable to generate a proper FlowTraceEvent object from the response in JSON format."); + break; + } + + m_onFlowTraceEvent(FlowTraceEvent{json.View()}); + break; + } default: AWS_LOGSTREAM_WARN(INVOKEFLOW_HANDLER_CLASS_TAG, "Unexpected event type: " << eventTypeHeaderIter->second.GetEventHeaderValueAsString()); @@ -227,6 +244,7 @@ namespace InvokeFlowEventMapper static const int INITIAL_RESPONSE_HASH = Aws::Utils::HashingUtils::HashString("initial-response"); static const int FLOWCOMPLETIONEVENT_HASH = Aws::Utils::HashingUtils::HashString("flowCompletionEvent"); static const int FLOWOUTPUTEVENT_HASH = Aws::Utils::HashingUtils::HashString("flowOutputEvent"); + static const int FLOWTRACEEVENT_HASH = Aws::Utils::HashingUtils::HashString("flowTraceEvent"); InvokeFlowEventType GetInvokeFlowEventTypeForName(const Aws::String& name) { @@ -244,6 +262,10 @@ namespace InvokeFlowEventMapper { return InvokeFlowEventType::FLOWOUTPUTEVENT; } + else if (hashCode == FLOWTRACEEVENT_HASH) + { + return InvokeFlowEventType::FLOWTRACEEVENT; + } return InvokeFlowEventType::UNKNOWN; } @@ -257,6 +279,8 @@ namespace InvokeFlowEventMapper return "flowCompletionEvent"; case InvokeFlowEventType::FLOWOUTPUTEVENT: return "flowOutputEvent"; + case InvokeFlowEventType::FLOWTRACEEVENT: + return "flowTraceEvent"; default: return 
"Unknown"; } diff --git a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/InvokeFlowRequest.cpp b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/InvokeFlowRequest.cpp index 03976102943..a640d98deeb 100644 --- a/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/InvokeFlowRequest.cpp +++ b/generated/src/aws-cpp-sdk-bedrock-agent-runtime/source/model/InvokeFlowRequest.cpp @@ -13,6 +13,8 @@ using namespace Aws::Utils::Json; using namespace Aws::Utils; InvokeFlowRequest::InvokeFlowRequest() : + m_enableTrace(false), + m_enableTraceHasBeenSet(false), m_flowAliasIdentifierHasBeenSet(false), m_flowIdentifierHasBeenSet(false), m_inputsHasBeenSet(false), @@ -24,6 +26,12 @@ Aws::String InvokeFlowRequest::SerializePayload() const { JsonValue payload; + if(m_enableTraceHasBeenSet) + { + payload.WithBool("enableTrace", m_enableTrace); + + } + if(m_inputsHasBeenSet) { Aws::Utils::Array inputsJsonList(m_inputs.size()); diff --git a/generated/src/aws-cpp-sdk-chime-sdk-media-pipelines/include/aws/chime-sdk-media-pipelines/model/AmazonTranscribeProcessorConfiguration.h b/generated/src/aws-cpp-sdk-chime-sdk-media-pipelines/include/aws/chime-sdk-media-pipelines/model/AmazonTranscribeProcessorConfiguration.h index 6825641dd3d..c9e37ac58e5 100644 --- a/generated/src/aws-cpp-sdk-chime-sdk-media-pipelines/include/aws/chime-sdk-media-pipelines/model/AmazonTranscribeProcessorConfiguration.h +++ b/generated/src/aws-cpp-sdk-chime-sdk-media-pipelines/include/aws/chime-sdk-media-pipelines/model/AmazonTranscribeProcessorConfiguration.h @@ -263,7 +263,11 @@ namespace Model ///@{ /** - *

Turns language identification on or off for multiple languages.

+ *

Turns language identification on or off for multiple languages.

+ *

Calls to this API must include a LanguageCode, + * IdentifyLanguage, or IdentifyMultipleLanguages + * parameter. If you include more than one of those parameters, your transcription + * job fails.

*/ inline bool GetIdentifyMultipleLanguages() const{ return m_identifyMultipleLanguages; } inline bool IdentifyMultipleLanguagesHasBeenSet() const { return m_identifyMultipleLanguagesHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-chime-sdk-media-pipelines/include/aws/chime-sdk-media-pipelines/model/CreateMediaCapturePipelineRequest.h b/generated/src/aws-cpp-sdk-chime-sdk-media-pipelines/include/aws/chime-sdk-media-pipelines/model/CreateMediaCapturePipelineRequest.h index 6e1ab75d52a..747f02a5b66 100644 --- a/generated/src/aws-cpp-sdk-chime-sdk-media-pipelines/include/aws/chime-sdk-media-pipelines/model/CreateMediaCapturePipelineRequest.h +++ b/generated/src/aws-cpp-sdk-chime-sdk-media-pipelines/include/aws/chime-sdk-media-pipelines/model/CreateMediaCapturePipelineRequest.h @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -120,6 +121,44 @@ namespace Model inline CreateMediaCapturePipelineRequest& WithChimeSdkMeetingConfiguration(ChimeSdkMeetingConfiguration&& value) { SetChimeSdkMeetingConfiguration(std::move(value)); return *this;} ///@} + ///@{ + /** + *

An object that contains server side encryption parameters to be used by media + * capture pipeline. The parameters can also be used by media concatenation + * pipeline taking media capture pipeline as a media source.

+ */ + inline const SseAwsKeyManagementParams& GetSseAwsKeyManagementParams() const{ return m_sseAwsKeyManagementParams; } + inline bool SseAwsKeyManagementParamsHasBeenSet() const { return m_sseAwsKeyManagementParamsHasBeenSet; } + inline void SetSseAwsKeyManagementParams(const SseAwsKeyManagementParams& value) { m_sseAwsKeyManagementParamsHasBeenSet = true; m_sseAwsKeyManagementParams = value; } + inline void SetSseAwsKeyManagementParams(SseAwsKeyManagementParams&& value) { m_sseAwsKeyManagementParamsHasBeenSet = true; m_sseAwsKeyManagementParams = std::move(value); } + inline CreateMediaCapturePipelineRequest& WithSseAwsKeyManagementParams(const SseAwsKeyManagementParams& value) { SetSseAwsKeyManagementParams(value); return *this;} + inline CreateMediaCapturePipelineRequest& WithSseAwsKeyManagementParams(SseAwsKeyManagementParams&& value) { SetSseAwsKeyManagementParams(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *

The Amazon Resource Name (ARN) of the sink role to be used with + * AwsKmsKeyId in SseAwsKeyManagementParams. Can only + * interact with S3Bucket sink type. The role must belong to the + * caller’s account and be able to act on behalf of the caller during the API call. + * All minimum policy permissions requirements for the caller to perform + * sink-related actions are the same for SinkIamRoleArn.

+ *

Additionally, the role must have permission to + * kms:GenerateDataKey using KMS key supplied as + * AwsKmsKeyId in SseAwsKeyManagementParams. If media + * concatenation will be required later, the role must also have permission to + * kms:Decrypt for the same KMS key.

+ */ + inline const Aws::String& GetSinkIamRoleArn() const{ return m_sinkIamRoleArn; } + inline bool SinkIamRoleArnHasBeenSet() const { return m_sinkIamRoleArnHasBeenSet; } + inline void SetSinkIamRoleArn(const Aws::String& value) { m_sinkIamRoleArnHasBeenSet = true; m_sinkIamRoleArn = value; } + inline void SetSinkIamRoleArn(Aws::String&& value) { m_sinkIamRoleArnHasBeenSet = true; m_sinkIamRoleArn = std::move(value); } + inline void SetSinkIamRoleArn(const char* value) { m_sinkIamRoleArnHasBeenSet = true; m_sinkIamRoleArn.assign(value); } + inline CreateMediaCapturePipelineRequest& WithSinkIamRoleArn(const Aws::String& value) { SetSinkIamRoleArn(value); return *this;} + inline CreateMediaCapturePipelineRequest& WithSinkIamRoleArn(Aws::String&& value) { SetSinkIamRoleArn(std::move(value)); return *this;} + inline CreateMediaCapturePipelineRequest& WithSinkIamRoleArn(const char* value) { SetSinkIamRoleArn(value); return *this;} + ///@} + ///@{ /** *

The tag key-value pairs.

@@ -153,6 +192,12 @@ namespace Model ChimeSdkMeetingConfiguration m_chimeSdkMeetingConfiguration; bool m_chimeSdkMeetingConfigurationHasBeenSet = false; + SseAwsKeyManagementParams m_sseAwsKeyManagementParams; + bool m_sseAwsKeyManagementParamsHasBeenSet = false; + + Aws::String m_sinkIamRoleArn; + bool m_sinkIamRoleArnHasBeenSet = false; + Aws::Vector m_tags; bool m_tagsHasBeenSet = false; }; diff --git a/generated/src/aws-cpp-sdk-chime-sdk-media-pipelines/include/aws/chime-sdk-media-pipelines/model/MediaCapturePipeline.h b/generated/src/aws-cpp-sdk-chime-sdk-media-pipelines/include/aws/chime-sdk-media-pipelines/model/MediaCapturePipeline.h index eb4ffcdf6cc..366a9b5425e 100644 --- a/generated/src/aws-cpp-sdk-chime-sdk-media-pipelines/include/aws/chime-sdk-media-pipelines/model/MediaCapturePipeline.h +++ b/generated/src/aws-cpp-sdk-chime-sdk-media-pipelines/include/aws/chime-sdk-media-pipelines/model/MediaCapturePipeline.h @@ -11,6 +11,7 @@ #include #include #include +#include #include namespace Aws @@ -173,6 +174,35 @@ namespace Model inline MediaCapturePipeline& WithChimeSdkMeetingConfiguration(const ChimeSdkMeetingConfiguration& value) { SetChimeSdkMeetingConfiguration(value); return *this;} inline MediaCapturePipeline& WithChimeSdkMeetingConfiguration(ChimeSdkMeetingConfiguration&& value) { SetChimeSdkMeetingConfiguration(std::move(value)); return *this;} ///@} + + ///@{ + /** + *

An object that contains server side encryption parameters to be used by media + * capture pipeline. The parameters can also be used by media concatenation + * pipeline taking media capture pipeline as a media source.

+ */ + inline const SseAwsKeyManagementParams& GetSseAwsKeyManagementParams() const{ return m_sseAwsKeyManagementParams; } + inline bool SseAwsKeyManagementParamsHasBeenSet() const { return m_sseAwsKeyManagementParamsHasBeenSet; } + inline void SetSseAwsKeyManagementParams(const SseAwsKeyManagementParams& value) { m_sseAwsKeyManagementParamsHasBeenSet = true; m_sseAwsKeyManagementParams = value; } + inline void SetSseAwsKeyManagementParams(SseAwsKeyManagementParams&& value) { m_sseAwsKeyManagementParamsHasBeenSet = true; m_sseAwsKeyManagementParams = std::move(value); } + inline MediaCapturePipeline& WithSseAwsKeyManagementParams(const SseAwsKeyManagementParams& value) { SetSseAwsKeyManagementParams(value); return *this;} + inline MediaCapturePipeline& WithSseAwsKeyManagementParams(SseAwsKeyManagementParams&& value) { SetSseAwsKeyManagementParams(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *

The Amazon Resource Name (ARN) of the sink role to be used with + * AwsKmsKeyId in SseAwsKeyManagementParams.

+ */ + inline const Aws::String& GetSinkIamRoleArn() const{ return m_sinkIamRoleArn; } + inline bool SinkIamRoleArnHasBeenSet() const { return m_sinkIamRoleArnHasBeenSet; } + inline void SetSinkIamRoleArn(const Aws::String& value) { m_sinkIamRoleArnHasBeenSet = true; m_sinkIamRoleArn = value; } + inline void SetSinkIamRoleArn(Aws::String&& value) { m_sinkIamRoleArnHasBeenSet = true; m_sinkIamRoleArn = std::move(value); } + inline void SetSinkIamRoleArn(const char* value) { m_sinkIamRoleArnHasBeenSet = true; m_sinkIamRoleArn.assign(value); } + inline MediaCapturePipeline& WithSinkIamRoleArn(const Aws::String& value) { SetSinkIamRoleArn(value); return *this;} + inline MediaCapturePipeline& WithSinkIamRoleArn(Aws::String&& value) { SetSinkIamRoleArn(std::move(value)); return *this;} + inline MediaCapturePipeline& WithSinkIamRoleArn(const char* value) { SetSinkIamRoleArn(value); return *this;} + ///@} private: Aws::String m_mediaPipelineId; @@ -204,6 +234,12 @@ namespace Model ChimeSdkMeetingConfiguration m_chimeSdkMeetingConfiguration; bool m_chimeSdkMeetingConfigurationHasBeenSet = false; + + SseAwsKeyManagementParams m_sseAwsKeyManagementParams; + bool m_sseAwsKeyManagementParamsHasBeenSet = false; + + Aws::String m_sinkIamRoleArn; + bool m_sinkIamRoleArnHasBeenSet = false; }; } // namespace Model diff --git a/generated/src/aws-cpp-sdk-chime-sdk-media-pipelines/include/aws/chime-sdk-media-pipelines/model/SseAwsKeyManagementParams.h b/generated/src/aws-cpp-sdk-chime-sdk-media-pipelines/include/aws/chime-sdk-media-pipelines/model/SseAwsKeyManagementParams.h new file mode 100644 index 00000000000..15f51d508e9 --- /dev/null +++ b/generated/src/aws-cpp-sdk-chime-sdk-media-pipelines/include/aws/chime-sdk-media-pipelines/model/SseAwsKeyManagementParams.h @@ -0,0 +1,106 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +#pragma once +#include +#include +#include + +namespace Aws +{ +namespace Utils +{ +namespace Json +{ + class JsonValue; + class JsonView; +} // namespace Json +} // namespace Utils +namespace ChimeSDKMediaPipelines +{ +namespace Model +{ + + /** + *

Contains server side encryption parameters to be used by media capture + * pipeline. The parameters can also be used by media concatenation pipeline taking + * media capture pipeline as a media source.

See Also:

AWS + * API Reference

+ */ + class SseAwsKeyManagementParams + { + public: + AWS_CHIMESDKMEDIAPIPELINES_API SseAwsKeyManagementParams(); + AWS_CHIMESDKMEDIAPIPELINES_API SseAwsKeyManagementParams(Aws::Utils::Json::JsonView jsonValue); + AWS_CHIMESDKMEDIAPIPELINES_API SseAwsKeyManagementParams& operator=(Aws::Utils::Json::JsonView jsonValue); + AWS_CHIMESDKMEDIAPIPELINES_API Aws::Utils::Json::JsonValue Jsonize() const; + + + ///@{ + /** + *

The KMS key you want to use to encrypt your media pipeline output. Decryption + * is required for concatenation pipeline. If using a key located in the current + * Amazon Web Services account, you can specify your KMS key in one of four + * ways:

  • Use the KMS key ID itself. For example, + * 1234abcd-12ab-34cd-56ef-1234567890ab.

  • Use an + * alias for the KMS key ID. For example, alias/ExampleAlias.

    + *
  • Use the Amazon Resource Name (ARN) for the KMS key ID. For + * example, + * arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab.

    + *
  • Use the ARN for the KMS key alias. For example, + * arn:aws:kms:region:account-ID:alias/ExampleAlias.

+ *

If using a key located in a different Amazon Web Services account than the + * current Amazon Web Services account, you can specify your KMS key in one of two + * ways:

  • Use the ARN for the KMS key ID. For example, + * arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab.

    + *
  • Use the ARN for the KMS key alias. For example, + * arn:aws:kms:region:account-ID:alias/ExampleAlias.

+ *

If you don't specify an encryption key, your output is encrypted with the + * default Amazon S3 key (SSE-S3).

Note that the role specified in the + * SinkIamRoleArn request parameter must have permission to use the + * specified KMS key.

+ */ + inline const Aws::String& GetAwsKmsKeyId() const{ return m_awsKmsKeyId; } + inline bool AwsKmsKeyIdHasBeenSet() const { return m_awsKmsKeyIdHasBeenSet; } + inline void SetAwsKmsKeyId(const Aws::String& value) { m_awsKmsKeyIdHasBeenSet = true; m_awsKmsKeyId = value; } + inline void SetAwsKmsKeyId(Aws::String&& value) { m_awsKmsKeyIdHasBeenSet = true; m_awsKmsKeyId = std::move(value); } + inline void SetAwsKmsKeyId(const char* value) { m_awsKmsKeyIdHasBeenSet = true; m_awsKmsKeyId.assign(value); } + inline SseAwsKeyManagementParams& WithAwsKmsKeyId(const Aws::String& value) { SetAwsKmsKeyId(value); return *this;} + inline SseAwsKeyManagementParams& WithAwsKmsKeyId(Aws::String&& value) { SetAwsKmsKeyId(std::move(value)); return *this;} + inline SseAwsKeyManagementParams& WithAwsKmsKeyId(const char* value) { SetAwsKmsKeyId(value); return *this;} + ///@} + + ///@{ + /** + *

Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption + * context as non-secret key-value pair known as encryption context pairs, that + * provides an added layer of security for your data. For more information, see KMS + * encryption context and Asymmetric + * keys in KMS in the Key Management Service Developer Guide.

+ */ + inline const Aws::String& GetAwsKmsEncryptionContext() const{ return m_awsKmsEncryptionContext; } + inline bool AwsKmsEncryptionContextHasBeenSet() const { return m_awsKmsEncryptionContextHasBeenSet; } + inline void SetAwsKmsEncryptionContext(const Aws::String& value) { m_awsKmsEncryptionContextHasBeenSet = true; m_awsKmsEncryptionContext = value; } + inline void SetAwsKmsEncryptionContext(Aws::String&& value) { m_awsKmsEncryptionContextHasBeenSet = true; m_awsKmsEncryptionContext = std::move(value); } + inline void SetAwsKmsEncryptionContext(const char* value) { m_awsKmsEncryptionContextHasBeenSet = true; m_awsKmsEncryptionContext.assign(value); } + inline SseAwsKeyManagementParams& WithAwsKmsEncryptionContext(const Aws::String& value) { SetAwsKmsEncryptionContext(value); return *this;} + inline SseAwsKeyManagementParams& WithAwsKmsEncryptionContext(Aws::String&& value) { SetAwsKmsEncryptionContext(std::move(value)); return *this;} + inline SseAwsKeyManagementParams& WithAwsKmsEncryptionContext(const char* value) { SetAwsKmsEncryptionContext(value); return *this;} + ///@} + private: + + Aws::String m_awsKmsKeyId; + bool m_awsKmsKeyIdHasBeenSet = false; + + Aws::String m_awsKmsEncryptionContext; + bool m_awsKmsEncryptionContextHasBeenSet = false; + }; + +} // namespace Model +} // namespace ChimeSDKMediaPipelines +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-chime-sdk-media-pipelines/source/model/CreateMediaCapturePipelineRequest.cpp b/generated/src/aws-cpp-sdk-chime-sdk-media-pipelines/source/model/CreateMediaCapturePipelineRequest.cpp index 4a33f0f1a82..21cfc57d844 100644 --- a/generated/src/aws-cpp-sdk-chime-sdk-media-pipelines/source/model/CreateMediaCapturePipelineRequest.cpp +++ b/generated/src/aws-cpp-sdk-chime-sdk-media-pipelines/source/model/CreateMediaCapturePipelineRequest.cpp @@ -22,6 +22,8 @@ CreateMediaCapturePipelineRequest::CreateMediaCapturePipelineRequest() : m_clientRequestToken(Aws::Utils::UUID::PseudoRandomUUID()), 
m_clientRequestTokenHasBeenSet(true), m_chimeSdkMeetingConfigurationHasBeenSet(false), + m_sseAwsKeyManagementParamsHasBeenSet(false), + m_sinkIamRoleArnHasBeenSet(false), m_tagsHasBeenSet(false) { } @@ -64,6 +66,18 @@ Aws::String CreateMediaCapturePipelineRequest::SerializePayload() const } + if(m_sseAwsKeyManagementParamsHasBeenSet) + { + payload.WithObject("SseAwsKeyManagementParams", m_sseAwsKeyManagementParams.Jsonize()); + + } + + if(m_sinkIamRoleArnHasBeenSet) + { + payload.WithString("SinkIamRoleArn", m_sinkIamRoleArn); + + } + if(m_tagsHasBeenSet) { Aws::Utils::Array tagsJsonList(m_tags.size()); diff --git a/generated/src/aws-cpp-sdk-chime-sdk-media-pipelines/source/model/MediaCapturePipeline.cpp b/generated/src/aws-cpp-sdk-chime-sdk-media-pipelines/source/model/MediaCapturePipeline.cpp index bbb78c8aff3..1ce3a59aa62 100644 --- a/generated/src/aws-cpp-sdk-chime-sdk-media-pipelines/source/model/MediaCapturePipeline.cpp +++ b/generated/src/aws-cpp-sdk-chime-sdk-media-pipelines/source/model/MediaCapturePipeline.cpp @@ -31,7 +31,9 @@ MediaCapturePipeline::MediaCapturePipeline() : m_sinkArnHasBeenSet(false), m_createdTimestampHasBeenSet(false), m_updatedTimestampHasBeenSet(false), - m_chimeSdkMeetingConfigurationHasBeenSet(false) + m_chimeSdkMeetingConfigurationHasBeenSet(false), + m_sseAwsKeyManagementParamsHasBeenSet(false), + m_sinkIamRoleArnHasBeenSet(false) { } @@ -113,6 +115,20 @@ MediaCapturePipeline& MediaCapturePipeline::operator =(JsonView jsonValue) m_chimeSdkMeetingConfigurationHasBeenSet = true; } + if(jsonValue.ValueExists("SseAwsKeyManagementParams")) + { + m_sseAwsKeyManagementParams = jsonValue.GetObject("SseAwsKeyManagementParams"); + + m_sseAwsKeyManagementParamsHasBeenSet = true; + } + + if(jsonValue.ValueExists("SinkIamRoleArn")) + { + m_sinkIamRoleArn = jsonValue.GetString("SinkIamRoleArn"); + + m_sinkIamRoleArnHasBeenSet = true; + } + return *this; } @@ -175,6 +191,18 @@ JsonValue MediaCapturePipeline::Jsonize() const } + 
if(m_sseAwsKeyManagementParamsHasBeenSet) + { + payload.WithObject("SseAwsKeyManagementParams", m_sseAwsKeyManagementParams.Jsonize()); + + } + + if(m_sinkIamRoleArnHasBeenSet) + { + payload.WithString("SinkIamRoleArn", m_sinkIamRoleArn); + + } + return payload; } diff --git a/generated/src/aws-cpp-sdk-chime-sdk-media-pipelines/source/model/SseAwsKeyManagementParams.cpp b/generated/src/aws-cpp-sdk-chime-sdk-media-pipelines/source/model/SseAwsKeyManagementParams.cpp new file mode 100644 index 00000000000..e7e44ae8899 --- /dev/null +++ b/generated/src/aws-cpp-sdk-chime-sdk-media-pipelines/source/model/SseAwsKeyManagementParams.cpp @@ -0,0 +1,73 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#include +#include + +#include + +using namespace Aws::Utils::Json; +using namespace Aws::Utils; + +namespace Aws +{ +namespace ChimeSDKMediaPipelines +{ +namespace Model +{ + +SseAwsKeyManagementParams::SseAwsKeyManagementParams() : + m_awsKmsKeyIdHasBeenSet(false), + m_awsKmsEncryptionContextHasBeenSet(false) +{ +} + +SseAwsKeyManagementParams::SseAwsKeyManagementParams(JsonView jsonValue) + : SseAwsKeyManagementParams() +{ + *this = jsonValue; +} + +SseAwsKeyManagementParams& SseAwsKeyManagementParams::operator =(JsonView jsonValue) +{ + if(jsonValue.ValueExists("AwsKmsKeyId")) + { + m_awsKmsKeyId = jsonValue.GetString("AwsKmsKeyId"); + + m_awsKmsKeyIdHasBeenSet = true; + } + + if(jsonValue.ValueExists("AwsKmsEncryptionContext")) + { + m_awsKmsEncryptionContext = jsonValue.GetString("AwsKmsEncryptionContext"); + + m_awsKmsEncryptionContextHasBeenSet = true; + } + + return *this; +} + +JsonValue SseAwsKeyManagementParams::Jsonize() const +{ + JsonValue payload; + + if(m_awsKmsKeyIdHasBeenSet) + { + payload.WithString("AwsKmsKeyId", m_awsKmsKeyId); + + } + + if(m_awsKmsEncryptionContextHasBeenSet) + { + payload.WithString("AwsKmsEncryptionContext", m_awsKmsEncryptionContext); + + } + + return 
payload; +} + +} // namespace Model +} // namespace ChimeSDKMediaPipelines +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-controlcatalog/include/aws/controlcatalog/model/ControlParameter.h b/generated/src/aws-cpp-sdk-controlcatalog/include/aws/controlcatalog/model/ControlParameter.h new file mode 100644 index 00000000000..b065fdc9176 --- /dev/null +++ b/generated/src/aws-cpp-sdk-controlcatalog/include/aws/controlcatalog/model/ControlParameter.h @@ -0,0 +1,83 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include +#include +#include + +namespace Aws +{ +namespace Utils +{ +namespace Json +{ + class JsonValue; + class JsonView; +} // namespace Json +} // namespace Utils +namespace ControlCatalog +{ +namespace Model +{ + + /** + *

Four types of control parameters are supported.

  • + * AllowedRegions: List of Amazon Web Services Regions exempted from the + * control. Each string is expected to be an Amazon Web Services Region code. This + * parameter is mandatory for the OU Region deny control, + * CT.MULTISERVICE.PV.1.

    Example: + * ["us-east-1","us-west-2"]

  • + * ExemptedActions: List of Amazon Web Services IAM actions exempted from + * the control. Each string is expected to be an IAM action.

    Example: + * ["logs:DescribeLogGroups","logs:StartQuery","logs:GetQueryResults"] + *

  • ExemptedPrincipalArns: List of Amazon Web Services + * IAM principal ARNs exempted from the control. Each string is expected to be an + * IAM principal that follows the pattern + * ^arn:(aws|aws-us-gov):(iam|sts)::.+:.+$

    Example: + * ["arn:aws:iam::*:role/ReadOnly","arn:aws:sts::*:assumed-role/ReadOnly/ *"] + *

  • ExemptedResourceArns: List of resource ARNs exempted + * from the control. Each string is expected to be a resource ARN.

    Example: + * ["arn:aws:s3:::my-bucket-name"]

See + * Also:

AWS + * API Reference

+ */ + class ControlParameter + { + public: + AWS_CONTROLCATALOG_API ControlParameter(); + AWS_CONTROLCATALOG_API ControlParameter(Aws::Utils::Json::JsonView jsonValue); + AWS_CONTROLCATALOG_API ControlParameter& operator=(Aws::Utils::Json::JsonView jsonValue); + AWS_CONTROLCATALOG_API Aws::Utils::Json::JsonValue Jsonize() const; + + + ///@{ + /** + *

The parameter name. This name is the parameter key when you call + * + * EnableControl or + * UpdateEnabledControl .

+ */ + inline const Aws::String& GetName() const{ return m_name; } + inline bool NameHasBeenSet() const { return m_nameHasBeenSet; } + inline void SetName(const Aws::String& value) { m_nameHasBeenSet = true; m_name = value; } + inline void SetName(Aws::String&& value) { m_nameHasBeenSet = true; m_name = std::move(value); } + inline void SetName(const char* value) { m_nameHasBeenSet = true; m_name.assign(value); } + inline ControlParameter& WithName(const Aws::String& value) { SetName(value); return *this;} + inline ControlParameter& WithName(Aws::String&& value) { SetName(std::move(value)); return *this;} + inline ControlParameter& WithName(const char* value) { SetName(value); return *this;} + ///@} + private: + + Aws::String m_name; + bool m_nameHasBeenSet = false; + }; + +} // namespace Model +} // namespace ControlCatalog +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-controlcatalog/include/aws/controlcatalog/model/GetControlResult.h b/generated/src/aws-cpp-sdk-controlcatalog/include/aws/controlcatalog/model/GetControlResult.h index 68d3f0c30d3..2d45fee2ed6 100644 --- a/generated/src/aws-cpp-sdk-controlcatalog/include/aws/controlcatalog/model/GetControlResult.h +++ b/generated/src/aws-cpp-sdk-controlcatalog/include/aws/controlcatalog/model/GetControlResult.h @@ -8,6 +8,9 @@ #include #include #include +#include +#include +#include #include namespace Aws @@ -76,7 +79,7 @@ namespace Model ///@{ /** *

A term that identifies the control's functional behavior. One of - * Preventive, Deteictive, Proactive

+ * Preventive, Detective, Proactive

*/ inline const ControlBehavior& GetBehavior() const{ return m_behavior; } inline void SetBehavior(const ControlBehavior& value) { m_behavior = value; } @@ -94,6 +97,34 @@ namespace Model inline GetControlResult& WithRegionConfiguration(RegionConfiguration&& value) { SetRegionConfiguration(std::move(value)); return *this;} ///@} + ///@{ + /** + *

Returns information about the control, as an + * ImplementationDetails object that shows the underlying + * implementation type for a control.

+ */ + inline const ImplementationDetails& GetImplementation() const{ return m_implementation; } + inline void SetImplementation(const ImplementationDetails& value) { m_implementation = value; } + inline void SetImplementation(ImplementationDetails&& value) { m_implementation = std::move(value); } + inline GetControlResult& WithImplementation(const ImplementationDetails& value) { SetImplementation(value); return *this;} + inline GetControlResult& WithImplementation(ImplementationDetails&& value) { SetImplementation(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *

Returns an array of ControlParameter objects that specify the + * parameters a control supports. An empty list is returned for controls that don’t + * support parameters.

+ */ + inline const Aws::Vector& GetParameters() const{ return m_parameters; } + inline void SetParameters(const Aws::Vector& value) { m_parameters = value; } + inline void SetParameters(Aws::Vector&& value) { m_parameters = std::move(value); } + inline GetControlResult& WithParameters(const Aws::Vector& value) { SetParameters(value); return *this;} + inline GetControlResult& WithParameters(Aws::Vector&& value) { SetParameters(std::move(value)); return *this;} + inline GetControlResult& AddParameters(const ControlParameter& value) { m_parameters.push_back(value); return *this; } + inline GetControlResult& AddParameters(ControlParameter&& value) { m_parameters.push_back(std::move(value)); return *this; } + ///@} + ///@{ inline const Aws::String& GetRequestId() const{ return m_requestId; } @@ -116,6 +147,10 @@ namespace Model RegionConfiguration m_regionConfiguration; + ImplementationDetails m_implementation; + + Aws::Vector m_parameters; + Aws::String m_requestId; }; diff --git a/generated/src/aws-cpp-sdk-controlcatalog/include/aws/controlcatalog/model/ImplementationDetails.h b/generated/src/aws-cpp-sdk-controlcatalog/include/aws/controlcatalog/model/ImplementationDetails.h new file mode 100644 index 00000000000..ea499c4e5bb --- /dev/null +++ b/generated/src/aws-cpp-sdk-controlcatalog/include/aws/controlcatalog/model/ImplementationDetails.h @@ -0,0 +1,77 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include +#include +#include + +namespace Aws +{ +namespace Utils +{ +namespace Json +{ + class JsonValue; + class JsonView; +} // namespace Json +} // namespace Utils +namespace ControlCatalog +{ +namespace Model +{ + + /** + *

An object that describes the implementation type for a control.

Our + * ImplementationDetails Type format has three required + * segments:

  • + * SERVICE-PROVIDER::SERVICE-NAME::RESOURCE-NAME

+ *

For example, AWS::Config::ConfigRule or + * AWS::SecurityHub::SecurityControl resources have the format with + * three required segments.

Our ImplementationDetails + * Type format has an optional fourth segment, which is present for + * applicable implementation types. The format is as follows:

  • + * SERVICE-PROVIDER::SERVICE-NAME::RESOURCE-NAME::RESOURCE-TYPE-DESCRIPTION + *

For example, + * AWS::Organizations::Policy::SERVICE_CONTROL_POLICY or + * AWS::CloudFormation::Type::HOOK have the format with four + * segments.

Although the format is similar, the values for the + * Type field do not match any Amazon Web Services CloudFormation + * values, and we do not use CloudFormation to implement these + * controls.

See Also:

AWS + * API Reference

+ */ + class ImplementationDetails + { + public: + AWS_CONTROLCATALOG_API ImplementationDetails(); + AWS_CONTROLCATALOG_API ImplementationDetails(Aws::Utils::Json::JsonView jsonValue); + AWS_CONTROLCATALOG_API ImplementationDetails& operator=(Aws::Utils::Json::JsonView jsonValue); + AWS_CONTROLCATALOG_API Aws::Utils::Json::JsonValue Jsonize() const; + + + ///@{ + /** + *

A string that describes a control's implementation type.

+ */ + inline const Aws::String& GetType() const{ return m_type; } + inline bool TypeHasBeenSet() const { return m_typeHasBeenSet; } + inline void SetType(const Aws::String& value) { m_typeHasBeenSet = true; m_type = value; } + inline void SetType(Aws::String&& value) { m_typeHasBeenSet = true; m_type = std::move(value); } + inline void SetType(const char* value) { m_typeHasBeenSet = true; m_type.assign(value); } + inline ImplementationDetails& WithType(const Aws::String& value) { SetType(value); return *this;} + inline ImplementationDetails& WithType(Aws::String&& value) { SetType(std::move(value)); return *this;} + inline ImplementationDetails& WithType(const char* value) { SetType(value); return *this;} + ///@} + private: + + Aws::String m_type; + bool m_typeHasBeenSet = false; + }; + +} // namespace Model +} // namespace ControlCatalog +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-controlcatalog/include/aws/controlcatalog/model/RegionConfiguration.h b/generated/src/aws-cpp-sdk-controlcatalog/include/aws/controlcatalog/model/RegionConfiguration.h index e11e4d744b3..540625ea041 100644 --- a/generated/src/aws-cpp-sdk-controlcatalog/include/aws/controlcatalog/model/RegionConfiguration.h +++ b/generated/src/aws-cpp-sdk-controlcatalog/include/aws/controlcatalog/model/RegionConfiguration.h @@ -28,7 +28,9 @@ namespace Model /** *

Returns information about the control, including the scope of the control, if * enabled, and the Regions in which the control currently is available for - * deployment.

If you are applying controls through an Amazon Web Services + * deployment. For more information about scope, see Global + * services.

If you are applying controls through an Amazon Web Services * Control Tower landing zone environment, remember that the values returned in the * RegionConfiguration API operation are not related to the governed * Regions in your landing zone. For example, if you are governing Regions diff --git a/generated/src/aws-cpp-sdk-controlcatalog/source/model/ControlParameter.cpp b/generated/src/aws-cpp-sdk-controlcatalog/source/model/ControlParameter.cpp new file mode 100644 index 00000000000..df39ad17c16 --- /dev/null +++ b/generated/src/aws-cpp-sdk-controlcatalog/source/model/ControlParameter.cpp @@ -0,0 +1,59 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#include +#include + +#include + +using namespace Aws::Utils::Json; +using namespace Aws::Utils; + +namespace Aws +{ +namespace ControlCatalog +{ +namespace Model +{ + +ControlParameter::ControlParameter() : + m_nameHasBeenSet(false) +{ +} + +ControlParameter::ControlParameter(JsonView jsonValue) + : ControlParameter() +{ + *this = jsonValue; +} + +ControlParameter& ControlParameter::operator =(JsonView jsonValue) +{ + if(jsonValue.ValueExists("Name")) + { + m_name = jsonValue.GetString("Name"); + + m_nameHasBeenSet = true; + } + + return *this; +} + +JsonValue ControlParameter::Jsonize() const +{ + JsonValue payload; + + if(m_nameHasBeenSet) + { + payload.WithString("Name", m_name); + + } + + return payload; +} + +} // namespace Model +} // namespace ControlCatalog +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-controlcatalog/source/model/GetControlResult.cpp b/generated/src/aws-cpp-sdk-controlcatalog/source/model/GetControlResult.cpp index 75ecc4b1e6e..3f2f6ec254a 100644 --- a/generated/src/aws-cpp-sdk-controlcatalog/source/model/GetControlResult.cpp +++ b/generated/src/aws-cpp-sdk-controlcatalog/source/model/GetControlResult.cpp @@ -61,6 +61,21 @@ GetControlResult& GetControlResult::operator =(const 
Aws::AmazonWebServiceResult } + if(jsonValue.ValueExists("Implementation")) + { + m_implementation = jsonValue.GetObject("Implementation"); + + } + + if(jsonValue.ValueExists("Parameters")) + { + Aws::Utils::Array parametersJsonList = jsonValue.GetArray("Parameters"); + for(unsigned parametersIndex = 0; parametersIndex < parametersJsonList.GetLength(); ++parametersIndex) + { + m_parameters.push_back(parametersJsonList[parametersIndex].AsObject()); + } + } + const auto& headers = result.GetHeaderValueCollection(); const auto& requestIdIter = headers.find("x-amzn-requestid"); diff --git a/generated/src/aws-cpp-sdk-controlcatalog/source/model/ImplementationDetails.cpp b/generated/src/aws-cpp-sdk-controlcatalog/source/model/ImplementationDetails.cpp new file mode 100644 index 00000000000..838d666757a --- /dev/null +++ b/generated/src/aws-cpp-sdk-controlcatalog/source/model/ImplementationDetails.cpp @@ -0,0 +1,59 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +#include +#include + +#include + +using namespace Aws::Utils::Json; +using namespace Aws::Utils; + +namespace Aws +{ +namespace ControlCatalog +{ +namespace Model +{ + +ImplementationDetails::ImplementationDetails() : + m_typeHasBeenSet(false) +{ +} + +ImplementationDetails::ImplementationDetails(JsonView jsonValue) + : ImplementationDetails() +{ + *this = jsonValue; +} + +ImplementationDetails& ImplementationDetails::operator =(JsonView jsonValue) +{ + if(jsonValue.ValueExists("Type")) + { + m_type = jsonValue.GetString("Type"); + + m_typeHasBeenSet = true; + } + + return *this; +} + +JsonValue ImplementationDetails::Jsonize() const +{ + JsonValue payload; + + if(m_typeHasBeenSet) + { + payload.WithString("Type", m_type); + + } + + return payload; +} + +} // namespace Model +} // namespace ControlCatalog +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-eks/include/aws/eks/model/Issue.h b/generated/src/aws-cpp-sdk-eks/include/aws/eks/model/Issue.h index b4e09049b41..c0a385e89c6 100644 --- a/generated/src/aws-cpp-sdk-eks/include/aws/eks/model/Issue.h +++ b/generated/src/aws-cpp-sdk-eks/include/aws/eks/model/Issue.h @@ -53,15 +53,19 @@ namespace Model * ClusterUnreachable: Amazon EKS or one or more of your managed nodes is * unable to to communicate with your Kubernetes cluster API server. This can * happen if there are network disruptions or if API servers are timing out - * processing requests.

  • Ec2LaunchTemplateNotFound: We - * couldn't find the Amazon EC2 launch template for your managed node group. You - * may be able to recreate a launch template with the same settings to recover.

    - *
  • Ec2LaunchTemplateVersionMismatch: The Amazon EC2 launch - * template version for your managed node group does not match the version that - * Amazon EKS created. You may be able to revert to the version that Amazon EKS - * created to recover.

  • Ec2SecurityGroupDeletionFailure: - * We could not delete the remote access security group for your managed node - * group. Remove any dependencies from the security group.

  • + * processing requests.

  • Ec2InstanceTypeDoesNotExist: One + * or more of the supplied Amazon EC2 instance types do not exist. Amazon EKS + * checked for the instance types that you provided in this Amazon Web Services + * Region, and one or more aren't available.

  • + * Ec2LaunchTemplateNotFound: We couldn't find the Amazon EC2 launch + * template for your managed node group. You may be able to recreate a launch + * template with the same settings to recover.

  • + * Ec2LaunchTemplateVersionMismatch: The Amazon EC2 launch template version + * for your managed node group does not match the version that Amazon EKS created. + * You may be able to revert to the version that Amazon EKS created to recover.

    + *
  • Ec2SecurityGroupDeletionFailure: We could not delete the + * remote access security group for your managed node group. Remove any + * dependencies from the security group.

  • * Ec2SecurityGroupNotFound: We couldn't find the cluster security group for * the cluster. You must recreate your cluster.

  • * Ec2SubnetInvalidConfiguration: One or more Amazon EC2 subnets specified diff --git a/generated/src/aws-cpp-sdk-eks/include/aws/eks/model/NodegroupIssueCode.h b/generated/src/aws-cpp-sdk-eks/include/aws/eks/model/NodegroupIssueCode.h index c2161ecf545..cae8064953f 100644 --- a/generated/src/aws-cpp-sdk-eks/include/aws/eks/model/NodegroupIssueCode.h +++ b/generated/src/aws-cpp-sdk-eks/include/aws/eks/model/NodegroupIssueCode.h @@ -50,7 +50,8 @@ namespace Model Unknown, AutoScalingGroupInstanceRefreshActive, KubernetesLabelInvalid, - Ec2LaunchTemplateVersionMaxLimitExceeded + Ec2LaunchTemplateVersionMaxLimitExceeded, + Ec2InstanceTypeDoesNotExist }; namespace NodegroupIssueCodeMapper diff --git a/generated/src/aws-cpp-sdk-eks/source/model/NodegroupIssueCode.cpp b/generated/src/aws-cpp-sdk-eks/source/model/NodegroupIssueCode.cpp index 5d449446295..845cc7f36dc 100644 --- a/generated/src/aws-cpp-sdk-eks/source/model/NodegroupIssueCode.cpp +++ b/generated/src/aws-cpp-sdk-eks/source/model/NodegroupIssueCode.cpp @@ -55,6 +55,7 @@ namespace Aws static const int AutoScalingGroupInstanceRefreshActive_HASH = HashingUtils::HashString("AutoScalingGroupInstanceRefreshActive"); static const int KubernetesLabelInvalid_HASH = HashingUtils::HashString("KubernetesLabelInvalid"); static const int Ec2LaunchTemplateVersionMaxLimitExceeded_HASH = HashingUtils::HashString("Ec2LaunchTemplateVersionMaxLimitExceeded"); + static const int Ec2InstanceTypeDoesNotExist_HASH = HashingUtils::HashString("Ec2InstanceTypeDoesNotExist"); NodegroupIssueCode GetNodegroupIssueCodeForName(const Aws::String& name) @@ -200,6 +201,10 @@ namespace Aws { return NodegroupIssueCode::Ec2LaunchTemplateVersionMaxLimitExceeded; } + else if (hashCode == Ec2InstanceTypeDoesNotExist_HASH) + { + return NodegroupIssueCode::Ec2InstanceTypeDoesNotExist; + } EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer(); if(overflowContainer) { @@ -286,6 +291,8 @@ namespace Aws return 
"KubernetesLabelInvalid"; case NodegroupIssueCode::Ec2LaunchTemplateVersionMaxLimitExceeded: return "Ec2LaunchTemplateVersionMaxLimitExceeded"; + case NodegroupIssueCode::Ec2InstanceTypeDoesNotExist: + return "Ec2InstanceTypeDoesNotExist"; default: EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer(); if(overflowContainer) diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/FirehoseClient.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/FirehoseClient.h index 24e5342e2f9..6ae0b218085 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/FirehoseClient.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/FirehoseClient.h @@ -82,30 +82,29 @@ namespace Firehose virtual ~FirehoseClient(); /** - *

    Creates a Firehose delivery stream.

    By default, you can create up to - * 50 delivery streams per Amazon Web Services Region.

    This is an - * asynchronous operation that immediately returns. The initial status of the - * delivery stream is CREATING. After the delivery stream is created, - * its status is ACTIVE and it now accepts data. If the delivery - * stream creation fails, the status transitions to CREATING_FAILED. - * Attempts to send data to a delivery stream that is not in the - * ACTIVE state cause an exception. To check the state of a delivery - * stream, use DescribeDeliveryStream.

    If the status of a delivery - * stream is CREATING_FAILED, this status doesn't change, and you - * can't invoke CreateDeliveryStream again on it. However, you can - * invoke the DeleteDeliveryStream operation to delete it.

    A Firehose - * delivery stream can be configured to receive records directly from providers - * using PutRecord or PutRecordBatch, or it can be configured to use - * an existing Kinesis stream as its source. To specify a Kinesis data stream as - * input, set the DeliveryStreamType parameter to - * KinesisStreamAsSource, and provide the Kinesis stream Amazon - * Resource Name (ARN) and role ARN in the + *

    Creates a Firehose stream.

    By default, you can create up to 50 + * Firehose streams per Amazon Web Services Region.

    This is an asynchronous + * operation that immediately returns. The initial status of the Firehose stream is + * CREATING. After the Firehose stream is created, its status is + * ACTIVE and it now accepts data. If the Firehose stream creation + * fails, the status transitions to CREATING_FAILED. Attempts to send + * data to a delivery stream that is not in the ACTIVE state cause an + * exception. To check the state of a Firehose stream, use + * DescribeDeliveryStream.

    If the status of a Firehose stream is + * CREATING_FAILED, this status doesn't change, and you can't invoke + * CreateDeliveryStream again on it. However, you can invoke the + * DeleteDeliveryStream operation to delete it.

    A Firehose stream can + * be configured to receive records directly from providers using PutRecord + * or PutRecordBatch, or it can be configured to use an existing Kinesis + * stream as its source. To specify a Kinesis data stream as input, set the + * DeliveryStreamType parameter to KinesisStreamAsSource, + * and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the * KinesisStreamSourceConfiguration parameter.

    To create a - * delivery stream with server-side encryption (SSE) enabled, include + * Firehose stream with server-side encryption (SSE) enabled, include * DeliveryStreamEncryptionConfigurationInput in your request. This is * optional. You can also invoke StartDeliveryStreamEncryption to turn on - * SSE for an existing delivery stream that doesn't have SSE enabled.

    A - * delivery stream is configured with a single destination, such as Amazon Simple + * SSE for an existing Firehose stream that doesn't have SSE enabled.

    A + * Firehose stream is configured with a single destination, such as Amazon Simple * Storage Service (Amazon S3), Amazon Redshift, Amazon OpenSearch Service, Amazon * OpenSearch Serverless, Splunk, and any custom HTTP endpoint or HTTP endpoints * owned by or supported by third-party service providers, including Datadog, @@ -168,19 +167,19 @@ namespace Firehose } /** - *

    Deletes a delivery stream and its data.

    You can delete a delivery + *

    Deletes a Firehose stream and its data.

    You can delete a Firehose * stream only if it is in one of the following states: ACTIVE, * DELETING, CREATING_FAILED, or - * DELETING_FAILED. You can't delete a delivery stream that is in the - * CREATING state. To check the state of a delivery stream, use + * DELETING_FAILED. You can't delete a Firehose stream that is in the + * CREATING state. To check the state of a Firehose stream, use * DescribeDeliveryStream.

    DeleteDeliveryStream is an asynchronous - * API. When an API request to DeleteDeliveryStream succeeds, the delivery stream + * API. When an API request to DeleteDeliveryStream succeeds, the Firehose stream * is marked for deletion, and it goes into the DELETING state.While - * the delivery stream is in the DELETING state, the service might + * the Firehose stream is in the DELETING state, the service might * continue to accept records, but it doesn't make any guarantees with respect to * delivering the data. Therefore, as a best practice, first stop any applications - * that are sending records before you delete a delivery stream.

    Removal of - * a delivery stream that is in the DELETING state is a low priority + * that are sending records before you delete a Firehose stream.

    Removal of + * a Firehose stream that is in the DELETING state is a low priority * operation for the service. A stream may remain in the DELETING * state for several minutes. Therefore, as a best practice, applications should * not wait for streams in the DELETING state to be removed. @@ -209,10 +208,10 @@ namespace Firehose } /** - *

    Describes the specified delivery stream and its status. For example, after - * your delivery stream is created, call DescribeDeliveryStream to see - * whether the delivery stream is ACTIVE and therefore ready for data - * to be sent to it.

    If the status of a delivery stream is + *

    Describes the specified Firehose stream and its status. For example, after + * your Firehose stream is created, call DescribeDeliveryStream to see + * whether the Firehose stream is ACTIVE and therefore ready for data + * to be sent to it.

    If the status of a Firehose stream is * CREATING_FAILED, this status doesn't change, and you can't invoke * CreateDeliveryStream again on it. However, you can invoke the * DeleteDeliveryStream operation to delete it. If the status is @@ -244,15 +243,15 @@ namespace Firehose } /** - *

    Lists your delivery streams in alphabetical order of their names.

    The - * number of delivery streams might be too large to return using a single call to - * ListDeliveryStreams. You can limit the number of delivery streams + *

    Lists your Firehose streams in alphabetical order of their names.

    The + * number of Firehose streams might be too large to return using a single call to + * ListDeliveryStreams. You can limit the number of Firehose streams * returned, using the Limit parameter. To determine whether there are * more delivery streams to list, check the value of - * HasMoreDeliveryStreams in the output. If there are more delivery + * HasMoreDeliveryStreams in the output. If there are more Firehose * streams to list, you can request them by calling this operation again and * setting the ExclusiveStartDeliveryStreamName parameter to the name - * of the last delivery stream returned in the last call.

    See Also:

    + * of the last Firehose stream returned in the last call.

    See Also:

    * AWS * API Reference

    @@ -278,7 +277,7 @@ namespace Firehose } /** - *

    Lists the tags for the specified delivery stream. This operation has a limit + *

    Lists the tags for the specified Firehose stream. This operation has a limit * of five transactions per second per account.

    See Also:

    AWS * API Reference

    @@ -304,44 +303,49 @@ namespace Firehose } /** - *

    Writes a single data record into an Amazon Firehose delivery stream. To write - * multiple data records into a delivery stream, use PutRecordBatch. - * Applications using these operations are referred to as producers.

    By - * default, each delivery stream can take in up to 2,000 transactions per second, - * 5,000 records per second, or 5 MB per second. If you use PutRecord and + *

    Writes a single data record into an Firehose stream. To write multiple data + * records into a Firehose stream, use PutRecordBatch. Applications using + * these operations are referred to as producers.

    By default, each Firehose + * stream can take in up to 2,000 transactions per second, 5,000 records per + * second, or 5 MB per second. If you use PutRecord and * PutRecordBatch, the limits are an aggregate across these two operations - * for each delivery stream. For more information about limits and how to request + * for each Firehose stream. For more information about limits and how to request * an increase, see Amazon * Firehose Limits.

    Firehose accumulates and publishes a particular * metric for a customer account in one minute intervals. It is possible that the - * bursts of incoming bytes/records ingested to a delivery stream last only for a + * bursts of incoming bytes/records ingested to a Firehose stream last only for a * few seconds. Due to this, the actual spikes in the traffic might not be fully * visible in the customer's 1 minute CloudWatch metrics.

    You must specify - * the name of the delivery stream and the data record when using PutRecord. + * the name of the Firehose stream and the data record when using PutRecord. * The data record consists of a data blob that can be up to 1,000 KiB in size, and * any kind of data. For example, it can be a segment from a log file, geographic - * location data, website clickstream data, and so on.

    Firehose buffers - * records before delivering them to the destination. To disambiguate the data - * blobs at the destination, a common solution is to use delimiters in the data, - * such as a newline (\n) or some other character unique within the - * data. This allows the consumer application to parse individual data items when - * reading the data from the destination.

    The PutRecord - * operation returns a RecordId, which is a unique string assigned to - * each record. Producer applications can use this ID for purposes such as - * auditability and investigation.

    If the PutRecord operation - * throws a ServiceUnavailableException, the API is automatically - * reinvoked (retried) 3 times. If the exception persists, it is possible that the - * throughput limits have been exceeded for the delivery stream.

    - *

    Re-invoking the Put API operations (for example, PutRecord and - * PutRecordBatch) can result in data duplicates. For larger data assets, allow for - * a longer time out before retrying Put API operations.

    Data records sent - * to Firehose are stored for 24 hours from the time they are added to a delivery - * stream as it tries to send the records to the destination. If the destination is - * unreachable for more than 24 hours, the data is no longer available.

    - *

    Don't concatenate two or more base64 strings to form the data - * fields of your records. Instead, concatenate the raw data, then perform base64 - * encoding.

    See Also:

    For multi record + * de-aggregation, you can not put more than 500 records even if the data blob + * length is less than 1000 KiB. If you include more than 500 records, the request + * succeeds but the record de-aggregation doesn't work as expected and + * transformation lambda is invoked with the complete base64 encoded data blob + * instead of de-aggregated base64 decoded records.

    Firehose buffers records + * before delivering them to the destination. To disambiguate the data blobs at the + * destination, a common solution is to use delimiters in the data, such as a + * newline (\n) or some other character unique within the data. This + * allows the consumer application to parse individual data items when reading the + * data from the destination.

    The PutRecord operation returns a + * RecordId, which is a unique string assigned to each record. + * Producer applications can use this ID for purposes such as auditability and + * investigation.

    If the PutRecord operation throws a + * ServiceUnavailableException, the API is automatically reinvoked + * (retried) 3 times. If the exception persists, it is possible that the throughput + * limits have been exceeded for the Firehose stream.

    Re-invoking the Put + * API operations (for example, PutRecord and PutRecordBatch) can result in data + * duplicates. For larger data assets, allow for a longer time out before retrying + * Put API operations.

    Data records sent to Firehose are stored for 24 hours + * from the time they are added to a Firehose stream as it tries to send the + * records to the destination. If the destination is unreachable for more than 24 + * hours, the data is no longer available.

    Don't concatenate two + * or more base64 strings to form the data fields of your records. Instead, + * concatenate the raw data, then perform base64 encoding.

    + *

    See Also:

    AWS * API Reference

    */ @@ -366,13 +370,13 @@ namespace Firehose } /** - *

    Writes multiple data records into a delivery stream in a single call, which + *

    Writes multiple data records into a Firehose stream in a single call, which * can achieve higher throughput per producer than when writing single records. To - * write single data records into a delivery stream, use PutRecord. + * write single data records into a Firehose stream, use PutRecord. * Applications using these operations are referred to as producers.

    *

    Firehose accumulates and publishes a particular metric for a customer account * in one minute intervals. It is possible that the bursts of incoming - * bytes/records ingested to a delivery stream last only for a few seconds. Due to + * bytes/records ingested to a Firehose stream last only for a few seconds. Due to * this, the actual spikes in the traffic might not be fully visible in the * customer's 1 minute CloudWatch metrics.

    For information about service * quota, see .

    Each PutRecordBatch request supports up to 500 * records. Each record in the request can be as large as 1,000 KB (before base64 * encoding), up to a limit of 4 MB for the entire request. These limits cannot be - * changed.

    You must specify the name of the delivery stream and the data + * changed.

    You must specify the name of the Firehose stream and the data * record when using PutRecord. The data record consists of a data blob that * can be up to 1,000 KB in size, and any kind of data. For example, it could be a * segment from a log file, geographic location data, website clickstream data, and - * so on.

    Firehose buffers records before delivering them to the - * destination. To disambiguate the data blobs at the destination, a common - * solution is to use delimiters in the data, such as a newline (\n) - * or some other character unique within the data. This allows the consumer - * application to parse individual data items when reading the data from the - * destination.

    The PutRecordBatch response includes a count of - * failed records, FailedPutCount, and an array of responses, + * so on.

    For multi record de-aggregation, you can not put more than 500 + * records even if the data blob length is less than 1000 KiB. If you include more + * than 500 records, the request succeeds but the record de-aggregation doesn't + * work as expected and transformation lambda is invoked with the complete base64 + * encoded data blob instead of de-aggregated base64 decoded records.

    + *

    Firehose buffers records before delivering them to the destination. To + * disambiguate the data blobs at the destination, a common solution is to use + * delimiters in the data, such as a newline (\n) or some other + * character unique within the data. This allows the consumer application to parse + * individual data items when reading the data from the destination.

    The + * PutRecordBatch response includes a count of failed records, + * FailedPutCount, and an array of responses, * RequestResponses. Even if the PutRecordBatch call succeeds, * the value of FailedPutCount may be greater than 0, indicating that * there are records for which the operation didn't succeed. Each entry in the @@ -416,11 +425,11 @@ namespace Firehose * handle any duplicates at the destination.

    If PutRecordBatch throws * ServiceUnavailableException, the API is automatically reinvoked * (retried) 3 times. If the exception persists, it is possible that the throughput - * limits have been exceeded for the delivery stream.

    Re-invoking the Put + * limits have been exceeded for the Firehose stream.

    Re-invoking the Put * API operations (for example, PutRecord and PutRecordBatch) can result in data * duplicates. For larger data assets, allow for a longer time out before retrying * Put API operations.

    Data records sent to Firehose are stored for 24 hours - * from the time they are added to a delivery stream as it attempts to send the + * from the time they are added to a Firehose stream as it attempts to send the * records to the destination. If the destination is unreachable for more than 24 * hours, the data is no longer available.

    Don't concatenate two * or more base64 strings to form the data fields of your records. Instead, @@ -450,21 +459,21 @@ namespace Firehose } /** - *

    Enables server-side encryption (SSE) for the delivery stream.

    This + *

    Enables server-side encryption (SSE) for the Firehose stream.

    This * operation is asynchronous. It returns immediately. When you invoke it, Firehose * first sets the encryption status of the stream to ENABLING, and - * then to ENABLED. The encryption status of a delivery stream is the + * then to ENABLED. The encryption status of a Firehose stream is the * Status property in DeliveryStreamEncryptionConfiguration. If * the operation fails, the encryption status changes to * ENABLING_FAILED. You can continue to read and write data to your - * delivery stream while the encryption status is ENABLING, but the + * Firehose stream while the encryption status is ENABLING, but the * data is not encrypted. It can take up to 5 seconds after the encryption status - * changes to ENABLED before all records written to the delivery + * changes to ENABLED before all records written to the Firehose * stream are encrypted. To find out whether a record or a batch of records was * encrypted, check the response elements PutRecordOutput$Encrypted and * PutRecordBatchOutput$Encrypted, respectively.

    To check the - * encryption status of a delivery stream, use DescribeDeliveryStream.

    - *

    Even if encryption is currently enabled for a delivery stream, you can still + * encryption status of a Firehose stream, use DescribeDeliveryStream.

    + *

    Even if encryption is currently enabled for a Firehose stream, you can still * invoke this operation on it to change the ARN of the CMK or both its type and * ARN. If you invoke this method to change the CMK, and the old CMK is of type * CUSTOMER_MANAGED_CMK, Firehose schedules the grant it had on the @@ -474,21 +483,21 @@ namespace Firehose * the KMS grant creation to be successful, the Firehose API operations * StartDeliveryStreamEncryption and CreateDeliveryStream * should not be called with session credentials that are more than 6 hours - * old.

    If a delivery stream already has encryption enabled and then you + * old.

    If a Firehose stream already has encryption enabled and then you * invoke this operation to change the ARN of the CMK or both its type and ARN and * you get ENABLING_FAILED, this only means that the attempt to change * the CMK failed. In this case, encryption remains enabled with the old CMK.

    - *

    If the encryption status of your delivery stream is + *

    If the encryption status of your Firehose stream is * ENABLING_FAILED, you can invoke this operation again with a valid * CMK. The CMK must be enabled and the key policy mustn't explicitly deny the * permission for Firehose to invoke KMS encrypt and decrypt operations.

    You - * can enable SSE for a delivery stream only if it's a delivery stream that uses + * can enable SSE for a Firehose stream only if it's a Firehose stream that uses * DirectPut as its source.

    The * StartDeliveryStreamEncryption and * StopDeliveryStreamEncryption operations have a combined limit of 25 - * calls per delivery stream per 24 hours. For example, you reach the limit if you + * calls per Firehose stream per 24 hours. For example, you reach the limit if you * call StartDeliveryStreamEncryption 13 times and - * StopDeliveryStreamEncryption 12 times for the same delivery stream + * StopDeliveryStreamEncryption 12 times for the same Firehose stream * in a 24-hour period.

    See Also:

    AWS * API Reference

    @@ -514,26 +523,26 @@ namespace Firehose } /** - *

    Disables server-side encryption (SSE) for the delivery stream.

    This + *

    Disables server-side encryption (SSE) for the Firehose stream.

    This * operation is asynchronous. It returns immediately. When you invoke it, Firehose * first sets the encryption status of the stream to DISABLING, and * then to DISABLED. You can continue to read and write data to your * stream while its status is DISABLING. It can take up to 5 seconds * after the encryption status changes to DISABLED before all records - * written to the delivery stream are no longer subject to encryption. To find out + * written to the Firehose stream are no longer subject to encryption. To find out * whether a record or a batch of records was encrypted, check the response * elements PutRecordOutput$Encrypted and * PutRecordBatchOutput$Encrypted, respectively.

    To check the - * encryption state of a delivery stream, use DescribeDeliveryStream.

    + * encryption state of a Firehose stream, use DescribeDeliveryStream.

    *

    If SSE is enabled using a customer managed CMK and then you invoke * StopDeliveryStreamEncryption, Firehose schedules the related KMS * grant for retirement and then retires it after it ensures that it is finished * delivering records to the destination.

    The * StartDeliveryStreamEncryption and * StopDeliveryStreamEncryption operations have a combined limit of 25 - * calls per delivery stream per 24 hours. For example, you reach the limit if you + * calls per Firehose stream per 24 hours. For example, you reach the limit if you * call StartDeliveryStreamEncryption 13 times and - * StopDeliveryStreamEncryption 12 times for the same delivery stream + * StopDeliveryStreamEncryption 12 times for the same Firehose stream * in a 24-hour period.

    See Also:

    AWS * API Reference

    @@ -559,15 +568,15 @@ namespace Firehose } /** - *

    Adds or updates tags for the specified delivery stream. A tag is a key-value + *

    Adds or updates tags for the specified Firehose stream. A tag is a key-value * pair that you can define and assign to Amazon Web Services resources. If you * specify a tag that already exists, the tag value is replaced with the value that * you specify in the request. Tags are metadata. For example, you can add friendly * names and descriptions or other types of information that can help you - * distinguish the delivery stream. For more information about tags, see Using * Cost Allocation Tags in the Amazon Web Services Billing and Cost - * Management User Guide.

    Each delivery stream can have up to 50 tags. + * Management User Guide.

    Each Firehose stream can have up to 50 tags. *

    This operation has a limit of five transactions per second per account. *

    See Also:

    AWS @@ -594,7 +603,7 @@ namespace Firehose } /** - *

    Removes tags from the specified delivery stream. Removed tags are deleted, + *

    Removes tags from the specified Firehose stream. Removed tags are deleted, * and you can't recover them after this operation successfully completes.

    *

    If you specify a tag that doesn't exist, the operation ignores it.

    *

    This operation has a limit of five transactions per second per account. @@ -623,13 +632,13 @@ namespace Firehose } /** - *

    Updates the specified destination of the specified delivery stream.

    + *

    Updates the specified destination of the specified Firehose stream.

    *

    Use this operation to change the destination type (for example, to replace * the Amazon S3 destination with Amazon Redshift) or change the parameters * associated with a destination (for example, to change the bucket name of the * Amazon S3 destination). The update might not occur immediately. The target - * delivery stream remains active while the configurations are updated, so data - * writes to the delivery stream can continue during this process. The updated + * Firehose stream remains active while the configurations are updated, so data + * writes to the Firehose stream can continue during this process. The updated * configurations are usually effective within a few minutes.

    Switching * between Amazon OpenSearch Service and other services is not supported. For an * Amazon OpenSearch Service destination, you can only update to another Amazon diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/AmazonOpenSearchServerlessBufferingHints.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/AmazonOpenSearchServerlessBufferingHints.h index 7a2ca9587f9..55526e93088 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/AmazonOpenSearchServerlessBufferingHints.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/AmazonOpenSearchServerlessBufferingHints.h @@ -53,7 +53,7 @@ namespace Model *

    Buffer incoming data to the specified size, in MBs, before delivering it to * the destination. The default value is 5.

    We recommend setting this * parameter to a value greater than the amount of data you typically ingest into - * the delivery stream in 10 seconds. For example, if you typically ingest data at + * the Firehose stream in 10 seconds. For example, if you typically ingest data at * 1 MB/sec, the value should be 10 MB or higher.

    */ inline int GetSizeInMBs() const{ return m_sizeInMBs; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/AmazonopensearchserviceBufferingHints.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/AmazonopensearchserviceBufferingHints.h index 6459fb69a97..58ce0a75bc9 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/AmazonopensearchserviceBufferingHints.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/AmazonopensearchserviceBufferingHints.h @@ -52,7 +52,7 @@ namespace Model *

    Buffer incoming data to the specified size, in MBs, before delivering it to * the destination. The default value is 5.

    We recommend setting this * parameter to a value greater than the amount of data you typically ingest into - * the delivery stream in 10 seconds. For example, if you typically ingest data at + * the Firehose stream in 10 seconds. For example, if you typically ingest data at * 1 MB/sec, the value should be 10 MB or higher.

    */ inline int GetSizeInMBs() const{ return m_sizeInMBs; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/AmazonopensearchserviceDestinationUpdate.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/AmazonopensearchserviceDestinationUpdate.h index ec786166bfb..a389ab741f9 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/AmazonopensearchserviceDestinationUpdate.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/AmazonopensearchserviceDestinationUpdate.h @@ -111,9 +111,9 @@ namespace Model *

    The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can be * only one type per index. If you try to specify a new type for an existing index * that already has another type, Firehose returns an error during runtime.

    - *

    If you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery + *

    If you upgrade Elasticsearch from 6.x to 7.x and don’t update your Firehose * stream, Firehose still delivers data to Elasticsearch with the old index name - * and type name. If you want to update your delivery stream with a new index name, + * and type name. If you want to update your Firehose stream with a new index name, * provide an empty string for TypeName.

    */ inline const Aws::String& GetTypeName() const{ return m_typeName; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/BufferingHints.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/BufferingHints.h index 792cbe039ef..5797eef376e 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/BufferingHints.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/BufferingHints.h @@ -47,7 +47,7 @@ namespace Model * specify a value for it, you must also specify a value for * IntervalInSeconds, and vice versa.

    We recommend setting this * parameter to a value greater than the amount of data you typically ingest into - * the delivery stream in 10 seconds. For example, if you typically ingest data at + * the Firehose stream in 10 seconds. For example, if you typically ingest data at * 1 MiB/sec, the value should be 10 MiB or higher.

    */ inline int GetSizeInMBs() const{ return m_sizeInMBs; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/CatalogConfiguration.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/CatalogConfiguration.h index 7f6cc3cc57c..6caf511c807 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/CatalogConfiguration.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/CatalogConfiguration.h @@ -25,8 +25,7 @@ namespace Model /** *

    Describes the containers where the destination Apache Iceberg Tables are - * persisted.

    Amazon Data Firehose is in preview release and is subject to - * change.

    See Also:

    See Also:

    AWS * API Reference

    */ @@ -41,10 +40,9 @@ namespace Model ///@{ /** - *

    Specifies the Glue catalog ARN indentifier of the destination Apache Iceberg + *

    Specifies the Glue catalog ARN identifier of the destination Apache Iceberg * Tables. You must specify the ARN in the format - * arn:aws:glue:region:account-id:catalog.

    Amazon Data - * Firehose is in preview release and is subject to change.

    + * arn:aws:glue:region:account-id:catalog.

    */ inline const Aws::String& GetCatalogARN() const{ return m_catalogARN; } inline bool CatalogARNHasBeenSet() const { return m_catalogARNHasBeenSet; } @@ -55,10 +53,28 @@ namespace Model inline CatalogConfiguration& WithCatalogARN(Aws::String&& value) { SetCatalogARN(std::move(value)); return *this;} inline CatalogConfiguration& WithCatalogARN(const char* value) { SetCatalogARN(value); return *this;} ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const Aws::String& GetWarehouseLocation() const{ return m_warehouseLocation; } + inline bool WarehouseLocationHasBeenSet() const { return m_warehouseLocationHasBeenSet; } + inline void SetWarehouseLocation(const Aws::String& value) { m_warehouseLocationHasBeenSet = true; m_warehouseLocation = value; } + inline void SetWarehouseLocation(Aws::String&& value) { m_warehouseLocationHasBeenSet = true; m_warehouseLocation = std::move(value); } + inline void SetWarehouseLocation(const char* value) { m_warehouseLocationHasBeenSet = true; m_warehouseLocation.assign(value); } + inline CatalogConfiguration& WithWarehouseLocation(const Aws::String& value) { SetWarehouseLocation(value); return *this;} + inline CatalogConfiguration& WithWarehouseLocation(Aws::String&& value) { SetWarehouseLocation(std::move(value)); return *this;} + inline CatalogConfiguration& WithWarehouseLocation(const char* value) { SetWarehouseLocation(value); return *this;} + ///@} private: Aws::String m_catalogARN; bool m_catalogARNHasBeenSet = false; + + Aws::String m_warehouseLocation; + bool m_warehouseLocationHasBeenSet = false; }; } // namespace Model diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/CloudWatchLoggingOptions.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/CloudWatchLoggingOptions.h index 45f97a08067..f29c9c19bcf 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/CloudWatchLoggingOptions.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/CloudWatchLoggingOptions.h @@ -24,7 +24,7 @@ namespace Model { /** - *

    Describes the Amazon CloudWatch logging options for your delivery + *

    Describes the Amazon CloudWatch logging options for your Firehose * stream.

    See Also:

    AWS * API Reference

    diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/CreateDeliveryStreamRequest.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/CreateDeliveryStreamRequest.h index 4e0f27f5989..3d0c003ff45 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/CreateDeliveryStreamRequest.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/CreateDeliveryStreamRequest.h @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -51,9 +52,9 @@ namespace Model ///@{ /** - *

    The name of the delivery stream. This name must be unique per Amazon Web - * Services account in the same Amazon Web Services Region. If the delivery streams - * are in different accounts or different Regions, you can have multiple delivery + *

    The name of the Firehose stream. This name must be unique per Amazon Web + * Services account in the same Amazon Web Services Region. If the Firehose streams + * are in different accounts or different Regions, you can have multiple Firehose * streams with the same name.

    */ inline const Aws::String& GetDeliveryStreamName() const{ return m_deliveryStreamName; } @@ -68,10 +69,10 @@ namespace Model ///@{ /** - *

    The delivery stream type. This parameter can be one of the following + *

    The Firehose stream type. This parameter can be one of the following * values:

    • DirectPut: Provider applications access - * the delivery stream directly.

    • - * KinesisStreamAsSource: The delivery stream uses a Kinesis data + * the Firehose stream directly.

    • + * KinesisStreamAsSource: The Firehose stream uses a Kinesis data * stream as a source.

    */ inline const DeliveryStreamType& GetDeliveryStreamType() const{ return m_deliveryStreamType; } @@ -84,7 +85,7 @@ namespace Model ///@{ /** - *

    When a Kinesis data stream is used as the source for the delivery stream, a + *

    When a Kinesis data stream is used as the source for the Firehose stream, a * KinesisStreamSourceConfiguration containing the Kinesis data stream * Amazon Resource Name (ARN) and the role ARN for the source stream.

    */ @@ -185,19 +186,19 @@ namespace Model ///@{ /** - *

    A set of tags to assign to the delivery stream. A tag is a key-value pair + *

    A set of tags to assign to the Firehose stream. A tag is a key-value pair * that you can define and assign to Amazon Web Services resources. Tags are * metadata. For example, you can add friendly names and descriptions or other - * types of information that can help you distinguish the delivery stream. For more + * types of information that can help you distinguish the Firehose stream. For more * information about tags, see Using * Cost Allocation Tags in the Amazon Web Services Billing and Cost Management - * User Guide.

    You can specify up to 50 tags when creating a delivery + * User Guide.

    You can specify up to 50 tags when creating a Firehose * stream.

    If you specify tags in the CreateDeliveryStream * action, Amazon Data Firehose performs an additional authorization on the * firehose:TagDeliveryStream action to verify if users have * permissions to create tags. If you do not provide this permission, requests to - * create new Firehose delivery streams with IAM resource tags will fail with an + * create new Firehose streams with IAM resource tags will fail with an * AccessDeniedException such as following.

    * AccessDeniedException

    User: arn:aws:sts::x:assumed-role/x/x is * not authorized to perform: firehose:TagDeliveryStream on resource: @@ -253,8 +254,7 @@ namespace Model ///@{ /** - *

    Configure Apache Iceberg Tables destination.

    Amazon Data Firehose is - * in preview release and is subject to change.

    + *

    Configure Apache Iceberg Tables destination.

    */ inline const IcebergDestinationConfiguration& GetIcebergDestinationConfiguration() const{ return m_icebergDestinationConfiguration; } inline bool IcebergDestinationConfigurationHasBeenSet() const { return m_icebergDestinationConfigurationHasBeenSet; } @@ -263,6 +263,19 @@ namespace Model inline CreateDeliveryStreamRequest& WithIcebergDestinationConfiguration(const IcebergDestinationConfiguration& value) { SetIcebergDestinationConfiguration(value); return *this;} inline CreateDeliveryStreamRequest& WithIcebergDestinationConfiguration(IcebergDestinationConfiguration&& value) { SetIcebergDestinationConfiguration(std::move(value)); return *this;} ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const DatabaseSourceConfiguration& GetDatabaseSourceConfiguration() const{ return m_databaseSourceConfiguration; } + inline bool DatabaseSourceConfigurationHasBeenSet() const { return m_databaseSourceConfigurationHasBeenSet; } + inline void SetDatabaseSourceConfiguration(const DatabaseSourceConfiguration& value) { m_databaseSourceConfigurationHasBeenSet = true; m_databaseSourceConfiguration = value; } + inline void SetDatabaseSourceConfiguration(DatabaseSourceConfiguration&& value) { m_databaseSourceConfigurationHasBeenSet = true; m_databaseSourceConfiguration = std::move(value); } + inline CreateDeliveryStreamRequest& WithDatabaseSourceConfiguration(const DatabaseSourceConfiguration& value) { SetDatabaseSourceConfiguration(value); return *this;} + inline CreateDeliveryStreamRequest& WithDatabaseSourceConfiguration(DatabaseSourceConfiguration&& value) { SetDatabaseSourceConfiguration(std::move(value)); return *this;} + ///@} private: Aws::String m_deliveryStreamName; @@ -309,6 +322,9 @@ namespace Model IcebergDestinationConfiguration m_icebergDestinationConfiguration; bool m_icebergDestinationConfigurationHasBeenSet = false; + + DatabaseSourceConfiguration m_databaseSourceConfiguration; + bool m_databaseSourceConfigurationHasBeenSet = false; }; } // namespace Model diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/CreateDeliveryStreamResult.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/CreateDeliveryStreamResult.h index 6139320ff9a..04b87262df9 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/CreateDeliveryStreamResult.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/CreateDeliveryStreamResult.h @@ -34,7 +34,7 @@ namespace Model ///@{ /** - *

    The ARN of the delivery stream.

    + *

    The ARN of the Firehose stream.

    */ inline const Aws::String& GetDeliveryStreamARN() const{ return m_deliveryStreamARN; } inline void SetDeliveryStreamARN(const Aws::String& value) { m_deliveryStreamARN = value; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DatabaseColumnList.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DatabaseColumnList.h new file mode 100644 index 00000000000..8a6dde0c9fb --- /dev/null +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DatabaseColumnList.h @@ -0,0 +1,84 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include +#include +#include +#include + +namespace Aws +{ +namespace Utils +{ +namespace Json +{ + class JsonValue; + class JsonView; +} // namespace Json +} // namespace Utils +namespace Firehose +{ +namespace Model +{ + + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    See Also:

    AWS + * API Reference

    + */ + class DatabaseColumnList + { + public: + AWS_FIREHOSE_API DatabaseColumnList(); + AWS_FIREHOSE_API DatabaseColumnList(Aws::Utils::Json::JsonView jsonValue); + AWS_FIREHOSE_API DatabaseColumnList& operator=(Aws::Utils::Json::JsonView jsonValue); + AWS_FIREHOSE_API Aws::Utils::Json::JsonValue Jsonize() const; + + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const Aws::Vector& GetInclude() const{ return m_include; } + inline bool IncludeHasBeenSet() const { return m_includeHasBeenSet; } + inline void SetInclude(const Aws::Vector& value) { m_includeHasBeenSet = true; m_include = value; } + inline void SetInclude(Aws::Vector&& value) { m_includeHasBeenSet = true; m_include = std::move(value); } + inline DatabaseColumnList& WithInclude(const Aws::Vector& value) { SetInclude(value); return *this;} + inline DatabaseColumnList& WithInclude(Aws::Vector&& value) { SetInclude(std::move(value)); return *this;} + inline DatabaseColumnList& AddInclude(const Aws::String& value) { m_includeHasBeenSet = true; m_include.push_back(value); return *this; } + inline DatabaseColumnList& AddInclude(Aws::String&& value) { m_includeHasBeenSet = true; m_include.push_back(std::move(value)); return *this; } + inline DatabaseColumnList& AddInclude(const char* value) { m_includeHasBeenSet = true; m_include.push_back(value); return *this; } + ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const Aws::Vector& GetExclude() const{ return m_exclude; } + inline bool ExcludeHasBeenSet() const { return m_excludeHasBeenSet; } + inline void SetExclude(const Aws::Vector& value) { m_excludeHasBeenSet = true; m_exclude = value; } + inline void SetExclude(Aws::Vector&& value) { m_excludeHasBeenSet = true; m_exclude = std::move(value); } + inline DatabaseColumnList& WithExclude(const Aws::Vector& value) { SetExclude(value); return *this;} + inline DatabaseColumnList& WithExclude(Aws::Vector&& value) { SetExclude(std::move(value)); return *this;} + inline DatabaseColumnList& AddExclude(const Aws::String& value) { m_excludeHasBeenSet = true; m_exclude.push_back(value); return *this; } + inline DatabaseColumnList& AddExclude(Aws::String&& value) { m_excludeHasBeenSet = true; m_exclude.push_back(std::move(value)); return *this; } + inline DatabaseColumnList& AddExclude(const char* value) { m_excludeHasBeenSet = true; m_exclude.push_back(value); return *this; } + ///@} + private: + + Aws::Vector m_include; + bool m_includeHasBeenSet = false; + + Aws::Vector m_exclude; + bool m_excludeHasBeenSet = false; + }; + +} // namespace Model +} // namespace Firehose +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DatabaseList.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DatabaseList.h new file mode 100644 index 00000000000..966bbd2aa38 --- /dev/null +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DatabaseList.h @@ -0,0 +1,84 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include +#include +#include +#include + +namespace Aws +{ +namespace Utils +{ +namespace Json +{ + class JsonValue; + class JsonView; +} // namespace Json +} // namespace Utils +namespace Firehose +{ +namespace Model +{ + + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    See Also:

    AWS + * API Reference

    + */ + class DatabaseList + { + public: + AWS_FIREHOSE_API DatabaseList(); + AWS_FIREHOSE_API DatabaseList(Aws::Utils::Json::JsonView jsonValue); + AWS_FIREHOSE_API DatabaseList& operator=(Aws::Utils::Json::JsonView jsonValue); + AWS_FIREHOSE_API Aws::Utils::Json::JsonValue Jsonize() const; + + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const Aws::Vector& GetInclude() const{ return m_include; } + inline bool IncludeHasBeenSet() const { return m_includeHasBeenSet; } + inline void SetInclude(const Aws::Vector& value) { m_includeHasBeenSet = true; m_include = value; } + inline void SetInclude(Aws::Vector&& value) { m_includeHasBeenSet = true; m_include = std::move(value); } + inline DatabaseList& WithInclude(const Aws::Vector& value) { SetInclude(value); return *this;} + inline DatabaseList& WithInclude(Aws::Vector&& value) { SetInclude(std::move(value)); return *this;} + inline DatabaseList& AddInclude(const Aws::String& value) { m_includeHasBeenSet = true; m_include.push_back(value); return *this; } + inline DatabaseList& AddInclude(Aws::String&& value) { m_includeHasBeenSet = true; m_include.push_back(std::move(value)); return *this; } + inline DatabaseList& AddInclude(const char* value) { m_includeHasBeenSet = true; m_include.push_back(value); return *this; } + ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const Aws::Vector& GetExclude() const{ return m_exclude; } + inline bool ExcludeHasBeenSet() const { return m_excludeHasBeenSet; } + inline void SetExclude(const Aws::Vector& value) { m_excludeHasBeenSet = true; m_exclude = value; } + inline void SetExclude(Aws::Vector&& value) { m_excludeHasBeenSet = true; m_exclude = std::move(value); } + inline DatabaseList& WithExclude(const Aws::Vector& value) { SetExclude(value); return *this;} + inline DatabaseList& WithExclude(Aws::Vector&& value) { SetExclude(std::move(value)); return *this;} + inline DatabaseList& AddExclude(const Aws::String& value) { m_excludeHasBeenSet = true; m_exclude.push_back(value); return *this; } + inline DatabaseList& AddExclude(Aws::String&& value) { m_excludeHasBeenSet = true; m_exclude.push_back(std::move(value)); return *this; } + inline DatabaseList& AddExclude(const char* value) { m_excludeHasBeenSet = true; m_exclude.push_back(value); return *this; } + ///@} + private: + + Aws::Vector m_include; + bool m_includeHasBeenSet = false; + + Aws::Vector m_exclude; + bool m_excludeHasBeenSet = false; + }; + +} // namespace Model +} // namespace Firehose +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DatabaseSnapshotInfo.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DatabaseSnapshotInfo.h new file mode 100644 index 00000000000..6cade216f28 --- /dev/null +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DatabaseSnapshotInfo.h @@ -0,0 +1,146 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include +#include +#include +#include +#include +#include +#include + +namespace Aws +{ +namespace Utils +{ +namespace Json +{ + class JsonValue; + class JsonView; +} // namespace Json +} // namespace Utils +namespace Firehose +{ +namespace Model +{ + + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    See Also:

    AWS + * API Reference

    + */ + class DatabaseSnapshotInfo + { + public: + AWS_FIREHOSE_API DatabaseSnapshotInfo(); + AWS_FIREHOSE_API DatabaseSnapshotInfo(Aws::Utils::Json::JsonView jsonValue); + AWS_FIREHOSE_API DatabaseSnapshotInfo& operator=(Aws::Utils::Json::JsonView jsonValue); + AWS_FIREHOSE_API Aws::Utils::Json::JsonValue Jsonize() const; + + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const Aws::String& GetId() const{ return m_id; } + inline bool IdHasBeenSet() const { return m_idHasBeenSet; } + inline void SetId(const Aws::String& value) { m_idHasBeenSet = true; m_id = value; } + inline void SetId(Aws::String&& value) { m_idHasBeenSet = true; m_id = std::move(value); } + inline void SetId(const char* value) { m_idHasBeenSet = true; m_id.assign(value); } + inline DatabaseSnapshotInfo& WithId(const Aws::String& value) { SetId(value); return *this;} + inline DatabaseSnapshotInfo& WithId(Aws::String&& value) { SetId(std::move(value)); return *this;} + inline DatabaseSnapshotInfo& WithId(const char* value) { SetId(value); return *this;} + ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const Aws::String& GetTable() const{ return m_table; } + inline bool TableHasBeenSet() const { return m_tableHasBeenSet; } + inline void SetTable(const Aws::String& value) { m_tableHasBeenSet = true; m_table = value; } + inline void SetTable(Aws::String&& value) { m_tableHasBeenSet = true; m_table = std::move(value); } + inline void SetTable(const char* value) { m_tableHasBeenSet = true; m_table.assign(value); } + inline DatabaseSnapshotInfo& WithTable(const Aws::String& value) { SetTable(value); return *this;} + inline DatabaseSnapshotInfo& WithTable(Aws::String&& value) { SetTable(std::move(value)); return *this;} + inline DatabaseSnapshotInfo& WithTable(const char* value) { SetTable(value); return *this;} + ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const Aws::Utils::DateTime& GetRequestTimestamp() const{ return m_requestTimestamp; } + inline bool RequestTimestampHasBeenSet() const { return m_requestTimestampHasBeenSet; } + inline void SetRequestTimestamp(const Aws::Utils::DateTime& value) { m_requestTimestampHasBeenSet = true; m_requestTimestamp = value; } + inline void SetRequestTimestamp(Aws::Utils::DateTime&& value) { m_requestTimestampHasBeenSet = true; m_requestTimestamp = std::move(value); } + inline DatabaseSnapshotInfo& WithRequestTimestamp(const Aws::Utils::DateTime& value) { SetRequestTimestamp(value); return *this;} + inline DatabaseSnapshotInfo& WithRequestTimestamp(Aws::Utils::DateTime&& value) { SetRequestTimestamp(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const SnapshotRequestedBy& GetRequestedBy() const{ return m_requestedBy; } + inline bool RequestedByHasBeenSet() const { return m_requestedByHasBeenSet; } + inline void SetRequestedBy(const SnapshotRequestedBy& value) { m_requestedByHasBeenSet = true; m_requestedBy = value; } + inline void SetRequestedBy(SnapshotRequestedBy&& value) { m_requestedByHasBeenSet = true; m_requestedBy = std::move(value); } + inline DatabaseSnapshotInfo& WithRequestedBy(const SnapshotRequestedBy& value) { SetRequestedBy(value); return *this;} + inline DatabaseSnapshotInfo& WithRequestedBy(SnapshotRequestedBy&& value) { SetRequestedBy(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const SnapshotStatus& GetStatus() const{ return m_status; } + inline bool StatusHasBeenSet() const { return m_statusHasBeenSet; } + inline void SetStatus(const SnapshotStatus& value) { m_statusHasBeenSet = true; m_status = value; } + inline void SetStatus(SnapshotStatus&& value) { m_statusHasBeenSet = true; m_status = std::move(value); } + inline DatabaseSnapshotInfo& WithStatus(const SnapshotStatus& value) { SetStatus(value); return *this;} + inline DatabaseSnapshotInfo& WithStatus(SnapshotStatus&& value) { SetStatus(std::move(value)); return *this;} + ///@} + + ///@{ + + inline const FailureDescription& GetFailureDescription() const{ return m_failureDescription; } + inline bool FailureDescriptionHasBeenSet() const { return m_failureDescriptionHasBeenSet; } + inline void SetFailureDescription(const FailureDescription& value) { m_failureDescriptionHasBeenSet = true; m_failureDescription = value; } + inline void SetFailureDescription(FailureDescription&& value) { m_failureDescriptionHasBeenSet = true; m_failureDescription = std::move(value); } + inline DatabaseSnapshotInfo& WithFailureDescription(const FailureDescription& value) { SetFailureDescription(value); return *this;} + inline DatabaseSnapshotInfo& WithFailureDescription(FailureDescription&& value) { SetFailureDescription(std::move(value)); return *this;} + ///@} + private: + + Aws::String m_id; + bool m_idHasBeenSet = false; + + Aws::String m_table; + bool m_tableHasBeenSet = false; + + Aws::Utils::DateTime m_requestTimestamp; + bool m_requestTimestampHasBeenSet = false; + + SnapshotRequestedBy m_requestedBy; + bool m_requestedByHasBeenSet = false; + + SnapshotStatus m_status; + bool m_statusHasBeenSet = false; + + FailureDescription m_failureDescription; + bool m_failureDescriptionHasBeenSet = false; + }; + +} // namespace Model +} // namespace Firehose +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DatabaseSourceAuthenticationConfiguration.h 
b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DatabaseSourceAuthenticationConfiguration.h new file mode 100644 index 00000000000..08519375d32 --- /dev/null +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DatabaseSourceAuthenticationConfiguration.h @@ -0,0 +1,58 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include +#include +#include + +namespace Aws +{ +namespace Utils +{ +namespace Json +{ + class JsonValue; + class JsonView; +} // namespace Json +} // namespace Utils +namespace Firehose +{ +namespace Model +{ + + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    See Also:

    AWS + * API Reference

    + */ + class DatabaseSourceAuthenticationConfiguration + { + public: + AWS_FIREHOSE_API DatabaseSourceAuthenticationConfiguration(); + AWS_FIREHOSE_API DatabaseSourceAuthenticationConfiguration(Aws::Utils::Json::JsonView jsonValue); + AWS_FIREHOSE_API DatabaseSourceAuthenticationConfiguration& operator=(Aws::Utils::Json::JsonView jsonValue); + AWS_FIREHOSE_API Aws::Utils::Json::JsonValue Jsonize() const; + + + ///@{ + + inline const SecretsManagerConfiguration& GetSecretsManagerConfiguration() const{ return m_secretsManagerConfiguration; } + inline bool SecretsManagerConfigurationHasBeenSet() const { return m_secretsManagerConfigurationHasBeenSet; } + inline void SetSecretsManagerConfiguration(const SecretsManagerConfiguration& value) { m_secretsManagerConfigurationHasBeenSet = true; m_secretsManagerConfiguration = value; } + inline void SetSecretsManagerConfiguration(SecretsManagerConfiguration&& value) { m_secretsManagerConfigurationHasBeenSet = true; m_secretsManagerConfiguration = std::move(value); } + inline DatabaseSourceAuthenticationConfiguration& WithSecretsManagerConfiguration(const SecretsManagerConfiguration& value) { SetSecretsManagerConfiguration(value); return *this;} + inline DatabaseSourceAuthenticationConfiguration& WithSecretsManagerConfiguration(SecretsManagerConfiguration&& value) { SetSecretsManagerConfiguration(std::move(value)); return *this;} + ///@} + private: + + SecretsManagerConfiguration m_secretsManagerConfiguration; + bool m_secretsManagerConfigurationHasBeenSet = false; + }; + +} // namespace Model +} // namespace Firehose +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DatabaseSourceConfiguration.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DatabaseSourceConfiguration.h new file mode 100644 index 00000000000..c16f1b109dc --- /dev/null +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DatabaseSourceConfiguration.h @@ -0,0 +1,234 @@ +/** + * 
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace Aws +{ +namespace Utils +{ +namespace Json +{ + class JsonValue; + class JsonView; +} // namespace Json +} // namespace Utils +namespace Firehose +{ +namespace Model +{ + + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    See Also:

    AWS + * API Reference

    + */ + class DatabaseSourceConfiguration + { + public: + AWS_FIREHOSE_API DatabaseSourceConfiguration(); + AWS_FIREHOSE_API DatabaseSourceConfiguration(Aws::Utils::Json::JsonView jsonValue); + AWS_FIREHOSE_API DatabaseSourceConfiguration& operator=(Aws::Utils::Json::JsonView jsonValue); + AWS_FIREHOSE_API Aws::Utils::Json::JsonValue Jsonize() const; + + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const DatabaseType& GetType() const{ return m_type; } + inline bool TypeHasBeenSet() const { return m_typeHasBeenSet; } + inline void SetType(const DatabaseType& value) { m_typeHasBeenSet = true; m_type = value; } + inline void SetType(DatabaseType&& value) { m_typeHasBeenSet = true; m_type = std::move(value); } + inline DatabaseSourceConfiguration& WithType(const DatabaseType& value) { SetType(value); return *this;} + inline DatabaseSourceConfiguration& WithType(DatabaseType&& value) { SetType(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const Aws::String& GetEndpoint() const{ return m_endpoint; } + inline bool EndpointHasBeenSet() const { return m_endpointHasBeenSet; } + inline void SetEndpoint(const Aws::String& value) { m_endpointHasBeenSet = true; m_endpoint = value; } + inline void SetEndpoint(Aws::String&& value) { m_endpointHasBeenSet = true; m_endpoint = std::move(value); } + inline void SetEndpoint(const char* value) { m_endpointHasBeenSet = true; m_endpoint.assign(value); } + inline DatabaseSourceConfiguration& WithEndpoint(const Aws::String& value) { SetEndpoint(value); return *this;} + inline DatabaseSourceConfiguration& WithEndpoint(Aws::String&& value) { SetEndpoint(std::move(value)); return *this;} + inline DatabaseSourceConfiguration& WithEndpoint(const char* value) { SetEndpoint(value); return *this;} + ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline int GetPort() const{ return m_port; } + inline bool PortHasBeenSet() const { return m_portHasBeenSet; } + inline void SetPort(int value) { m_portHasBeenSet = true; m_port = value; } + inline DatabaseSourceConfiguration& WithPort(int value) { SetPort(value); return *this;} + ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const SSLMode& GetSSLMode() const{ return m_sSLMode; } + inline bool SSLModeHasBeenSet() const { return m_sSLModeHasBeenSet; } + inline void SetSSLMode(const SSLMode& value) { m_sSLModeHasBeenSet = true; m_sSLMode = value; } + inline void SetSSLMode(SSLMode&& value) { m_sSLModeHasBeenSet = true; m_sSLMode = std::move(value); } + inline DatabaseSourceConfiguration& WithSSLMode(const SSLMode& value) { SetSSLMode(value); return *this;} + inline DatabaseSourceConfiguration& WithSSLMode(SSLMode&& value) { SetSSLMode(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const DatabaseList& GetDatabases() const{ return m_databases; } + inline bool DatabasesHasBeenSet() const { return m_databasesHasBeenSet; } + inline void SetDatabases(const DatabaseList& value) { m_databasesHasBeenSet = true; m_databases = value; } + inline void SetDatabases(DatabaseList&& value) { m_databasesHasBeenSet = true; m_databases = std::move(value); } + inline DatabaseSourceConfiguration& WithDatabases(const DatabaseList& value) { SetDatabases(value); return *this;} + inline DatabaseSourceConfiguration& WithDatabases(DatabaseList&& value) { SetDatabases(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const DatabaseTableList& GetTables() const{ return m_tables; } + inline bool TablesHasBeenSet() const { return m_tablesHasBeenSet; } + inline void SetTables(const DatabaseTableList& value) { m_tablesHasBeenSet = true; m_tables = value; } + inline void SetTables(DatabaseTableList&& value) { m_tablesHasBeenSet = true; m_tables = std::move(value); } + inline DatabaseSourceConfiguration& WithTables(const DatabaseTableList& value) { SetTables(value); return *this;} + inline DatabaseSourceConfiguration& WithTables(DatabaseTableList&& value) { SetTables(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const DatabaseColumnList& GetColumns() const{ return m_columns; } + inline bool ColumnsHasBeenSet() const { return m_columnsHasBeenSet; } + inline void SetColumns(const DatabaseColumnList& value) { m_columnsHasBeenSet = true; m_columns = value; } + inline void SetColumns(DatabaseColumnList&& value) { m_columnsHasBeenSet = true; m_columns = std::move(value); } + inline DatabaseSourceConfiguration& WithColumns(const DatabaseColumnList& value) { SetColumns(value); return *this;} + inline DatabaseSourceConfiguration& WithColumns(DatabaseColumnList&& value) { SetColumns(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const Aws::Vector& GetSurrogateKeys() const{ return m_surrogateKeys; } + inline bool SurrogateKeysHasBeenSet() const { return m_surrogateKeysHasBeenSet; } + inline void SetSurrogateKeys(const Aws::Vector& value) { m_surrogateKeysHasBeenSet = true; m_surrogateKeys = value; } + inline void SetSurrogateKeys(Aws::Vector&& value) { m_surrogateKeysHasBeenSet = true; m_surrogateKeys = std::move(value); } + inline DatabaseSourceConfiguration& WithSurrogateKeys(const Aws::Vector& value) { SetSurrogateKeys(value); return *this;} + inline DatabaseSourceConfiguration& WithSurrogateKeys(Aws::Vector&& value) { SetSurrogateKeys(std::move(value)); return *this;} + inline DatabaseSourceConfiguration& AddSurrogateKeys(const Aws::String& value) { m_surrogateKeysHasBeenSet = true; m_surrogateKeys.push_back(value); return *this; } + inline DatabaseSourceConfiguration& AddSurrogateKeys(Aws::String&& value) { m_surrogateKeysHasBeenSet = true; m_surrogateKeys.push_back(std::move(value)); return *this; } + inline DatabaseSourceConfiguration& AddSurrogateKeys(const char* value) { m_surrogateKeysHasBeenSet = true; m_surrogateKeys.push_back(value); return *this; } + ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const Aws::String& GetSnapshotWatermarkTable() const{ return m_snapshotWatermarkTable; } + inline bool SnapshotWatermarkTableHasBeenSet() const { return m_snapshotWatermarkTableHasBeenSet; } + inline void SetSnapshotWatermarkTable(const Aws::String& value) { m_snapshotWatermarkTableHasBeenSet = true; m_snapshotWatermarkTable = value; } + inline void SetSnapshotWatermarkTable(Aws::String&& value) { m_snapshotWatermarkTableHasBeenSet = true; m_snapshotWatermarkTable = std::move(value); } + inline void SetSnapshotWatermarkTable(const char* value) { m_snapshotWatermarkTableHasBeenSet = true; m_snapshotWatermarkTable.assign(value); } + inline DatabaseSourceConfiguration& WithSnapshotWatermarkTable(const Aws::String& value) { SetSnapshotWatermarkTable(value); return *this;} + inline DatabaseSourceConfiguration& WithSnapshotWatermarkTable(Aws::String&& value) { SetSnapshotWatermarkTable(std::move(value)); return *this;} + inline DatabaseSourceConfiguration& WithSnapshotWatermarkTable(const char* value) { SetSnapshotWatermarkTable(value); return *this;} + ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const DatabaseSourceAuthenticationConfiguration& GetDatabaseSourceAuthenticationConfiguration() const{ return m_databaseSourceAuthenticationConfiguration; } + inline bool DatabaseSourceAuthenticationConfigurationHasBeenSet() const { return m_databaseSourceAuthenticationConfigurationHasBeenSet; } + inline void SetDatabaseSourceAuthenticationConfiguration(const DatabaseSourceAuthenticationConfiguration& value) { m_databaseSourceAuthenticationConfigurationHasBeenSet = true; m_databaseSourceAuthenticationConfiguration = value; } + inline void SetDatabaseSourceAuthenticationConfiguration(DatabaseSourceAuthenticationConfiguration&& value) { m_databaseSourceAuthenticationConfigurationHasBeenSet = true; m_databaseSourceAuthenticationConfiguration = std::move(value); } + inline DatabaseSourceConfiguration& WithDatabaseSourceAuthenticationConfiguration(const DatabaseSourceAuthenticationConfiguration& value) { SetDatabaseSourceAuthenticationConfiguration(value); return *this;} + inline DatabaseSourceConfiguration& WithDatabaseSourceAuthenticationConfiguration(DatabaseSourceAuthenticationConfiguration&& value) { SetDatabaseSourceAuthenticationConfiguration(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const DatabaseSourceVPCConfiguration& GetDatabaseSourceVPCConfiguration() const{ return m_databaseSourceVPCConfiguration; } + inline bool DatabaseSourceVPCConfigurationHasBeenSet() const { return m_databaseSourceVPCConfigurationHasBeenSet; } + inline void SetDatabaseSourceVPCConfiguration(const DatabaseSourceVPCConfiguration& value) { m_databaseSourceVPCConfigurationHasBeenSet = true; m_databaseSourceVPCConfiguration = value; } + inline void SetDatabaseSourceVPCConfiguration(DatabaseSourceVPCConfiguration&& value) { m_databaseSourceVPCConfigurationHasBeenSet = true; m_databaseSourceVPCConfiguration = std::move(value); } + inline DatabaseSourceConfiguration& WithDatabaseSourceVPCConfiguration(const DatabaseSourceVPCConfiguration& value) { SetDatabaseSourceVPCConfiguration(value); return *this;} + inline DatabaseSourceConfiguration& WithDatabaseSourceVPCConfiguration(DatabaseSourceVPCConfiguration&& value) { SetDatabaseSourceVPCConfiguration(std::move(value)); return *this;} + ///@} + private: + + DatabaseType m_type; + bool m_typeHasBeenSet = false; + + Aws::String m_endpoint; + bool m_endpointHasBeenSet = false; + + int m_port; + bool m_portHasBeenSet = false; + + SSLMode m_sSLMode; + bool m_sSLModeHasBeenSet = false; + + DatabaseList m_databases; + bool m_databasesHasBeenSet = false; + + DatabaseTableList m_tables; + bool m_tablesHasBeenSet = false; + + DatabaseColumnList m_columns; + bool m_columnsHasBeenSet = false; + + Aws::Vector m_surrogateKeys; + bool m_surrogateKeysHasBeenSet = false; + + Aws::String m_snapshotWatermarkTable; + bool m_snapshotWatermarkTableHasBeenSet = false; + + DatabaseSourceAuthenticationConfiguration m_databaseSourceAuthenticationConfiguration; + bool m_databaseSourceAuthenticationConfigurationHasBeenSet = false; + + DatabaseSourceVPCConfiguration m_databaseSourceVPCConfiguration; + bool m_databaseSourceVPCConfigurationHasBeenSet = false; + }; + +} // namespace Model +} // namespace Firehose +} // namespace Aws diff 
--git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DatabaseSourceDescription.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DatabaseSourceDescription.h new file mode 100644 index 00000000000..e96757f0164 --- /dev/null +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DatabaseSourceDescription.h @@ -0,0 +1,253 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace Aws +{ +namespace Utils +{ +namespace Json +{ + class JsonValue; + class JsonView; +} // namespace Json +} // namespace Utils +namespace Firehose +{ +namespace Model +{ + + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    See Also:

    AWS + * API Reference

    + */ + class DatabaseSourceDescription + { + public: + AWS_FIREHOSE_API DatabaseSourceDescription(); + AWS_FIREHOSE_API DatabaseSourceDescription(Aws::Utils::Json::JsonView jsonValue); + AWS_FIREHOSE_API DatabaseSourceDescription& operator=(Aws::Utils::Json::JsonView jsonValue); + AWS_FIREHOSE_API Aws::Utils::Json::JsonValue Jsonize() const; + + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const DatabaseType& GetType() const{ return m_type; } + inline bool TypeHasBeenSet() const { return m_typeHasBeenSet; } + inline void SetType(const DatabaseType& value) { m_typeHasBeenSet = true; m_type = value; } + inline void SetType(DatabaseType&& value) { m_typeHasBeenSet = true; m_type = std::move(value); } + inline DatabaseSourceDescription& WithType(const DatabaseType& value) { SetType(value); return *this;} + inline DatabaseSourceDescription& WithType(DatabaseType&& value) { SetType(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const Aws::String& GetEndpoint() const{ return m_endpoint; } + inline bool EndpointHasBeenSet() const { return m_endpointHasBeenSet; } + inline void SetEndpoint(const Aws::String& value) { m_endpointHasBeenSet = true; m_endpoint = value; } + inline void SetEndpoint(Aws::String&& value) { m_endpointHasBeenSet = true; m_endpoint = std::move(value); } + inline void SetEndpoint(const char* value) { m_endpointHasBeenSet = true; m_endpoint.assign(value); } + inline DatabaseSourceDescription& WithEndpoint(const Aws::String& value) { SetEndpoint(value); return *this;} + inline DatabaseSourceDescription& WithEndpoint(Aws::String&& value) { SetEndpoint(std::move(value)); return *this;} + inline DatabaseSourceDescription& WithEndpoint(const char* value) { SetEndpoint(value); return *this;} + ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline int GetPort() const{ return m_port; } + inline bool PortHasBeenSet() const { return m_portHasBeenSet; } + inline void SetPort(int value) { m_portHasBeenSet = true; m_port = value; } + inline DatabaseSourceDescription& WithPort(int value) { SetPort(value); return *this;} + ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const SSLMode& GetSSLMode() const{ return m_sSLMode; } + inline bool SSLModeHasBeenSet() const { return m_sSLModeHasBeenSet; } + inline void SetSSLMode(const SSLMode& value) { m_sSLModeHasBeenSet = true; m_sSLMode = value; } + inline void SetSSLMode(SSLMode&& value) { m_sSLModeHasBeenSet = true; m_sSLMode = std::move(value); } + inline DatabaseSourceDescription& WithSSLMode(const SSLMode& value) { SetSSLMode(value); return *this;} + inline DatabaseSourceDescription& WithSSLMode(SSLMode&& value) { SetSSLMode(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const DatabaseList& GetDatabases() const{ return m_databases; } + inline bool DatabasesHasBeenSet() const { return m_databasesHasBeenSet; } + inline void SetDatabases(const DatabaseList& value) { m_databasesHasBeenSet = true; m_databases = value; } + inline void SetDatabases(DatabaseList&& value) { m_databasesHasBeenSet = true; m_databases = std::move(value); } + inline DatabaseSourceDescription& WithDatabases(const DatabaseList& value) { SetDatabases(value); return *this;} + inline DatabaseSourceDescription& WithDatabases(DatabaseList&& value) { SetDatabases(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const DatabaseTableList& GetTables() const{ return m_tables; } + inline bool TablesHasBeenSet() const { return m_tablesHasBeenSet; } + inline void SetTables(const DatabaseTableList& value) { m_tablesHasBeenSet = true; m_tables = value; } + inline void SetTables(DatabaseTableList&& value) { m_tablesHasBeenSet = true; m_tables = std::move(value); } + inline DatabaseSourceDescription& WithTables(const DatabaseTableList& value) { SetTables(value); return *this;} + inline DatabaseSourceDescription& WithTables(DatabaseTableList&& value) { SetTables(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const DatabaseColumnList& GetColumns() const{ return m_columns; } + inline bool ColumnsHasBeenSet() const { return m_columnsHasBeenSet; } + inline void SetColumns(const DatabaseColumnList& value) { m_columnsHasBeenSet = true; m_columns = value; } + inline void SetColumns(DatabaseColumnList&& value) { m_columnsHasBeenSet = true; m_columns = std::move(value); } + inline DatabaseSourceDescription& WithColumns(const DatabaseColumnList& value) { SetColumns(value); return *this;} + inline DatabaseSourceDescription& WithColumns(DatabaseColumnList&& value) { SetColumns(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const Aws::Vector& GetSurrogateKeys() const{ return m_surrogateKeys; } + inline bool SurrogateKeysHasBeenSet() const { return m_surrogateKeysHasBeenSet; } + inline void SetSurrogateKeys(const Aws::Vector& value) { m_surrogateKeysHasBeenSet = true; m_surrogateKeys = value; } + inline void SetSurrogateKeys(Aws::Vector&& value) { m_surrogateKeysHasBeenSet = true; m_surrogateKeys = std::move(value); } + inline DatabaseSourceDescription& WithSurrogateKeys(const Aws::Vector& value) { SetSurrogateKeys(value); return *this;} + inline DatabaseSourceDescription& WithSurrogateKeys(Aws::Vector&& value) { SetSurrogateKeys(std::move(value)); return *this;} + inline DatabaseSourceDescription& AddSurrogateKeys(const Aws::String& value) { m_surrogateKeysHasBeenSet = true; m_surrogateKeys.push_back(value); return *this; } + inline DatabaseSourceDescription& AddSurrogateKeys(Aws::String&& value) { m_surrogateKeysHasBeenSet = true; m_surrogateKeys.push_back(std::move(value)); return *this; } + inline DatabaseSourceDescription& AddSurrogateKeys(const char* value) { m_surrogateKeysHasBeenSet = true; m_surrogateKeys.push_back(value); return *this; } + ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const Aws::String& GetSnapshotWatermarkTable() const{ return m_snapshotWatermarkTable; } + inline bool SnapshotWatermarkTableHasBeenSet() const { return m_snapshotWatermarkTableHasBeenSet; } + inline void SetSnapshotWatermarkTable(const Aws::String& value) { m_snapshotWatermarkTableHasBeenSet = true; m_snapshotWatermarkTable = value; } + inline void SetSnapshotWatermarkTable(Aws::String&& value) { m_snapshotWatermarkTableHasBeenSet = true; m_snapshotWatermarkTable = std::move(value); } + inline void SetSnapshotWatermarkTable(const char* value) { m_snapshotWatermarkTableHasBeenSet = true; m_snapshotWatermarkTable.assign(value); } + inline DatabaseSourceDescription& WithSnapshotWatermarkTable(const Aws::String& value) { SetSnapshotWatermarkTable(value); return *this;} + inline DatabaseSourceDescription& WithSnapshotWatermarkTable(Aws::String&& value) { SetSnapshotWatermarkTable(std::move(value)); return *this;} + inline DatabaseSourceDescription& WithSnapshotWatermarkTable(const char* value) { SetSnapshotWatermarkTable(value); return *this;} + ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const Aws::Vector& GetSnapshotInfo() const{ return m_snapshotInfo; } + inline bool SnapshotInfoHasBeenSet() const { return m_snapshotInfoHasBeenSet; } + inline void SetSnapshotInfo(const Aws::Vector& value) { m_snapshotInfoHasBeenSet = true; m_snapshotInfo = value; } + inline void SetSnapshotInfo(Aws::Vector&& value) { m_snapshotInfoHasBeenSet = true; m_snapshotInfo = std::move(value); } + inline DatabaseSourceDescription& WithSnapshotInfo(const Aws::Vector& value) { SetSnapshotInfo(value); return *this;} + inline DatabaseSourceDescription& WithSnapshotInfo(Aws::Vector&& value) { SetSnapshotInfo(std::move(value)); return *this;} + inline DatabaseSourceDescription& AddSnapshotInfo(const DatabaseSnapshotInfo& value) { m_snapshotInfoHasBeenSet = true; m_snapshotInfo.push_back(value); return *this; } + inline DatabaseSourceDescription& AddSnapshotInfo(DatabaseSnapshotInfo&& value) { m_snapshotInfoHasBeenSet = true; m_snapshotInfo.push_back(std::move(value)); return *this; } + ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const DatabaseSourceAuthenticationConfiguration& GetDatabaseSourceAuthenticationConfiguration() const{ return m_databaseSourceAuthenticationConfiguration; } + inline bool DatabaseSourceAuthenticationConfigurationHasBeenSet() const { return m_databaseSourceAuthenticationConfigurationHasBeenSet; } + inline void SetDatabaseSourceAuthenticationConfiguration(const DatabaseSourceAuthenticationConfiguration& value) { m_databaseSourceAuthenticationConfigurationHasBeenSet = true; m_databaseSourceAuthenticationConfiguration = value; } + inline void SetDatabaseSourceAuthenticationConfiguration(DatabaseSourceAuthenticationConfiguration&& value) { m_databaseSourceAuthenticationConfigurationHasBeenSet = true; m_databaseSourceAuthenticationConfiguration = std::move(value); } + inline DatabaseSourceDescription& WithDatabaseSourceAuthenticationConfiguration(const DatabaseSourceAuthenticationConfiguration& value) { SetDatabaseSourceAuthenticationConfiguration(value); return *this;} + inline DatabaseSourceDescription& WithDatabaseSourceAuthenticationConfiguration(DatabaseSourceAuthenticationConfiguration&& value) { SetDatabaseSourceAuthenticationConfiguration(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const DatabaseSourceVPCConfiguration& GetDatabaseSourceVPCConfiguration() const{ return m_databaseSourceVPCConfiguration; } + inline bool DatabaseSourceVPCConfigurationHasBeenSet() const { return m_databaseSourceVPCConfigurationHasBeenSet; } + inline void SetDatabaseSourceVPCConfiguration(const DatabaseSourceVPCConfiguration& value) { m_databaseSourceVPCConfigurationHasBeenSet = true; m_databaseSourceVPCConfiguration = value; } + inline void SetDatabaseSourceVPCConfiguration(DatabaseSourceVPCConfiguration&& value) { m_databaseSourceVPCConfigurationHasBeenSet = true; m_databaseSourceVPCConfiguration = std::move(value); } + inline DatabaseSourceDescription& WithDatabaseSourceVPCConfiguration(const DatabaseSourceVPCConfiguration& value) { SetDatabaseSourceVPCConfiguration(value); return *this;} + inline DatabaseSourceDescription& WithDatabaseSourceVPCConfiguration(DatabaseSourceVPCConfiguration&& value) { SetDatabaseSourceVPCConfiguration(std::move(value)); return *this;} + ///@} + private: + + DatabaseType m_type; + bool m_typeHasBeenSet = false; + + Aws::String m_endpoint; + bool m_endpointHasBeenSet = false; + + int m_port; + bool m_portHasBeenSet = false; + + SSLMode m_sSLMode; + bool m_sSLModeHasBeenSet = false; + + DatabaseList m_databases; + bool m_databasesHasBeenSet = false; + + DatabaseTableList m_tables; + bool m_tablesHasBeenSet = false; + + DatabaseColumnList m_columns; + bool m_columnsHasBeenSet = false; + + Aws::Vector m_surrogateKeys; + bool m_surrogateKeysHasBeenSet = false; + + Aws::String m_snapshotWatermarkTable; + bool m_snapshotWatermarkTableHasBeenSet = false; + + Aws::Vector m_snapshotInfo; + bool m_snapshotInfoHasBeenSet = false; + + DatabaseSourceAuthenticationConfiguration m_databaseSourceAuthenticationConfiguration; + bool m_databaseSourceAuthenticationConfigurationHasBeenSet = false; + + DatabaseSourceVPCConfiguration m_databaseSourceVPCConfiguration; + bool m_databaseSourceVPCConfigurationHasBeenSet = false; + }; + +} // 
namespace Model +} // namespace Firehose +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DatabaseSourceVPCConfiguration.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DatabaseSourceVPCConfiguration.h new file mode 100644 index 00000000000..4d009ba1873 --- /dev/null +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DatabaseSourceVPCConfiguration.h @@ -0,0 +1,63 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include +#include +#include + +namespace Aws +{ +namespace Utils +{ +namespace Json +{ + class JsonValue; + class JsonView; +} // namespace Json +} // namespace Utils +namespace Firehose +{ +namespace Model +{ + + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    See Also:

    AWS + * API Reference

    + */ + class DatabaseSourceVPCConfiguration + { + public: + AWS_FIREHOSE_API DatabaseSourceVPCConfiguration(); + AWS_FIREHOSE_API DatabaseSourceVPCConfiguration(Aws::Utils::Json::JsonView jsonValue); + AWS_FIREHOSE_API DatabaseSourceVPCConfiguration& operator=(Aws::Utils::Json::JsonView jsonValue); + AWS_FIREHOSE_API Aws::Utils::Json::JsonValue Jsonize() const; + + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const Aws::String& GetVpcEndpointServiceName() const{ return m_vpcEndpointServiceName; } + inline bool VpcEndpointServiceNameHasBeenSet() const { return m_vpcEndpointServiceNameHasBeenSet; } + inline void SetVpcEndpointServiceName(const Aws::String& value) { m_vpcEndpointServiceNameHasBeenSet = true; m_vpcEndpointServiceName = value; } + inline void SetVpcEndpointServiceName(Aws::String&& value) { m_vpcEndpointServiceNameHasBeenSet = true; m_vpcEndpointServiceName = std::move(value); } + inline void SetVpcEndpointServiceName(const char* value) { m_vpcEndpointServiceNameHasBeenSet = true; m_vpcEndpointServiceName.assign(value); } + inline DatabaseSourceVPCConfiguration& WithVpcEndpointServiceName(const Aws::String& value) { SetVpcEndpointServiceName(value); return *this;} + inline DatabaseSourceVPCConfiguration& WithVpcEndpointServiceName(Aws::String&& value) { SetVpcEndpointServiceName(std::move(value)); return *this;} + inline DatabaseSourceVPCConfiguration& WithVpcEndpointServiceName(const char* value) { SetVpcEndpointServiceName(value); return *this;} + ///@} + private: + + Aws::String m_vpcEndpointServiceName; + bool m_vpcEndpointServiceNameHasBeenSet = false; + }; + +} // namespace Model +} // namespace Firehose +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DatabaseTableList.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DatabaseTableList.h new file mode 100644 index 00000000000..b2e43616344 --- /dev/null +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DatabaseTableList.h @@ -0,0 +1,84 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include +#include +#include +#include + +namespace Aws +{ +namespace Utils +{ +namespace Json +{ + class JsonValue; + class JsonView; +} // namespace Json +} // namespace Utils +namespace Firehose +{ +namespace Model +{ + + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    See Also:

    AWS + * API Reference

    + */ + class DatabaseTableList + { + public: + AWS_FIREHOSE_API DatabaseTableList(); + AWS_FIREHOSE_API DatabaseTableList(Aws::Utils::Json::JsonView jsonValue); + AWS_FIREHOSE_API DatabaseTableList& operator=(Aws::Utils::Json::JsonView jsonValue); + AWS_FIREHOSE_API Aws::Utils::Json::JsonValue Jsonize() const; + + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const Aws::Vector& GetInclude() const{ return m_include; } + inline bool IncludeHasBeenSet() const { return m_includeHasBeenSet; } + inline void SetInclude(const Aws::Vector& value) { m_includeHasBeenSet = true; m_include = value; } + inline void SetInclude(Aws::Vector&& value) { m_includeHasBeenSet = true; m_include = std::move(value); } + inline DatabaseTableList& WithInclude(const Aws::Vector& value) { SetInclude(value); return *this;} + inline DatabaseTableList& WithInclude(Aws::Vector&& value) { SetInclude(std::move(value)); return *this;} + inline DatabaseTableList& AddInclude(const Aws::String& value) { m_includeHasBeenSet = true; m_include.push_back(value); return *this; } + inline DatabaseTableList& AddInclude(Aws::String&& value) { m_includeHasBeenSet = true; m_include.push_back(std::move(value)); return *this; } + inline DatabaseTableList& AddInclude(const char* value) { m_includeHasBeenSet = true; m_include.push_back(value); return *this; } + ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const Aws::Vector& GetExclude() const{ return m_exclude; } + inline bool ExcludeHasBeenSet() const { return m_excludeHasBeenSet; } + inline void SetExclude(const Aws::Vector& value) { m_excludeHasBeenSet = true; m_exclude = value; } + inline void SetExclude(Aws::Vector&& value) { m_excludeHasBeenSet = true; m_exclude = std::move(value); } + inline DatabaseTableList& WithExclude(const Aws::Vector& value) { SetExclude(value); return *this;} + inline DatabaseTableList& WithExclude(Aws::Vector&& value) { SetExclude(std::move(value)); return *this;} + inline DatabaseTableList& AddExclude(const Aws::String& value) { m_excludeHasBeenSet = true; m_exclude.push_back(value); return *this; } + inline DatabaseTableList& AddExclude(Aws::String&& value) { m_excludeHasBeenSet = true; m_exclude.push_back(std::move(value)); return *this; } + inline DatabaseTableList& AddExclude(const char* value) { m_excludeHasBeenSet = true; m_exclude.push_back(value); return *this; } + ///@} + private: + + Aws::Vector m_include; + bool m_includeHasBeenSet = false; + + Aws::Vector m_exclude; + bool m_excludeHasBeenSet = false; + }; + +} // namespace Model +} // namespace Firehose +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DatabaseType.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DatabaseType.h new file mode 100644 index 00000000000..94803153468 --- /dev/null +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DatabaseType.h @@ -0,0 +1,31 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +#pragma once +#include +#include + +namespace Aws +{ +namespace Firehose +{ +namespace Model +{ + enum class DatabaseType + { + NOT_SET, + MySQL, + PostgreSQL + }; + +namespace DatabaseTypeMapper +{ +AWS_FIREHOSE_API DatabaseType GetDatabaseTypeForName(const Aws::String& name); + +AWS_FIREHOSE_API Aws::String GetNameForDatabaseType(DatabaseType value); +} // namespace DatabaseTypeMapper +} // namespace Model +} // namespace Firehose +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DeleteDeliveryStreamRequest.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DeleteDeliveryStreamRequest.h index 931c94b5001..8422b6be316 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DeleteDeliveryStreamRequest.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DeleteDeliveryStreamRequest.h @@ -36,7 +36,7 @@ namespace Model ///@{ /** - *

    The name of the delivery stream.

    + *

    The name of the Firehose stream.

    */ inline const Aws::String& GetDeliveryStreamName() const{ return m_deliveryStreamName; } inline bool DeliveryStreamNameHasBeenSet() const { return m_deliveryStreamNameHasBeenSet; } @@ -50,7 +50,7 @@ namespace Model ///@{ /** - *

    Set this to true if you want to delete the delivery stream even if Firehose + *

    Set this to true if you want to delete the Firehose stream even if Firehose * is unable to retire the grant for the CMK. Firehose might be unable to retire * the grant due to a customer error, such as when the CMK or the grant are in an * invalid state. If you force deletion, you can then use the Contains information about a delivery stream.

    See Also:

    Contains information about a Firehose stream.

    See Also:

    AWS * API Reference

    */ @@ -47,7 +47,7 @@ namespace Model ///@{ /** - *

    The name of the delivery stream.

    + *

    The name of the Firehose stream.

    */ inline const Aws::String& GetDeliveryStreamName() const{ return m_deliveryStreamName; } inline bool DeliveryStreamNameHasBeenSet() const { return m_deliveryStreamNameHasBeenSet; } @@ -61,7 +61,7 @@ namespace Model ///@{ /** - *

    The Amazon Resource Name (ARN) of the delivery stream. For more information, + *

    The Amazon Resource Name (ARN) of the Firehose stream. For more information, * see Amazon * Resource Names (ARNs) and Amazon Web Services Service Namespaces.

    @@ -78,7 +78,7 @@ namespace Model ///@{ /** - *

    The status of the delivery stream. If the status of a delivery stream is + *

    The status of the Firehose stream. If the status of a Firehose stream is * CREATING_FAILED, this status doesn't change, and you can't invoke * CreateDeliveryStream again on it. However, you can invoke the * DeleteDeliveryStream operation to delete it.

    @@ -107,7 +107,7 @@ namespace Model ///@{ /** - *

    Indicates the server-side encryption (SSE) status for the delivery + *

    Indicates the server-side encryption (SSE) status for the Firehose * stream.

    */ inline const DeliveryStreamEncryptionConfiguration& GetDeliveryStreamEncryptionConfiguration() const{ return m_deliveryStreamEncryptionConfiguration; } @@ -120,10 +120,10 @@ namespace Model ///@{ /** - *

    The delivery stream type. This can be one of the following values:

      - *
    • DirectPut: Provider applications access the delivery + *

      The Firehose stream type. This can be one of the following values:

        + *
      • DirectPut: Provider applications access the Firehose * stream directly.

      • KinesisStreamAsSource: The - * delivery stream uses a Kinesis data stream as a source.

      + * Firehose stream uses a Kinesis data stream as a source.

    */ inline const DeliveryStreamType& GetDeliveryStreamType() const{ return m_deliveryStreamType; } inline bool DeliveryStreamTypeHasBeenSet() const { return m_deliveryStreamTypeHasBeenSet; } @@ -135,7 +135,7 @@ namespace Model ///@{ /** - *

    Each time the destination is updated for a delivery stream, the version ID is + *

    Each time the destination is updated for a Firehose stream, the version ID is * changed, and the current version ID is required when updating the destination. * This is so that the service knows it is applying the changes to the correct * version of the delivery stream.

    @@ -152,7 +152,7 @@ namespace Model ///@{ /** - *

    The date and time that the delivery stream was created.

    + *

    The date and time that the Firehose stream was created.

    */ inline const Aws::Utils::DateTime& GetCreateTimestamp() const{ return m_createTimestamp; } inline bool CreateTimestampHasBeenSet() const { return m_createTimestampHasBeenSet; } @@ -164,7 +164,7 @@ namespace Model ///@{ /** - *

    The date and time that the delivery stream was last updated.

    + *

    The date and time that the Firehose stream was last updated.

    */ inline const Aws::Utils::DateTime& GetLastUpdateTimestamp() const{ return m_lastUpdateTimestamp; } inline bool LastUpdateTimestampHasBeenSet() const { return m_lastUpdateTimestampHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DeliveryStreamEncryptionConfiguration.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DeliveryStreamEncryptionConfiguration.h index ef32416762a..c14497c6fe2 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DeliveryStreamEncryptionConfiguration.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DeliveryStreamEncryptionConfiguration.h @@ -80,7 +80,7 @@ namespace Model ///@{ /** - *

    This is the server-side encryption (SSE) status for the delivery stream. For + *

    This is the server-side encryption (SSE) status for the Firehose stream. For * a full description of the different values of this status, see * StartDeliveryStreamEncryption and StopDeliveryStreamEncryption. If * this status is ENABLING_FAILED or DISABLING_FAILED, it diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DeliveryStreamEncryptionConfigurationInput.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DeliveryStreamEncryptionConfigurationInput.h index 855c62a893b..6fa97c6baa0 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DeliveryStreamEncryptionConfigurationInput.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DeliveryStreamEncryptionConfigurationInput.h @@ -69,12 +69,12 @@ namespace Model * to create a grant that allows the Firehose service to use the customer managed * CMK to perform encryption and decryption. Firehose manages that grant.

    *

    When you invoke StartDeliveryStreamEncryption to change the CMK for a - * delivery stream that is encrypted with a customer managed CMK, Firehose + * Firehose stream that is encrypted with a customer managed CMK, Firehose * schedules the grant it had on the old CMK for retirement.

    You can use a - * CMK of type CUSTOMER_MANAGED_CMK to encrypt up to 500 delivery streams. If a + * CMK of type CUSTOMER_MANAGED_CMK to encrypt up to 500 Firehose streams. If a * CreateDeliveryStream or StartDeliveryStreamEncryption operation * exceeds this limit, Firehose throws a LimitExceededException.

    - *

    To encrypt your delivery stream, use symmetric CMKs. Firehose + *

    To encrypt your Firehose stream, use symmetric CMKs. Firehose * doesn't support asymmetric CMKs. For information about symmetric and asymmetric * CMKs, see About diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DeliveryStreamFailureType.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DeliveryStreamFailureType.h index 1d27800539d..f9c003cf0a6 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DeliveryStreamFailureType.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DeliveryStreamFailureType.h @@ -16,6 +16,8 @@ namespace Model enum class DeliveryStreamFailureType { NOT_SET, + VPC_ENDPOINT_SERVICE_NAME_NOT_FOUND, + VPC_INTERFACE_ENDPOINT_SERVICE_ACCESS_DENIED, RETIRE_KMS_GRANT_FAILED, CREATE_KMS_GRANT_FAILED, KMS_ACCESS_DENIED, diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DeliveryStreamType.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DeliveryStreamType.h index d21024aea01..36be9b0c577 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DeliveryStreamType.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DeliveryStreamType.h @@ -18,7 +18,8 @@ namespace Model NOT_SET, DirectPut, KinesisStreamAsSource, - MSKAsSource + MSKAsSource, + DatabaseAsSource }; namespace DeliveryStreamTypeMapper diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DescribeDeliveryStreamRequest.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DescribeDeliveryStreamRequest.h index 47761239117..7866f5ebdd9 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DescribeDeliveryStreamRequest.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DescribeDeliveryStreamRequest.h @@ -36,7 +36,7 @@ namespace Model ///@{ /** - *

    The name of the delivery stream.

    + *

    The name of the Firehose stream.

    */ inline const Aws::String& GetDeliveryStreamName() const{ return m_deliveryStreamName; } inline bool DeliveryStreamNameHasBeenSet() const { return m_deliveryStreamNameHasBeenSet; } @@ -51,7 +51,7 @@ namespace Model ///@{ /** *

    The limit on the number of destinations to return. You can have one - * destination per delivery stream.

    + * destination per Firehose stream.

    */ inline int GetLimit() const{ return m_limit; } inline bool LimitHasBeenSet() const { return m_limitHasBeenSet; } @@ -62,7 +62,7 @@ namespace Model ///@{ /** *

    The ID of the destination to start returning the destination information. - * Firehose supports one destination per delivery stream.

    + * Firehose supports one destination per Firehose stream.

    */ inline const Aws::String& GetExclusiveStartDestinationId() const{ return m_exclusiveStartDestinationId; } inline bool ExclusiveStartDestinationIdHasBeenSet() const { return m_exclusiveStartDestinationIdHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DescribeDeliveryStreamResult.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DescribeDeliveryStreamResult.h index bba01acf94b..aa7b148a8a0 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DescribeDeliveryStreamResult.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DescribeDeliveryStreamResult.h @@ -35,7 +35,7 @@ namespace Model ///@{ /** - *

    Information about the delivery stream.

    + *

    Information about the Firehose stream.

    */ inline const DeliveryStreamDescription& GetDeliveryStreamDescription() const{ return m_deliveryStreamDescription; } inline void SetDeliveryStreamDescription(const DeliveryStreamDescription& value) { m_deliveryStreamDescription = value; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DestinationDescription.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DestinationDescription.h index 3ecf3a7f544..bb13ccc80ed 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DestinationDescription.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DestinationDescription.h @@ -34,7 +34,7 @@ namespace Model { /** - *

    Describes the destination for a delivery stream.

    See Also:

    + *

    Describes the destination for a Firehose stream.

    See Also:

    *
    AWS * API Reference

    @@ -172,8 +172,7 @@ namespace Model ///@{ /** - *

    Describes a destination in Apache Iceberg Tables.

    Amazon Data - * Firehose is in preview release and is subject to change.

    + *

    Describes a destination in Apache Iceberg Tables.

    */ inline const IcebergDestinationDescription& GetIcebergDestinationDescription() const{ return m_icebergDestinationDescription; } inline bool IcebergDestinationDescriptionHasBeenSet() const { return m_icebergDestinationDescriptionHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DestinationTableConfiguration.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DestinationTableConfiguration.h index e3666c94c42..380bd3ba1d9 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DestinationTableConfiguration.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DestinationTableConfiguration.h @@ -7,6 +7,7 @@ #include #include #include +#include #include namespace Aws @@ -25,9 +26,8 @@ namespace Model { /** - *

    Describes the configuration of a destination in Apache Iceberg Tables.

    - *

    Amazon Data Firehose is in preview release and is subject to - * change.

    See Also:

    Describes the configuration of a destination in Apache Iceberg Tables. + *

    See Also:

    AWS * API Reference

    */ @@ -42,8 +42,7 @@ namespace Model ///@{ /** - *

    Specifies the name of the Apache Iceberg Table.

    Amazon Data Firehose - * is in preview release and is subject to change.

    + *

    Specifies the name of the Apache Iceberg Table.

    */ inline const Aws::String& GetDestinationTableName() const{ return m_destinationTableName; } inline bool DestinationTableNameHasBeenSet() const { return m_destinationTableNameHasBeenSet; } @@ -57,8 +56,7 @@ namespace Model ///@{ /** - *

    The name of the Apache Iceberg database.

    Amazon Data Firehose is in - * preview release and is subject to change.

    + *

    The name of the Apache Iceberg database.

    */ inline const Aws::String& GetDestinationDatabaseName() const{ return m_destinationDatabaseName; } inline bool DestinationDatabaseNameHasBeenSet() const { return m_destinationDatabaseNameHasBeenSet; } @@ -73,8 +71,8 @@ namespace Model ///@{ /** *

    A list of unique keys for a given Apache Iceberg table. Firehose will use - * these for running Create/Update/Delete operations on the given Iceberg table. - *

    Amazon Data Firehose is in preview release and is subject to change.

    + * these for running Create, Update, or Delete operations on the given Iceberg + * table.

    */ inline const Aws::Vector& GetUniqueKeys() const{ return m_uniqueKeys; } inline bool UniqueKeysHasBeenSet() const { return m_uniqueKeysHasBeenSet; } @@ -87,12 +85,24 @@ namespace Model inline DestinationTableConfiguration& AddUniqueKeys(const char* value) { m_uniqueKeysHasBeenSet = true; m_uniqueKeys.push_back(value); return *this; } ///@} + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const PartitionSpec& GetPartitionSpec() const{ return m_partitionSpec; } + inline bool PartitionSpecHasBeenSet() const { return m_partitionSpecHasBeenSet; } + inline void SetPartitionSpec(const PartitionSpec& value) { m_partitionSpecHasBeenSet = true; m_partitionSpec = value; } + inline void SetPartitionSpec(PartitionSpec&& value) { m_partitionSpecHasBeenSet = true; m_partitionSpec = std::move(value); } + inline DestinationTableConfiguration& WithPartitionSpec(const PartitionSpec& value) { SetPartitionSpec(value); return *this;} + inline DestinationTableConfiguration& WithPartitionSpec(PartitionSpec&& value) { SetPartitionSpec(std::move(value)); return *this;} + ///@} + ///@{ /** *

    The table specific S3 error output prefix. All the errors that occurred * while delivering to this table will be prefixed with this value in S3 - * destination.

    Amazon Data Firehose is in preview release and is subject - * to change.

    + * destination.

    */ inline const Aws::String& GetS3ErrorOutputPrefix() const{ return m_s3ErrorOutputPrefix; } inline bool S3ErrorOutputPrefixHasBeenSet() const { return m_s3ErrorOutputPrefixHasBeenSet; } @@ -114,6 +124,9 @@ namespace Model Aws::Vector m_uniqueKeys; bool m_uniqueKeysHasBeenSet = false; + PartitionSpec m_partitionSpec; + bool m_partitionSpecHasBeenSet = false; + Aws::String m_s3ErrorOutputPrefix; bool m_s3ErrorOutputPrefixHasBeenSet = false; }; diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DynamicPartitioningConfiguration.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DynamicPartitioningConfiguration.h index fe032c7462b..f13b0c6dbd6 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DynamicPartitioningConfiguration.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/DynamicPartitioningConfiguration.h @@ -55,7 +55,7 @@ namespace Model ///@{ /** - *

    Specifies that the dynamic partitioning is enabled for this Firehose delivery + *

    Specifies that the dynamic partitioning is enabled for this Firehose * stream.

    */ inline bool GetEnabled() const{ return m_enabled; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ElasticsearchBufferingHints.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ElasticsearchBufferingHints.h index 72b9da3bde6..1d150ceac56 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ElasticsearchBufferingHints.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ElasticsearchBufferingHints.h @@ -52,7 +52,7 @@ namespace Model *

    Buffer incoming data to the specified size, in MBs, before delivering it to * the destination. The default value is 5.

    We recommend setting this * parameter to a value greater than the amount of data you typically ingest into - * the delivery stream in 10 seconds. For example, if you typically ingest data at + * the Firehose stream in 10 seconds. For example, if you typically ingest data at * 1 MB/sec, the value should be 10 MB or higher.

    */ inline int GetSizeInMBs() const{ return m_sizeInMBs; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ElasticsearchDestinationConfiguration.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ElasticsearchDestinationConfiguration.h index 4482ea3abbc..79b3a077d72 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ElasticsearchDestinationConfiguration.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ElasticsearchDestinationConfiguration.h @@ -189,7 +189,7 @@ namespace Model * href="https://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#es-s3-backup">Amazon * S3 Backup for the Amazon ES Destination. Default value is * FailedDocumentsOnly.

    You can't change this backup mode after - * you create the delivery stream.

    + * you create the Firehose stream.

    */ inline const ElasticsearchS3BackupMode& GetS3BackupMode() const{ return m_s3BackupMode; } inline bool S3BackupModeHasBeenSet() const { return m_s3BackupModeHasBeenSet; } @@ -225,7 +225,7 @@ namespace Model ///@{ /** - *

    The Amazon CloudWatch logging options for your delivery stream.

    + *

    The Amazon CloudWatch logging options for your Firehose stream.

    */ inline const CloudWatchLoggingOptions& GetCloudWatchLoggingOptions() const{ return m_cloudWatchLoggingOptions; } inline bool CloudWatchLoggingOptionsHasBeenSet() const { return m_cloudWatchLoggingOptionsHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ElasticsearchDestinationUpdate.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ElasticsearchDestinationUpdate.h index 9cac1186955..00e75ed8c04 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ElasticsearchDestinationUpdate.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ElasticsearchDestinationUpdate.h @@ -119,9 +119,9 @@ namespace Model *

    The Elasticsearch type name. For Elasticsearch 6.x, there can be only one * type per index. If you try to specify a new type for an existing index that * already has another type, Firehose returns an error during runtime.

    If - * you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery stream, + * you upgrade Elasticsearch from 6.x to 7.x and don’t update your Firehose stream, * Firehose still delivers data to Elasticsearch with the old index name and type - * name. If you want to update your delivery stream with a new index name, provide + * name. If you want to update your Firehose stream with a new index name, provide * an empty string for TypeName.

    */ inline const Aws::String& GetTypeName() const{ return m_typeName; } @@ -203,7 +203,7 @@ namespace Model ///@{ /** - *

    The CloudWatch logging options for your delivery stream.

    + *

    The CloudWatch logging options for your Firehose stream.

    */ inline const CloudWatchLoggingOptions& GetCloudWatchLoggingOptions() const{ return m_cloudWatchLoggingOptions; } inline bool CloudWatchLoggingOptionsHasBeenSet() const { return m_cloudWatchLoggingOptionsHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ExtendedS3DestinationConfiguration.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ExtendedS3DestinationConfiguration.h index 5388db7f91c..e5421fd3a16 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ExtendedS3DestinationConfiguration.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ExtendedS3DestinationConfiguration.h @@ -155,7 +155,7 @@ namespace Model ///@{ /** - *

    The Amazon CloudWatch logging options for your delivery stream.

    + *

    The Amazon CloudWatch logging options for your Firehose stream.

    */ inline const CloudWatchLoggingOptions& GetCloudWatchLoggingOptions() const{ return m_cloudWatchLoggingOptions; } inline bool CloudWatchLoggingOptionsHasBeenSet() const { return m_cloudWatchLoggingOptionsHasBeenSet; } @@ -179,9 +179,9 @@ namespace Model ///@{ /** - *

    The Amazon S3 backup mode. After you create a delivery stream, you can update + *

    The Amazon S3 backup mode. After you create a Firehose stream, you can update * it to enable Amazon S3 backup if it is disabled. If backup is enabled, you can't - * update the delivery stream to disable it.

    + * update the Firehose stream to disable it.

    */ inline const S3BackupMode& GetS3BackupMode() const{ return m_s3BackupMode; } inline bool S3BackupModeHasBeenSet() const { return m_s3BackupModeHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ExtendedS3DestinationDescription.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ExtendedS3DestinationDescription.h index 29a9dcac260..9d2432b1658 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ExtendedS3DestinationDescription.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ExtendedS3DestinationDescription.h @@ -154,7 +154,7 @@ namespace Model ///@{ /** - *

    The Amazon CloudWatch logging options for your delivery stream.

    + *

    The Amazon CloudWatch logging options for your Firehose stream.

    */ inline const CloudWatchLoggingOptions& GetCloudWatchLoggingOptions() const{ return m_cloudWatchLoggingOptions; } inline bool CloudWatchLoggingOptionsHasBeenSet() const { return m_cloudWatchLoggingOptionsHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ExtendedS3DestinationUpdate.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ExtendedS3DestinationUpdate.h index 9969adf15b1..ad883dc53db 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ExtendedS3DestinationUpdate.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ExtendedS3DestinationUpdate.h @@ -155,7 +155,7 @@ namespace Model ///@{ /** - *

    The Amazon CloudWatch logging options for your delivery stream.

    + *

    The Amazon CloudWatch logging options for your Firehose stream.

    */ inline const CloudWatchLoggingOptions& GetCloudWatchLoggingOptions() const{ return m_cloudWatchLoggingOptions; } inline bool CloudWatchLoggingOptionsHasBeenSet() const { return m_cloudWatchLoggingOptionsHasBeenSet; } @@ -179,8 +179,8 @@ namespace Model ///@{ /** - *

    You can update a delivery stream to enable Amazon S3 backup if it is - * disabled. If backup is enabled, you can't update the delivery stream to disable + *

    You can update a Firehose stream to enable Amazon S3 backup if it is + * disabled. If backup is enabled, you can't update the Firehose stream to disable * it.

    */ inline const S3BackupMode& GetS3BackupMode() const{ return m_s3BackupMode; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/HttpEndpointBufferingHints.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/HttpEndpointBufferingHints.h index b2bac7b2959..bd953bc0144 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/HttpEndpointBufferingHints.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/HttpEndpointBufferingHints.h @@ -45,7 +45,7 @@ namespace Model *

    Buffer incoming data to the specified size, in MBs, before delivering it to * the destination. The default value is 5.

    We recommend setting this * parameter to a value greater than the amount of data you typically ingest into - * the delivery stream in 10 seconds. For example, if you typically ingest data at + * the Firehose stream in 10 seconds. For example, if you typically ingest data at * 1 MB/sec, the value should be 10 MB or higher.

    */ inline int GetSizeInMBs() const{ return m_sizeInMBs; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/IcebergDestinationConfiguration.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/IcebergDestinationConfiguration.h index f1b6a6aaf6a..ae6cd084c9c 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/IcebergDestinationConfiguration.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/IcebergDestinationConfiguration.h @@ -6,6 +6,8 @@ #pragma once #include #include +#include +#include #include #include #include @@ -33,9 +35,8 @@ namespace Model { /** - *

    Specifies the destination configure settings for Apache Iceberg Table.

    - *

    Amazon Data Firehose is in preview release and is subject to - * change.

    See Also:

    Specifies the destination configure settings for Apache Iceberg Table. + *

    See Also:

    AWS * API Reference

    */ @@ -51,8 +52,8 @@ namespace Model ///@{ /** *

    Provides a list of DestinationTableConfigurations which - * Firehose uses to deliver data to Apache Iceberg tables.

    Amazon Data - * Firehose is in preview release and is subject to change.

    + * Firehose uses to deliver data to Apache Iceberg Tables. Firehose will write data + * with insert if table specific configuration is not provided here.

    */ inline const Aws::Vector& GetDestinationTableConfigurationList() const{ return m_destinationTableConfigurationList; } inline bool DestinationTableConfigurationListHasBeenSet() const { return m_destinationTableConfigurationListHasBeenSet; } @@ -64,6 +65,32 @@ namespace Model inline IcebergDestinationConfiguration& AddDestinationTableConfigurationList(DestinationTableConfiguration&& value) { m_destinationTableConfigurationListHasBeenSet = true; m_destinationTableConfigurationList.push_back(std::move(value)); return *this; } ///@} + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const SchemaEvolutionConfiguration& GetSchemaEvolutionConfiguration() const{ return m_schemaEvolutionConfiguration; } + inline bool SchemaEvolutionConfigurationHasBeenSet() const { return m_schemaEvolutionConfigurationHasBeenSet; } + inline void SetSchemaEvolutionConfiguration(const SchemaEvolutionConfiguration& value) { m_schemaEvolutionConfigurationHasBeenSet = true; m_schemaEvolutionConfiguration = value; } + inline void SetSchemaEvolutionConfiguration(SchemaEvolutionConfiguration&& value) { m_schemaEvolutionConfigurationHasBeenSet = true; m_schemaEvolutionConfiguration = std::move(value); } + inline IcebergDestinationConfiguration& WithSchemaEvolutionConfiguration(const SchemaEvolutionConfiguration& value) { SetSchemaEvolutionConfiguration(value); return *this;} + inline IcebergDestinationConfiguration& WithSchemaEvolutionConfiguration(SchemaEvolutionConfiguration&& value) { SetSchemaEvolutionConfiguration(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const TableCreationConfiguration& GetTableCreationConfiguration() const{ return m_tableCreationConfiguration; } + inline bool TableCreationConfigurationHasBeenSet() const { return m_tableCreationConfigurationHasBeenSet; } + inline void SetTableCreationConfiguration(const TableCreationConfiguration& value) { m_tableCreationConfigurationHasBeenSet = true; m_tableCreationConfiguration = value; } + inline void SetTableCreationConfiguration(TableCreationConfiguration&& value) { m_tableCreationConfigurationHasBeenSet = true; m_tableCreationConfiguration = std::move(value); } + inline IcebergDestinationConfiguration& WithTableCreationConfiguration(const TableCreationConfiguration& value) { SetTableCreationConfiguration(value); return *this;} + inline IcebergDestinationConfiguration& WithTableCreationConfiguration(TableCreationConfiguration&& value) { SetTableCreationConfiguration(std::move(value)); return *this;} + ///@} + ///@{ inline const BufferingHints& GetBufferingHints() const{ return m_bufferingHints; } @@ -96,9 +123,8 @@ namespace Model ///@{ /** - *

    Describes how Firehose will backup records. Currently,Firehose only supports - * FailedDataOnly for preview.

    Amazon Data Firehose is in - * preview release and is subject to change.

    + *

    Describes how Firehose will backup records. Currently, S3 backup only + * supports FailedDataOnly.

    */ inline const IcebergS3BackupMode& GetS3BackupMode() const{ return m_s3BackupMode; } inline bool S3BackupModeHasBeenSet() const { return m_s3BackupModeHasBeenSet; } @@ -120,8 +146,8 @@ namespace Model ///@{ /** - *

    The Amazon Resource Name (ARN) of the Apache Iceberg tables role.

    - *

    Amazon Data Firehose is in preview release and is subject to change.

    + *

    The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for + * calling Apache Iceberg Tables.

    */ inline const Aws::String& GetRoleARN() const{ return m_roleARN; } inline bool RoleARNHasBeenSet() const { return m_roleARNHasBeenSet; } @@ -136,8 +162,7 @@ namespace Model ///@{ /** *

    Configuration describing where the destination Apache Iceberg Tables are - * persisted.

    Amazon Data Firehose is in preview release and is subject to - * change.

    + * persisted.

    */ inline const CatalogConfiguration& GetCatalogConfiguration() const{ return m_catalogConfiguration; } inline bool CatalogConfigurationHasBeenSet() const { return m_catalogConfigurationHasBeenSet; } @@ -161,6 +186,12 @@ namespace Model Aws::Vector m_destinationTableConfigurationList; bool m_destinationTableConfigurationListHasBeenSet = false; + SchemaEvolutionConfiguration m_schemaEvolutionConfiguration; + bool m_schemaEvolutionConfigurationHasBeenSet = false; + + TableCreationConfiguration m_tableCreationConfiguration; + bool m_tableCreationConfigurationHasBeenSet = false; + BufferingHints m_bufferingHints; bool m_bufferingHintsHasBeenSet = false; diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/IcebergDestinationDescription.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/IcebergDestinationDescription.h index 3fb31f99943..b79bebf2bb2 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/IcebergDestinationDescription.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/IcebergDestinationDescription.h @@ -6,6 +6,8 @@ #pragma once #include #include +#include +#include #include #include #include @@ -33,9 +35,8 @@ namespace Model { /** - *

    Describes a destination in Apache Iceberg Tables.

    Amazon Data - * Firehose is in preview release and is subject to change.

    See - * Also:

    Describes a destination in Apache Iceberg Tables.

    See Also:

    + *
    AWS * API Reference

    */ @@ -51,8 +52,8 @@ namespace Model ///@{ /** *

    Provides a list of DestinationTableConfigurations which - * Firehose uses to deliver data to Apache Iceberg tables.

    Amazon Data - * Firehose is in preview release and is subject to change.

    + * Firehose uses to deliver data to Apache Iceberg Tables. Firehose will write data + * with insert if table specific configuration is not provided here.

    */ inline const Aws::Vector& GetDestinationTableConfigurationList() const{ return m_destinationTableConfigurationList; } inline bool DestinationTableConfigurationListHasBeenSet() const { return m_destinationTableConfigurationListHasBeenSet; } @@ -64,6 +65,32 @@ namespace Model inline IcebergDestinationDescription& AddDestinationTableConfigurationList(DestinationTableConfiguration&& value) { m_destinationTableConfigurationListHasBeenSet = true; m_destinationTableConfigurationList.push_back(std::move(value)); return *this; } ///@} + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const SchemaEvolutionConfiguration& GetSchemaEvolutionConfiguration() const{ return m_schemaEvolutionConfiguration; } + inline bool SchemaEvolutionConfigurationHasBeenSet() const { return m_schemaEvolutionConfigurationHasBeenSet; } + inline void SetSchemaEvolutionConfiguration(const SchemaEvolutionConfiguration& value) { m_schemaEvolutionConfigurationHasBeenSet = true; m_schemaEvolutionConfiguration = value; } + inline void SetSchemaEvolutionConfiguration(SchemaEvolutionConfiguration&& value) { m_schemaEvolutionConfigurationHasBeenSet = true; m_schemaEvolutionConfiguration = std::move(value); } + inline IcebergDestinationDescription& WithSchemaEvolutionConfiguration(const SchemaEvolutionConfiguration& value) { SetSchemaEvolutionConfiguration(value); return *this;} + inline IcebergDestinationDescription& WithSchemaEvolutionConfiguration(SchemaEvolutionConfiguration&& value) { SetSchemaEvolutionConfiguration(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const TableCreationConfiguration& GetTableCreationConfiguration() const{ return m_tableCreationConfiguration; } + inline bool TableCreationConfigurationHasBeenSet() const { return m_tableCreationConfigurationHasBeenSet; } + inline void SetTableCreationConfiguration(const TableCreationConfiguration& value) { m_tableCreationConfigurationHasBeenSet = true; m_tableCreationConfiguration = value; } + inline void SetTableCreationConfiguration(TableCreationConfiguration&& value) { m_tableCreationConfigurationHasBeenSet = true; m_tableCreationConfiguration = std::move(value); } + inline IcebergDestinationDescription& WithTableCreationConfiguration(const TableCreationConfiguration& value) { SetTableCreationConfiguration(value); return *this;} + inline IcebergDestinationDescription& WithTableCreationConfiguration(TableCreationConfiguration&& value) { SetTableCreationConfiguration(std::move(value)); return *this;} + ///@} + ///@{ inline const BufferingHints& GetBufferingHints() const{ return m_bufferingHints; } @@ -97,8 +124,7 @@ namespace Model ///@{ /** *

    Describes how Firehose will backup records. Currently, Firehose only supports - * FailedDataOnly for preview.

    Amazon Data Firehose is in - * preview release and is subject to change.

    + * FailedDataOnly.

    */ inline const IcebergS3BackupMode& GetS3BackupMode() const{ return m_s3BackupMode; } inline bool S3BackupModeHasBeenSet() const { return m_s3BackupModeHasBeenSet; } @@ -120,8 +146,8 @@ namespace Model ///@{ /** - *

    The Amazon Resource Name (ARN) of the Apache Iceberg Tables role.

    - *

    Amazon Data Firehose is in preview release and is subject to change.

    + *

    The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for + * calling Apache Iceberg Tables.

    */ inline const Aws::String& GetRoleARN() const{ return m_roleARN; } inline bool RoleARNHasBeenSet() const { return m_roleARNHasBeenSet; } @@ -136,7 +162,7 @@ namespace Model ///@{ /** *

    Configuration describing where the destination Iceberg tables are persisted. - *

    Amazon Data Firehose is in preview release and is subject to change.

    + *

    */ inline const CatalogConfiguration& GetCatalogConfiguration() const{ return m_catalogConfiguration; } inline bool CatalogConfigurationHasBeenSet() const { return m_catalogConfigurationHasBeenSet; } @@ -160,6 +186,12 @@ namespace Model Aws::Vector m_destinationTableConfigurationList; bool m_destinationTableConfigurationListHasBeenSet = false; + SchemaEvolutionConfiguration m_schemaEvolutionConfiguration; + bool m_schemaEvolutionConfigurationHasBeenSet = false; + + TableCreationConfiguration m_tableCreationConfiguration; + bool m_tableCreationConfigurationHasBeenSet = false; + BufferingHints m_bufferingHints; bool m_bufferingHintsHasBeenSet = false; diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/IcebergDestinationUpdate.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/IcebergDestinationUpdate.h index f78bd2fa85c..5272dd19990 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/IcebergDestinationUpdate.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/IcebergDestinationUpdate.h @@ -6,6 +6,8 @@ #pragma once #include #include +#include +#include #include #include #include @@ -33,9 +35,8 @@ namespace Model { /** - *

    Describes an update for a destination in Apache Iceberg Tables.

    - *

    Amazon Data Firehose is in preview release and is subject to - * change.

    See Also:

    Describes an update for a destination in Apache Iceberg Tables. + *

    See Also:

    AWS * API Reference

    */ @@ -51,8 +52,8 @@ namespace Model ///@{ /** *

    Provides a list of DestinationTableConfigurations which - * Firehose uses to deliver data to Apache Iceberg tables.

    Amazon Data - * Firehose is in preview release and is subject to change.

    + * Firehose uses to deliver data to Apache Iceberg Tables. Firehose will write data + * with insert if table specific configuration is not provided here.

    */ inline const Aws::Vector& GetDestinationTableConfigurationList() const{ return m_destinationTableConfigurationList; } inline bool DestinationTableConfigurationListHasBeenSet() const { return m_destinationTableConfigurationListHasBeenSet; } @@ -64,6 +65,32 @@ namespace Model inline IcebergDestinationUpdate& AddDestinationTableConfigurationList(DestinationTableConfiguration&& value) { m_destinationTableConfigurationListHasBeenSet = true; m_destinationTableConfigurationList.push_back(std::move(value)); return *this; } ///@} + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const SchemaEvolutionConfiguration& GetSchemaEvolutionConfiguration() const{ return m_schemaEvolutionConfiguration; } + inline bool SchemaEvolutionConfigurationHasBeenSet() const { return m_schemaEvolutionConfigurationHasBeenSet; } + inline void SetSchemaEvolutionConfiguration(const SchemaEvolutionConfiguration& value) { m_schemaEvolutionConfigurationHasBeenSet = true; m_schemaEvolutionConfiguration = value; } + inline void SetSchemaEvolutionConfiguration(SchemaEvolutionConfiguration&& value) { m_schemaEvolutionConfigurationHasBeenSet = true; m_schemaEvolutionConfiguration = std::move(value); } + inline IcebergDestinationUpdate& WithSchemaEvolutionConfiguration(const SchemaEvolutionConfiguration& value) { SetSchemaEvolutionConfiguration(value); return *this;} + inline IcebergDestinationUpdate& WithSchemaEvolutionConfiguration(SchemaEvolutionConfiguration&& value) { SetSchemaEvolutionConfiguration(std::move(value)); return *this;} + ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const TableCreationConfiguration& GetTableCreationConfiguration() const{ return m_tableCreationConfiguration; } + inline bool TableCreationConfigurationHasBeenSet() const { return m_tableCreationConfigurationHasBeenSet; } + inline void SetTableCreationConfiguration(const TableCreationConfiguration& value) { m_tableCreationConfigurationHasBeenSet = true; m_tableCreationConfiguration = value; } + inline void SetTableCreationConfiguration(TableCreationConfiguration&& value) { m_tableCreationConfigurationHasBeenSet = true; m_tableCreationConfiguration = std::move(value); } + inline IcebergDestinationUpdate& WithTableCreationConfiguration(const TableCreationConfiguration& value) { SetTableCreationConfiguration(value); return *this;} + inline IcebergDestinationUpdate& WithTableCreationConfiguration(TableCreationConfiguration&& value) { SetTableCreationConfiguration(std::move(value)); return *this;} + ///@} + ///@{ inline const BufferingHints& GetBufferingHints() const{ return m_bufferingHints; } @@ -97,8 +124,7 @@ namespace Model ///@{ /** *

    Describes how Firehose will backup records. Currently, Firehose only supports - * FailedDataOnly for preview.

    Amazon Data Firehose is in - * preview release and is subject to change.

    + * FailedDataOnly.

    */ inline const IcebergS3BackupMode& GetS3BackupMode() const{ return m_s3BackupMode; } inline bool S3BackupModeHasBeenSet() const { return m_s3BackupModeHasBeenSet; } @@ -120,8 +146,8 @@ namespace Model ///@{ /** - *

    The Amazon Resource Name (ARN) of the Apache Iceberg Tables role.

    - *

    Amazon Data Firehose is in preview release and is subject to change.

    + *

    The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for + * calling Apache Iceberg Tables.

    */ inline const Aws::String& GetRoleARN() const{ return m_roleARN; } inline bool RoleARNHasBeenSet() const { return m_roleARNHasBeenSet; } @@ -136,7 +162,7 @@ namespace Model ///@{ /** *

    Configuration describing where the destination Iceberg tables are persisted. - *

    Amazon Data Firehose is in preview release and is subject to change.

    + *

    */ inline const CatalogConfiguration& GetCatalogConfiguration() const{ return m_catalogConfiguration; } inline bool CatalogConfigurationHasBeenSet() const { return m_catalogConfigurationHasBeenSet; } @@ -160,6 +186,12 @@ namespace Model Aws::Vector m_destinationTableConfigurationList; bool m_destinationTableConfigurationListHasBeenSet = false; + SchemaEvolutionConfiguration m_schemaEvolutionConfiguration; + bool m_schemaEvolutionConfigurationHasBeenSet = false; + + TableCreationConfiguration m_tableCreationConfiguration; + bool m_tableCreationConfigurationHasBeenSet = false; + BufferingHints m_bufferingHints; bool m_bufferingHintsHasBeenSet = false; diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/InvalidKMSResourceException.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/InvalidKMSResourceException.h index ffb6c8d463d..223403a27e2 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/InvalidKMSResourceException.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/InvalidKMSResourceException.h @@ -25,7 +25,7 @@ namespace Model /** *

    Firehose throws this exception when an attempt to put records or to start or - * stop delivery stream encryption fails. This happens when the KMS service throws + * stop Firehose stream encryption fails. This happens when the KMS service throws * one of the following exception types: AccessDeniedException, * InvalidStateException, DisabledException, or * NotFoundException.

    See Also:

    The stream and role Amazon Resource Names (ARNs) for a Kinesis data stream - * used as the source for a delivery stream.

    See Also:

    See Also:

    AWS * API Reference

    */ diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/KinesisStreamSourceDescription.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/KinesisStreamSourceDescription.h index 8bf9e0353be..ae8b8125795 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/KinesisStreamSourceDescription.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/KinesisStreamSourceDescription.h @@ -26,7 +26,7 @@ namespace Model /** *

    Details about a Kinesis data stream used as the source for a Firehose - * delivery stream.

    See Also:

    See Also:

    AWS * API Reference

    */ diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ListDeliveryStreamsRequest.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ListDeliveryStreamsRequest.h index 71cf7ebd4ab..d23f42406ea 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ListDeliveryStreamsRequest.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ListDeliveryStreamsRequest.h @@ -37,7 +37,7 @@ namespace Model ///@{ /** - *

    The maximum number of delivery streams to list. The default value is 10.

    + *

    The maximum number of Firehose streams to list. The default value is 10.

    */ inline int GetLimit() const{ return m_limit; } inline bool LimitHasBeenSet() const { return m_limitHasBeenSet; } @@ -47,11 +47,11 @@ namespace Model ///@{ /** - *

    The delivery stream type. This can be one of the following values:

      - *
    • DirectPut: Provider applications access the delivery + *

      The Firehose stream type. This can be one of the following values:

        + *
      • DirectPut: Provider applications access the Firehose * stream directly.

      • KinesisStreamAsSource: The - * delivery stream uses a Kinesis data stream as a source.

      This - * parameter is optional. If this parameter is omitted, delivery streams of all + * Firehose stream uses a Kinesis data stream as a source.

    This + * parameter is optional. If this parameter is omitted, Firehose streams of all * types are returned.

    */ inline const DeliveryStreamType& GetDeliveryStreamType() const{ return m_deliveryStreamType; } @@ -64,8 +64,8 @@ namespace Model ///@{ /** - *

    The list of delivery streams returned by this call to - * ListDeliveryStreams will start with the delivery stream whose name + *

    The list of Firehose streams returned by this call to + * ListDeliveryStreams will start with the Firehose stream whose name * comes alphabetically immediately after the name you specify in * ExclusiveStartDeliveryStreamName.

    */ diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ListDeliveryStreamsResult.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ListDeliveryStreamsResult.h index ea4e3ba2871..17e4a43e24f 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ListDeliveryStreamsResult.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ListDeliveryStreamsResult.h @@ -35,7 +35,7 @@ namespace Model ///@{ /** - *

    The names of the delivery streams.

    + *

    The names of the Firehose streams.

    */ inline const Aws::Vector& GetDeliveryStreamNames() const{ return m_deliveryStreamNames; } inline void SetDeliveryStreamNames(const Aws::Vector& value) { m_deliveryStreamNames = value; } @@ -49,7 +49,7 @@ namespace Model ///@{ /** - *

    Indicates whether there are more delivery streams available to list.

    + *

    Indicates whether there are more Firehose streams available to list.

    */ inline bool GetHasMoreDeliveryStreams() const{ return m_hasMoreDeliveryStreams; } inline void SetHasMoreDeliveryStreams(bool value) { m_hasMoreDeliveryStreams = value; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ListTagsForDeliveryStreamRequest.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ListTagsForDeliveryStreamRequest.h index 9d74377c8ed..f1c2d96cc20 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ListTagsForDeliveryStreamRequest.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/ListTagsForDeliveryStreamRequest.h @@ -36,7 +36,7 @@ namespace Model ///@{ /** - *

    The name of the delivery stream whose tags you want to list.

    + *

    The name of the Firehose stream whose tags you want to list.

    */ inline const Aws::String& GetDeliveryStreamName() const{ return m_deliveryStreamName; } inline bool DeliveryStreamNameHasBeenSet() const { return m_deliveryStreamNameHasBeenSet; } @@ -67,7 +67,7 @@ namespace Model ///@{ /** *

    The number of tags to return. If this number is less than the total number of - * tags associated with the delivery stream, HasMoreTags is set to + * tags associated with the Firehose stream, HasMoreTags is set to * true in the response. To list additional tags, set * ExclusiveStartTagKey to the last key in the response.

    */ diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/MSKSourceDescription.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/MSKSourceDescription.h index 68caaa8f9e4..06aea9917be 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/MSKSourceDescription.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/MSKSourceDescription.h @@ -27,7 +27,7 @@ namespace Model /** *

    Details about the Amazon MSK cluster used as the source for a Firehose - * delivery stream.

    See Also:

    See Also:

    AWS * API Reference

    */ diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/PartitionField.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/PartitionField.h new file mode 100644 index 00000000000..1ca35f90b59 --- /dev/null +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/PartitionField.h @@ -0,0 +1,63 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include +#include +#include + +namespace Aws +{ +namespace Utils +{ +namespace Json +{ + class JsonValue; + class JsonView; +} // namespace Json +} // namespace Utils +namespace Firehose +{ +namespace Model +{ + + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    See Also:

    AWS + * API Reference

    + */ + class PartitionField + { + public: + AWS_FIREHOSE_API PartitionField(); + AWS_FIREHOSE_API PartitionField(Aws::Utils::Json::JsonView jsonValue); + AWS_FIREHOSE_API PartitionField& operator=(Aws::Utils::Json::JsonView jsonValue); + AWS_FIREHOSE_API Aws::Utils::Json::JsonValue Jsonize() const; + + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const Aws::String& GetSourceName() const{ return m_sourceName; } + inline bool SourceNameHasBeenSet() const { return m_sourceNameHasBeenSet; } + inline void SetSourceName(const Aws::String& value) { m_sourceNameHasBeenSet = true; m_sourceName = value; } + inline void SetSourceName(Aws::String&& value) { m_sourceNameHasBeenSet = true; m_sourceName = std::move(value); } + inline void SetSourceName(const char* value) { m_sourceNameHasBeenSet = true; m_sourceName.assign(value); } + inline PartitionField& WithSourceName(const Aws::String& value) { SetSourceName(value); return *this;} + inline PartitionField& WithSourceName(Aws::String&& value) { SetSourceName(std::move(value)); return *this;} + inline PartitionField& WithSourceName(const char* value) { SetSourceName(value); return *this;} + ///@} + private: + + Aws::String m_sourceName; + bool m_sourceNameHasBeenSet = false; + }; + +} // namespace Model +} // namespace Firehose +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/PartitionSpec.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/PartitionSpec.h new file mode 100644 index 00000000000..918cf882a87 --- /dev/null +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/PartitionSpec.h @@ -0,0 +1,64 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include +#include +#include +#include + +namespace Aws +{ +namespace Utils +{ +namespace Json +{ + class JsonValue; + class JsonView; +} // namespace Json +} // namespace Utils +namespace Firehose +{ +namespace Model +{ + + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    See Also:

    AWS + * API Reference

    + */ + class PartitionSpec + { + public: + AWS_FIREHOSE_API PartitionSpec(); + AWS_FIREHOSE_API PartitionSpec(Aws::Utils::Json::JsonView jsonValue); + AWS_FIREHOSE_API PartitionSpec& operator=(Aws::Utils::Json::JsonView jsonValue); + AWS_FIREHOSE_API Aws::Utils::Json::JsonValue Jsonize() const; + + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const Aws::Vector& GetIdentity() const{ return m_identity; } + inline bool IdentityHasBeenSet() const { return m_identityHasBeenSet; } + inline void SetIdentity(const Aws::Vector& value) { m_identityHasBeenSet = true; m_identity = value; } + inline void SetIdentity(Aws::Vector&& value) { m_identityHasBeenSet = true; m_identity = std::move(value); } + inline PartitionSpec& WithIdentity(const Aws::Vector& value) { SetIdentity(value); return *this;} + inline PartitionSpec& WithIdentity(Aws::Vector&& value) { SetIdentity(std::move(value)); return *this;} + inline PartitionSpec& AddIdentity(const PartitionField& value) { m_identityHasBeenSet = true; m_identity.push_back(value); return *this; } + inline PartitionSpec& AddIdentity(PartitionField&& value) { m_identityHasBeenSet = true; m_identity.push_back(std::move(value)); return *this; } + ///@} + private: + + Aws::Vector m_identity; + bool m_identityHasBeenSet = false; + }; + +} // namespace Model +} // namespace Firehose +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/PutRecordBatchRequest.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/PutRecordBatchRequest.h index c0f76c02c7d..dfb9cabd179 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/PutRecordBatchRequest.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/PutRecordBatchRequest.h @@ -38,7 +38,7 @@ namespace Model ///@{ /** - *

    The name of the delivery stream.

    + *

    The name of the Firehose stream.

    */ inline const Aws::String& GetDeliveryStreamName() const{ return m_deliveryStreamName; } inline bool DeliveryStreamNameHasBeenSet() const { return m_deliveryStreamNameHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/PutRecordBatchResponseEntry.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/PutRecordBatchResponseEntry.h index 9bf08e76c41..c9f8e38e173 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/PutRecordBatchResponseEntry.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/PutRecordBatchResponseEntry.h @@ -25,8 +25,8 @@ namespace Model /** *

    Contains the result for an individual record from a PutRecordBatch - * request. If the record is successfully added to your delivery stream, it - * receives a record ID. If the record fails to be added to your delivery stream, + * request. If the record is successfully added to your Firehose stream, it + * receives a record ID. If the record fails to be added to your Firehose stream, * the result includes an error code and an error message.

    See Also:

    * AWS diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/PutRecordRequest.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/PutRecordRequest.h index 6bb1a72e880..5a28ef2f2c7 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/PutRecordRequest.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/PutRecordRequest.h @@ -37,7 +37,7 @@ namespace Model ///@{ /** - *

    The name of the delivery stream.

    + *

    The name of the Firehose stream.

    */ inline const Aws::String& GetDeliveryStreamName() const{ return m_deliveryStreamName; } inline bool DeliveryStreamNameHasBeenSet() const { return m_deliveryStreamNameHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/Record.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/Record.h index b68eb6fbdc1..c0b8ddb2418 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/Record.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/Record.h @@ -24,7 +24,7 @@ namespace Model { /** - *

    The unit of data in a delivery stream.

    See Also:

    The unit of data in a Firehose stream.

    See Also:

    AWS API * Reference

    */ diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/RedshiftDestinationConfiguration.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/RedshiftDestinationConfiguration.h index d7f9aac4fcc..6dfa7c940bc 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/RedshiftDestinationConfiguration.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/RedshiftDestinationConfiguration.h @@ -161,9 +161,9 @@ namespace Model ///@{ /** - *

    The Amazon S3 backup mode. After you create a delivery stream, you can update + *

    The Amazon S3 backup mode. After you create a Firehose stream, you can update * it to enable Amazon S3 backup if it is disabled. If backup is enabled, you can't - * update the delivery stream to disable it.

    + * update the Firehose stream to disable it.

    */ inline const RedshiftS3BackupMode& GetS3BackupMode() const{ return m_s3BackupMode; } inline bool S3BackupModeHasBeenSet() const { return m_s3BackupModeHasBeenSet; } @@ -187,7 +187,7 @@ namespace Model ///@{ /** - *

    The CloudWatch logging options for your delivery stream.

    + *

    The CloudWatch logging options for your Firehose stream.

    */ inline const CloudWatchLoggingOptions& GetCloudWatchLoggingOptions() const{ return m_cloudWatchLoggingOptions; } inline bool CloudWatchLoggingOptionsHasBeenSet() const { return m_cloudWatchLoggingOptionsHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/RedshiftDestinationDescription.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/RedshiftDestinationDescription.h index 5bdcc6252e1..3d133b1709b 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/RedshiftDestinationDescription.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/RedshiftDestinationDescription.h @@ -164,7 +164,7 @@ namespace Model ///@{ /** - *

    The Amazon CloudWatch logging options for your delivery stream.

    + *

    The Amazon CloudWatch logging options for your Firehose stream.

    */ inline const CloudWatchLoggingOptions& GetCloudWatchLoggingOptions() const{ return m_cloudWatchLoggingOptions; } inline bool CloudWatchLoggingOptionsHasBeenSet() const { return m_cloudWatchLoggingOptionsHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/RedshiftDestinationUpdate.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/RedshiftDestinationUpdate.h index 9b6839c9146..d6ba706160e 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/RedshiftDestinationUpdate.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/RedshiftDestinationUpdate.h @@ -159,8 +159,8 @@ namespace Model ///@{ /** - *

    You can update a delivery stream to enable Amazon S3 backup if it is - * disabled. If backup is enabled, you can't update the delivery stream to disable + *

    You can update a Firehose stream to enable Amazon S3 backup if it is + * disabled. If backup is enabled, you can't update the Firehose stream to disable * it.

    */ inline const RedshiftS3BackupMode& GetS3BackupMode() const{ return m_s3BackupMode; } @@ -185,7 +185,7 @@ namespace Model ///@{ /** - *

    The Amazon CloudWatch logging options for your delivery stream.

    + *

    The Amazon CloudWatch logging options for your Firehose stream.

    */ inline const CloudWatchLoggingOptions& GetCloudWatchLoggingOptions() const{ return m_cloudWatchLoggingOptions; } inline bool CloudWatchLoggingOptionsHasBeenSet() const { return m_cloudWatchLoggingOptionsHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/RetryOptions.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/RetryOptions.h index 608550e1f5c..5c0c53597d5 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/RetryOptions.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/RetryOptions.h @@ -22,8 +22,8 @@ namespace Model { /** - *

    The retry behavior in case Firehose is unable to deliver data to an Amazon - * S3 prefix.

    See Also:

    The retry behavior in case Firehose is unable to deliver data to a + * destination.

    See Also:

    AWS * API Reference

    */ @@ -39,7 +39,7 @@ namespace Model ///@{ /** *

    The period of time during which Firehose retries to deliver data to the - * specified Amazon S3 prefix.

    + * specified destination.

    */ inline int GetDurationInSeconds() const{ return m_durationInSeconds; } inline bool DurationInSecondsHasBeenSet() const { return m_durationInSecondsHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/S3DestinationConfiguration.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/S3DestinationConfiguration.h index 9ca158b1ba8..244f6168119 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/S3DestinationConfiguration.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/S3DestinationConfiguration.h @@ -154,7 +154,7 @@ namespace Model ///@{ /** - *

    The CloudWatch logging options for your delivery stream.

    + *

    The CloudWatch logging options for your Firehose stream.

    */ inline const CloudWatchLoggingOptions& GetCloudWatchLoggingOptions() const{ return m_cloudWatchLoggingOptions; } inline bool CloudWatchLoggingOptionsHasBeenSet() const { return m_cloudWatchLoggingOptionsHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/S3DestinationDescription.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/S3DestinationDescription.h index e3c33673f9a..a83a106ca0f 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/S3DestinationDescription.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/S3DestinationDescription.h @@ -150,7 +150,7 @@ namespace Model ///@{ /** - *

    The Amazon CloudWatch logging options for your delivery stream.

    + *

    The Amazon CloudWatch logging options for your Firehose stream.

    */ inline const CloudWatchLoggingOptions& GetCloudWatchLoggingOptions() const{ return m_cloudWatchLoggingOptions; } inline bool CloudWatchLoggingOptionsHasBeenSet() const { return m_cloudWatchLoggingOptionsHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/S3DestinationUpdate.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/S3DestinationUpdate.h index 45790c51855..fe5242903f1 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/S3DestinationUpdate.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/S3DestinationUpdate.h @@ -154,7 +154,7 @@ namespace Model ///@{ /** - *

    The CloudWatch logging options for your delivery stream.

    + *

    The CloudWatch logging options for your Firehose stream.

    */ inline const CloudWatchLoggingOptions& GetCloudWatchLoggingOptions() const{ return m_cloudWatchLoggingOptions; } inline bool CloudWatchLoggingOptionsHasBeenSet() const { return m_cloudWatchLoggingOptionsHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SSLMode.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SSLMode.h new file mode 100644 index 00000000000..8454a23f00d --- /dev/null +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SSLMode.h @@ -0,0 +1,31 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include +#include + +namespace Aws +{ +namespace Firehose +{ +namespace Model +{ + enum class SSLMode + { + NOT_SET, + Disabled, + Enabled + }; + +namespace SSLModeMapper +{ +AWS_FIREHOSE_API SSLMode GetSSLModeForName(const Aws::String& name); + +AWS_FIREHOSE_API Aws::String GetNameForSSLMode(SSLMode value); +} // namespace SSLModeMapper +} // namespace Model +} // namespace Firehose +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SchemaEvolutionConfiguration.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SchemaEvolutionConfiguration.h new file mode 100644 index 00000000000..9a61a46cd9b --- /dev/null +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SchemaEvolutionConfiguration.h @@ -0,0 +1,57 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include + +namespace Aws +{ +namespace Utils +{ +namespace Json +{ + class JsonValue; + class JsonView; +} // namespace Json +} // namespace Utils +namespace Firehose +{ +namespace Model +{ + + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    See Also:

    AWS + * API Reference

    + */ + class SchemaEvolutionConfiguration + { + public: + AWS_FIREHOSE_API SchemaEvolutionConfiguration(); + AWS_FIREHOSE_API SchemaEvolutionConfiguration(Aws::Utils::Json::JsonView jsonValue); + AWS_FIREHOSE_API SchemaEvolutionConfiguration& operator=(Aws::Utils::Json::JsonView jsonValue); + AWS_FIREHOSE_API Aws::Utils::Json::JsonValue Jsonize() const; + + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline bool GetEnabled() const{ return m_enabled; } + inline bool EnabledHasBeenSet() const { return m_enabledHasBeenSet; } + inline void SetEnabled(bool value) { m_enabledHasBeenSet = true; m_enabled = value; } + inline SchemaEvolutionConfiguration& WithEnabled(bool value) { SetEnabled(value); return *this;} + ///@} + private: + + bool m_enabled; + bool m_enabledHasBeenSet = false; + }; + +} // namespace Model +} // namespace Firehose +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SecretsManagerConfiguration.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SecretsManagerConfiguration.h index cd9567f2f95..2978b4559d3 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SecretsManagerConfiguration.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SecretsManagerConfiguration.h @@ -42,7 +42,7 @@ namespace Model /** *

    The ARN of the secret that stores your credentials. It must be in the same * region as the Firehose stream and the role. The secret ARN can reside in a - * different account than the delivery stream and role as Firehose supports + * different account than the Firehose stream and role as Firehose supports * cross-account secret access. This parameter is required when Enabled is * set to True.

    */ @@ -75,8 +75,8 @@ namespace Model ///@{ /** - *

    Specifies whether you want to use the the secrets manager feature. When set - * as True the secrets manager configuration overwrites the existing + *

    Specifies whether you want to use the secrets manager feature. When set as + * True the secrets manager configuration overwrites the existing * secrets in the destination configuration. When it's set to False * Firehose falls back to the credentials in the destination configuration.

    */ diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SnapshotRequestedBy.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SnapshotRequestedBy.h new file mode 100644 index 00000000000..e6e3b006f0f --- /dev/null +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SnapshotRequestedBy.h @@ -0,0 +1,31 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include +#include + +namespace Aws +{ +namespace Firehose +{ +namespace Model +{ + enum class SnapshotRequestedBy + { + NOT_SET, + USER, + FIREHOSE + }; + +namespace SnapshotRequestedByMapper +{ +AWS_FIREHOSE_API SnapshotRequestedBy GetSnapshotRequestedByForName(const Aws::String& name); + +AWS_FIREHOSE_API Aws::String GetNameForSnapshotRequestedBy(SnapshotRequestedBy value); +} // namespace SnapshotRequestedByMapper +} // namespace Model +} // namespace Firehose +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SnapshotStatus.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SnapshotStatus.h new file mode 100644 index 00000000000..530bb515982 --- /dev/null +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SnapshotStatus.h @@ -0,0 +1,32 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +#pragma once +#include +#include + +namespace Aws +{ +namespace Firehose +{ +namespace Model +{ + enum class SnapshotStatus + { + NOT_SET, + IN_PROGRESS, + COMPLETE, + SUSPENDED + }; + +namespace SnapshotStatusMapper +{ +AWS_FIREHOSE_API SnapshotStatus GetSnapshotStatusForName(const Aws::String& name); + +AWS_FIREHOSE_API Aws::String GetNameForSnapshotStatus(SnapshotStatus value); +} // namespace SnapshotStatusMapper +} // namespace Model +} // namespace Firehose +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SnowflakeBufferingHints.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SnowflakeBufferingHints.h index da9b159c916..3754aa629bb 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SnowflakeBufferingHints.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SnowflakeBufferingHints.h @@ -39,8 +39,8 @@ namespace Model ///@{ /** - *

    Buffer incoming data to the specified size, in MBs, before delivering it to - * the destination. The default value is 1.

    + *

    Buffer incoming data to the specified size, in MBs, before delivering it to + * the destination. The default value is 128.

    */ inline int GetSizeInMBs() const{ return m_sizeInMBs; } inline bool SizeInMBsHasBeenSet() const { return m_sizeInMBsHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SnowflakeDestinationUpdate.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SnowflakeDestinationUpdate.h index f844d1154c6..f21b748c0bd 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SnowflakeDestinationUpdate.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SnowflakeDestinationUpdate.h @@ -273,7 +273,8 @@ namespace Model ///@{ /** - *

    Choose an S3 backup mode

    + *

    Choose an S3 backup mode. Once you set the mode as AllData, you + * can not change it to FailedDataOnly.

    */ inline const SnowflakeS3BackupMode& GetS3BackupMode() const{ return m_s3BackupMode; } inline bool S3BackupModeHasBeenSet() const { return m_s3BackupModeHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SourceDescription.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SourceDescription.h index 27ac1d11e1f..32c47dacecf 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SourceDescription.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SourceDescription.h @@ -7,6 +7,7 @@ #include #include #include +#include #include namespace Aws @@ -26,7 +27,7 @@ namespace Model /** *

    Details about a Kinesis data stream used as the source for a Firehose - * delivery stream.

    See Also:

    See Also:

    AWS * API Reference

    */ @@ -64,6 +65,19 @@ namespace Model inline SourceDescription& WithMSKSourceDescription(const MSKSourceDescription& value) { SetMSKSourceDescription(value); return *this;} inline SourceDescription& WithMSKSourceDescription(MSKSourceDescription&& value) { SetMSKSourceDescription(std::move(value)); return *this;} ///@} + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline const DatabaseSourceDescription& GetDatabaseSourceDescription() const{ return m_databaseSourceDescription; } + inline bool DatabaseSourceDescriptionHasBeenSet() const { return m_databaseSourceDescriptionHasBeenSet; } + inline void SetDatabaseSourceDescription(const DatabaseSourceDescription& value) { m_databaseSourceDescriptionHasBeenSet = true; m_databaseSourceDescription = value; } + inline void SetDatabaseSourceDescription(DatabaseSourceDescription&& value) { m_databaseSourceDescriptionHasBeenSet = true; m_databaseSourceDescription = std::move(value); } + inline SourceDescription& WithDatabaseSourceDescription(const DatabaseSourceDescription& value) { SetDatabaseSourceDescription(value); return *this;} + inline SourceDescription& WithDatabaseSourceDescription(DatabaseSourceDescription&& value) { SetDatabaseSourceDescription(std::move(value)); return *this;} + ///@} private: KinesisStreamSourceDescription m_kinesisStreamSourceDescription; @@ -71,6 +85,9 @@ namespace Model MSKSourceDescription m_mSKSourceDescription; bool m_mSKSourceDescriptionHasBeenSet = false; + + DatabaseSourceDescription m_databaseSourceDescription; + bool m_databaseSourceDescriptionHasBeenSet = false; }; } // namespace Model diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SplunkDestinationConfiguration.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SplunkDestinationConfiguration.h index a3c9bce0d4f..4a2c3b42d1b 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SplunkDestinationConfiguration.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SplunkDestinationConfiguration.h @@ -159,7 +159,7 @@ namespace Model ///@{ /** - *

    The Amazon CloudWatch logging options for your delivery stream.

    + *

    The Amazon CloudWatch logging options for your Firehose stream.

    */ inline const CloudWatchLoggingOptions& GetCloudWatchLoggingOptions() const{ return m_cloudWatchLoggingOptions; } inline bool CloudWatchLoggingOptionsHasBeenSet() const { return m_cloudWatchLoggingOptionsHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SplunkDestinationDescription.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SplunkDestinationDescription.h index 606669a04b0..85ce33f09f4 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SplunkDestinationDescription.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SplunkDestinationDescription.h @@ -156,7 +156,7 @@ namespace Model ///@{ /** - *

    The Amazon CloudWatch logging options for your delivery stream.

    + *

    The Amazon CloudWatch logging options for your Firehose stream.

    */ inline const CloudWatchLoggingOptions& GetCloudWatchLoggingOptions() const{ return m_cloudWatchLoggingOptions; } inline bool CloudWatchLoggingOptionsHasBeenSet() const { return m_cloudWatchLoggingOptionsHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SplunkDestinationUpdate.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SplunkDestinationUpdate.h index 676cc01734d..1ea218952f7 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SplunkDestinationUpdate.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/SplunkDestinationUpdate.h @@ -159,7 +159,7 @@ namespace Model ///@{ /** - *

    The Amazon CloudWatch logging options for your delivery stream.

    + *

    The Amazon CloudWatch logging options for your Firehose stream.

    */ inline const CloudWatchLoggingOptions& GetCloudWatchLoggingOptions() const{ return m_cloudWatchLoggingOptions; } inline bool CloudWatchLoggingOptionsHasBeenSet() const { return m_cloudWatchLoggingOptionsHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/StartDeliveryStreamEncryptionRequest.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/StartDeliveryStreamEncryptionRequest.h index a4f5d9b1909..1c5fad1ff1a 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/StartDeliveryStreamEncryptionRequest.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/StartDeliveryStreamEncryptionRequest.h @@ -37,7 +37,7 @@ namespace Model ///@{ /** - *

    The name of the delivery stream for which you want to enable server-side + *

    The name of the Firehose stream for which you want to enable server-side * encryption (SSE).

    */ inline const Aws::String& GetDeliveryStreamName() const{ return m_deliveryStreamName; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/StopDeliveryStreamEncryptionRequest.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/StopDeliveryStreamEncryptionRequest.h index 1298914d2cc..f3688d84e79 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/StopDeliveryStreamEncryptionRequest.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/StopDeliveryStreamEncryptionRequest.h @@ -36,7 +36,7 @@ namespace Model ///@{ /** - *

    The name of the delivery stream for which you want to disable server-side + *

    The name of the Firehose stream for which you want to disable server-side * encryption (SSE).

    */ inline const Aws::String& GetDeliveryStreamName() const{ return m_deliveryStreamName; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/TableCreationConfiguration.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/TableCreationConfiguration.h new file mode 100644 index 00000000000..65b40c32e06 --- /dev/null +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/TableCreationConfiguration.h @@ -0,0 +1,57 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include + +namespace Aws +{ +namespace Utils +{ +namespace Json +{ + class JsonValue; + class JsonView; +} // namespace Json +} // namespace Utils +namespace Firehose +{ +namespace Model +{ + + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    See Also:

    AWS + * API Reference

    + */ + class TableCreationConfiguration + { + public: + AWS_FIREHOSE_API TableCreationConfiguration(); + AWS_FIREHOSE_API TableCreationConfiguration(Aws::Utils::Json::JsonView jsonValue); + AWS_FIREHOSE_API TableCreationConfiguration& operator=(Aws::Utils::Json::JsonView jsonValue); + AWS_FIREHOSE_API Aws::Utils::Json::JsonValue Jsonize() const; + + + ///@{ + /** + *

    Amazon Data Firehose is in preview release and is subject to + * change.

    + */ + inline bool GetEnabled() const{ return m_enabled; } + inline bool EnabledHasBeenSet() const { return m_enabledHasBeenSet; } + inline void SetEnabled(bool value) { m_enabledHasBeenSet = true; m_enabled = value; } + inline TableCreationConfiguration& WithEnabled(bool value) { SetEnabled(value); return *this;} + ///@} + private: + + bool m_enabled; + bool m_enabledHasBeenSet = false; + }; + +} // namespace Model +} // namespace Firehose +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/Tag.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/Tag.h index 1366aa5267e..27feebd8ee7 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/Tag.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/Tag.h @@ -24,7 +24,7 @@ namespace Model { /** - *

    Metadata that you can assign to a delivery stream, consisting of a key-value + *

    Metadata that you can assign to a Firehose stream, consisting of a key-value * pair.

    See Also:

    AWS API * Reference

    diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/TagDeliveryStreamRequest.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/TagDeliveryStreamRequest.h index 63956d56029..83f6ca47871 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/TagDeliveryStreamRequest.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/TagDeliveryStreamRequest.h @@ -38,7 +38,7 @@ namespace Model ///@{ /** - *

    The name of the delivery stream to which you want to add the tags.

    + *

    The name of the Firehose stream to which you want to add the tags.

    */ inline const Aws::String& GetDeliveryStreamName() const{ return m_deliveryStreamName; } inline bool DeliveryStreamNameHasBeenSet() const { return m_deliveryStreamNameHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/UntagDeliveryStreamRequest.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/UntagDeliveryStreamRequest.h index 693d8d57aef..bdc8f227186 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/UntagDeliveryStreamRequest.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/UntagDeliveryStreamRequest.h @@ -37,7 +37,7 @@ namespace Model ///@{ /** - *

    The name of the delivery stream.

    + *

    The name of the Firehose stream.

    */ inline const Aws::String& GetDeliveryStreamName() const{ return m_deliveryStreamName; } inline bool DeliveryStreamNameHasBeenSet() const { return m_deliveryStreamNameHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/UpdateDestinationRequest.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/UpdateDestinationRequest.h index 2f259391825..39f5a4d664a 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/UpdateDestinationRequest.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/UpdateDestinationRequest.h @@ -45,7 +45,7 @@ namespace Model ///@{ /** - *

    The name of the delivery stream.

    + *

    The name of the Firehose stream.

    */ inline const Aws::String& GetDeliveryStreamName() const{ return m_deliveryStreamName; } inline bool DeliveryStreamNameHasBeenSet() const { return m_deliveryStreamNameHasBeenSet; } @@ -190,7 +190,6 @@ namespace Model ///@{ /** *

    Describes an update for a destination in Apache Iceberg Tables.

    - *

    Amazon Data Firehose is in preview release and is subject to change.

    */ inline const IcebergDestinationUpdate& GetIcebergDestinationUpdate() const{ return m_icebergDestinationUpdate; } inline bool IcebergDestinationUpdateHasBeenSet() const { return m_icebergDestinationUpdateHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/VpcConfiguration.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/VpcConfiguration.h index a6c90c8887a..700071459f3 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/VpcConfiguration.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/VpcConfiguration.h @@ -51,7 +51,7 @@ namespace Model * based on throughput. To enable Firehose to scale up the number of ENIs to match * throughput, ensure that you have sufficient quota. To help you calculate the * quota you need, assume that Firehose can create up to three ENIs for this - * delivery stream for each of the subnets specified here. For more information + * Firehose stream for each of the subnets specified here. For more information * about ENI quota, see Network * Interfaces in the Amazon VPC Quotas topic.

    @@ -69,7 +69,7 @@ namespace Model ///@{ /** - *

    The ARN of the IAM role that you want the delivery stream to use to create + *

    The ARN of the IAM role that you want the Firehose stream to use to create * endpoints in the destination VPC. You can use your existing Firehose delivery * role or you can specify a new role. In either case, make sure that the role * trusts the Firehose service principal and that it grants the following diff --git a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/VpcConfigurationDescription.h b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/VpcConfigurationDescription.h index 0c65679d8e0..82fa3a062bd 100644 --- a/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/VpcConfigurationDescription.h +++ b/generated/src/aws-cpp-sdk-firehose/include/aws/firehose/model/VpcConfigurationDescription.h @@ -50,7 +50,7 @@ namespace Model * the subnets specified here scales up and down automatically based on throughput. * To enable Firehose to scale up the number of ENIs to match throughput, ensure * that you have sufficient quota. To help you calculate the quota you need, assume - * that Firehose can create up to three ENIs for this delivery stream for each of + * that Firehose can create up to three ENIs for this Firehose stream for each of * the subnets specified here. For more information about ENI quota, see Network * Interfaces in the Amazon VPC Quotas topic.

    @@ -68,7 +68,7 @@ namespace Model ///@{ /** - *

    The ARN of the IAM role that the delivery stream uses to create endpoints in + *

    The ARN of the IAM role that the Firehose stream uses to create endpoints in * the destination VPC. You can use your existing Firehose delivery role or you can * specify a new role. In either case, make sure that the role trusts the Firehose * service principal and that it grants the following permissions:

    • @@ -80,7 +80,7 @@ namespace Model * ec2:CreateNetworkInterface

    • * ec2:CreateNetworkInterfacePermission

    • * ec2:DeleteNetworkInterface

    If you revoke these - * permissions after you create the delivery stream, Firehose can't scale out by + * permissions after you create the Firehose stream, Firehose can't scale out by * creating more ENIs when necessary. You might therefore see a degradation in * performance.

    */ @@ -102,7 +102,7 @@ namespace Model * groups, ensure that they allow outbound HTTPS traffic to the Amazon ES domain's * security group. Also ensure that the Amazon ES domain's security group allows * HTTPS traffic from the security groups specified here. If you use the same - * security group for both your delivery stream and the Amazon ES domain, make sure + * security group for both your Firehose stream and the Amazon ES domain, make sure * the security group inbound rule allows HTTPS traffic. For more information about * security group rules, see Security diff --git a/generated/src/aws-cpp-sdk-firehose/source/model/CatalogConfiguration.cpp b/generated/src/aws-cpp-sdk-firehose/source/model/CatalogConfiguration.cpp index cbb6daf5cb0..010f3f5745a 100644 --- a/generated/src/aws-cpp-sdk-firehose/source/model/CatalogConfiguration.cpp +++ b/generated/src/aws-cpp-sdk-firehose/source/model/CatalogConfiguration.cpp @@ -19,7 +19,8 @@ namespace Model { CatalogConfiguration::CatalogConfiguration() : - m_catalogARNHasBeenSet(false) + m_catalogARNHasBeenSet(false), + m_warehouseLocationHasBeenSet(false) { } @@ -38,6 +39,13 @@ CatalogConfiguration& CatalogConfiguration::operator =(JsonView jsonValue) m_catalogARNHasBeenSet = true; } + if(jsonValue.ValueExists("WarehouseLocation")) + { + m_warehouseLocation = jsonValue.GetString("WarehouseLocation"); + + m_warehouseLocationHasBeenSet = true; + } + return *this; } @@ -51,6 +59,12 @@ JsonValue CatalogConfiguration::Jsonize() const } + if(m_warehouseLocationHasBeenSet) + { + payload.WithString("WarehouseLocation", m_warehouseLocation); + + } + return payload; } diff --git a/generated/src/aws-cpp-sdk-firehose/source/model/CreateDeliveryStreamRequest.cpp b/generated/src/aws-cpp-sdk-firehose/source/model/CreateDeliveryStreamRequest.cpp index d822ff3829e..75e9853e7db 100644 --- a/generated/src/aws-cpp-sdk-firehose/source/model/CreateDeliveryStreamRequest.cpp +++ 
b/generated/src/aws-cpp-sdk-firehose/source/model/CreateDeliveryStreamRequest.cpp @@ -28,7 +28,8 @@ CreateDeliveryStreamRequest::CreateDeliveryStreamRequest() : m_amazonOpenSearchServerlessDestinationConfigurationHasBeenSet(false), m_mSKSourceConfigurationHasBeenSet(false), m_snowflakeDestinationConfigurationHasBeenSet(false), - m_icebergDestinationConfigurationHasBeenSet(false) + m_icebergDestinationConfigurationHasBeenSet(false), + m_databaseSourceConfigurationHasBeenSet(false) { } @@ -130,6 +131,12 @@ Aws::String CreateDeliveryStreamRequest::SerializePayload() const } + if(m_databaseSourceConfigurationHasBeenSet) + { + payload.WithObject("DatabaseSourceConfiguration", m_databaseSourceConfiguration.Jsonize()); + + } + return payload.View().WriteReadable(); } diff --git a/generated/src/aws-cpp-sdk-firehose/source/model/DatabaseColumnList.cpp b/generated/src/aws-cpp-sdk-firehose/source/model/DatabaseColumnList.cpp new file mode 100644 index 00000000000..0ca21bf170b --- /dev/null +++ b/generated/src/aws-cpp-sdk-firehose/source/model/DatabaseColumnList.cpp @@ -0,0 +1,89 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +#include +#include + +#include + +using namespace Aws::Utils::Json; +using namespace Aws::Utils; + +namespace Aws +{ +namespace Firehose +{ +namespace Model +{ + +DatabaseColumnList::DatabaseColumnList() : + m_includeHasBeenSet(false), + m_excludeHasBeenSet(false) +{ +} + +DatabaseColumnList::DatabaseColumnList(JsonView jsonValue) + : DatabaseColumnList() +{ + *this = jsonValue; +} + +DatabaseColumnList& DatabaseColumnList::operator =(JsonView jsonValue) +{ + if(jsonValue.ValueExists("Include")) + { + Aws::Utils::Array includeJsonList = jsonValue.GetArray("Include"); + for(unsigned includeIndex = 0; includeIndex < includeJsonList.GetLength(); ++includeIndex) + { + m_include.push_back(includeJsonList[includeIndex].AsString()); + } + m_includeHasBeenSet = true; + } + + if(jsonValue.ValueExists("Exclude")) + { + Aws::Utils::Array excludeJsonList = jsonValue.GetArray("Exclude"); + for(unsigned excludeIndex = 0; excludeIndex < excludeJsonList.GetLength(); ++excludeIndex) + { + m_exclude.push_back(excludeJsonList[excludeIndex].AsString()); + } + m_excludeHasBeenSet = true; + } + + return *this; +} + +JsonValue DatabaseColumnList::Jsonize() const +{ + JsonValue payload; + + if(m_includeHasBeenSet) + { + Aws::Utils::Array includeJsonList(m_include.size()); + for(unsigned includeIndex = 0; includeIndex < includeJsonList.GetLength(); ++includeIndex) + { + includeJsonList[includeIndex].AsString(m_include[includeIndex]); + } + payload.WithArray("Include", std::move(includeJsonList)); + + } + + if(m_excludeHasBeenSet) + { + Aws::Utils::Array excludeJsonList(m_exclude.size()); + for(unsigned excludeIndex = 0; excludeIndex < excludeJsonList.GetLength(); ++excludeIndex) + { + excludeJsonList[excludeIndex].AsString(m_exclude[excludeIndex]); + } + payload.WithArray("Exclude", std::move(excludeJsonList)); + + } + + return payload; +} + +} // namespace Model +} // namespace Firehose +} // namespace Aws diff --git 
a/generated/src/aws-cpp-sdk-firehose/source/model/DatabaseList.cpp b/generated/src/aws-cpp-sdk-firehose/source/model/DatabaseList.cpp new file mode 100644 index 00000000000..fb38ab0318e --- /dev/null +++ b/generated/src/aws-cpp-sdk-firehose/source/model/DatabaseList.cpp @@ -0,0 +1,89 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#include +#include + +#include + +using namespace Aws::Utils::Json; +using namespace Aws::Utils; + +namespace Aws +{ +namespace Firehose +{ +namespace Model +{ + +DatabaseList::DatabaseList() : + m_includeHasBeenSet(false), + m_excludeHasBeenSet(false) +{ +} + +DatabaseList::DatabaseList(JsonView jsonValue) + : DatabaseList() +{ + *this = jsonValue; +} + +DatabaseList& DatabaseList::operator =(JsonView jsonValue) +{ + if(jsonValue.ValueExists("Include")) + { + Aws::Utils::Array includeJsonList = jsonValue.GetArray("Include"); + for(unsigned includeIndex = 0; includeIndex < includeJsonList.GetLength(); ++includeIndex) + { + m_include.push_back(includeJsonList[includeIndex].AsString()); + } + m_includeHasBeenSet = true; + } + + if(jsonValue.ValueExists("Exclude")) + { + Aws::Utils::Array excludeJsonList = jsonValue.GetArray("Exclude"); + for(unsigned excludeIndex = 0; excludeIndex < excludeJsonList.GetLength(); ++excludeIndex) + { + m_exclude.push_back(excludeJsonList[excludeIndex].AsString()); + } + m_excludeHasBeenSet = true; + } + + return *this; +} + +JsonValue DatabaseList::Jsonize() const +{ + JsonValue payload; + + if(m_includeHasBeenSet) + { + Aws::Utils::Array includeJsonList(m_include.size()); + for(unsigned includeIndex = 0; includeIndex < includeJsonList.GetLength(); ++includeIndex) + { + includeJsonList[includeIndex].AsString(m_include[includeIndex]); + } + payload.WithArray("Include", std::move(includeJsonList)); + + } + + if(m_excludeHasBeenSet) + { + Aws::Utils::Array excludeJsonList(m_exclude.size()); + for(unsigned excludeIndex = 0; 
excludeIndex < excludeJsonList.GetLength(); ++excludeIndex) + { + excludeJsonList[excludeIndex].AsString(m_exclude[excludeIndex]); + } + payload.WithArray("Exclude", std::move(excludeJsonList)); + + } + + return payload; +} + +} // namespace Model +} // namespace Firehose +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-firehose/source/model/DatabaseSnapshotInfo.cpp b/generated/src/aws-cpp-sdk-firehose/source/model/DatabaseSnapshotInfo.cpp new file mode 100644 index 00000000000..99757c64ca4 --- /dev/null +++ b/generated/src/aws-cpp-sdk-firehose/source/model/DatabaseSnapshotInfo.cpp @@ -0,0 +1,128 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#include +#include + +#include + +using namespace Aws::Utils::Json; +using namespace Aws::Utils; + +namespace Aws +{ +namespace Firehose +{ +namespace Model +{ + +DatabaseSnapshotInfo::DatabaseSnapshotInfo() : + m_idHasBeenSet(false), + m_tableHasBeenSet(false), + m_requestTimestampHasBeenSet(false), + m_requestedBy(SnapshotRequestedBy::NOT_SET), + m_requestedByHasBeenSet(false), + m_status(SnapshotStatus::NOT_SET), + m_statusHasBeenSet(false), + m_failureDescriptionHasBeenSet(false) +{ +} + +DatabaseSnapshotInfo::DatabaseSnapshotInfo(JsonView jsonValue) + : DatabaseSnapshotInfo() +{ + *this = jsonValue; +} + +DatabaseSnapshotInfo& DatabaseSnapshotInfo::operator =(JsonView jsonValue) +{ + if(jsonValue.ValueExists("Id")) + { + m_id = jsonValue.GetString("Id"); + + m_idHasBeenSet = true; + } + + if(jsonValue.ValueExists("Table")) + { + m_table = jsonValue.GetString("Table"); + + m_tableHasBeenSet = true; + } + + if(jsonValue.ValueExists("RequestTimestamp")) + { + m_requestTimestamp = jsonValue.GetDouble("RequestTimestamp"); + + m_requestTimestampHasBeenSet = true; + } + + if(jsonValue.ValueExists("RequestedBy")) + { + m_requestedBy = SnapshotRequestedByMapper::GetSnapshotRequestedByForName(jsonValue.GetString("RequestedBy")); + + 
m_requestedByHasBeenSet = true; + } + + if(jsonValue.ValueExists("Status")) + { + m_status = SnapshotStatusMapper::GetSnapshotStatusForName(jsonValue.GetString("Status")); + + m_statusHasBeenSet = true; + } + + if(jsonValue.ValueExists("FailureDescription")) + { + m_failureDescription = jsonValue.GetObject("FailureDescription"); + + m_failureDescriptionHasBeenSet = true; + } + + return *this; +} + +JsonValue DatabaseSnapshotInfo::Jsonize() const +{ + JsonValue payload; + + if(m_idHasBeenSet) + { + payload.WithString("Id", m_id); + + } + + if(m_tableHasBeenSet) + { + payload.WithString("Table", m_table); + + } + + if(m_requestTimestampHasBeenSet) + { + payload.WithDouble("RequestTimestamp", m_requestTimestamp.SecondsWithMSPrecision()); + } + + if(m_requestedByHasBeenSet) + { + payload.WithString("RequestedBy", SnapshotRequestedByMapper::GetNameForSnapshotRequestedBy(m_requestedBy)); + } + + if(m_statusHasBeenSet) + { + payload.WithString("Status", SnapshotStatusMapper::GetNameForSnapshotStatus(m_status)); + } + + if(m_failureDescriptionHasBeenSet) + { + payload.WithObject("FailureDescription", m_failureDescription.Jsonize()); + + } + + return payload; +} + +} // namespace Model +} // namespace Firehose +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-firehose/source/model/DatabaseSourceAuthenticationConfiguration.cpp b/generated/src/aws-cpp-sdk-firehose/source/model/DatabaseSourceAuthenticationConfiguration.cpp new file mode 100644 index 00000000000..4e0f62aa19c --- /dev/null +++ b/generated/src/aws-cpp-sdk-firehose/source/model/DatabaseSourceAuthenticationConfiguration.cpp @@ -0,0 +1,59 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +#include +#include + +#include + +using namespace Aws::Utils::Json; +using namespace Aws::Utils; + +namespace Aws +{ +namespace Firehose +{ +namespace Model +{ + +DatabaseSourceAuthenticationConfiguration::DatabaseSourceAuthenticationConfiguration() : + m_secretsManagerConfigurationHasBeenSet(false) +{ +} + +DatabaseSourceAuthenticationConfiguration::DatabaseSourceAuthenticationConfiguration(JsonView jsonValue) + : DatabaseSourceAuthenticationConfiguration() +{ + *this = jsonValue; +} + +DatabaseSourceAuthenticationConfiguration& DatabaseSourceAuthenticationConfiguration::operator =(JsonView jsonValue) +{ + if(jsonValue.ValueExists("SecretsManagerConfiguration")) + { + m_secretsManagerConfiguration = jsonValue.GetObject("SecretsManagerConfiguration"); + + m_secretsManagerConfigurationHasBeenSet = true; + } + + return *this; +} + +JsonValue DatabaseSourceAuthenticationConfiguration::Jsonize() const +{ + JsonValue payload; + + if(m_secretsManagerConfigurationHasBeenSet) + { + payload.WithObject("SecretsManagerConfiguration", m_secretsManagerConfiguration.Jsonize()); + + } + + return payload; +} + +} // namespace Model +} // namespace Firehose +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-firehose/source/model/DatabaseSourceConfiguration.cpp b/generated/src/aws-cpp-sdk-firehose/source/model/DatabaseSourceConfiguration.cpp new file mode 100644 index 00000000000..fea0fdaa361 --- /dev/null +++ b/generated/src/aws-cpp-sdk-firehose/source/model/DatabaseSourceConfiguration.cpp @@ -0,0 +1,208 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +#include +#include + +#include + +using namespace Aws::Utils::Json; +using namespace Aws::Utils; + +namespace Aws +{ +namespace Firehose +{ +namespace Model +{ + +DatabaseSourceConfiguration::DatabaseSourceConfiguration() : + m_type(DatabaseType::NOT_SET), + m_typeHasBeenSet(false), + m_endpointHasBeenSet(false), + m_port(0), + m_portHasBeenSet(false), + m_sSLMode(SSLMode::NOT_SET), + m_sSLModeHasBeenSet(false), + m_databasesHasBeenSet(false), + m_tablesHasBeenSet(false), + m_columnsHasBeenSet(false), + m_surrogateKeysHasBeenSet(false), + m_snapshotWatermarkTableHasBeenSet(false), + m_databaseSourceAuthenticationConfigurationHasBeenSet(false), + m_databaseSourceVPCConfigurationHasBeenSet(false) +{ +} + +DatabaseSourceConfiguration::DatabaseSourceConfiguration(JsonView jsonValue) + : DatabaseSourceConfiguration() +{ + *this = jsonValue; +} + +DatabaseSourceConfiguration& DatabaseSourceConfiguration::operator =(JsonView jsonValue) +{ + if(jsonValue.ValueExists("Type")) + { + m_type = DatabaseTypeMapper::GetDatabaseTypeForName(jsonValue.GetString("Type")); + + m_typeHasBeenSet = true; + } + + if(jsonValue.ValueExists("Endpoint")) + { + m_endpoint = jsonValue.GetString("Endpoint"); + + m_endpointHasBeenSet = true; + } + + if(jsonValue.ValueExists("Port")) + { + m_port = jsonValue.GetInteger("Port"); + + m_portHasBeenSet = true; + } + + if(jsonValue.ValueExists("SSLMode")) + { + m_sSLMode = SSLModeMapper::GetSSLModeForName(jsonValue.GetString("SSLMode")); + + m_sSLModeHasBeenSet = true; + } + + if(jsonValue.ValueExists("Databases")) + { + m_databases = jsonValue.GetObject("Databases"); + + m_databasesHasBeenSet = true; + } + + if(jsonValue.ValueExists("Tables")) + { + m_tables = jsonValue.GetObject("Tables"); + + m_tablesHasBeenSet = true; + } + + if(jsonValue.ValueExists("Columns")) + { + m_columns = jsonValue.GetObject("Columns"); + + m_columnsHasBeenSet = true; + } + + if(jsonValue.ValueExists("SurrogateKeys")) + { + Aws::Utils::Array surrogateKeysJsonList = 
jsonValue.GetArray("SurrogateKeys"); + for(unsigned surrogateKeysIndex = 0; surrogateKeysIndex < surrogateKeysJsonList.GetLength(); ++surrogateKeysIndex) + { + m_surrogateKeys.push_back(surrogateKeysJsonList[surrogateKeysIndex].AsString()); + } + m_surrogateKeysHasBeenSet = true; + } + + if(jsonValue.ValueExists("SnapshotWatermarkTable")) + { + m_snapshotWatermarkTable = jsonValue.GetString("SnapshotWatermarkTable"); + + m_snapshotWatermarkTableHasBeenSet = true; + } + + if(jsonValue.ValueExists("DatabaseSourceAuthenticationConfiguration")) + { + m_databaseSourceAuthenticationConfiguration = jsonValue.GetObject("DatabaseSourceAuthenticationConfiguration"); + + m_databaseSourceAuthenticationConfigurationHasBeenSet = true; + } + + if(jsonValue.ValueExists("DatabaseSourceVPCConfiguration")) + { + m_databaseSourceVPCConfiguration = jsonValue.GetObject("DatabaseSourceVPCConfiguration"); + + m_databaseSourceVPCConfigurationHasBeenSet = true; + } + + return *this; +} + +JsonValue DatabaseSourceConfiguration::Jsonize() const +{ + JsonValue payload; + + if(m_typeHasBeenSet) + { + payload.WithString("Type", DatabaseTypeMapper::GetNameForDatabaseType(m_type)); + } + + if(m_endpointHasBeenSet) + { + payload.WithString("Endpoint", m_endpoint); + + } + + if(m_portHasBeenSet) + { + payload.WithInteger("Port", m_port); + + } + + if(m_sSLModeHasBeenSet) + { + payload.WithString("SSLMode", SSLModeMapper::GetNameForSSLMode(m_sSLMode)); + } + + if(m_databasesHasBeenSet) + { + payload.WithObject("Databases", m_databases.Jsonize()); + + } + + if(m_tablesHasBeenSet) + { + payload.WithObject("Tables", m_tables.Jsonize()); + + } + + if(m_columnsHasBeenSet) + { + payload.WithObject("Columns", m_columns.Jsonize()); + + } + + if(m_surrogateKeysHasBeenSet) + { + Aws::Utils::Array surrogateKeysJsonList(m_surrogateKeys.size()); + for(unsigned surrogateKeysIndex = 0; surrogateKeysIndex < surrogateKeysJsonList.GetLength(); ++surrogateKeysIndex) + { + 
surrogateKeysJsonList[surrogateKeysIndex].AsString(m_surrogateKeys[surrogateKeysIndex]); + } + payload.WithArray("SurrogateKeys", std::move(surrogateKeysJsonList)); + + } + + if(m_snapshotWatermarkTableHasBeenSet) + { + payload.WithString("SnapshotWatermarkTable", m_snapshotWatermarkTable); + + } + + if(m_databaseSourceAuthenticationConfigurationHasBeenSet) + { + payload.WithObject("DatabaseSourceAuthenticationConfiguration", m_databaseSourceAuthenticationConfiguration.Jsonize()); + + } + + if(m_databaseSourceVPCConfigurationHasBeenSet) + { + payload.WithObject("DatabaseSourceVPCConfiguration", m_databaseSourceVPCConfiguration.Jsonize()); + + } + + return payload; +} + +} // namespace Model +} // namespace Firehose +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-firehose/source/model/DatabaseSourceDescription.cpp b/generated/src/aws-cpp-sdk-firehose/source/model/DatabaseSourceDescription.cpp new file mode 100644 index 00000000000..c02d50723fe --- /dev/null +++ b/generated/src/aws-cpp-sdk-firehose/source/model/DatabaseSourceDescription.cpp @@ -0,0 +1,230 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +#include +#include + +#include + +using namespace Aws::Utils::Json; +using namespace Aws::Utils; + +namespace Aws +{ +namespace Firehose +{ +namespace Model +{ + +DatabaseSourceDescription::DatabaseSourceDescription() : + m_type(DatabaseType::NOT_SET), + m_typeHasBeenSet(false), + m_endpointHasBeenSet(false), + m_port(0), + m_portHasBeenSet(false), + m_sSLMode(SSLMode::NOT_SET), + m_sSLModeHasBeenSet(false), + m_databasesHasBeenSet(false), + m_tablesHasBeenSet(false), + m_columnsHasBeenSet(false), + m_surrogateKeysHasBeenSet(false), + m_snapshotWatermarkTableHasBeenSet(false), + m_snapshotInfoHasBeenSet(false), + m_databaseSourceAuthenticationConfigurationHasBeenSet(false), + m_databaseSourceVPCConfigurationHasBeenSet(false) +{ +} + +DatabaseSourceDescription::DatabaseSourceDescription(JsonView jsonValue) + : DatabaseSourceDescription() +{ + *this = jsonValue; +} + +DatabaseSourceDescription& DatabaseSourceDescription::operator =(JsonView jsonValue) +{ + if(jsonValue.ValueExists("Type")) + { + m_type = DatabaseTypeMapper::GetDatabaseTypeForName(jsonValue.GetString("Type")); + + m_typeHasBeenSet = true; + } + + if(jsonValue.ValueExists("Endpoint")) + { + m_endpoint = jsonValue.GetString("Endpoint"); + + m_endpointHasBeenSet = true; + } + + if(jsonValue.ValueExists("Port")) + { + m_port = jsonValue.GetInteger("Port"); + + m_portHasBeenSet = true; + } + + if(jsonValue.ValueExists("SSLMode")) + { + m_sSLMode = SSLModeMapper::GetSSLModeForName(jsonValue.GetString("SSLMode")); + + m_sSLModeHasBeenSet = true; + } + + if(jsonValue.ValueExists("Databases")) + { + m_databases = jsonValue.GetObject("Databases"); + + m_databasesHasBeenSet = true; + } + + if(jsonValue.ValueExists("Tables")) + { + m_tables = jsonValue.GetObject("Tables"); + + m_tablesHasBeenSet = true; + } + + if(jsonValue.ValueExists("Columns")) + { + m_columns = jsonValue.GetObject("Columns"); + + m_columnsHasBeenSet = true; + } + + if(jsonValue.ValueExists("SurrogateKeys")) + { + Aws::Utils::Array 
surrogateKeysJsonList = jsonValue.GetArray("SurrogateKeys"); + for(unsigned surrogateKeysIndex = 0; surrogateKeysIndex < surrogateKeysJsonList.GetLength(); ++surrogateKeysIndex) + { + m_surrogateKeys.push_back(surrogateKeysJsonList[surrogateKeysIndex].AsString()); + } + m_surrogateKeysHasBeenSet = true; + } + + if(jsonValue.ValueExists("SnapshotWatermarkTable")) + { + m_snapshotWatermarkTable = jsonValue.GetString("SnapshotWatermarkTable"); + + m_snapshotWatermarkTableHasBeenSet = true; + } + + if(jsonValue.ValueExists("SnapshotInfo")) + { + Aws::Utils::Array snapshotInfoJsonList = jsonValue.GetArray("SnapshotInfo"); + for(unsigned snapshotInfoIndex = 0; snapshotInfoIndex < snapshotInfoJsonList.GetLength(); ++snapshotInfoIndex) + { + m_snapshotInfo.push_back(snapshotInfoJsonList[snapshotInfoIndex].AsObject()); + } + m_snapshotInfoHasBeenSet = true; + } + + if(jsonValue.ValueExists("DatabaseSourceAuthenticationConfiguration")) + { + m_databaseSourceAuthenticationConfiguration = jsonValue.GetObject("DatabaseSourceAuthenticationConfiguration"); + + m_databaseSourceAuthenticationConfigurationHasBeenSet = true; + } + + if(jsonValue.ValueExists("DatabaseSourceVPCConfiguration")) + { + m_databaseSourceVPCConfiguration = jsonValue.GetObject("DatabaseSourceVPCConfiguration"); + + m_databaseSourceVPCConfigurationHasBeenSet = true; + } + + return *this; +} + +JsonValue DatabaseSourceDescription::Jsonize() const +{ + JsonValue payload; + + if(m_typeHasBeenSet) + { + payload.WithString("Type", DatabaseTypeMapper::GetNameForDatabaseType(m_type)); + } + + if(m_endpointHasBeenSet) + { + payload.WithString("Endpoint", m_endpoint); + + } + + if(m_portHasBeenSet) + { + payload.WithInteger("Port", m_port); + + } + + if(m_sSLModeHasBeenSet) + { + payload.WithString("SSLMode", SSLModeMapper::GetNameForSSLMode(m_sSLMode)); + } + + if(m_databasesHasBeenSet) + { + payload.WithObject("Databases", m_databases.Jsonize()); + + } + + if(m_tablesHasBeenSet) + { + payload.WithObject("Tables", 
m_tables.Jsonize()); + + } + + if(m_columnsHasBeenSet) + { + payload.WithObject("Columns", m_columns.Jsonize()); + + } + + if(m_surrogateKeysHasBeenSet) + { + Aws::Utils::Array surrogateKeysJsonList(m_surrogateKeys.size()); + for(unsigned surrogateKeysIndex = 0; surrogateKeysIndex < surrogateKeysJsonList.GetLength(); ++surrogateKeysIndex) + { + surrogateKeysJsonList[surrogateKeysIndex].AsString(m_surrogateKeys[surrogateKeysIndex]); + } + payload.WithArray("SurrogateKeys", std::move(surrogateKeysJsonList)); + + } + + if(m_snapshotWatermarkTableHasBeenSet) + { + payload.WithString("SnapshotWatermarkTable", m_snapshotWatermarkTable); + + } + + if(m_snapshotInfoHasBeenSet) + { + Aws::Utils::Array snapshotInfoJsonList(m_snapshotInfo.size()); + for(unsigned snapshotInfoIndex = 0; snapshotInfoIndex < snapshotInfoJsonList.GetLength(); ++snapshotInfoIndex) + { + snapshotInfoJsonList[snapshotInfoIndex].AsObject(m_snapshotInfo[snapshotInfoIndex].Jsonize()); + } + payload.WithArray("SnapshotInfo", std::move(snapshotInfoJsonList)); + + } + + if(m_databaseSourceAuthenticationConfigurationHasBeenSet) + { + payload.WithObject("DatabaseSourceAuthenticationConfiguration", m_databaseSourceAuthenticationConfiguration.Jsonize()); + + } + + if(m_databaseSourceVPCConfigurationHasBeenSet) + { + payload.WithObject("DatabaseSourceVPCConfiguration", m_databaseSourceVPCConfiguration.Jsonize()); + + } + + return payload; +} + +} // namespace Model +} // namespace Firehose +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-firehose/source/model/DatabaseSourceVPCConfiguration.cpp b/generated/src/aws-cpp-sdk-firehose/source/model/DatabaseSourceVPCConfiguration.cpp new file mode 100644 index 00000000000..832e8ad2fda --- /dev/null +++ b/generated/src/aws-cpp-sdk-firehose/source/model/DatabaseSourceVPCConfiguration.cpp @@ -0,0 +1,59 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +#include +#include + +#include + +using namespace Aws::Utils::Json; +using namespace Aws::Utils; + +namespace Aws +{ +namespace Firehose +{ +namespace Model +{ + +DatabaseSourceVPCConfiguration::DatabaseSourceVPCConfiguration() : + m_vpcEndpointServiceNameHasBeenSet(false) +{ +} + +DatabaseSourceVPCConfiguration::DatabaseSourceVPCConfiguration(JsonView jsonValue) + : DatabaseSourceVPCConfiguration() +{ + *this = jsonValue; +} + +DatabaseSourceVPCConfiguration& DatabaseSourceVPCConfiguration::operator =(JsonView jsonValue) +{ + if(jsonValue.ValueExists("VpcEndpointServiceName")) + { + m_vpcEndpointServiceName = jsonValue.GetString("VpcEndpointServiceName"); + + m_vpcEndpointServiceNameHasBeenSet = true; + } + + return *this; +} + +JsonValue DatabaseSourceVPCConfiguration::Jsonize() const +{ + JsonValue payload; + + if(m_vpcEndpointServiceNameHasBeenSet) + { + payload.WithString("VpcEndpointServiceName", m_vpcEndpointServiceName); + + } + + return payload; +} + +} // namespace Model +} // namespace Firehose +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-firehose/source/model/DatabaseTableList.cpp b/generated/src/aws-cpp-sdk-firehose/source/model/DatabaseTableList.cpp new file mode 100644 index 00000000000..d010d365c60 --- /dev/null +++ b/generated/src/aws-cpp-sdk-firehose/source/model/DatabaseTableList.cpp @@ -0,0 +1,89 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +#include +#include + +#include + +using namespace Aws::Utils::Json; +using namespace Aws::Utils; + +namespace Aws +{ +namespace Firehose +{ +namespace Model +{ + +DatabaseTableList::DatabaseTableList() : + m_includeHasBeenSet(false), + m_excludeHasBeenSet(false) +{ +} + +DatabaseTableList::DatabaseTableList(JsonView jsonValue) + : DatabaseTableList() +{ + *this = jsonValue; +} + +DatabaseTableList& DatabaseTableList::operator =(JsonView jsonValue) +{ + if(jsonValue.ValueExists("Include")) + { + Aws::Utils::Array includeJsonList = jsonValue.GetArray("Include"); + for(unsigned includeIndex = 0; includeIndex < includeJsonList.GetLength(); ++includeIndex) + { + m_include.push_back(includeJsonList[includeIndex].AsString()); + } + m_includeHasBeenSet = true; + } + + if(jsonValue.ValueExists("Exclude")) + { + Aws::Utils::Array excludeJsonList = jsonValue.GetArray("Exclude"); + for(unsigned excludeIndex = 0; excludeIndex < excludeJsonList.GetLength(); ++excludeIndex) + { + m_exclude.push_back(excludeJsonList[excludeIndex].AsString()); + } + m_excludeHasBeenSet = true; + } + + return *this; +} + +JsonValue DatabaseTableList::Jsonize() const +{ + JsonValue payload; + + if(m_includeHasBeenSet) + { + Aws::Utils::Array includeJsonList(m_include.size()); + for(unsigned includeIndex = 0; includeIndex < includeJsonList.GetLength(); ++includeIndex) + { + includeJsonList[includeIndex].AsString(m_include[includeIndex]); + } + payload.WithArray("Include", std::move(includeJsonList)); + + } + + if(m_excludeHasBeenSet) + { + Aws::Utils::Array excludeJsonList(m_exclude.size()); + for(unsigned excludeIndex = 0; excludeIndex < excludeJsonList.GetLength(); ++excludeIndex) + { + excludeJsonList[excludeIndex].AsString(m_exclude[excludeIndex]); + } + payload.WithArray("Exclude", std::move(excludeJsonList)); + + } + + return payload; +} + +} // namespace Model +} // namespace Firehose +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-firehose/source/model/DatabaseType.cpp 
b/generated/src/aws-cpp-sdk-firehose/source/model/DatabaseType.cpp new file mode 100644 index 00000000000..8cd5483722a --- /dev/null +++ b/generated/src/aws-cpp-sdk-firehose/source/model/DatabaseType.cpp @@ -0,0 +1,72 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#include +#include +#include +#include + +using namespace Aws::Utils; + + +namespace Aws +{ + namespace Firehose + { + namespace Model + { + namespace DatabaseTypeMapper + { + + static const int MySQL_HASH = HashingUtils::HashString("MySQL"); + static const int PostgreSQL_HASH = HashingUtils::HashString("PostgreSQL"); + + + DatabaseType GetDatabaseTypeForName(const Aws::String& name) + { + int hashCode = HashingUtils::HashString(name.c_str()); + if (hashCode == MySQL_HASH) + { + return DatabaseType::MySQL; + } + else if (hashCode == PostgreSQL_HASH) + { + return DatabaseType::PostgreSQL; + } + EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer(); + if(overflowContainer) + { + overflowContainer->StoreOverflow(hashCode, name); + return static_cast(hashCode); + } + + return DatabaseType::NOT_SET; + } + + Aws::String GetNameForDatabaseType(DatabaseType enumValue) + { + switch(enumValue) + { + case DatabaseType::NOT_SET: + return {}; + case DatabaseType::MySQL: + return "MySQL"; + case DatabaseType::PostgreSQL: + return "PostgreSQL"; + default: + EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer(); + if(overflowContainer) + { + return overflowContainer->RetrieveOverflow(static_cast(enumValue)); + } + + return {}; + } + } + + } // namespace DatabaseTypeMapper + } // namespace Model + } // namespace Firehose +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-firehose/source/model/DeliveryStreamFailureType.cpp b/generated/src/aws-cpp-sdk-firehose/source/model/DeliveryStreamFailureType.cpp index c5e39c2fb32..85cbf5cf5d4 100644 --- 
a/generated/src/aws-cpp-sdk-firehose/source/model/DeliveryStreamFailureType.cpp +++ b/generated/src/aws-cpp-sdk-firehose/source/model/DeliveryStreamFailureType.cpp @@ -20,6 +20,8 @@ namespace Aws namespace DeliveryStreamFailureTypeMapper { + static const int VPC_ENDPOINT_SERVICE_NAME_NOT_FOUND_HASH = HashingUtils::HashString("VPC_ENDPOINT_SERVICE_NAME_NOT_FOUND"); + static const int VPC_INTERFACE_ENDPOINT_SERVICE_ACCESS_DENIED_HASH = HashingUtils::HashString("VPC_INTERFACE_ENDPOINT_SERVICE_ACCESS_DENIED"); static const int RETIRE_KMS_GRANT_FAILED_HASH = HashingUtils::HashString("RETIRE_KMS_GRANT_FAILED"); static const int CREATE_KMS_GRANT_FAILED_HASH = HashingUtils::HashString("CREATE_KMS_GRANT_FAILED"); static const int KMS_ACCESS_DENIED_HASH = HashingUtils::HashString("KMS_ACCESS_DENIED"); @@ -40,7 +42,15 @@ namespace Aws DeliveryStreamFailureType GetDeliveryStreamFailureTypeForName(const Aws::String& name) { int hashCode = HashingUtils::HashString(name.c_str()); - if (hashCode == RETIRE_KMS_GRANT_FAILED_HASH) + if (hashCode == VPC_ENDPOINT_SERVICE_NAME_NOT_FOUND_HASH) + { + return DeliveryStreamFailureType::VPC_ENDPOINT_SERVICE_NAME_NOT_FOUND; + } + else if (hashCode == VPC_INTERFACE_ENDPOINT_SERVICE_ACCESS_DENIED_HASH) + { + return DeliveryStreamFailureType::VPC_INTERFACE_ENDPOINT_SERVICE_ACCESS_DENIED; + } + else if (hashCode == RETIRE_KMS_GRANT_FAILED_HASH) { return DeliveryStreamFailureType::RETIRE_KMS_GRANT_FAILED; } @@ -116,6 +126,10 @@ namespace Aws { case DeliveryStreamFailureType::NOT_SET: return {}; + case DeliveryStreamFailureType::VPC_ENDPOINT_SERVICE_NAME_NOT_FOUND: + return "VPC_ENDPOINT_SERVICE_NAME_NOT_FOUND"; + case DeliveryStreamFailureType::VPC_INTERFACE_ENDPOINT_SERVICE_ACCESS_DENIED: + return "VPC_INTERFACE_ENDPOINT_SERVICE_ACCESS_DENIED"; case DeliveryStreamFailureType::RETIRE_KMS_GRANT_FAILED: return "RETIRE_KMS_GRANT_FAILED"; case DeliveryStreamFailureType::CREATE_KMS_GRANT_FAILED: diff --git 
a/generated/src/aws-cpp-sdk-firehose/source/model/DeliveryStreamType.cpp b/generated/src/aws-cpp-sdk-firehose/source/model/DeliveryStreamType.cpp index 395c3261740..f17a51be7e5 100644 --- a/generated/src/aws-cpp-sdk-firehose/source/model/DeliveryStreamType.cpp +++ b/generated/src/aws-cpp-sdk-firehose/source/model/DeliveryStreamType.cpp @@ -23,6 +23,7 @@ namespace Aws static const int DirectPut_HASH = HashingUtils::HashString("DirectPut"); static const int KinesisStreamAsSource_HASH = HashingUtils::HashString("KinesisStreamAsSource"); static const int MSKAsSource_HASH = HashingUtils::HashString("MSKAsSource"); + static const int DatabaseAsSource_HASH = HashingUtils::HashString("DatabaseAsSource"); DeliveryStreamType GetDeliveryStreamTypeForName(const Aws::String& name) @@ -40,6 +41,10 @@ namespace Aws { return DeliveryStreamType::MSKAsSource; } + else if (hashCode == DatabaseAsSource_HASH) + { + return DeliveryStreamType::DatabaseAsSource; + } EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer(); if(overflowContainer) { @@ -62,6 +67,8 @@ namespace Aws return "KinesisStreamAsSource"; case DeliveryStreamType::MSKAsSource: return "MSKAsSource"; + case DeliveryStreamType::DatabaseAsSource: + return "DatabaseAsSource"; default: EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer(); if(overflowContainer) diff --git a/generated/src/aws-cpp-sdk-firehose/source/model/DestinationTableConfiguration.cpp b/generated/src/aws-cpp-sdk-firehose/source/model/DestinationTableConfiguration.cpp index 56ed92217e1..b8c6ee12b09 100644 --- a/generated/src/aws-cpp-sdk-firehose/source/model/DestinationTableConfiguration.cpp +++ b/generated/src/aws-cpp-sdk-firehose/source/model/DestinationTableConfiguration.cpp @@ -22,6 +22,7 @@ DestinationTableConfiguration::DestinationTableConfiguration() : m_destinationTableNameHasBeenSet(false), m_destinationDatabaseNameHasBeenSet(false), m_uniqueKeysHasBeenSet(false), + m_partitionSpecHasBeenSet(false), 
m_s3ErrorOutputPrefixHasBeenSet(false) { } @@ -58,6 +59,13 @@ DestinationTableConfiguration& DestinationTableConfiguration::operator =(JsonVie m_uniqueKeysHasBeenSet = true; } + if(jsonValue.ValueExists("PartitionSpec")) + { + m_partitionSpec = jsonValue.GetObject("PartitionSpec"); + + m_partitionSpecHasBeenSet = true; + } + if(jsonValue.ValueExists("S3ErrorOutputPrefix")) { m_s3ErrorOutputPrefix = jsonValue.GetString("S3ErrorOutputPrefix"); @@ -95,6 +103,12 @@ JsonValue DestinationTableConfiguration::Jsonize() const } + if(m_partitionSpecHasBeenSet) + { + payload.WithObject("PartitionSpec", m_partitionSpec.Jsonize()); + + } + if(m_s3ErrorOutputPrefixHasBeenSet) { payload.WithString("S3ErrorOutputPrefix", m_s3ErrorOutputPrefix); diff --git a/generated/src/aws-cpp-sdk-firehose/source/model/IcebergDestinationConfiguration.cpp b/generated/src/aws-cpp-sdk-firehose/source/model/IcebergDestinationConfiguration.cpp index d9cf973434f..e1274226845 100644 --- a/generated/src/aws-cpp-sdk-firehose/source/model/IcebergDestinationConfiguration.cpp +++ b/generated/src/aws-cpp-sdk-firehose/source/model/IcebergDestinationConfiguration.cpp @@ -20,6 +20,8 @@ namespace Model IcebergDestinationConfiguration::IcebergDestinationConfiguration() : m_destinationTableConfigurationListHasBeenSet(false), + m_schemaEvolutionConfigurationHasBeenSet(false), + m_tableCreationConfigurationHasBeenSet(false), m_bufferingHintsHasBeenSet(false), m_cloudWatchLoggingOptionsHasBeenSet(false), m_processingConfigurationHasBeenSet(false), @@ -50,6 +52,20 @@ IcebergDestinationConfiguration& IcebergDestinationConfiguration::operator =(Jso m_destinationTableConfigurationListHasBeenSet = true; } + if(jsonValue.ValueExists("SchemaEvolutionConfiguration")) + { + m_schemaEvolutionConfiguration = jsonValue.GetObject("SchemaEvolutionConfiguration"); + + m_schemaEvolutionConfigurationHasBeenSet = true; + } + + if(jsonValue.ValueExists("TableCreationConfiguration")) + { + m_tableCreationConfiguration = 
jsonValue.GetObject("TableCreationConfiguration"); + + m_tableCreationConfigurationHasBeenSet = true; + } + if(jsonValue.ValueExists("BufferingHints")) { m_bufferingHints = jsonValue.GetObject("BufferingHints"); @@ -124,6 +140,18 @@ JsonValue IcebergDestinationConfiguration::Jsonize() const } + if(m_schemaEvolutionConfigurationHasBeenSet) + { + payload.WithObject("SchemaEvolutionConfiguration", m_schemaEvolutionConfiguration.Jsonize()); + + } + + if(m_tableCreationConfigurationHasBeenSet) + { + payload.WithObject("TableCreationConfiguration", m_tableCreationConfiguration.Jsonize()); + + } + if(m_bufferingHintsHasBeenSet) { payload.WithObject("BufferingHints", m_bufferingHints.Jsonize()); diff --git a/generated/src/aws-cpp-sdk-firehose/source/model/IcebergDestinationDescription.cpp b/generated/src/aws-cpp-sdk-firehose/source/model/IcebergDestinationDescription.cpp index 37a16dea140..9d4828e34ca 100644 --- a/generated/src/aws-cpp-sdk-firehose/source/model/IcebergDestinationDescription.cpp +++ b/generated/src/aws-cpp-sdk-firehose/source/model/IcebergDestinationDescription.cpp @@ -20,6 +20,8 @@ namespace Model IcebergDestinationDescription::IcebergDestinationDescription() : m_destinationTableConfigurationListHasBeenSet(false), + m_schemaEvolutionConfigurationHasBeenSet(false), + m_tableCreationConfigurationHasBeenSet(false), m_bufferingHintsHasBeenSet(false), m_cloudWatchLoggingOptionsHasBeenSet(false), m_processingConfigurationHasBeenSet(false), @@ -50,6 +52,20 @@ IcebergDestinationDescription& IcebergDestinationDescription::operator =(JsonVie m_destinationTableConfigurationListHasBeenSet = true; } + if(jsonValue.ValueExists("SchemaEvolutionConfiguration")) + { + m_schemaEvolutionConfiguration = jsonValue.GetObject("SchemaEvolutionConfiguration"); + + m_schemaEvolutionConfigurationHasBeenSet = true; + } + + if(jsonValue.ValueExists("TableCreationConfiguration")) + { + m_tableCreationConfiguration = jsonValue.GetObject("TableCreationConfiguration"); + + 
m_tableCreationConfigurationHasBeenSet = true; + } + if(jsonValue.ValueExists("BufferingHints")) { m_bufferingHints = jsonValue.GetObject("BufferingHints"); @@ -124,6 +140,18 @@ JsonValue IcebergDestinationDescription::Jsonize() const } + if(m_schemaEvolutionConfigurationHasBeenSet) + { + payload.WithObject("SchemaEvolutionConfiguration", m_schemaEvolutionConfiguration.Jsonize()); + + } + + if(m_tableCreationConfigurationHasBeenSet) + { + payload.WithObject("TableCreationConfiguration", m_tableCreationConfiguration.Jsonize()); + + } + if(m_bufferingHintsHasBeenSet) { payload.WithObject("BufferingHints", m_bufferingHints.Jsonize()); diff --git a/generated/src/aws-cpp-sdk-firehose/source/model/IcebergDestinationUpdate.cpp b/generated/src/aws-cpp-sdk-firehose/source/model/IcebergDestinationUpdate.cpp index 4c335315355..edb72d9bb0d 100644 --- a/generated/src/aws-cpp-sdk-firehose/source/model/IcebergDestinationUpdate.cpp +++ b/generated/src/aws-cpp-sdk-firehose/source/model/IcebergDestinationUpdate.cpp @@ -20,6 +20,8 @@ namespace Model IcebergDestinationUpdate::IcebergDestinationUpdate() : m_destinationTableConfigurationListHasBeenSet(false), + m_schemaEvolutionConfigurationHasBeenSet(false), + m_tableCreationConfigurationHasBeenSet(false), m_bufferingHintsHasBeenSet(false), m_cloudWatchLoggingOptionsHasBeenSet(false), m_processingConfigurationHasBeenSet(false), @@ -50,6 +52,20 @@ IcebergDestinationUpdate& IcebergDestinationUpdate::operator =(JsonView jsonValu m_destinationTableConfigurationListHasBeenSet = true; } + if(jsonValue.ValueExists("SchemaEvolutionConfiguration")) + { + m_schemaEvolutionConfiguration = jsonValue.GetObject("SchemaEvolutionConfiguration"); + + m_schemaEvolutionConfigurationHasBeenSet = true; + } + + if(jsonValue.ValueExists("TableCreationConfiguration")) + { + m_tableCreationConfiguration = jsonValue.GetObject("TableCreationConfiguration"); + + m_tableCreationConfigurationHasBeenSet = true; + } + if(jsonValue.ValueExists("BufferingHints")) { 
m_bufferingHints = jsonValue.GetObject("BufferingHints"); @@ -124,6 +140,18 @@ JsonValue IcebergDestinationUpdate::Jsonize() const } + if(m_schemaEvolutionConfigurationHasBeenSet) + { + payload.WithObject("SchemaEvolutionConfiguration", m_schemaEvolutionConfiguration.Jsonize()); + + } + + if(m_tableCreationConfigurationHasBeenSet) + { + payload.WithObject("TableCreationConfiguration", m_tableCreationConfiguration.Jsonize()); + + } + if(m_bufferingHintsHasBeenSet) { payload.WithObject("BufferingHints", m_bufferingHints.Jsonize()); diff --git a/generated/src/aws-cpp-sdk-firehose/source/model/PartitionField.cpp b/generated/src/aws-cpp-sdk-firehose/source/model/PartitionField.cpp new file mode 100644 index 00000000000..077a894f7f4 --- /dev/null +++ b/generated/src/aws-cpp-sdk-firehose/source/model/PartitionField.cpp @@ -0,0 +1,59 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#include +#include + +#include + +using namespace Aws::Utils::Json; +using namespace Aws::Utils; + +namespace Aws +{ +namespace Firehose +{ +namespace Model +{ + +PartitionField::PartitionField() : + m_sourceNameHasBeenSet(false) +{ +} + +PartitionField::PartitionField(JsonView jsonValue) + : PartitionField() +{ + *this = jsonValue; +} + +PartitionField& PartitionField::operator =(JsonView jsonValue) +{ + if(jsonValue.ValueExists("SourceName")) + { + m_sourceName = jsonValue.GetString("SourceName"); + + m_sourceNameHasBeenSet = true; + } + + return *this; +} + +JsonValue PartitionField::Jsonize() const +{ + JsonValue payload; + + if(m_sourceNameHasBeenSet) + { + payload.WithString("SourceName", m_sourceName); + + } + + return payload; +} + +} // namespace Model +} // namespace Firehose +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-firehose/source/model/PartitionSpec.cpp b/generated/src/aws-cpp-sdk-firehose/source/model/PartitionSpec.cpp new file mode 100644 index 00000000000..fefb0747615 --- /dev/null +++ 
b/generated/src/aws-cpp-sdk-firehose/source/model/PartitionSpec.cpp @@ -0,0 +1,67 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#include +#include + +#include + +using namespace Aws::Utils::Json; +using namespace Aws::Utils; + +namespace Aws +{ +namespace Firehose +{ +namespace Model +{ + +PartitionSpec::PartitionSpec() : + m_identityHasBeenSet(false) +{ +} + +PartitionSpec::PartitionSpec(JsonView jsonValue) + : PartitionSpec() +{ + *this = jsonValue; +} + +PartitionSpec& PartitionSpec::operator =(JsonView jsonValue) +{ + if(jsonValue.ValueExists("Identity")) + { + Aws::Utils::Array identityJsonList = jsonValue.GetArray("Identity"); + for(unsigned identityIndex = 0; identityIndex < identityJsonList.GetLength(); ++identityIndex) + { + m_identity.push_back(identityJsonList[identityIndex].AsObject()); + } + m_identityHasBeenSet = true; + } + + return *this; +} + +JsonValue PartitionSpec::Jsonize() const +{ + JsonValue payload; + + if(m_identityHasBeenSet) + { + Aws::Utils::Array identityJsonList(m_identity.size()); + for(unsigned identityIndex = 0; identityIndex < identityJsonList.GetLength(); ++identityIndex) + { + identityJsonList[identityIndex].AsObject(m_identity[identityIndex].Jsonize()); + } + payload.WithArray("Identity", std::move(identityJsonList)); + + } + + return payload; +} + +} // namespace Model +} // namespace Firehose +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-firehose/source/model/SSLMode.cpp b/generated/src/aws-cpp-sdk-firehose/source/model/SSLMode.cpp new file mode 100644 index 00000000000..aad6b249a63 --- /dev/null +++ b/generated/src/aws-cpp-sdk-firehose/source/model/SSLMode.cpp @@ -0,0 +1,72 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +#include +#include +#include +#include + +using namespace Aws::Utils; + + +namespace Aws +{ + namespace Firehose + { + namespace Model + { + namespace SSLModeMapper + { + + static const int Disabled_HASH = HashingUtils::HashString("Disabled"); + static const int Enabled_HASH = HashingUtils::HashString("Enabled"); + + + SSLMode GetSSLModeForName(const Aws::String& name) + { + int hashCode = HashingUtils::HashString(name.c_str()); + if (hashCode == Disabled_HASH) + { + return SSLMode::Disabled; + } + else if (hashCode == Enabled_HASH) + { + return SSLMode::Enabled; + } + EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer(); + if(overflowContainer) + { + overflowContainer->StoreOverflow(hashCode, name); + return static_cast(hashCode); + } + + return SSLMode::NOT_SET; + } + + Aws::String GetNameForSSLMode(SSLMode enumValue) + { + switch(enumValue) + { + case SSLMode::NOT_SET: + return {}; + case SSLMode::Disabled: + return "Disabled"; + case SSLMode::Enabled: + return "Enabled"; + default: + EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer(); + if(overflowContainer) + { + return overflowContainer->RetrieveOverflow(static_cast(enumValue)); + } + + return {}; + } + } + + } // namespace SSLModeMapper + } // namespace Model + } // namespace Firehose +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-firehose/source/model/SchemaEvolutionConfiguration.cpp b/generated/src/aws-cpp-sdk-firehose/source/model/SchemaEvolutionConfiguration.cpp new file mode 100644 index 00000000000..fbf4ae76157 --- /dev/null +++ b/generated/src/aws-cpp-sdk-firehose/source/model/SchemaEvolutionConfiguration.cpp @@ -0,0 +1,60 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +#include +#include + +#include + +using namespace Aws::Utils::Json; +using namespace Aws::Utils; + +namespace Aws +{ +namespace Firehose +{ +namespace Model +{ + +SchemaEvolutionConfiguration::SchemaEvolutionConfiguration() : + m_enabled(false), + m_enabledHasBeenSet(false) +{ +} + +SchemaEvolutionConfiguration::SchemaEvolutionConfiguration(JsonView jsonValue) + : SchemaEvolutionConfiguration() +{ + *this = jsonValue; +} + +SchemaEvolutionConfiguration& SchemaEvolutionConfiguration::operator =(JsonView jsonValue) +{ + if(jsonValue.ValueExists("Enabled")) + { + m_enabled = jsonValue.GetBool("Enabled"); + + m_enabledHasBeenSet = true; + } + + return *this; +} + +JsonValue SchemaEvolutionConfiguration::Jsonize() const +{ + JsonValue payload; + + if(m_enabledHasBeenSet) + { + payload.WithBool("Enabled", m_enabled); + + } + + return payload; +} + +} // namespace Model +} // namespace Firehose +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-firehose/source/model/SnapshotRequestedBy.cpp b/generated/src/aws-cpp-sdk-firehose/source/model/SnapshotRequestedBy.cpp new file mode 100644 index 00000000000..fdf4a675b76 --- /dev/null +++ b/generated/src/aws-cpp-sdk-firehose/source/model/SnapshotRequestedBy.cpp @@ -0,0 +1,72 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +#include +#include +#include +#include + +using namespace Aws::Utils; + + +namespace Aws +{ + namespace Firehose + { + namespace Model + { + namespace SnapshotRequestedByMapper + { + + static const int USER_HASH = HashingUtils::HashString("USER"); + static const int FIREHOSE_HASH = HashingUtils::HashString("FIREHOSE"); + + + SnapshotRequestedBy GetSnapshotRequestedByForName(const Aws::String& name) + { + int hashCode = HashingUtils::HashString(name.c_str()); + if (hashCode == USER_HASH) + { + return SnapshotRequestedBy::USER; + } + else if (hashCode == FIREHOSE_HASH) + { + return SnapshotRequestedBy::FIREHOSE; + } + EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer(); + if(overflowContainer) + { + overflowContainer->StoreOverflow(hashCode, name); + return static_cast(hashCode); + } + + return SnapshotRequestedBy::NOT_SET; + } + + Aws::String GetNameForSnapshotRequestedBy(SnapshotRequestedBy enumValue) + { + switch(enumValue) + { + case SnapshotRequestedBy::NOT_SET: + return {}; + case SnapshotRequestedBy::USER: + return "USER"; + case SnapshotRequestedBy::FIREHOSE: + return "FIREHOSE"; + default: + EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer(); + if(overflowContainer) + { + return overflowContainer->RetrieveOverflow(static_cast(enumValue)); + } + + return {}; + } + } + + } // namespace SnapshotRequestedByMapper + } // namespace Model + } // namespace Firehose +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-firehose/source/model/SnapshotStatus.cpp b/generated/src/aws-cpp-sdk-firehose/source/model/SnapshotStatus.cpp new file mode 100644 index 00000000000..52e14571352 --- /dev/null +++ b/generated/src/aws-cpp-sdk-firehose/source/model/SnapshotStatus.cpp @@ -0,0 +1,79 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +#include +#include +#include +#include + +using namespace Aws::Utils; + + +namespace Aws +{ + namespace Firehose + { + namespace Model + { + namespace SnapshotStatusMapper + { + + static const int IN_PROGRESS_HASH = HashingUtils::HashString("IN_PROGRESS"); + static const int COMPLETE_HASH = HashingUtils::HashString("COMPLETE"); + static const int SUSPENDED_HASH = HashingUtils::HashString("SUSPENDED"); + + + SnapshotStatus GetSnapshotStatusForName(const Aws::String& name) + { + int hashCode = HashingUtils::HashString(name.c_str()); + if (hashCode == IN_PROGRESS_HASH) + { + return SnapshotStatus::IN_PROGRESS; + } + else if (hashCode == COMPLETE_HASH) + { + return SnapshotStatus::COMPLETE; + } + else if (hashCode == SUSPENDED_HASH) + { + return SnapshotStatus::SUSPENDED; + } + EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer(); + if(overflowContainer) + { + overflowContainer->StoreOverflow(hashCode, name); + return static_cast(hashCode); + } + + return SnapshotStatus::NOT_SET; + } + + Aws::String GetNameForSnapshotStatus(SnapshotStatus enumValue) + { + switch(enumValue) + { + case SnapshotStatus::NOT_SET: + return {}; + case SnapshotStatus::IN_PROGRESS: + return "IN_PROGRESS"; + case SnapshotStatus::COMPLETE: + return "COMPLETE"; + case SnapshotStatus::SUSPENDED: + return "SUSPENDED"; + default: + EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer(); + if(overflowContainer) + { + return overflowContainer->RetrieveOverflow(static_cast(enumValue)); + } + + return {}; + } + } + + } // namespace SnapshotStatusMapper + } // namespace Model + } // namespace Firehose +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-firehose/source/model/SourceDescription.cpp b/generated/src/aws-cpp-sdk-firehose/source/model/SourceDescription.cpp index 3b5349f2a06..1e2497e46a0 100644 --- a/generated/src/aws-cpp-sdk-firehose/source/model/SourceDescription.cpp +++ 
b/generated/src/aws-cpp-sdk-firehose/source/model/SourceDescription.cpp @@ -20,7 +20,8 @@ namespace Model SourceDescription::SourceDescription() : m_kinesisStreamSourceDescriptionHasBeenSet(false), - m_mSKSourceDescriptionHasBeenSet(false) + m_mSKSourceDescriptionHasBeenSet(false), + m_databaseSourceDescriptionHasBeenSet(false) { } @@ -46,6 +47,13 @@ SourceDescription& SourceDescription::operator =(JsonView jsonValue) m_mSKSourceDescriptionHasBeenSet = true; } + if(jsonValue.ValueExists("DatabaseSourceDescription")) + { + m_databaseSourceDescription = jsonValue.GetObject("DatabaseSourceDescription"); + + m_databaseSourceDescriptionHasBeenSet = true; + } + return *this; } @@ -65,6 +73,12 @@ JsonValue SourceDescription::Jsonize() const } + if(m_databaseSourceDescriptionHasBeenSet) + { + payload.WithObject("DatabaseSourceDescription", m_databaseSourceDescription.Jsonize()); + + } + return payload; } diff --git a/generated/src/aws-cpp-sdk-firehose/source/model/TableCreationConfiguration.cpp b/generated/src/aws-cpp-sdk-firehose/source/model/TableCreationConfiguration.cpp new file mode 100644 index 00000000000..99e7b3b8f96 --- /dev/null +++ b/generated/src/aws-cpp-sdk-firehose/source/model/TableCreationConfiguration.cpp @@ -0,0 +1,60 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +#include +#include + +#include + +using namespace Aws::Utils::Json; +using namespace Aws::Utils; + +namespace Aws +{ +namespace Firehose +{ +namespace Model +{ + +TableCreationConfiguration::TableCreationConfiguration() : + m_enabled(false), + m_enabledHasBeenSet(false) +{ +} + +TableCreationConfiguration::TableCreationConfiguration(JsonView jsonValue) + : TableCreationConfiguration() +{ + *this = jsonValue; +} + +TableCreationConfiguration& TableCreationConfiguration::operator =(JsonView jsonValue) +{ + if(jsonValue.ValueExists("Enabled")) + { + m_enabled = jsonValue.GetBool("Enabled"); + + m_enabledHasBeenSet = true; + } + + return *this; +} + +JsonValue TableCreationConfiguration::Jsonize() const +{ + JsonValue payload; + + if(m_enabledHasBeenSet) + { + payload.WithBool("Enabled", m_enabled); + + } + + return payload; +} + +} // namespace Model +} // namespace Firehose +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/CreateFunctionRequest.h b/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/CreateFunctionRequest.h index c3c65e17898..4cfd0dd0179 100644 --- a/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/CreateFunctionRequest.h +++ b/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/CreateFunctionRequest.h @@ -250,16 +250,26 @@ namespace Model ///@{ /** *

    The ARN of the Key Management Service (KMS) customer managed key that's used - * to encrypt your function's

    • The function's environment - * variables. When .

    • The function's Lambda - * SnapStart is activated, Lambda also uses this key is to encrypt your - * function's snapshot. If you deploy your function using a container image, Lambda - * also uses this key to encrypt your function when it's deployed. Note that this - * is not the same key that's used to protect your container image in the Amazon - * Elastic Container Registry (Amazon ECR). If you don't provide a customer managed - * key, Lambda uses a default service key.

      + * SnapStart snapshots.

    • When used with + * SourceKMSKeyArn, the unzipped version of the .zip deployment + * package that's used for function invocations. For more information, see + * Specifying a customer managed key for Lambda.

    • The + * optimized version of the container image that's used for function invocations. + * Note that this is not the same key that's used to protect your container image + * in the Amazon Elastic Container Registry (Amazon ECR). For more information, see + * Function + * lifecycle.

    If you don't provide a customer managed key, + * Lambda uses an Amazon + * Web Services owned key or an Amazon + * Web Services managed key.

    */ inline const Aws::String& GetKMSKeyArn() const{ return m_kMSKeyArn; } inline bool KMSKeyArnHasBeenSet() const { return m_kMSKeyArnHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/CreateFunctionResult.h b/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/CreateFunctionResult.h index 712f1534944..a77f5ca99ff 100644 --- a/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/CreateFunctionResult.h +++ b/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/CreateFunctionResult.h @@ -249,13 +249,27 @@ namespace Model ///@{ /** - *

    The KMS key that's used to encrypt the function's The ARN of the Key Management Service (KMS) customer managed key that's used + * to encrypt the following resources:

    If you don't provide a customer managed key, + * Lambda uses an Amazon + * Web Services owned key or an Amazon + * Web Services managed key.

    */ inline const Aws::String& GetKMSKeyArn() const{ return m_kMSKeyArn; } inline void SetKMSKeyArn(const Aws::String& value) { m_kMSKeyArn = value; } diff --git a/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/FunctionCode.h b/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/FunctionCode.h index 1c497bbe133..ee0d8f8f8b1 100644 --- a/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/FunctionCode.h +++ b/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/FunctionCode.h @@ -112,6 +112,24 @@ namespace Model inline FunctionCode& WithImageUri(Aws::String&& value) { SetImageUri(std::move(value)); return *this;} inline FunctionCode& WithImageUri(const char* value) { SetImageUri(value); return *this;} ///@} + + ///@{ + /** + *

    The ARN of the Key Management Service (KMS) customer managed key that's used + * to encrypt your function's .zip deployment package. If you don't provide a + * customer managed key, Lambda uses an Amazon + * Web Services owned key.

    + */ + inline const Aws::String& GetSourceKMSKeyArn() const{ return m_sourceKMSKeyArn; } + inline bool SourceKMSKeyArnHasBeenSet() const { return m_sourceKMSKeyArnHasBeenSet; } + inline void SetSourceKMSKeyArn(const Aws::String& value) { m_sourceKMSKeyArnHasBeenSet = true; m_sourceKMSKeyArn = value; } + inline void SetSourceKMSKeyArn(Aws::String&& value) { m_sourceKMSKeyArnHasBeenSet = true; m_sourceKMSKeyArn = std::move(value); } + inline void SetSourceKMSKeyArn(const char* value) { m_sourceKMSKeyArnHasBeenSet = true; m_sourceKMSKeyArn.assign(value); } + inline FunctionCode& WithSourceKMSKeyArn(const Aws::String& value) { SetSourceKMSKeyArn(value); return *this;} + inline FunctionCode& WithSourceKMSKeyArn(Aws::String&& value) { SetSourceKMSKeyArn(std::move(value)); return *this;} + inline FunctionCode& WithSourceKMSKeyArn(const char* value) { SetSourceKMSKeyArn(value); return *this;} + ///@} private: Aws::Utils::CryptoBuffer m_zipFile; @@ -128,6 +146,9 @@ namespace Model Aws::String m_imageUri; bool m_imageUriHasBeenSet = false; + + Aws::String m_sourceKMSKeyArn; + bool m_sourceKMSKeyArnHasBeenSet = false; }; } // namespace Model diff --git a/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/FunctionCodeLocation.h b/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/FunctionCodeLocation.h index 1c1394fca77..a4995661a00 100644 --- a/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/FunctionCodeLocation.h +++ b/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/FunctionCodeLocation.h @@ -92,6 +92,24 @@ namespace Model inline FunctionCodeLocation& WithResolvedImageUri(Aws::String&& value) { SetResolvedImageUri(std::move(value)); return *this;} inline FunctionCodeLocation& WithResolvedImageUri(const char* value) { SetResolvedImageUri(value); return *this;} ///@} + + ///@{ + /** + *

    The ARN of the Key Management Service (KMS) customer managed key that's used + * to encrypt your function's .zip deployment package. If you don't provide a + * customer managed key, Lambda uses an Amazon + * Web Services owned key.

    + */ + inline const Aws::String& GetSourceKMSKeyArn() const{ return m_sourceKMSKeyArn; } + inline bool SourceKMSKeyArnHasBeenSet() const { return m_sourceKMSKeyArnHasBeenSet; } + inline void SetSourceKMSKeyArn(const Aws::String& value) { m_sourceKMSKeyArnHasBeenSet = true; m_sourceKMSKeyArn = value; } + inline void SetSourceKMSKeyArn(Aws::String&& value) { m_sourceKMSKeyArnHasBeenSet = true; m_sourceKMSKeyArn = std::move(value); } + inline void SetSourceKMSKeyArn(const char* value) { m_sourceKMSKeyArnHasBeenSet = true; m_sourceKMSKeyArn.assign(value); } + inline FunctionCodeLocation& WithSourceKMSKeyArn(const Aws::String& value) { SetSourceKMSKeyArn(value); return *this;} + inline FunctionCodeLocation& WithSourceKMSKeyArn(Aws::String&& value) { SetSourceKMSKeyArn(std::move(value)); return *this;} + inline FunctionCodeLocation& WithSourceKMSKeyArn(const char* value) { SetSourceKMSKeyArn(value); return *this;} + ///@} private: Aws::String m_repositoryType; @@ -105,6 +123,9 @@ namespace Model Aws::String m_resolvedImageUri; bool m_resolvedImageUriHasBeenSet = false; + + Aws::String m_sourceKMSKeyArn; + bool m_sourceKMSKeyArnHasBeenSet = false; }; } // namespace Model diff --git a/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/FunctionConfiguration.h b/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/FunctionConfiguration.h index 8d26c319f16..ce7ef250575 100644 --- a/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/FunctionConfiguration.h +++ b/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/FunctionConfiguration.h @@ -264,13 +264,27 @@ namespace Model ///@{ /** - *

    The KMS key that's used to encrypt the function's The ARN of the Key Management Service (KMS) customer managed key that's used + * to encrypt the following resources:

    If you don't provide a customer managed key, + * Lambda uses an Amazon + * Web Services owned key or an Amazon + * Web Services managed key.

    */ inline const Aws::String& GetKMSKeyArn() const{ return m_kMSKeyArn; } inline bool KMSKeyArnHasBeenSet() const { return m_kMSKeyArnHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/GetFunctionConfigurationResult.h b/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/GetFunctionConfigurationResult.h index aa7556a8476..0cc9d0ca741 100644 --- a/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/GetFunctionConfigurationResult.h +++ b/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/GetFunctionConfigurationResult.h @@ -249,13 +249,27 @@ namespace Model ///@{ /** - *

    The KMS key that's used to encrypt the function's The ARN of the Key Management Service (KMS) customer managed key that's used + * to encrypt the following resources:

    If you don't provide a customer managed key, + * Lambda uses an Amazon + * Web Services owned key or an Amazon + * Web Services managed key.

    */ inline const Aws::String& GetKMSKeyArn() const{ return m_kMSKeyArn; } inline void SetKMSKeyArn(const Aws::String& value) { m_kMSKeyArn = value; } diff --git a/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/GetFunctionResult.h b/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/GetFunctionResult.h index 91b58849f99..43c4afaedb5 100644 --- a/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/GetFunctionResult.h +++ b/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/GetFunctionResult.h @@ -64,7 +64,7 @@ namespace Model *

    The function's tags. * Lambda returns tag data only if you have explicit allow permissions for lambda:ListTags.

    + * href="https://docs.aws.amazon.com/lambda/latest/api/API_ListTags.html">lambda:ListTags.

    */ inline const Aws::Map& GetTags() const{ return m_tags; } inline void SetTags(const Aws::Map& value) { m_tags = value; } diff --git a/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/GetPolicyRequest.h b/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/GetPolicyRequest.h index 7260f4861e8..609b99d2c97 100644 --- a/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/GetPolicyRequest.h +++ b/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/GetPolicyRequest.h @@ -45,7 +45,7 @@ namespace Model * my-function (name-only), my-function:v1 (with * alias).

  • Function ARN – * arn:aws:lambda:us-west-2:123456789012:function:my-function.

    - *
  • Partial ARN – + *

  • Partial ARN – * 123456789012:function:my-function.

  • You can * append a version number or alias to any of the formats. The length constraint * applies only to the full ARN. If you specify only the function name, it is diff --git a/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/PublishVersionResult.h b/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/PublishVersionResult.h index 029dc0824a2..7d6fe8652cd 100644 --- a/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/PublishVersionResult.h +++ b/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/PublishVersionResult.h @@ -249,13 +249,27 @@ namespace Model ///@{ /** - *

    The KMS key that's used to encrypt the function's The ARN of the Key Management Service (KMS) customer managed key that's used + * to encrypt the following resources:

    If you don't provide a customer managed key, + * Lambda uses an Amazon + * Web Services owned key or an Amazon + * Web Services managed key.

    */ inline const Aws::String& GetKMSKeyArn() const{ return m_kMSKeyArn; } inline void SetKMSKeyArn(const Aws::String& value) { m_kMSKeyArn = value; } diff --git a/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/UpdateFunctionCodeRequest.h b/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/UpdateFunctionCodeRequest.h index a6dcaf4e801..1a9eb89b8fc 100644 --- a/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/UpdateFunctionCodeRequest.h +++ b/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/UpdateFunctionCodeRequest.h @@ -184,6 +184,22 @@ namespace Model inline UpdateFunctionCodeRequest& AddArchitectures(const Architecture& value) { m_architecturesHasBeenSet = true; m_architectures.push_back(value); return *this; } inline UpdateFunctionCodeRequest& AddArchitectures(Architecture&& value) { m_architecturesHasBeenSet = true; m_architectures.push_back(std::move(value)); return *this; } ///@} + + ///@{ + /** + *

    The ARN of the Key Management Service (KMS) customer managed key that's used + * to encrypt your function's .zip deployment package. If you don't provide a + * customer managed key, Lambda uses an Amazon Web Services managed key.

    + */ + inline const Aws::String& GetSourceKMSKeyArn() const{ return m_sourceKMSKeyArn; } + inline bool SourceKMSKeyArnHasBeenSet() const { return m_sourceKMSKeyArnHasBeenSet; } + inline void SetSourceKMSKeyArn(const Aws::String& value) { m_sourceKMSKeyArnHasBeenSet = true; m_sourceKMSKeyArn = value; } + inline void SetSourceKMSKeyArn(Aws::String&& value) { m_sourceKMSKeyArnHasBeenSet = true; m_sourceKMSKeyArn = std::move(value); } + inline void SetSourceKMSKeyArn(const char* value) { m_sourceKMSKeyArnHasBeenSet = true; m_sourceKMSKeyArn.assign(value); } + inline UpdateFunctionCodeRequest& WithSourceKMSKeyArn(const Aws::String& value) { SetSourceKMSKeyArn(value); return *this;} + inline UpdateFunctionCodeRequest& WithSourceKMSKeyArn(Aws::String&& value) { SetSourceKMSKeyArn(std::move(value)); return *this;} + inline UpdateFunctionCodeRequest& WithSourceKMSKeyArn(const char* value) { SetSourceKMSKeyArn(value); return *this;} + ///@} private: Aws::String m_functionName; @@ -215,6 +231,9 @@ namespace Model Aws::Vector m_architectures; bool m_architecturesHasBeenSet = false; + + Aws::String m_sourceKMSKeyArn; + bool m_sourceKMSKeyArnHasBeenSet = false; }; } // namespace Model diff --git a/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/UpdateFunctionCodeResult.h b/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/UpdateFunctionCodeResult.h index 07061160499..9d3d68b6931 100644 --- a/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/UpdateFunctionCodeResult.h +++ b/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/UpdateFunctionCodeResult.h @@ -249,13 +249,27 @@ namespace Model ///@{ /** - *

    The KMS key that's used to encrypt the function's The ARN of the Key Management Service (KMS) customer managed key that's used + * to encrypt the following resources:

    If you don't provide a customer managed key, + * Lambda uses an Amazon + * Web Services owned key or an Amazon + * Web Services managed key.

    */ inline const Aws::String& GetKMSKeyArn() const{ return m_kMSKeyArn; } inline void SetKMSKeyArn(const Aws::String& value) { m_kMSKeyArn = value; } diff --git a/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/UpdateFunctionConfigurationRequest.h b/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/UpdateFunctionConfigurationRequest.h index dc9bcf0e676..ea912b8ac3b 100644 --- a/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/UpdateFunctionConfigurationRequest.h +++ b/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/UpdateFunctionConfigurationRequest.h @@ -211,16 +211,26 @@ namespace Model ///@{ /** *

    The ARN of the Key Management Service (KMS) customer managed key that's used - * to encrypt your function's

    • The function's environment - * variables. When .

    • The function's Lambda - * SnapStart is activated, Lambda also uses this key is to encrypt your - * function's snapshot. If you deploy your function using a container image, Lambda - * also uses this key to encrypt your function when it's deployed. Note that this - * is not the same key that's used to protect your container image in the Amazon - * Elastic Container Registry (Amazon ECR). If you don't provide a customer managed - * key, Lambda uses a default service key.

      + * SnapStart snapshots.

    • When used with + * SourceKMSKeyArn, the unzipped version of the .zip deployment + * package that's used for function invocations. For more information, see + * Specifying a customer managed key for Lambda.

    • The + * optimized version of the container image that's used for function invocations. + * Note that this is not the same key that's used to protect your container image + * in the Amazon Elastic Container Registry (Amazon ECR). For more information, see + * Function + * lifecycle.

    If you don't provide a customer managed key, + * Lambda uses an Amazon + * Web Services owned key or an Amazon + * Web Services managed key.

    */ inline const Aws::String& GetKMSKeyArn() const{ return m_kMSKeyArn; } inline bool KMSKeyArnHasBeenSet() const { return m_kMSKeyArnHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/UpdateFunctionConfigurationResult.h b/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/UpdateFunctionConfigurationResult.h index 72692a4bccf..81ba245043c 100644 --- a/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/UpdateFunctionConfigurationResult.h +++ b/generated/src/aws-cpp-sdk-lambda/include/aws/lambda/model/UpdateFunctionConfigurationResult.h @@ -249,13 +249,27 @@ namespace Model ///@{ /** - *

    The KMS key that's used to encrypt the function's The ARN of the Key Management Service (KMS) customer managed key that's used + * to encrypt the following resources:

    If you don't provide a customer managed key, + * Lambda uses an Amazon + * Web Services owned key or an Amazon + * Web Services managed key.

    */ inline const Aws::String& GetKMSKeyArn() const{ return m_kMSKeyArn; } inline void SetKMSKeyArn(const Aws::String& value) { m_kMSKeyArn = value; } diff --git a/generated/src/aws-cpp-sdk-lambda/source/model/FunctionCode.cpp b/generated/src/aws-cpp-sdk-lambda/source/model/FunctionCode.cpp index 347c40679f1..6d2b60e1cf8 100644 --- a/generated/src/aws-cpp-sdk-lambda/source/model/FunctionCode.cpp +++ b/generated/src/aws-cpp-sdk-lambda/source/model/FunctionCode.cpp @@ -24,7 +24,8 @@ FunctionCode::FunctionCode() : m_s3BucketHasBeenSet(false), m_s3KeyHasBeenSet(false), m_s3ObjectVersionHasBeenSet(false), - m_imageUriHasBeenSet(false) + m_imageUriHasBeenSet(false), + m_sourceKMSKeyArnHasBeenSet(false) { } @@ -70,6 +71,13 @@ FunctionCode& FunctionCode::operator =(JsonView jsonValue) m_imageUriHasBeenSet = true; } + if(jsonValue.ValueExists("SourceKMSKeyArn")) + { + m_sourceKMSKeyArn = jsonValue.GetString("SourceKMSKeyArn"); + + m_sourceKMSKeyArnHasBeenSet = true; + } + return *this; } @@ -106,6 +114,12 @@ JsonValue FunctionCode::Jsonize() const } + if(m_sourceKMSKeyArnHasBeenSet) + { + payload.WithString("SourceKMSKeyArn", m_sourceKMSKeyArn); + + } + return payload; } diff --git a/generated/src/aws-cpp-sdk-lambda/source/model/FunctionCodeLocation.cpp b/generated/src/aws-cpp-sdk-lambda/source/model/FunctionCodeLocation.cpp index 771a2836ca7..8673e0c6401 100644 --- a/generated/src/aws-cpp-sdk-lambda/source/model/FunctionCodeLocation.cpp +++ b/generated/src/aws-cpp-sdk-lambda/source/model/FunctionCodeLocation.cpp @@ -22,7 +22,8 @@ FunctionCodeLocation::FunctionCodeLocation() : m_repositoryTypeHasBeenSet(false), m_locationHasBeenSet(false), m_imageUriHasBeenSet(false), - m_resolvedImageUriHasBeenSet(false) + m_resolvedImageUriHasBeenSet(false), + m_sourceKMSKeyArnHasBeenSet(false) { } @@ -62,6 +63,13 @@ FunctionCodeLocation& FunctionCodeLocation::operator =(JsonView jsonValue) m_resolvedImageUriHasBeenSet = true; } + if(jsonValue.ValueExists("SourceKMSKeyArn")) + { + 
m_sourceKMSKeyArn = jsonValue.GetString("SourceKMSKeyArn"); + + m_sourceKMSKeyArnHasBeenSet = true; + } + return *this; } @@ -93,6 +101,12 @@ JsonValue FunctionCodeLocation::Jsonize() const } + if(m_sourceKMSKeyArnHasBeenSet) + { + payload.WithString("SourceKMSKeyArn", m_sourceKMSKeyArn); + + } + return payload; } diff --git a/generated/src/aws-cpp-sdk-lambda/source/model/UpdateFunctionCodeRequest.cpp b/generated/src/aws-cpp-sdk-lambda/source/model/UpdateFunctionCodeRequest.cpp index b0d30ed647e..9b4112fa2a9 100644 --- a/generated/src/aws-cpp-sdk-lambda/source/model/UpdateFunctionCodeRequest.cpp +++ b/generated/src/aws-cpp-sdk-lambda/source/model/UpdateFunctionCodeRequest.cpp @@ -25,7 +25,8 @@ UpdateFunctionCodeRequest::UpdateFunctionCodeRequest() : m_dryRun(false), m_dryRunHasBeenSet(false), m_revisionIdHasBeenSet(false), - m_architecturesHasBeenSet(false) + m_architecturesHasBeenSet(false), + m_sourceKMSKeyArnHasBeenSet(false) { } @@ -91,6 +92,12 @@ Aws::String UpdateFunctionCodeRequest::SerializePayload() const } + if(m_sourceKMSKeyArnHasBeenSet) + { + payload.WithString("SourceKMSKeyArn", m_sourceKMSKeyArn); + + } + return payload.View().WriteReadable(); } diff --git a/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/include/aws/pinpoint-sms-voice-v2/model/CreateRegistrationResult.h b/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/include/aws/pinpoint-sms-voice-v2/model/CreateRegistrationResult.h index 8352cfe949b..63d773817bf 100644 --- a/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/include/aws/pinpoint-sms-voice-v2/model/CreateRegistrationResult.h +++ b/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/include/aws/pinpoint-sms-voice-v2/model/CreateRegistrationResult.h @@ -79,19 +79,20 @@ namespace Model ///@{ /** - *

    The status of the registration.

    • CREATED: Your + *

      The status of the registration.

      • CLOSED: The + * phone number or sender ID has been deleted and you must also delete the + * registration for the number.

      • CREATED: Your * registration is created but not submitted.

      • - * SUBMITTED: Your registration has been submitted and is awaiting - * review.

      • REVIEWING: Your registration has been - * accepted and is being reviewed.

      • PROVISIONING: + * COMPLETE: Your registration has been approved and your origination + * identity has been created.

      • DELETED: The + * registration has been deleted.

      • PROVISIONING: * Your registration has been approved and your origination identity is being - * created.

      • COMPLETE: Your registration has been - * approved and and your origination identity has been created.

      • - * REQUIRES_UPDATES: You must fix your registration and resubmit - * it.

      • CLOSED: The phone number or sender ID has - * been deleted and you must also delete the registration for the number.

      • - *
      • DELETED: The registration has been deleted.

      • - *
      + * created.

    • REQUIRES_AUTHENTICATION: You need to + * complete email authentication.

    • REQUIRES_UPDATES: + * You must fix your registration and resubmit it.

    • + * REVIEWING: Your registration has been accepted and is being + * reviewed.

    • SUBMITTED: Your registration has been + * submitted and is awaiting review.

    */ inline const RegistrationStatus& GetRegistrationStatus() const{ return m_registrationStatus; } inline void SetRegistrationStatus(const RegistrationStatus& value) { m_registrationStatus = value; } diff --git a/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/include/aws/pinpoint-sms-voice-v2/model/CreateRegistrationVersionResult.h b/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/include/aws/pinpoint-sms-voice-v2/model/CreateRegistrationVersionResult.h index 47a2268ba6a..a08e0e77b26 100644 --- a/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/include/aws/pinpoint-sms-voice-v2/model/CreateRegistrationVersionResult.h +++ b/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/include/aws/pinpoint-sms-voice-v2/model/CreateRegistrationVersionResult.h @@ -71,18 +71,19 @@ namespace Model ///@{ /** - *

    The status of the registration.

    • DRAFT: The + *

      The status of the registration.

      • APPROVED: Your + * registration has been approved.

      • ARCHIVED: Your + * previously approved registration version moves into this status when a more + * recently submitted version is approved.

      • DENIED: + * You must fix your registration and resubmit it.

      • + * DISCARDED: You've abandon this version of their registration to + * start over with a new version.

      • DRAFT: The * initial status of a registration version after it’s created.

      • - * SUBMITTED: Your registration has been submitted.

      • - * REVIEWING: Your registration has been accepted and is being - * reviewed.

      • APPROVED: Your registration has been - * approved.

      • DISCARDED: You've abandon this version - * of their registration to start over with a new version.

      • - * DENIED: You must fix your registration and resubmit it.

      • - *
      • REVOKED: Your previously approved registration has been - * revoked.

      • ARCHIVED: Your previously approved - * registration version moves into this status when a more recently submitted - * version is approved.

      + * REQUIRES_AUTHENTICATION: You need to complete email + * authentication.

    • REVIEWING: Your registration has + * been accepted and is being reviewed.

    • REVOKED: + * Your previously approved registration has been revoked.

    • + * SUBMITTED: Your registration has been submitted.

    */ inline const RegistrationVersionStatus& GetRegistrationVersionStatus() const{ return m_registrationVersionStatus; } inline void SetRegistrationVersionStatus(const RegistrationVersionStatus& value) { m_registrationVersionStatus = value; } diff --git a/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/include/aws/pinpoint-sms-voice-v2/model/DeleteRegistrationResult.h b/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/include/aws/pinpoint-sms-voice-v2/model/DeleteRegistrationResult.h index b603257dd08..293808ff8cc 100644 --- a/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/include/aws/pinpoint-sms-voice-v2/model/DeleteRegistrationResult.h +++ b/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/include/aws/pinpoint-sms-voice-v2/model/DeleteRegistrationResult.h @@ -77,19 +77,20 @@ namespace Model ///@{ /** - *

    The status of the registration.

    • CREATED: Your + *

      The status of the registration.

      • CLOSED: The + * phone number or sender ID has been deleted and you must also delete the + * registration for the number.

      • CREATED: Your * registration is created but not submitted.

      • - * SUBMITTED: Your registration has been submitted and is awaiting - * review.

      • REVIEWING: Your registration has been - * accepted and is being reviewed.

      • PROVISIONING: + * COMPLETE: Your registration has been approved and your origination + * identity has been created.

      • DELETED: The + * registration has been deleted.

      • PROVISIONING: * Your registration has been approved and your origination identity is being - * created.

      • COMPLETE: Your registration has been - * approved and and your origination identity has been created.

      • - * REQUIRES_UPDATES: You must fix your registration and resubmit - * it.

      • CLOSED: The phone number or sender ID has - * been deleted and you must also delete the registration for the number.

      • - *
      • DELETED: The registration has been deleted.

      • - *
      + * created.

    • REQUIRES_AUTHENTICATION: You need to + * complete email authentication.

    • REQUIRES_UPDATES: + * You must fix your registration and resubmit it.

    • + * REVIEWING: Your registration has been accepted and is being + * reviewed.

    • SUBMITTED: Your registration has been + * submitted and is awaiting review.

    */ inline const RegistrationStatus& GetRegistrationStatus() const{ return m_registrationStatus; } inline void SetRegistrationStatus(const RegistrationStatus& value) { m_registrationStatus = value; } diff --git a/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/include/aws/pinpoint-sms-voice-v2/model/DiscardRegistrationVersionResult.h b/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/include/aws/pinpoint-sms-voice-v2/model/DiscardRegistrationVersionResult.h index 659c165287d..02f3eccc16e 100644 --- a/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/include/aws/pinpoint-sms-voice-v2/model/DiscardRegistrationVersionResult.h +++ b/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/include/aws/pinpoint-sms-voice-v2/model/DiscardRegistrationVersionResult.h @@ -71,18 +71,20 @@ namespace Model ///@{ /** - *

    The status of the registration version.

    • DRAFT: - * The initial status of a registration version after it’s created.

    • - *

      SUBMITTED: Your registration has been submitted.

    • - *

      REVIEWING: Your registration has been accepted and is being - * reviewed.

    • APPROVED: Your registration has been - * approved.

    • DISCARDED: You've abandon this version - * of their registration to start over with a new version.

    • - * DENIED: You must fix your registration and resubmit it.

    • - *
    • REVOKED: Your previously approved registration has been - * revoked.

    • ARCHIVED: Your previously approved - * registration version moves into this status when a more recently submitted - * version is approved.

    + *

    The status of the registration version.

    • + * APPROVED: Your registration has been approved.

    • + * ARCHIVED: Your previously approved registration version moves into + * this status when a more recently submitted version is approved.

    • + *

      DENIED: You must fix your registration and resubmit it.

      + *
    • DISCARDED: You've abandon this version of their + * registration to start over with a new version.

    • + * DRAFT: The initial status of a registration version after it’s + * created.

    • REQUIRES_AUTHENTICATION: You need to + * complete email authentication.

    • REVIEWING: Your + * registration has been accepted and is being reviewed.

    • + * REVOKED: Your previously approved registration has been + * revoked.

    • SUBMITTED: Your registration has been + * submitted.

    */ inline const RegistrationVersionStatus& GetRegistrationVersionStatus() const{ return m_registrationVersionStatus; } inline void SetRegistrationVersionStatus(const RegistrationVersionStatus& value) { m_registrationVersionStatus = value; } diff --git a/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/include/aws/pinpoint-sms-voice-v2/model/RegistrationInformation.h b/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/include/aws/pinpoint-sms-voice-v2/model/RegistrationInformation.h index 8736438c51d..e88092cc705 100644 --- a/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/include/aws/pinpoint-sms-voice-v2/model/RegistrationInformation.h +++ b/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/include/aws/pinpoint-sms-voice-v2/model/RegistrationInformation.h @@ -86,19 +86,20 @@ namespace Model ///@{ /** - *

    The status of the registration.

    • CREATED: Your + *

      The status of the registration.

      • CLOSED: The + * phone number or sender ID has been deleted and you must also delete the + * registration for the number.

      • CREATED: Your * registration is created but not submitted.

      • - * SUBMITTED: Your registration has been submitted and is awaiting - * review.

      • REVIEWING: Your registration has been - * accepted and is being reviewed.

      • PROVISIONING: + * COMPLETE: Your registration has been approved and your origination + * identity has been created.

      • DELETED: The + * registration has been deleted.

      • PROVISIONING: * Your registration has been approved and your origination identity is being - * created.

      • COMPLETE: Your registration has been - * approved and and your origination identity has been created.

      • - * REQUIRES_UPDATES: You must fix your registration and resubmit - * it.

      • CLOSED: The phone number or sender ID has - * been deleted and you must also delete the registration for the number.

      • - *
      • DELETED: The registration has been deleted.

      • - *
      + * created.

    • REQUIRES_AUTHENTICATION: You need to + * complete email authentication.

    • REQUIRES_UPDATES: + * You must fix your registration and resubmit it.

    • + * REVIEWING: Your registration has been accepted and is being + * reviewed.

    • SUBMITTED: Your registration has been + * submitted and is awaiting review.

    */ inline const RegistrationStatus& GetRegistrationStatus() const{ return m_registrationStatus; } inline bool RegistrationStatusHasBeenSet() const { return m_registrationStatusHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/include/aws/pinpoint-sms-voice-v2/model/RegistrationVersionInformation.h b/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/include/aws/pinpoint-sms-voice-v2/model/RegistrationVersionInformation.h index ae0b8a04c6c..019d6cc9cb3 100644 --- a/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/include/aws/pinpoint-sms-voice-v2/model/RegistrationVersionInformation.h +++ b/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/include/aws/pinpoint-sms-voice-v2/model/RegistrationVersionInformation.h @@ -53,18 +53,19 @@ namespace Model ///@{ /** - *

    The status of the registration.

    • DRAFT: The + *

      The status of the registration.

      • APPROVED: Your + * registration has been approved.

      • ARCHIVED: Your + * previously approved registration version moves into this status when a more + * recently submitted version is approved.

      • DENIED: + * You must fix your registration and resubmit it.

      • + * DISCARDED: You've abandon this version of their registration to + * start over with a new version.

      • DRAFT: The * initial status of a registration version after it’s created.

      • - * SUBMITTED: Your registration has been submitted.

      • - * REVIEWING: Your registration has been accepted and is being - * reviewed.

      • APPROVED: Your registration has been - * approved.

      • DISCARDED: You've abandon this version - * of their registration to start over with a new version.

      • - * DENIED: You must fix your registration and resubmit it.

      • - *
      • REVOKED: Your previously approved registration has been - * revoked.

      • ARCHIVED: Your previously approved - * registration version moves into this status when a more recently submitted - * version is approved.

      + * REQUIRES_AUTHENTICATION: You need to complete email + * authentication.

    • REVIEWING: Your registration has + * been accepted and is being reviewed.

    • REVOKED: + * Your previously approved registration has been revoked.

    • + * SUBMITTED: Your registration has been submitted.

    */ inline const RegistrationVersionStatus& GetRegistrationVersionStatus() const{ return m_registrationVersionStatus; } inline bool RegistrationVersionStatusHasBeenSet() const { return m_registrationVersionStatusHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/include/aws/pinpoint-sms-voice-v2/model/RegistrationVersionStatusHistory.h b/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/include/aws/pinpoint-sms-voice-v2/model/RegistrationVersionStatusHistory.h index b3119d3184e..add2f2fa04b 100644 --- a/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/include/aws/pinpoint-sms-voice-v2/model/RegistrationVersionStatusHistory.h +++ b/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/include/aws/pinpoint-sms-voice-v2/model/RegistrationVersionStatusHistory.h @@ -77,6 +77,19 @@ namespace Model inline RegistrationVersionStatusHistory& WithReviewingTimestamp(Aws::Utils::DateTime&& value) { SetReviewingTimestamp(std::move(value)); return *this;} ///@} + ///@{ + /** + *

    The time when the registration was in the requires authentication state, in + * UNIX epoch time format.

    + */ + inline const Aws::Utils::DateTime& GetRequiresAuthenticationTimestamp() const{ return m_requiresAuthenticationTimestamp; } + inline bool RequiresAuthenticationTimestampHasBeenSet() const { return m_requiresAuthenticationTimestampHasBeenSet; } + inline void SetRequiresAuthenticationTimestamp(const Aws::Utils::DateTime& value) { m_requiresAuthenticationTimestampHasBeenSet = true; m_requiresAuthenticationTimestamp = value; } + inline void SetRequiresAuthenticationTimestamp(Aws::Utils::DateTime&& value) { m_requiresAuthenticationTimestampHasBeenSet = true; m_requiresAuthenticationTimestamp = std::move(value); } + inline RegistrationVersionStatusHistory& WithRequiresAuthenticationTimestamp(const Aws::Utils::DateTime& value) { SetRequiresAuthenticationTimestamp(value); return *this;} + inline RegistrationVersionStatusHistory& WithRequiresAuthenticationTimestamp(Aws::Utils::DateTime&& value) { SetRequiresAuthenticationTimestamp(std::move(value)); return *this;} + ///@} + ///@{ /** *

    The time when the registration was in the approved state, in The status of the registration version.

    • DRAFT: - * The initial status of a registration version after it’s created.

    • - *

      SUBMITTED: Your registration has been submitted.

    • - *

      REVIEWING: Your registration has been accepted and is being - * reviewed.

    • APPROVED: Your registration has been - * approved.

    • DISCARDED: You've abandon this version - * of their registration to start over with a new version.

    • - * DENIED: You must fix your registration and resubmit it.

    • - *
    • REVOKED: Your previously approved registration has been - * revoked.

    • ARCHIVED: Your previously approved - * registration version moves into this status when a more recently submitted - * version is approved.

    + *

    The status of the registration version.

    • + * APPROVED: Your registration has been approved.

    • + * ARCHIVED: Your previously approved registration version moves into + * this status when a more recently submitted version is approved.

    • + *

      DENIED: You must fix your registration and resubmit it.

      + *
    • DISCARDED: You've abandoned this version of your + * registration to start over with a new version.

    • + * DRAFT: The initial status of a registration version after it’s + * created.

    • REQUIRES_AUTHENTICATION: You need to + * complete email authentication.

    • REVIEWING: Your + * registration has been accepted and is being reviewed.

    • + * REVOKED: Your previously approved registration has been + * revoked.

    • SUBMITTED: Your registration has been + * submitted.

    */ inline const RegistrationVersionStatus& GetRegistrationVersionStatus() const{ return m_registrationVersionStatus; } inline void SetRegistrationVersionStatus(const RegistrationVersionStatus& value) { m_registrationVersionStatus = value; } diff --git a/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/source/model/RegistrationVersionStatusHistory.cpp b/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/source/model/RegistrationVersionStatusHistory.cpp index 9d5bd21bcf2..2185285693e 100644 --- a/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/source/model/RegistrationVersionStatusHistory.cpp +++ b/generated/src/aws-cpp-sdk-pinpoint-sms-voice-v2/source/model/RegistrationVersionStatusHistory.cpp @@ -22,6 +22,7 @@ RegistrationVersionStatusHistory::RegistrationVersionStatusHistory() : m_draftTimestampHasBeenSet(false), m_submittedTimestampHasBeenSet(false), m_reviewingTimestampHasBeenSet(false), + m_requiresAuthenticationTimestampHasBeenSet(false), m_approvedTimestampHasBeenSet(false), m_discardedTimestampHasBeenSet(false), m_deniedTimestampHasBeenSet(false), @@ -59,6 +60,13 @@ RegistrationVersionStatusHistory& RegistrationVersionStatusHistory::operator =(J m_reviewingTimestampHasBeenSet = true; } + if(jsonValue.ValueExists("RequiresAuthenticationTimestamp")) + { + m_requiresAuthenticationTimestamp = jsonValue.GetDouble("RequiresAuthenticationTimestamp"); + + m_requiresAuthenticationTimestampHasBeenSet = true; + } + if(jsonValue.ValueExists("ApprovedTimestamp")) { m_approvedTimestamp = jsonValue.GetDouble("ApprovedTimestamp"); @@ -116,6 +124,11 @@ JsonValue RegistrationVersionStatusHistory::Jsonize() const payload.WithDouble("ReviewingTimestamp", m_reviewingTimestamp.SecondsWithMSPrecision()); } + if(m_requiresAuthenticationTimestampHasBeenSet) + { + payload.WithDouble("RequiresAuthenticationTimestamp", m_requiresAuthenticationTimestamp.SecondsWithMSPrecision()); + } + if(m_approvedTimestampHasBeenSet) { payload.WithDouble("ApprovedTimestamp", 
m_approvedTimestamp.SecondsWithMSPrecision()); diff --git a/generated/src/aws-cpp-sdk-qbusiness/include/aws/qbusiness/QBusinessClient.h b/generated/src/aws-cpp-sdk-qbusiness/include/aws/qbusiness/QBusinessClient.h index 474731f125b..c40ded8432a 100644 --- a/generated/src/aws-cpp-sdk-qbusiness/include/aws/qbusiness/QBusinessClient.h +++ b/generated/src/aws-cpp-sdk-qbusiness/include/aws/qbusiness/QBusinessClient.h @@ -207,7 +207,12 @@ namespace QBusiness * Amazon Q Business Lite and what's included in Amazon Q Business Pro, see
    Amazon * Q Business tiers. You must use the Amazon Q Business console to assign - * subscription tiers to users.

    See Also:

    An Amazon Q Apps service-linked role will + * be created if it's absent in the Amazon Web Services account when the + * QAppsConfiguration is enabled in the request. For more information, see + * Using service-linked roles for Q Apps

    See Also:

    + * AWS * API Reference

    */ @@ -1366,7 +1371,12 @@ namespace QBusiness } /** - *

    Updates an existing Amazon Q Business application.

    See Also:

    + *

    Updates an existing Amazon Q Business application.

    An Amazon Q + * Apps service-linked role will be created if it's absent in the Amazon Web + * Services account when the QAppsConfiguration is enabled in the request. For more + * information, see + * Using service-linked roles for Q Apps

    See Also:

    * AWS * API Reference

    diff --git a/generated/src/aws-cpp-sdk-qbusiness/include/aws/qbusiness/model/GroupMembers.h b/generated/src/aws-cpp-sdk-qbusiness/include/aws/qbusiness/model/GroupMembers.h index d2cabeb526b..1130c9b4ac0 100644 --- a/generated/src/aws-cpp-sdk-qbusiness/include/aws/qbusiness/model/GroupMembers.h +++ b/generated/src/aws-cpp-sdk-qbusiness/include/aws/qbusiness/model/GroupMembers.h @@ -6,6 +6,7 @@ #pragma once #include #include +#include #include #include #include @@ -71,6 +72,16 @@ namespace Model inline GroupMembers& AddMemberUsers(const MemberUser& value) { m_memberUsersHasBeenSet = true; m_memberUsers.push_back(value); return *this; } inline GroupMembers& AddMemberUsers(MemberUser&& value) { m_memberUsersHasBeenSet = true; m_memberUsers.push_back(std::move(value)); return *this; } ///@} + + ///@{ + + inline const S3& GetS3PathForGroupMembers() const{ return m_s3PathForGroupMembers; } + inline bool S3PathForGroupMembersHasBeenSet() const { return m_s3PathForGroupMembersHasBeenSet; } + inline void SetS3PathForGroupMembers(const S3& value) { m_s3PathForGroupMembersHasBeenSet = true; m_s3PathForGroupMembers = value; } + inline void SetS3PathForGroupMembers(S3&& value) { m_s3PathForGroupMembersHasBeenSet = true; m_s3PathForGroupMembers = std::move(value); } + inline GroupMembers& WithS3PathForGroupMembers(const S3& value) { SetS3PathForGroupMembers(value); return *this;} + inline GroupMembers& WithS3PathForGroupMembers(S3&& value) { SetS3PathForGroupMembers(std::move(value)); return *this;} + ///@} private: Aws::Vector m_memberGroups; @@ -78,6 +89,9 @@ namespace Model Aws::Vector m_memberUsers; bool m_memberUsersHasBeenSet = false; + + S3 m_s3PathForGroupMembers; + bool m_s3PathForGroupMembersHasBeenSet = false; }; } // namespace Model diff --git a/generated/src/aws-cpp-sdk-qbusiness/include/aws/qbusiness/model/PutGroupRequest.h b/generated/src/aws-cpp-sdk-qbusiness/include/aws/qbusiness/model/PutGroupRequest.h index 596f0218b27..d747a6626a8 100644 --- 
a/generated/src/aws-cpp-sdk-qbusiness/include/aws/qbusiness/model/PutGroupRequest.h +++ b/generated/src/aws-cpp-sdk-qbusiness/include/aws/qbusiness/model/PutGroupRequest.h @@ -121,6 +121,23 @@ namespace Model inline PutGroupRequest& WithGroupMembers(const GroupMembers& value) { SetGroupMembers(value); return *this;} inline PutGroupRequest& WithGroupMembers(GroupMembers&& value) { SetGroupMembers(std::move(value)); return *this;} ///@} + + ///@{ + /** + *

    The Amazon Resource Name (ARN) of an IAM role that has access to the S3 file + * that contains your list of users that belong to a group.

    + */ + inline const Aws::String& GetRoleArn() const{ return m_roleArn; } + inline bool RoleArnHasBeenSet() const { return m_roleArnHasBeenSet; } + inline void SetRoleArn(const Aws::String& value) { m_roleArnHasBeenSet = true; m_roleArn = value; } + inline void SetRoleArn(Aws::String&& value) { m_roleArnHasBeenSet = true; m_roleArn = std::move(value); } + inline void SetRoleArn(const char* value) { m_roleArnHasBeenSet = true; m_roleArn.assign(value); } + inline PutGroupRequest& WithRoleArn(const Aws::String& value) { SetRoleArn(value); return *this;} + inline PutGroupRequest& WithRoleArn(Aws::String&& value) { SetRoleArn(std::move(value)); return *this;} + inline PutGroupRequest& WithRoleArn(const char* value) { SetRoleArn(value); return *this;} + ///@} private: Aws::String m_applicationId; @@ -140,6 +157,9 @@ namespace Model GroupMembers m_groupMembers; bool m_groupMembersHasBeenSet = false; + + Aws::String m_roleArn; + bool m_roleArnHasBeenSet = false; }; } // namespace Model diff --git a/generated/src/aws-cpp-sdk-qbusiness/source/model/GroupMembers.cpp b/generated/src/aws-cpp-sdk-qbusiness/source/model/GroupMembers.cpp index 997d3c0725d..0883f097d64 100644 --- a/generated/src/aws-cpp-sdk-qbusiness/source/model/GroupMembers.cpp +++ b/generated/src/aws-cpp-sdk-qbusiness/source/model/GroupMembers.cpp @@ -20,7 +20,8 @@ namespace Model GroupMembers::GroupMembers() : m_memberGroupsHasBeenSet(false), - m_memberUsersHasBeenSet(false) + m_memberUsersHasBeenSet(false), + m_s3PathForGroupMembersHasBeenSet(false) { } @@ -52,6 +53,13 @@ GroupMembers& GroupMembers::operator =(JsonView jsonValue) m_memberUsersHasBeenSet = true; } + if(jsonValue.ValueExists("s3PathForGroupMembers")) + { + m_s3PathForGroupMembers = jsonValue.GetObject("s3PathForGroupMembers"); + + m_s3PathForGroupMembersHasBeenSet = true; + } + return *this; } @@ -81,6 +89,12 @@ JsonValue GroupMembers::Jsonize() const } + if(m_s3PathForGroupMembersHasBeenSet) + { + payload.WithObject("s3PathForGroupMembers", 
m_s3PathForGroupMembers.Jsonize()); + + } + return payload; } diff --git a/generated/src/aws-cpp-sdk-qbusiness/source/model/PutGroupRequest.cpp b/generated/src/aws-cpp-sdk-qbusiness/source/model/PutGroupRequest.cpp index c9412b431d1..1faec4f89d4 100644 --- a/generated/src/aws-cpp-sdk-qbusiness/source/model/PutGroupRequest.cpp +++ b/generated/src/aws-cpp-sdk-qbusiness/source/model/PutGroupRequest.cpp @@ -19,7 +19,8 @@ PutGroupRequest::PutGroupRequest() : m_dataSourceIdHasBeenSet(false), m_type(MembershipType::NOT_SET), m_typeHasBeenSet(false), - m_groupMembersHasBeenSet(false) + m_groupMembersHasBeenSet(false), + m_roleArnHasBeenSet(false) { } @@ -50,6 +51,12 @@ Aws::String PutGroupRequest::SerializePayload() const } + if(m_roleArnHasBeenSet) + { + payload.WithString("roleArn", m_roleArn); + + } + return payload.View().WriteReadable(); } diff --git a/src/aws-cpp-sdk-core/include/aws/core/VersionConfig.h b/src/aws-cpp-sdk-core/include/aws/core/VersionConfig.h index 658d435f040..01300515e4c 100644 --- a/src/aws-cpp-sdk-core/include/aws/core/VersionConfig.h +++ b/src/aws-cpp-sdk-core/include/aws/core/VersionConfig.h @@ -4,7 +4,7 @@ */ #pragma once -#define AWS_SDK_VERSION_STRING "1.11.442" +#define AWS_SDK_VERSION_STRING "1.11.443" #define AWS_SDK_VERSION_MAJOR 1 #define AWS_SDK_VERSION_MINOR 11 -#define AWS_SDK_VERSION_PATCH 442 +#define AWS_SDK_VERSION_PATCH 443 diff --git a/tools/code-generation/api-descriptions/batch-2016-08-10.normal.json b/tools/code-generation/api-descriptions/batch-2016-08-10.normal.json index 2946cad2ef0..bdb73b7e2f5 100644 --- a/tools/code-generation/api-descriptions/batch-2016-08-10.normal.json +++ b/tools/code-generation/api-descriptions/batch-2016-08-10.normal.json @@ -2934,11 +2934,41 @@ }, "version":{ "shape":"String", - "documentation":"

    The version number of the launch template, $Latest, or $Default.

    If the value is $Latest, the latest version of the launch template is used. If the value is $Default, the default version of the launch template is used.

    If the AMI ID that's used in a compute environment is from the launch template, the AMI isn't changed when the compute environment is updated. It's only changed if the updateToLatestImageVersion parameter for the compute environment is set to true. During an infrastructure update, if either $Latest or $Default is specified, Batch re-evaluates the launch template version, and it might use a different version of the launch template. This is the case even if the launch template isn't specified in the update. When updating a compute environment, changing the launch template requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide.

    Default: $Default.

    " + "documentation":"

    The version number of the launch template, $Default, or $Latest.

    If the value is $Default, the default version of the launch template is used. If the value is $Latest, the latest version of the launch template is used.

    If the AMI ID that's used in a compute environment is from the launch template, the AMI isn't changed when the compute environment is updated. It's only changed if the updateToLatestImageVersion parameter for the compute environment is set to true. During an infrastructure update, if either $Default or $Latest is specified, Batch re-evaluates the launch template version, and it might use a different version of the launch template. This is the case even if the launch template isn't specified in the update. When updating a compute environment, changing the launch template requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide.

    Default: $Default

    Latest: $Latest

    " + }, + "overrides":{ + "shape":"LaunchTemplateSpecificationOverrideList", + "documentation":"

    A launch template to use in place of the default launch template. You must specify either the launch template ID or launch template name in the request, but not both.

    You can specify up to ten (10) launch template overrides that are associated to unique instance types or families for each compute environment.

    To unset all override templates for a compute environment, you can pass an empty array to the UpdateComputeEnvironment.overrides parameter, or not include the overrides parameter when submitting the UpdateComputeEnvironment API operation.

    " } }, "documentation":"

    An object that represents a launch template that's associated with a compute resource. You must specify either the launch template ID or launch template name in the request, but not both.

    If security groups are specified using both the securityGroupIds parameter of CreateComputeEnvironment and the launch template, the values in the securityGroupIds parameter of CreateComputeEnvironment will be used.

    This object isn't applicable to jobs that are running on Fargate resources.

    " }, + "LaunchTemplateSpecificationOverride":{ + "type":"structure", + "members":{ + "launchTemplateId":{ + "shape":"String", + "documentation":"

    The ID of the launch template.

    Note: If you specify the launchTemplateId you can't specify the launchTemplateName as well.

    " + }, + "launchTemplateName":{ + "shape":"String", + "documentation":"

    The name of the launch template.

    Note: If you specify the launchTemplateName you can't specify the launchTemplateId as well.

    " + }, + "version":{ + "shape":"String", + "documentation":"

    The version number of the launch template, $Default, or $Latest.

    If the value is $Default, the default version of the launch template is used. If the value is $Latest, the latest version of the launch template is used.

    If the AMI ID that's used in a compute environment is from the launch template, the AMI isn't changed when the compute environment is updated. It's only changed if the updateToLatestImageVersion parameter for the compute environment is set to true. During an infrastructure update, if either $Default or $Latest is specified, Batch re-evaluates the launch template version, and it might use a different version of the launch template. This is the case even if the launch template isn't specified in the update. When updating a compute environment, changing the launch template requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide.

    Default: $Default

    Latest: $Latest

    " + }, + "targetInstanceTypes":{ + "shape":"StringList", + "documentation":"

    The instance type or family that this override launch template should be applied to.

    This parameter is required when defining a launch template override.

    Information included in this parameter must meet the following requirements:

    • Must be a valid Amazon EC2 instance type or family.

    • optimal isn't allowed.

    • targetInstanceTypes can target only instance types and families that are included within the ComputeResource.instanceTypes set. targetInstanceTypes doesn't need to include all of the instances from the instanceType set, but at least a subset. For example, if ComputeResource.instanceTypes includes [m5, g5], targetInstanceTypes can include [m5.2xlarge] and [m5.large] but not [c5.large].

    • targetInstanceTypes included within the same launch template override or across launch template overrides can't overlap for the same compute environment. For example, you can't define one launch template override to target an instance family and another define an instance type within this same family.

    " + } + }, + "documentation":"

    An object that represents a launch template to use in place of the default launch template. You must specify either the launch template ID or launch template name in the request, but not both.

    If security groups are specified using both the securityGroupIds parameter of CreateComputeEnvironment and the launch template, the values in the securityGroupIds parameter of CreateComputeEnvironment will be used.

    You can define up to ten (10) overrides for each compute environment.

    This object isn't applicable to jobs that are running on Fargate resources.

    To unset all override templates for a compute environment, you can pass an empty array to the UpdateComputeEnvironment.overrides parameter, or not include the overrides parameter when submitting the UpdateComputeEnvironment API operation.

    " + }, + "LaunchTemplateSpecificationOverrideList":{ + "type":"list", + "member":{"shape":"LaunchTemplateSpecificationOverride"} + }, "LinuxParameters":{ "type":"structure", "members":{ diff --git a/tools/code-generation/api-descriptions/bedrock-agent-runtime-2023-07-26.normal.json b/tools/code-generation/api-descriptions/bedrock-agent-runtime-2023-07-26.normal.json index cb83299431b..6e3f692d33a 100644 --- a/tools/code-generation/api-descriptions/bedrock-agent-runtime-2023-07-26.normal.json +++ b/tools/code-generation/api-descriptions/bedrock-agent-runtime-2023-07-26.normal.json @@ -394,7 +394,7 @@ "type":"string", "max":2048, "min":1, - "pattern":"^(arn:aws(-[^:]{1,10})?:bedrock:[a-z0-9-]{1,20}:([0-9]{12})?:([a-z-]{1,20}/)?)?([a-z0-9.-]{1,63}){0,2}(([:][a-z0-9-]{1,63}){0,2})?(/[a-z0-9]{1,12})?$" + "pattern":"^(arn:aws(-[^:]+)?:(bedrock|sagemaker):[a-z0-9-]{1,20}:([0-9]{12})?:([a-z-]+/)?)?([a-z0-9.-]{1,63}){0,2}(([:][a-z0-9-]{1,63}){0,2})?(/[a-z0-9]{1,12})?$" }, "Boolean":{ "type":"boolean", @@ -823,7 +823,7 @@ "documentation":"

    The reason that the flow completed.

    " } }, - "documentation":"

    Contains information about why a flow completed.

    This data type is used in the following API operations:

    ", + "documentation":"

    Contains information about why a flow completed.

    ", "event":true, "sensitive":true }, @@ -858,7 +858,7 @@ "documentation":"

    The name of the output from the flow input node that begins the prompt flow.

    " } }, - "documentation":"

    Contains information about an input into the prompt flow and where to send it.

    This data type is used in the following API operations:

    " + "documentation":"

    Contains information about an input into the prompt flow and where to send it.

    " }, "FlowInputContent":{ "type":"structure", @@ -868,7 +868,7 @@ "documentation":"

    The input to send to the prompt flow input node.

    " } }, - "documentation":"

    Contains information about an input into the flow.

    This data type is used in the following API operations:

    ", + "documentation":"

    Contains information about an input into the flow.

    ", "sensitive":true, "union":true }, @@ -886,7 +886,7 @@ "documentation":"

    The content in the output.

    " } }, - "documentation":"

    Contains information about the content in an output from prompt flow invocation.

    This data type is used in the following API operations:

    ", + "documentation":"

    Contains information about the content in an output from prompt flow invocation.

    ", "union":true }, "FlowOutputEvent":{ @@ -910,7 +910,7 @@ "documentation":"

    The type of the node that the output is from.

    " } }, - "documentation":"

    Contains information about an output from prompt flow invoction.

    This data type is used in the following API operations:

    ", + "documentation":"

    Contains information about an output from prompt flow invocation.

    ", "event":true, "sensitive":true }, @@ -941,6 +941,10 @@ "shape":"FlowOutputEvent", "documentation":"

    Contains information about an output from flow invocation.

    " }, + "flowTraceEvent":{ + "shape":"FlowTraceEvent", + "documentation":"

    Contains information about a trace, which tracks an input or output for a node in the flow.

    " + }, "internalServerException":{ "shape":"InternalServerException", "documentation":"

    An internal server error occurred. Retry your request.

    " @@ -962,9 +966,204 @@ "documentation":"

    Input validation failed. Check your request parameters and retry the request.

    " } }, - "documentation":"

    The output of the flow.

    This data type is used in the following API operations:

    ", + "documentation":"

    The output of the flow.

    ", "eventstream":true }, + "FlowTrace":{ + "type":"structure", + "members":{ + "conditionNodeResultTrace":{ + "shape":"FlowTraceConditionNodeResultEvent", + "documentation":"

    Contains information about an output from a condition node.

    " + }, + "nodeInputTrace":{ + "shape":"FlowTraceNodeInputEvent", + "documentation":"

    Contains information about the input into a node.

    " + }, + "nodeOutputTrace":{ + "shape":"FlowTraceNodeOutputEvent", + "documentation":"

    Contains information about the output from a node.

    " + } + }, + "documentation":"

    Contains information about an input or output for a node in the flow. For more information, see Track each step in your prompt flow by viewing its trace in Amazon Bedrock.

    ", + "sensitive":true, + "union":true + }, + "FlowTraceCondition":{ + "type":"structure", + "required":["conditionName"], + "members":{ + "conditionName":{ + "shape":"String", + "documentation":"

    The name of the condition.

    " + } + }, + "documentation":"

    Contains information about a condition that was satisfied. For more information, see Track each step in your prompt flow by viewing its trace in Amazon Bedrock.

    ", + "sensitive":true + }, + "FlowTraceConditionNodeResultEvent":{ + "type":"structure", + "required":[ + "nodeName", + "satisfiedConditions", + "timestamp" + ], + "members":{ + "nodeName":{ + "shape":"NodeName", + "documentation":"

    The name of the condition node.

    " + }, + "satisfiedConditions":{ + "shape":"FlowTraceConditions", + "documentation":"

    An array of objects containing information about the conditions that were satisfied.

    " + }, + "timestamp":{ + "shape":"DateTimestamp", + "documentation":"

    The date and time that the trace was returned.

    " + } + }, + "documentation":"

    Contains information about an output from a condition node. For more information, see Track each step in your prompt flow by viewing its trace in Amazon Bedrock.

    ", + "sensitive":true + }, + "FlowTraceConditions":{ + "type":"list", + "member":{"shape":"FlowTraceCondition"}, + "max":5, + "min":1 + }, + "FlowTraceEvent":{ + "type":"structure", + "required":["trace"], + "members":{ + "trace":{ + "shape":"FlowTrace", + "documentation":"

    The trace object containing information about an input or output for a node in the flow.

    " + } + }, + "documentation":"

    Contains information about a trace, which tracks an input or output for a node in the flow. For more information, see Track each step in your prompt flow by viewing its trace in Amazon Bedrock.

    ", + "event":true + }, + "FlowTraceNodeInputContent":{ + "type":"structure", + "members":{ + "document":{ + "shape":"Document", + "documentation":"

    The content of the node input.

    " + } + }, + "documentation":"

    Contains the content of the node input. For more information, see Track each step in your prompt flow by viewing its trace in Amazon Bedrock.

    ", + "sensitive":true, + "union":true + }, + "FlowTraceNodeInputEvent":{ + "type":"structure", + "required":[ + "fields", + "nodeName", + "timestamp" + ], + "members":{ + "fields":{ + "shape":"FlowTraceNodeInputFields", + "documentation":"

    An array of objects containing information about each field in the input.

    " + }, + "nodeName":{ + "shape":"NodeName", + "documentation":"

    The name of the node that received the input.

    " + }, + "timestamp":{ + "shape":"DateTimestamp", + "documentation":"

    The date and time that the trace was returned.

    " + } + }, + "documentation":"

    Contains information about the input into a node. For more information, see Track each step in your prompt flow by viewing its trace in Amazon Bedrock.

    ", + "sensitive":true + }, + "FlowTraceNodeInputField":{ + "type":"structure", + "required":[ + "content", + "nodeInputName" + ], + "members":{ + "content":{ + "shape":"FlowTraceNodeInputContent", + "documentation":"

    The content of the node input.

    " + }, + "nodeInputName":{ + "shape":"NodeInputName", + "documentation":"

    The name of the node input.

    " + } + }, + "documentation":"

    Contains information about a field in the input into a node. For more information, see Track each step in your prompt flow by viewing its trace in Amazon Bedrock.

    ", + "sensitive":true + }, + "FlowTraceNodeInputFields":{ + "type":"list", + "member":{"shape":"FlowTraceNodeInputField"}, + "max":5, + "min":1 + }, + "FlowTraceNodeOutputContent":{ + "type":"structure", + "members":{ + "document":{ + "shape":"Document", + "documentation":"

    The content of the node output.

    " + } + }, + "documentation":"

    Contains the content of the node output. For more information, see Track each step in your prompt flow by viewing its trace in Amazon Bedrock.

    ", + "union":true + }, + "FlowTraceNodeOutputEvent":{ + "type":"structure", + "required":[ + "fields", + "nodeName", + "timestamp" + ], + "members":{ + "fields":{ + "shape":"FlowTraceNodeOutputFields", + "documentation":"

    An array of objects containing information about each field in the output.

    " + }, + "nodeName":{ + "shape":"NodeName", + "documentation":"

    The name of the node that yielded the output.

    " + }, + "timestamp":{ + "shape":"DateTimestamp", + "documentation":"

    The date and time that the trace was returned.

    " + } + }, + "documentation":"

    Contains information about the output from a node. For more information, see Track each step in your prompt flow by viewing its trace in Amazon Bedrock.

    ", + "sensitive":true + }, + "FlowTraceNodeOutputField":{ + "type":"structure", + "required":[ + "content", + "nodeOutputName" + ], + "members":{ + "content":{ + "shape":"FlowTraceNodeOutputContent", + "documentation":"

    The content of the node output.

    " + }, + "nodeOutputName":{ + "shape":"NodeOutputName", + "documentation":"

    The name of the node output.

    " + } + }, + "documentation":"

    Contains information about a field in the output from a node. For more information, see Track each step in your prompt flow by viewing its trace in Amazon Bedrock.

    ", + "sensitive":true + }, + "FlowTraceNodeOutputFields":{ + "type":"list", + "member":{"shape":"FlowTraceNodeOutputField"}, + "max":2, + "min":1 + }, "Function":{ "type":"string", "sensitive":true @@ -1068,7 +1267,7 @@ }, "promptTemplate":{ "shape":"PromptTemplate", - "documentation":"

    Contains the template for the prompt that's sent to the model for response generation.

    " + "documentation":"

    Contains the template for the prompt that's sent to the model for response generation. Generation prompts must include the $search_results$ variable. For more information, see Use placeholder variables in the user guide.

    " } }, "documentation":"

    Contains configurations for response generation based on the knowledge base query results.

    This data type is used in the following API operations:

    " @@ -1756,6 +1955,10 @@ "inputs" ], "members":{ + "enableTrace":{ + "shape":"Boolean", + "documentation":"

    Specifies whether to return the trace for the flow or not. Traces track inputs and outputs for nodes in the flow. For more information, see Track each step in your prompt flow by viewing its trace in Amazon Bedrock.

    " + }, "flowAliasIdentifier":{ "shape":"FlowAliasIdentifier", "documentation":"

    The unique identifier of the flow alias.

    ", @@ -2083,6 +2286,10 @@ "min":1, "pattern":"^\\S*$" }, + "NodeInputName":{ + "type":"string", + "pattern":"^[a-zA-Z]([_]?[0-9a-zA-Z]){0,99}$" + }, "NodeName":{ "type":"string", "pattern":"^[a-zA-Z]([_]?[0-9a-zA-Z]){0,99}$" @@ -2155,7 +2362,7 @@ }, "promptTemplate":{ "shape":"PromptTemplate", - "documentation":"

    Contains the template for the prompt that's sent to the model for response generation.

    " + "documentation":"

    Contains the template for the prompt that's sent to the model. Orchestration prompts must include the $conversation_history$ and $output_format_instructions$ variables. For more information, see Use placeholder variables in the user guide.

    " }, "queryTransformationConfiguration":{ "shape":"QueryTransformationConfiguration", diff --git a/tools/code-generation/api-descriptions/chime-sdk-media-pipelines-2021-07-15.normal.json b/tools/code-generation/api-descriptions/chime-sdk-media-pipelines-2021-07-15.normal.json index c4e1dbd7286..5dbe998c187 100644 --- a/tools/code-generation/api-descriptions/chime-sdk-media-pipelines-2021-07-15.normal.json +++ b/tools/code-generation/api-descriptions/chime-sdk-media-pipelines-2021-07-15.normal.json @@ -783,7 +783,7 @@ }, "IdentifyMultipleLanguages":{ "shape":"Boolean", - "documentation":"

    Turns language identification on or off for multiple languages.

    " + "documentation":"

    Turns language identification on or off for multiple languages.

    Calls to this API must include a LanguageCode, IdentifyLanguage, or IdentifyMultipleLanguages parameter. If you include more than one of those parameters, your transcription job fails.

    " }, "LanguageOptions":{ "shape":"LanguageOptions", @@ -1288,6 +1288,14 @@ "shape":"ChimeSdkMeetingConfiguration", "documentation":"

    The configuration for a specified media pipeline. SourceType must be ChimeSdkMeeting.

    " }, + "SseAwsKeyManagementParams":{ + "shape":"SseAwsKeyManagementParams", + "documentation":"

    An object that contains server side encryption parameters to be used by media capture pipeline. The parameters can also be used by media concatenation pipeline taking media capture pipeline as a media source.

    " + }, + "SinkIamRoleArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the sink role to be used with AwsKmsKeyId in SseAwsKeyManagementParams. Can only interact with S3Bucket sink type. The role must belong to the caller’s account and be able to act on behalf of the caller during the API call. All minimum policy permissions requirements for the caller to perform sink-related actions are the same for SinkIamRoleArn.

    Additionally, the role must have permission to kms:GenerateDataKey using KMS key supplied as AwsKmsKeyId in SseAwsKeyManagementParams. If media concatenation will be required later, the role must also have permission to kms:Decrypt for the same KMS key.

    " + }, "Tags":{ "shape":"TagList", "documentation":"

    The tag key-value pairs.

    " @@ -2416,6 +2424,14 @@ "ChimeSdkMeetingConfiguration":{ "shape":"ChimeSdkMeetingConfiguration", "documentation":"

    The configuration for a specified media pipeline. SourceType must be ChimeSdkMeeting.

    " + }, + "SseAwsKeyManagementParams":{ + "shape":"SseAwsKeyManagementParams", + "documentation":"

    An object that contains server side encryption parameters to be used by media capture pipeline. The parameters can also be used by media concatenation pipeline taking media capture pipeline as a media source.

    " + }, + "SinkIamRoleArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the sink role to be used with AwsKmsKeyId in SseAwsKeyManagementParams.

    " } }, "documentation":"

    A media pipeline object consisting of an ID, source type, source ARN, a sink type, a sink ARN, and a configuration object.

    " @@ -3326,6 +3342,21 @@ }, "documentation":"

    The configuration settings for the SQS sink.

    " }, + "SseAwsKeyManagementParams":{ + "type":"structure", + "required":["AwsKmsKeyId"], + "members":{ + "AwsKmsKeyId":{ + "shape":"String", + "documentation":"

    The KMS key you want to use to encrypt your media pipeline output. Decryption is required for concatenation pipeline. If using a key located in the current Amazon Web Services account, you can specify your KMS key in one of four ways:

    • Use the KMS key ID itself. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.

    • Use an alias for the KMS key ID. For example, alias/ExampleAlias.

    • Use the Amazon Resource Name (ARN) for the KMS key ID. For example, arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab.

    • Use the ARN for the KMS key alias. For example, arn:aws:kms:region:account-ID:alias/ExampleAlias.

    If using a key located in a different Amazon Web Services account than the current Amazon Web Services account, you can specify your KMS key in one of two ways:

    • Use the ARN for the KMS key ID. For example, arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab.

    • Use the ARN for the KMS key alias. For example, arn:aws:kms:region:account-ID:alias/ExampleAlias.

    If you don't specify an encryption key, your output is encrypted with the default Amazon S3 key (SSE-S3).

    Note that the role specified in the SinkIamRoleArn request parameter must have permission to use the specified KMS key.

    " + }, + "AwsKmsEncryptionContext":{ + "shape":"String", + "documentation":"

    Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as non-secret key-value pair known as encryption context pairs, that provides an added layer of security for your data. For more information, see KMS encryption context and Asymmetric keys in KMS in the Key Management Service Developer Guide.

    " + } + }, + "documentation":"

    Contains server side encryption parameters to be used by media capture pipeline. The parameters can also be used by media concatenation pipeline taking media capture pipeline as a media source.

    " + }, "StartSpeakerSearchTaskRequest":{ "type":"structure", "required":[ diff --git a/tools/code-generation/api-descriptions/controlcatalog-2018-05-10.normal.json b/tools/code-generation/api-descriptions/controlcatalog-2018-05-10.normal.json index 5dc6c918de2..139fd5ad558 100644 --- a/tools/code-generation/api-descriptions/controlcatalog-2018-05-10.normal.json +++ b/tools/code-generation/api-descriptions/controlcatalog-2018-05-10.normal.json @@ -218,6 +218,21 @@ "DETECTIVE" ] }, + "ControlParameter":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"String", + "documentation":"

    The parameter name. This name is the parameter key when you call EnableControl or UpdateEnabledControl.

    " + } + }, + "documentation":"

    Four types of control parameters are supported.

    • AllowedRegions: List of Amazon Web Services Regions exempted from the control. Each string is expected to be an Amazon Web Services Region code. This parameter is mandatory for the OU Region deny control, CT.MULTISERVICE.PV.1.

      Example: [\"us-east-1\",\"us-west-2\"]

    • ExemptedActions: List of Amazon Web Services IAM actions exempted from the control. Each string is expected to be an IAM action.

      Example: [\"logs:DescribeLogGroups\",\"logs:StartQuery\",\"logs:GetQueryResults\"]

    • ExemptedPrincipalArns: List of Amazon Web Services IAM principal ARNs exempted from the control. Each string is expected to be an IAM principal that follows the pattern ^arn:(aws|aws-us-gov):(iam|sts)::.+:.+$

      Example: [\"arn:aws:iam::*:role/ReadOnly\",\"arn:aws:sts::*:assumed-role/ReadOnly/*\"]

    • ExemptedResourceArns: List of resource ARNs exempted from the control. Each string is expected to be a resource ARN.

      Example: [\"arn:aws:s3:::my-bucket-name\"]

    " + }, + "ControlParameters":{ + "type":"list", + "member":{"shape":"ControlParameter"} + }, "ControlScope":{ "type":"string", "enum":[ @@ -347,11 +362,36 @@ }, "Behavior":{ "shape":"ControlBehavior", - "documentation":"

    A term that identifies the control's functional behavior. One of Preventive, Deteictive, Proactive

    " + "documentation":"

    A term that identifies the control's functional behavior. One of Preventive, Detective, Proactive

    " + }, + "RegionConfiguration":{"shape":"RegionConfiguration"}, + "Implementation":{ + "shape":"ImplementationDetails", + "documentation":"

    Returns information about the control, as an ImplementationDetails object that shows the underlying implementation type for a control.

    " }, - "RegionConfiguration":{"shape":"RegionConfiguration"} + "Parameters":{ + "shape":"ControlParameters", + "documentation":"

    Returns an array of ControlParameter objects that specify the parameters a control supports. An empty list is returned for controls that don’t support parameters.

    " + } } }, + "ImplementationDetails":{ + "type":"structure", + "required":["Type"], + "members":{ + "Type":{ + "shape":"ImplementationType", + "documentation":"

    A string that describes a control's implementation type.

    " + } + }, + "documentation":"

    An object that describes the implementation type for a control.

    Our ImplementationDetails Type format has three required segments:

    • SERVICE-PROVIDER::SERVICE-NAME::RESOURCE-NAME

    For example, AWS::Config::ConfigRule or AWS::SecurityHub::SecurityControl resources have the format with three required segments.

    Our ImplementationDetails Type format has an optional fourth segment, which is present for applicable implementation types. The format is as follows:

    • SERVICE-PROVIDER::SERVICE-NAME::RESOURCE-NAME::RESOURCE-TYPE-DESCRIPTION

    For example, AWS::Organizations::Policy::SERVICE_CONTROL_POLICY or AWS::CloudFormation::Type::HOOK have the format with four segments.

    Although the format is similar, the values for the Type field do not match any Amazon Web Services CloudFormation values, and we do not use CloudFormation to implement these controls.

    " + }, + "ImplementationType":{ + "type":"string", + "max":2048, + "min":7, + "pattern":"[A-Za-z0-9]+(::[A-Za-z0-9_]+){2,3}" + }, "InternalServerException":{ "type":"structure", "members":{ @@ -613,7 +653,7 @@ "documentation":"

    Regions in which the control is available to be deployed.

    " } }, - "documentation":"

    Returns information about the control, including the scope of the control, if enabled, and the Regions in which the control currently is available for deployment.

    If you are applying controls through an Amazon Web Services Control Tower landing zone environment, remember that the values returned in the RegionConfiguration API operation are not related to the governed Regions in your landing zone. For example, if you are governing Regions A,B,and C while the control is available in Regions A, B, C, and D, you'd see a response with DeployableRegions of A, B, C, and D for a control with REGIONAL scope, even though you may not intend to deploy the control in Region D, because you do not govern it through your landing zone.

    " + "documentation":"

    Returns information about the control, including the scope of the control, if enabled, and the Regions in which the control currently is available for deployment. For more information about scope, see Global services.

    If you are applying controls through an Amazon Web Services Control Tower landing zone environment, remember that the values returned in the RegionConfiguration API operation are not related to the governed Regions in your landing zone. For example, if you are governing Regions A, B, and C while the control is available in Regions A, B, C, and D, you'd see a response with DeployableRegions of A, B, C, and D for a control with REGIONAL scope, even though you may not intend to deploy the control in Region D, because you do not govern it through your landing zone.

    " }, "ResourceNotFoundException":{ "type":"structure", diff --git a/tools/code-generation/api-descriptions/eks-2017-11-01.normal.json b/tools/code-generation/api-descriptions/eks-2017-11-01.normal.json index 6fa05b7709d..c5b551fc8e3 100644 --- a/tools/code-generation/api-descriptions/eks-2017-11-01.normal.json +++ b/tools/code-generation/api-descriptions/eks-2017-11-01.normal.json @@ -3557,7 +3557,7 @@ "members":{ "code":{ "shape":"NodegroupIssueCode", - "documentation":"

    A brief description of the error.

    • AccessDenied: Amazon EKS or one or more of your managed nodes is failing to authenticate or authorize with your Kubernetes cluster API server.

    • AsgInstanceLaunchFailures: Your Auto Scaling group is experiencing failures while attempting to launch instances.

    • AutoScalingGroupNotFound: We couldn't find the Auto Scaling group associated with the managed node group. You may be able to recreate an Auto Scaling group with the same settings to recover.

    • ClusterUnreachable: Amazon EKS or one or more of your managed nodes is unable to to communicate with your Kubernetes cluster API server. This can happen if there are network disruptions or if API servers are timing out processing requests.

    • Ec2LaunchTemplateNotFound: We couldn't find the Amazon EC2 launch template for your managed node group. You may be able to recreate a launch template with the same settings to recover.

    • Ec2LaunchTemplateVersionMismatch: The Amazon EC2 launch template version for your managed node group does not match the version that Amazon EKS created. You may be able to revert to the version that Amazon EKS created to recover.

    • Ec2SecurityGroupDeletionFailure: We could not delete the remote access security group for your managed node group. Remove any dependencies from the security group.

    • Ec2SecurityGroupNotFound: We couldn't find the cluster security group for the cluster. You must recreate your cluster.

    • Ec2SubnetInvalidConfiguration: One or more Amazon EC2 subnets specified for a node group do not automatically assign public IP addresses to instances launched into it. If you want your instances to be assigned a public IP address, then you need to enable the auto-assign public IP address setting for the subnet. See Modifying the public IPv4 addressing attribute for your subnet in the Amazon VPC User Guide.

    • IamInstanceProfileNotFound: We couldn't find the IAM instance profile for your managed node group. You may be able to recreate an instance profile with the same settings to recover.

    • IamNodeRoleNotFound: We couldn't find the IAM role for your managed node group. You may be able to recreate an IAM role with the same settings to recover.

    • InstanceLimitExceeded: Your Amazon Web Services account is unable to launch any more instances of the specified instance type. You may be able to request an Amazon EC2 instance limit increase to recover.

    • InsufficientFreeAddresses: One or more of the subnets associated with your managed node group does not have enough available IP addresses for new nodes.

    • InternalFailure: These errors are usually caused by an Amazon EKS server-side issue.

    • NodeCreationFailure: Your launched instances are unable to register with your Amazon EKS cluster. Common causes of this failure are insufficient node IAM role permissions or lack of outbound internet access for the nodes.

    " + "documentation":"

    A brief description of the error.

    • AccessDenied: Amazon EKS or one or more of your managed nodes is failing to authenticate or authorize with your Kubernetes cluster API server.

    • AsgInstanceLaunchFailures: Your Auto Scaling group is experiencing failures while attempting to launch instances.

    • AutoScalingGroupNotFound: We couldn't find the Auto Scaling group associated with the managed node group. You may be able to recreate an Auto Scaling group with the same settings to recover.

    • ClusterUnreachable: Amazon EKS or one or more of your managed nodes is unable to communicate with your Kubernetes cluster API server. This can happen if there are network disruptions or if API servers are timing out processing requests.

    • Ec2InstanceTypeDoesNotExist: One or more of the supplied Amazon EC2 instance types do not exist. Amazon EKS checked for the instance types that you provided in this Amazon Web Services Region, and one or more aren't available.

    • Ec2LaunchTemplateNotFound: We couldn't find the Amazon EC2 launch template for your managed node group. You may be able to recreate a launch template with the same settings to recover.

    • Ec2LaunchTemplateVersionMismatch: The Amazon EC2 launch template version for your managed node group does not match the version that Amazon EKS created. You may be able to revert to the version that Amazon EKS created to recover.

    • Ec2SecurityGroupDeletionFailure: We could not delete the remote access security group for your managed node group. Remove any dependencies from the security group.

    • Ec2SecurityGroupNotFound: We couldn't find the cluster security group for the cluster. You must recreate your cluster.

    • Ec2SubnetInvalidConfiguration: One or more Amazon EC2 subnets specified for a node group do not automatically assign public IP addresses to instances launched into it. If you want your instances to be assigned a public IP address, then you need to enable the auto-assign public IP address setting for the subnet. See Modifying the public IPv4 addressing attribute for your subnet in the Amazon VPC User Guide.

    • IamInstanceProfileNotFound: We couldn't find the IAM instance profile for your managed node group. You may be able to recreate an instance profile with the same settings to recover.

    • IamNodeRoleNotFound: We couldn't find the IAM role for your managed node group. You may be able to recreate an IAM role with the same settings to recover.

    • InstanceLimitExceeded: Your Amazon Web Services account is unable to launch any more instances of the specified instance type. You may be able to request an Amazon EC2 instance limit increase to recover.

    • InsufficientFreeAddresses: One or more of the subnets associated with your managed node group does not have enough available IP addresses for new nodes.

    • InternalFailure: These errors are usually caused by an Amazon EKS server-side issue.

    • NodeCreationFailure: Your launched instances are unable to register with your Amazon EKS cluster. Common causes of this failure are insufficient node IAM role permissions or lack of outbound internet access for the nodes.

    " }, "message":{ "shape":"String", @@ -4394,7 +4394,8 @@ "Unknown", "AutoScalingGroupInstanceRefreshActive", "KubernetesLabelInvalid", - "Ec2LaunchTemplateVersionMaxLimitExceeded" + "Ec2LaunchTemplateVersionMaxLimitExceeded", + "Ec2InstanceTypeDoesNotExist" ] }, "NodegroupResources":{ diff --git a/tools/code-generation/api-descriptions/firehose-2015-08-04.normal.json b/tools/code-generation/api-descriptions/firehose-2015-08-04.normal.json index 9c5be929ebe..cca217dd08a 100644 --- a/tools/code-generation/api-descriptions/firehose-2015-08-04.normal.json +++ b/tools/code-generation/api-descriptions/firehose-2015-08-04.normal.json @@ -29,7 +29,7 @@ {"shape":"ResourceInUseException"}, {"shape":"InvalidKMSResourceException"} ], - "documentation":"

    Creates a Firehose delivery stream.

    By default, you can create up to 50 delivery streams per Amazon Web Services Region.

    This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING. After the delivery stream is created, its status is ACTIVE and it now accepts data. If the delivery stream creation fails, the status transitions to CREATING_FAILED. Attempts to send data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream.

    If the status of a delivery stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it.

    A Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType parameter to KinesisStreamAsSource, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration parameter.

    To create a delivery stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is optional. You can also invoke StartDeliveryStreamEncryption to turn on SSE for an existing delivery stream that doesn't have SSE enabled.

    A delivery stream is configured with a single destination, such as Amazon Simple Storage Service (Amazon S3), Amazon Redshift, Amazon OpenSearch Service, Amazon OpenSearch Serverless, Splunk, and any custom HTTP endpoint or HTTP endpoints owned by or supported by third-party service providers, including Datadog, Dynatrace, LogicMonitor, MongoDB, New Relic, and Sumo Logic. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration, S3DestinationConfiguration, ElasticsearchDestinationConfiguration, RedshiftDestinationConfiguration, or SplunkDestinationConfiguration.

    When you specify S3DestinationConfiguration, you can also provide the following optional values: BufferingHints, EncryptionConfiguration, and CompressionFormat. By default, if no BufferingHints value is provided, Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3.

    A few notes about Amazon Redshift as a destination:

    • An Amazon Redshift destination requires an S3 bucket as intermediate location. Firehose first delivers data to Amazon S3 and then uses COPY syntax to load data into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration parameter.

    • The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats.

    • We strongly recommend that you use the user name and password you provide exclusively with Firehose, and that the permissions for the account are restricted for Amazon Redshift INSERT permissions.

    Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Firehose Access to an Amazon S3 Destination in the Amazon Firehose Developer Guide.

    " + "documentation":"

    Creates a Firehose stream.

    By default, you can create up to 50 Firehose streams per Amazon Web Services Region.

    This is an asynchronous operation that immediately returns. The initial status of the Firehose stream is CREATING. After the Firehose stream is created, its status is ACTIVE and it now accepts data. If the Firehose stream creation fails, the status transitions to CREATING_FAILED. Attempts to send data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a Firehose stream, use DescribeDeliveryStream.

    If the status of a Firehose stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it.

    A Firehose stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType parameter to KinesisStreamAsSource, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration parameter.

    To create a Firehose stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is optional. You can also invoke StartDeliveryStreamEncryption to turn on SSE for an existing Firehose stream that doesn't have SSE enabled.

    A Firehose stream is configured with a single destination, such as Amazon Simple Storage Service (Amazon S3), Amazon Redshift, Amazon OpenSearch Service, Amazon OpenSearch Serverless, Splunk, and any custom HTTP endpoint or HTTP endpoints owned by or supported by third-party service providers, including Datadog, Dynatrace, LogicMonitor, MongoDB, New Relic, and Sumo Logic. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration, S3DestinationConfiguration, ElasticsearchDestinationConfiguration, RedshiftDestinationConfiguration, or SplunkDestinationConfiguration.

    When you specify S3DestinationConfiguration, you can also provide the following optional values: BufferingHints, EncryptionConfiguration, and CompressionFormat. By default, if no BufferingHints value is provided, Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3.

    A few notes about Amazon Redshift as a destination:

    • An Amazon Redshift destination requires an S3 bucket as intermediate location. Firehose first delivers data to Amazon S3 and then uses COPY syntax to load data into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration parameter.

    • The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats.

    • We strongly recommend that you use the user name and password you provide exclusively with Firehose, and that the permissions for the account are restricted for Amazon Redshift INSERT permissions.

    Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Firehose Access to an Amazon S3 Destination in the Amazon Firehose Developer Guide.

    " }, "DeleteDeliveryStream":{ "name":"DeleteDeliveryStream", @@ -43,7 +43,7 @@ {"shape":"ResourceInUseException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

    Deletes a delivery stream and its data.

    You can delete a delivery stream only if it is in one of the following states: ACTIVE, DELETING, CREATING_FAILED, or DELETING_FAILED. You can't delete a delivery stream that is in the CREATING state. To check the state of a delivery stream, use DescribeDeliveryStream.

    DeleteDeliveryStream is an asynchronous API. When an API request to DeleteDeliveryStream succeeds, the delivery stream is marked for deletion, and it goes into the DELETING state.While the delivery stream is in the DELETING state, the service might continue to accept records, but it doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, first stop any applications that are sending records before you delete a delivery stream.

    Removal of a delivery stream that is in the DELETING state is a low priority operation for the service. A stream may remain in the DELETING state for several minutes. Therefore, as a best practice, applications should not wait for streams in the DELETING state to be removed.

    " + "documentation":"

    Deletes a Firehose stream and its data.

    You can delete a Firehose stream only if it is in one of the following states: ACTIVE, DELETING, CREATING_FAILED, or DELETING_FAILED. You can't delete a Firehose stream that is in the CREATING state. To check the state of a Firehose stream, use DescribeDeliveryStream.

    DeleteDeliveryStream is an asynchronous API. When an API request to DeleteDeliveryStream succeeds, the Firehose stream is marked for deletion, and it goes into the DELETING state. While the Firehose stream is in the DELETING state, the service might continue to accept records, but it doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, first stop any applications that are sending records before you delete a Firehose stream.

    Removal of a Firehose stream that is in the DELETING state is a low priority operation for the service. A stream may remain in the DELETING state for several minutes. Therefore, as a best practice, applications should not wait for streams in the DELETING state to be removed.

    " }, "DescribeDeliveryStream":{ "name":"DescribeDeliveryStream", @@ -56,7 +56,7 @@ "errors":[ {"shape":"ResourceNotFoundException"} ], - "documentation":"

    Describes the specified delivery stream and its status. For example, after your delivery stream is created, call DescribeDeliveryStream to see whether the delivery stream is ACTIVE and therefore ready for data to be sent to it.

    If the status of a delivery stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it. If the status is DELETING_FAILED, you can force deletion by invoking DeleteDeliveryStream again but with DeleteDeliveryStreamInput$AllowForceDelete set to true.

    " + "documentation":"

    Describes the specified Firehose stream and its status. For example, after your Firehose stream is created, call DescribeDeliveryStream to see whether the Firehose stream is ACTIVE and therefore ready for data to be sent to it.

    If the status of a Firehose stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it. If the status is DELETING_FAILED, you can force deletion by invoking DeleteDeliveryStream again but with DeleteDeliveryStreamInput$AllowForceDelete set to true.

    " }, "ListDeliveryStreams":{ "name":"ListDeliveryStreams", @@ -66,7 +66,7 @@ }, "input":{"shape":"ListDeliveryStreamsInput"}, "output":{"shape":"ListDeliveryStreamsOutput"}, - "documentation":"

    Lists your delivery streams in alphabetical order of their names.

    The number of delivery streams might be too large to return using a single call to ListDeliveryStreams. You can limit the number of delivery streams returned, using the Limit parameter. To determine whether there are more delivery streams to list, check the value of HasMoreDeliveryStreams in the output. If there are more delivery streams to list, you can request them by calling this operation again and setting the ExclusiveStartDeliveryStreamName parameter to the name of the last delivery stream returned in the last call.

    " + "documentation":"

    Lists your Firehose streams in alphabetical order of their names.

    The number of Firehose streams might be too large to return using a single call to ListDeliveryStreams. You can limit the number of Firehose streams returned, using the Limit parameter. To determine whether there are more delivery streams to list, check the value of HasMoreDeliveryStreams in the output. If there are more Firehose streams to list, you can request them by calling this operation again and setting the ExclusiveStartDeliveryStreamName parameter to the name of the last Firehose stream returned in the last call.

    " }, "ListTagsForDeliveryStream":{ "name":"ListTagsForDeliveryStream", @@ -81,7 +81,7 @@ {"shape":"InvalidArgumentException"}, {"shape":"LimitExceededException"} ], - "documentation":"

    Lists the tags for the specified delivery stream. This operation has a limit of five transactions per second per account.

    " + "documentation":"

    Lists the tags for the specified Firehose stream. This operation has a limit of five transactions per second per account.

    " }, "PutRecord":{ "name":"PutRecord", @@ -98,7 +98,7 @@ {"shape":"InvalidSourceException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

    Writes a single data record into an Amazon Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers.

    By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Firehose Limits.

    Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.

    You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on.

    Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.

    The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation.

    If the PutRecord operation throws a ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

    Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations.

    Data records sent to Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

    Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.

    " + "documentation":"

    Writes a single data record into a Firehose stream. To write multiple data records into a Firehose stream, use PutRecordBatch. Applications using these operations are referred to as producers.

    By default, each Firehose stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each Firehose stream. For more information about limits and how to request an increase, see Amazon Firehose Limits.

    Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a Firehose stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.

    You must specify the name of the Firehose stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on.

    For multi record de-aggregation, you cannot put more than 500 records even if the data blob length is less than 1000 KiB. If you include more than 500 records, the request succeeds but the record de-aggregation doesn't work as expected and transformation lambda is invoked with the complete base64 encoded data blob instead of de-aggregated base64 decoded records.

    Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.

    The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation.

    If the PutRecord operation throws a ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the Firehose stream.

    Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations.

    Data records sent to Firehose are stored for 24 hours from the time they are added to a Firehose stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

    Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.

    " }, "PutRecordBatch":{ "name":"PutRecordBatch", @@ -115,7 +115,7 @@ {"shape":"InvalidSourceException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

    Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers.

    Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.

    For information about service quota, see Amazon Firehose Quota.

    Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed.

    You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on.

    Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.

    The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records.

    A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error.

    If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination.

    If PutRecordBatch throws ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

    Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations.

    Data records sent to Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

    Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.

    " + "documentation":"

    Writes multiple data records into a Firehose stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a Firehose stream, use PutRecord. Applications using these operations are referred to as producers.

    Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a Firehose stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.

    For information about service quota, see Amazon Firehose Quota.

    Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed.

    You must specify the name of the Firehose stream and the data record when using PutRecordBatch. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on.

    For multi record de-aggregation, you cannot put more than 500 records even if the data blob length is less than 1000 KiB. If you include more than 500 records, the request succeeds but the record de-aggregation doesn't work as expected and transformation lambda is invoked with the complete base64 encoded data blob instead of de-aggregated base64 decoded records.

    Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.

    The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records.

    A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error.

    If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination.

    If PutRecordBatch throws ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the Firehose stream.

    Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations.

    Data records sent to Firehose are stored for 24 hours from the time they are added to a Firehose stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

    Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.

    " }, "StartDeliveryStreamEncryption":{ "name":"StartDeliveryStreamEncryption", @@ -132,7 +132,7 @@ {"shape":"LimitExceededException"}, {"shape":"InvalidKMSResourceException"} ], - "documentation":"

    Enables server-side encryption (SSE) for the delivery stream.

    This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.

    To check the encryption status of a delivery stream, use DescribeDeliveryStream.

    Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant.

    For the KMS grant creation to be successful, the Firehose API operations StartDeliveryStreamEncryption and CreateDeliveryStream should not be called with session credentials that are more than 6 hours old.

    If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK.

    If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Firehose to invoke KMS encrypt and decrypt operations.

    You can enable SSE for a delivery stream only if it's a delivery stream that uses DirectPut as its source.

    The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.

    " + "documentation":"

    Enables server-side encryption (SSE) for the Firehose stream.

    This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a Firehose stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your Firehose stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the Firehose stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.

    To check the encryption status of a Firehose stream, use DescribeDeliveryStream.

    Even if encryption is currently enabled for a Firehose stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant.

    For the KMS grant creation to be successful, the Firehose API operations StartDeliveryStreamEncryption and CreateDeliveryStream should not be called with session credentials that are more than 6 hours old.

    If a Firehose stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK.

    If the encryption status of your Firehose stream is ENABLING_FAILED, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Firehose to invoke KMS encrypt and decrypt operations.

    You can enable SSE for a Firehose stream only if it's a Firehose stream that uses DirectPut as its source.

    The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per Firehose stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same Firehose stream in a 24-hour period.

    " }, "StopDeliveryStreamEncryption":{ "name":"StopDeliveryStreamEncryption", @@ -148,7 +148,7 @@ {"shape":"InvalidArgumentException"}, {"shape":"LimitExceededException"} ], - "documentation":"

    Disables server-side encryption (SSE) for the delivery stream.

    This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to DISABLING, and then to DISABLED. You can continue to read and write data to your stream while its status is DISABLING. It can take up to 5 seconds after the encryption status changes to DISABLED before all records written to the delivery stream are no longer subject to encryption. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.

    To check the encryption state of a delivery stream, use DescribeDeliveryStream.

    If SSE is enabled using a customer managed CMK and then you invoke StopDeliveryStreamEncryption, Firehose schedules the related KMS grant for retirement and then retires it after it ensures that it is finished delivering records to the destination.

    The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.

    " + "documentation":"

    Disables server-side encryption (SSE) for the Firehose stream.

    This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to DISABLING, and then to DISABLED. You can continue to read and write data to your stream while its status is DISABLING. It can take up to 5 seconds after the encryption status changes to DISABLED before all records written to the Firehose stream are no longer subject to encryption. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.

    To check the encryption state of a Firehose stream, use DescribeDeliveryStream.

    If SSE is enabled using a customer managed CMK and then you invoke StopDeliveryStreamEncryption, Firehose schedules the related KMS grant for retirement and then retires it after it ensures that it is finished delivering records to the destination.

    The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per Firehose stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same Firehose stream in a 24-hour period.

    " }, "TagDeliveryStream":{ "name":"TagDeliveryStream", @@ -164,7 +164,7 @@ {"shape":"InvalidArgumentException"}, {"shape":"LimitExceededException"} ], - "documentation":"

    Adds or updates tags for the specified delivery stream. A tag is a key-value pair that you can define and assign to Amazon Web Services resources. If you specify a tag that already exists, the tag value is replaced with the value that you specify in the request. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

    Each delivery stream can have up to 50 tags.

    This operation has a limit of five transactions per second per account.

    " + "documentation":"

    Adds or updates tags for the specified Firehose stream. A tag is a key-value pair that you can define and assign to Amazon Web Services resources. If you specify a tag that already exists, the tag value is replaced with the value that you specify in the request. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the Firehose stream. For more information about tags, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

    Each Firehose stream can have up to 50 tags.

    This operation has a limit of five transactions per second per account.

    " }, "UntagDeliveryStream":{ "name":"UntagDeliveryStream", @@ -180,7 +180,7 @@ {"shape":"InvalidArgumentException"}, {"shape":"LimitExceededException"} ], - "documentation":"

    Removes tags from the specified delivery stream. Removed tags are deleted, and you can't recover them after this operation successfully completes.

    If you specify a tag that doesn't exist, the operation ignores it.

    This operation has a limit of five transactions per second per account.

    " + "documentation":"

    Removes tags from the specified Firehose stream. Removed tags are deleted, and you can't recover them after this operation successfully completes.

    If you specify a tag that doesn't exist, the operation ignores it.

    This operation has a limit of five transactions per second per account.

    " }, "UpdateDestination":{ "name":"UpdateDestination", @@ -196,7 +196,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

    Updates the specified destination of the specified delivery stream.

    Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes.

    Switching between Amazon OpenSearch Service and other services is not supported. For an Amazon OpenSearch Service destination, you can only update to another Amazon OpenSearch Service destination.

    If the destination type is the same, Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration is maintained on the destination.

    If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Firehose does not merge any parameters. In this case, all parameters must be specified.

    Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId in the next call.

    " + "documentation":"

    Updates the specified destination of the specified Firehose stream.

    Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target Firehose stream remains active while the configurations are updated, so data writes to the Firehose stream can continue during this process. The updated configurations are usually effective within a few minutes.

    Switching between Amazon OpenSearch Service and other services is not supported. For an Amazon OpenSearch Service destination, you can only update to another Amazon OpenSearch Service destination.

    If the destination type is the same, Firehose merges the configuration parameters specified with the destination configuration that already exists on the Firehose stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration is maintained on the destination.

    If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Firehose does not merge any parameters. In this case, all parameters must be specified.

    Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId in the next call.

    " } }, "shapes":{ @@ -204,7 +204,7 @@ "type":"string", "max":512, "min":1, - "pattern":"arn:.*" + "pattern":"arn:.*:kms:[a-zA-Z0-9\\-]+:\\d{12}:(key|alias)/[a-zA-Z_0-9+=,.@\\-_/]+" }, "AmazonOpenSearchServerlessBufferingHints":{ "type":"structure", @@ -215,7 +215,7 @@ }, "SizeInMBs":{ "shape":"AmazonOpenSearchServerlessBufferingSizeInMBs", - "documentation":"

    Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.

    We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.

    " + "documentation":"

    Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.

    We recommend setting this parameter to a value greater than the amount of data you typically ingest into the Firehose stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.

    " } }, "documentation":"

    Describes the buffering to perform before delivering data to the Serverless offering for Amazon OpenSearch Service destination.

    " @@ -375,7 +375,7 @@ }, "SizeInMBs":{ "shape":"AmazonopensearchserviceBufferingSizeInMBs", - "documentation":"

    Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.

    We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.

    " + "documentation":"

    Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.

    We recommend setting this parameter to a value greater than the amount of data you typically ingest into the Firehose stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.

    " } }, "documentation":"

    Describes the buffering to perform before delivering data to the Amazon OpenSearch Service destination.

    " @@ -522,7 +522,7 @@ }, "TypeName":{ "shape":"AmazonopensearchserviceTypeName", - "documentation":"

    The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Firehose returns an error during runtime.

    If you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery stream, Firehose still delivers data to Elasticsearch with the old index name and type name. If you want to update your delivery stream with a new index name, provide an empty string for TypeName.

    " + "documentation":"

    The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Firehose returns an error during runtime.

    If you upgrade Elasticsearch from 6.x to 7.x and don’t update your Firehose stream, Firehose still delivers data to Elasticsearch with the old index name and type name. If you want to update your Firehose stream with a new index name, provide an empty string for TypeName.

    " }, "IndexRotationPeriod":{ "shape":"AmazonopensearchserviceIndexRotationPeriod", @@ -550,7 +550,7 @@ "type":"string", "max":512, "min":1, - "pattern":"arn:.*" + "pattern":"arn:.*:es:[a-zA-Z0-9\\-]+:\\d{12}:domain/[a-z][-0-9a-z]{2,27}" }, "AmazonopensearchserviceIndexName":{ "type":"string", @@ -623,14 +623,14 @@ "type":"string", "max":2048, "min":1, - "pattern":"arn:.*" + "pattern":"arn:.*:s3:::[\\w\\.\\-]{1,255}" }, "BufferingHints":{ "type":"structure", "members":{ "SizeInMBs":{ "shape":"SizeInMBs", - "documentation":"

    Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. This parameter is optional but if you specify a value for it, you must also specify a value for IntervalInSeconds, and vice versa.

    We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.

    " + "documentation":"

    Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. This parameter is optional but if you specify a value for it, you must also specify a value for IntervalInSeconds, and vice versa.

    We recommend setting this parameter to a value greater than the amount of data you typically ingest into the Firehose stream in 10 seconds. For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.

    " }, "IntervalInSeconds":{ "shape":"IntervalInSeconds", @@ -644,10 +644,14 @@ "members":{ "CatalogARN":{ "shape":"GlueDataCatalogARN", - "documentation":"

    Specifies the Glue catalog ARN indentifier of the destination Apache Iceberg Tables. You must specify the ARN in the format arn:aws:glue:region:account-id:catalog.

    Amazon Data Firehose is in preview release and is subject to change.

    " + "documentation":"

    Specifies the Glue catalog ARN identifier of the destination Apache Iceberg Tables. You must specify the ARN in the format arn:aws:glue:region:account-id:catalog.

    " + }, + "WarehouseLocation":{ + "shape":"WarehouseLocation", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " } }, - "documentation":"

    Describes the containers where the destination Apache Iceberg Tables are persisted.

    Amazon Data Firehose is in preview release and is subject to change.

    " + "documentation":"

    Describes the containers where the destination Apache Iceberg Tables are persisted.

    " }, "CloudWatchLoggingOptions":{ "type":"structure", @@ -665,13 +669,13 @@ "documentation":"

    The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.

    " } }, - "documentation":"

    Describes the Amazon CloudWatch logging options for your delivery stream.

    " + "documentation":"

    Describes the Amazon CloudWatch logging options for your Firehose stream.

    " }, "ClusterJDBCURL":{ "type":"string", "max":512, "min":1, - "pattern":"jdbc:(redshift|postgresql)://((?!-)[A-Za-z0-9-]{1,63}(?The name of the delivery stream. This name must be unique per Amazon Web Services account in the same Amazon Web Services Region. If the delivery streams are in different accounts or different Regions, you can have multiple delivery streams with the same name.

    " + "documentation":"

    The name of the Firehose stream. This name must be unique per Amazon Web Services account in the same Amazon Web Services Region. If the Firehose streams are in different accounts or different Regions, you can have multiple Firehose streams with the same name.

    " }, "DeliveryStreamType":{ "shape":"DeliveryStreamType", - "documentation":"

    The delivery stream type. This parameter can be one of the following values:

    • DirectPut: Provider applications access the delivery stream directly.

    • KinesisStreamAsSource: The delivery stream uses a Kinesis data stream as a source.

    " + "documentation":"

    The Firehose stream type. This parameter can be one of the following values:

    • DirectPut: Provider applications access the Firehose stream directly.

    • KinesisStreamAsSource: The Firehose stream uses a Kinesis data stream as a source.

    " }, "KinesisStreamSourceConfiguration":{ "shape":"KinesisStreamSourceConfiguration", - "documentation":"

    When a Kinesis data stream is used as the source for the delivery stream, a KinesisStreamSourceConfiguration containing the Kinesis data stream Amazon Resource Name (ARN) and the role ARN for the source stream.

    " + "documentation":"

    When a Kinesis data stream is used as the source for the Firehose stream, a KinesisStreamSourceConfiguration containing the Kinesis data stream Amazon Resource Name (ARN) and the role ARN for the source stream.

    " }, "DeliveryStreamEncryptionConfigurationInput":{ "shape":"DeliveryStreamEncryptionConfigurationInput", @@ -789,7 +793,7 @@ }, "Tags":{ "shape":"TagDeliveryStreamInputTagList", - "documentation":"

    A set of tags to assign to the delivery stream. A tag is a key-value pair that you can define and assign to Amazon Web Services resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

    You can specify up to 50 tags when creating a delivery stream.

    If you specify tags in the CreateDeliveryStream action, Amazon Data Firehose performs an additional authorization on the firehose:TagDeliveryStream action to verify if users have permissions to create tags. If you do not provide this permission, requests to create new Firehose delivery streams with IAM resource tags will fail with an AccessDeniedException such as following.

    AccessDeniedException

    User: arn:aws:sts::x:assumed-role/x/x is not authorized to perform: firehose:TagDeliveryStream on resource: arn:aws:firehose:us-east-1:x:deliverystream/x with an explicit deny in an identity-based policy.

    For an example IAM policy, see Tag example.

    " + "documentation":"

    A set of tags to assign to the Firehose stream. A tag is a key-value pair that you can define and assign to Amazon Web Services resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the Firehose stream. For more information about tags, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

    You can specify up to 50 tags when creating a Firehose stream.

    If you specify tags in the CreateDeliveryStream action, Amazon Data Firehose performs an additional authorization on the firehose:TagDeliveryStream action to verify if users have permissions to create tags. If you do not provide this permission, requests to create new Firehose streams with IAM resource tags will fail with an AccessDeniedException such as following.

    AccessDeniedException

    User: arn:aws:sts::x:assumed-role/x/x is not authorized to perform: firehose:TagDeliveryStream on resource: arn:aws:firehose:us-east-1:x:deliverystream/x with an explicit deny in an identity-based policy.

    For an example IAM policy, see Tag example.

    " }, "AmazonOpenSearchServerlessDestinationConfiguration":{ "shape":"AmazonOpenSearchServerlessDestinationConfiguration", @@ -802,7 +806,11 @@ }, "IcebergDestinationConfiguration":{ "shape":"IcebergDestinationConfiguration", - "documentation":"

    Configure Apache Iceberg Tables destination.

    Amazon Data Firehose is in preview release and is subject to change.

    " + "documentation":"

    Configure Apache Iceberg Tables destination.

    " + }, + "DatabaseSourceConfiguration":{ + "shape":"DatabaseSourceConfiguration", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " } } }, @@ -811,14 +819,15 @@ "members":{ "DeliveryStreamARN":{ "shape":"DeliveryStreamARN", - "documentation":"

    The ARN of the delivery stream.

    " + "documentation":"

    The ARN of the Firehose stream.

    " } } }, "CustomTimeZone":{ "type":"string", "max":50, - "min":0 + "min":0, + "pattern":"^$|[a-zA-Z/_]+" }, "Data":{ "type":"blob", @@ -849,7 +858,7 @@ }, "DataTableColumns":{ "type":"string", - "max":204800, + "max":10240, "min":0, "pattern":".*" }, @@ -859,6 +868,271 @@ "min":1, "pattern":".*" }, + "DatabaseColumnIncludeOrExcludeList":{ + "type":"list", + "member":{"shape":"DatabaseColumnName"} + }, + "DatabaseColumnList":{ + "type":"structure", + "members":{ + "Include":{ + "shape":"DatabaseColumnIncludeOrExcludeList", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "Exclude":{ + "shape":"DatabaseColumnIncludeOrExcludeList", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "DatabaseColumnName":{ + "type":"string", + "max":194, + "min":1, + "pattern":"[\\u0001-\\uFFFF]*" + }, + "DatabaseEndpoint":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^(?!\\s*$).+" + }, + "DatabaseIncludeOrExcludeList":{ + "type":"list", + "member":{"shape":"DatabaseName"} + }, + "DatabaseList":{ + "type":"structure", + "members":{ + "Include":{ + "shape":"DatabaseIncludeOrExcludeList", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "Exclude":{ + "shape":"DatabaseIncludeOrExcludeList", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "DatabaseName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[\\u0001-\\uFFFF]*" + }, + "DatabasePort":{ + "type":"integer", + "max":65535, + "min":0 + }, + "DatabaseSnapshotInfo":{ + "type":"structure", + "required":[ + "Id", + "Table", + "RequestTimestamp", + "RequestedBy", + "Status" + ], + "members":{ + "Id":{ + "shape":"NonEmptyStringWithoutWhitespace", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "Table":{ + "shape":"DatabaseTableName", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "RequestTimestamp":{ + "shape":"Timestamp", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "RequestedBy":{ + "shape":"SnapshotRequestedBy", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "Status":{ + "shape":"SnapshotStatus", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "FailureDescription":{"shape":"FailureDescription"} + }, + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "DatabaseSnapshotInfoList":{ + "type":"list", + "member":{"shape":"DatabaseSnapshotInfo"} + }, + "DatabaseSourceAuthenticationConfiguration":{ + "type":"structure", + "required":["SecretsManagerConfiguration"], + "members":{ + "SecretsManagerConfiguration":{"shape":"SecretsManagerConfiguration"} + }, + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "DatabaseSourceConfiguration":{ + "type":"structure", + "required":[ + "Type", + "Endpoint", + "Port", + "Databases", + "Tables", + "SnapshotWatermarkTable", + "DatabaseSourceAuthenticationConfiguration", + "DatabaseSourceVPCConfiguration" + ], + "members":{ + "Type":{ + "shape":"DatabaseType", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "Endpoint":{ + "shape":"DatabaseEndpoint", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "Port":{ + "shape":"DatabasePort", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "SSLMode":{ + "shape":"SSLMode", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "Databases":{ + "shape":"DatabaseList", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "Tables":{ + "shape":"DatabaseTableList", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "Columns":{ + "shape":"DatabaseColumnList", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "SurrogateKeys":{ + "shape":"DatabaseSurrogateKeyList", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "SnapshotWatermarkTable":{ + "shape":"DatabaseTableName", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "DatabaseSourceAuthenticationConfiguration":{ + "shape":"DatabaseSourceAuthenticationConfiguration", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "DatabaseSourceVPCConfiguration":{ + "shape":"DatabaseSourceVPCConfiguration", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "DatabaseSourceDescription":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"DatabaseType", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "Endpoint":{ + "shape":"DatabaseEndpoint", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "Port":{ + "shape":"DatabasePort", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "SSLMode":{ + "shape":"SSLMode", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "Databases":{ + "shape":"DatabaseList", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "Tables":{ + "shape":"DatabaseTableList", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "Columns":{ + "shape":"DatabaseColumnList", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "SurrogateKeys":{ + "shape":"DatabaseColumnIncludeOrExcludeList", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "SnapshotWatermarkTable":{ + "shape":"DatabaseTableName", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "SnapshotInfo":{ + "shape":"DatabaseSnapshotInfoList", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "DatabaseSourceAuthenticationConfiguration":{ + "shape":"DatabaseSourceAuthenticationConfiguration", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "DatabaseSourceVPCConfiguration":{ + "shape":"DatabaseSourceVPCConfiguration", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "DatabaseSourceVPCConfiguration":{ + "type":"structure", + "required":["VpcEndpointServiceName"], + "members":{ + "VpcEndpointServiceName":{ + "shape":"VpcEndpointServiceName", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "DatabaseSurrogateKeyList":{ + "type":"list", + "member":{"shape":"NonEmptyStringWithoutWhitespace"} + }, + "DatabaseTableIncludeOrExcludeList":{ + "type":"list", + "member":{"shape":"DatabaseTableName"} + }, + "DatabaseTableList":{ + "type":"structure", + "members":{ + "Include":{ + "shape":"DatabaseTableIncludeOrExcludeList", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "Exclude":{ + "shape":"DatabaseTableIncludeOrExcludeList", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "DatabaseTableName":{ + "type":"string", + "max":129, + "min":1, + "pattern":"[\\u0001-\\uFFFF]*" + }, + "DatabaseType":{ + "type":"string", + "enum":[ + "MySQL", + "PostgreSQL" + ] + }, "DefaultDocumentIdFormat":{ "type":"string", "enum":[ @@ -872,11 +1146,11 @@ "members":{ "DeliveryStreamName":{ "shape":"DeliveryStreamName", - "documentation":"

    The name of the delivery stream.

    " + "documentation":"

    The name of the Firehose stream.

    " }, "AllowForceDelete":{ "shape":"BooleanObject", - "documentation":"

    Set this to true if you want to delete the delivery stream even if Firehose is unable to retire the grant for the CMK. Firehose might be unable to retire the grant due to a customer error, such as when the CMK or the grant are in an invalid state. If you force deletion, you can then use the RevokeGrant operation to revoke the grant you gave to Firehose. If a failure to retire the grant happens due to an Amazon Web Services KMS issue, Firehose keeps retrying the delete operation.

    The default value is false.

    " + "documentation":"

    Set this to true if you want to delete the Firehose stream even if Firehose is unable to retire the grant for the CMK. Firehose might be unable to retire the grant due to a customer error, such as when the CMK or the grant are in an invalid state. If you force deletion, you can then use the RevokeGrant operation to revoke the grant you gave to Firehose. If a failure to retire the grant happens due to an Amazon Web Services KMS issue, Firehose keeps retrying the delete operation.

    The default value is false.

    " } } }, @@ -890,7 +1164,7 @@ "type":"string", "max":512, "min":1, - "pattern":"arn:.*" + "pattern":"arn:.*:firehose:[a-zA-Z0-9\\-]+:\\d{12}:deliverystream/[a-zA-Z0-9._-]+" }, "DeliveryStreamDescription":{ "type":"structure", @@ -906,15 +1180,15 @@ "members":{ "DeliveryStreamName":{ "shape":"DeliveryStreamName", - "documentation":"

    The name of the delivery stream.

    " + "documentation":"

    The name of the Firehose stream.

    " }, "DeliveryStreamARN":{ "shape":"DeliveryStreamARN", - "documentation":"

    The Amazon Resource Name (ARN) of the delivery stream. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the Firehose stream. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

    " }, "DeliveryStreamStatus":{ "shape":"DeliveryStreamStatus", - "documentation":"

    The status of the delivery stream. If the status of a delivery stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it.

    " + "documentation":"

    The status of the Firehose stream. If the status of a Firehose stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it.

    " }, "FailureDescription":{ "shape":"FailureDescription", @@ -922,23 +1196,23 @@ }, "DeliveryStreamEncryptionConfiguration":{ "shape":"DeliveryStreamEncryptionConfiguration", - "documentation":"

    Indicates the server-side encryption (SSE) status for the delivery stream.

    " + "documentation":"

    Indicates the server-side encryption (SSE) status for the Firehose stream.

    " }, "DeliveryStreamType":{ "shape":"DeliveryStreamType", - "documentation":"

    The delivery stream type. This can be one of the following values:

    • DirectPut: Provider applications access the delivery stream directly.

    • KinesisStreamAsSource: The delivery stream uses a Kinesis data stream as a source.

    " + "documentation":"

    The Firehose stream type. This can be one of the following values:

    • DirectPut: Provider applications access the Firehose stream directly.

    • KinesisStreamAsSource: The Firehose stream uses a Kinesis data stream as a source.

    " }, "VersionId":{ "shape":"DeliveryStreamVersionId", - "documentation":"

    Each time the destination is updated for a delivery stream, the version ID is changed, and the current version ID is required when updating the destination. This is so that the service knows it is applying the changes to the correct version of the delivery stream.

    " + "documentation":"

    Each time the destination is updated for a Firehose stream, the version ID is changed, and the current version ID is required when updating the destination. This is so that the service knows it is applying the changes to the correct version of the Firehose stream.

    " }, "CreateTimestamp":{ "shape":"Timestamp", - "documentation":"

    The date and time that the delivery stream was created.

    " + "documentation":"

    The date and time that the Firehose stream was created.

    " }, "LastUpdateTimestamp":{ "shape":"Timestamp", - "documentation":"

    The date and time that the delivery stream was last updated.

    " + "documentation":"

    The date and time that the Firehose stream was last updated.

    " }, "Source":{ "shape":"SourceDescription", @@ -953,7 +1227,7 @@ "documentation":"

    Indicates whether there are more destinations available to list.

    " } }, - "documentation":"

    Contains information about a delivery stream.

    " + "documentation":"

    Contains information about a Firehose stream.

    " }, "DeliveryStreamEncryptionConfiguration":{ "type":"structure", @@ -968,7 +1242,7 @@ }, "Status":{ "shape":"DeliveryStreamEncryptionStatus", - "documentation":"

    This is the server-side encryption (SSE) status for the delivery stream. For a full description of the different values of this status, see StartDeliveryStreamEncryption and StopDeliveryStreamEncryption. If this status is ENABLING_FAILED or DISABLING_FAILED, it is the status of the most recent attempt to enable or disable SSE, respectively.

    " + "documentation":"

    This is the server-side encryption (SSE) status for the Firehose stream. For a full description of the different values of this status, see StartDeliveryStreamEncryption and StopDeliveryStreamEncryption. If this status is ENABLING_FAILED or DISABLING_FAILED, it is the status of the most recent attempt to enable or disable SSE, respectively.

    " }, "FailureDescription":{ "shape":"FailureDescription", @@ -987,7 +1261,7 @@ }, "KeyType":{ "shape":"KeyType", - "documentation":"

    Indicates the type of customer master key (CMK) to use for encryption. The default setting is Amazon Web Services_OWNED_CMK. For more information about CMKs, see Customer Master Keys (CMKs). When you invoke CreateDeliveryStream or StartDeliveryStreamEncryption with KeyType set to CUSTOMER_MANAGED_CMK, Firehose invokes the Amazon KMS operation CreateGrant to create a grant that allows the Firehose service to use the customer managed CMK to perform encryption and decryption. Firehose manages that grant.

    When you invoke StartDeliveryStreamEncryption to change the CMK for a delivery stream that is encrypted with a customer managed CMK, Firehose schedules the grant it had on the old CMK for retirement.

    You can use a CMK of type CUSTOMER_MANAGED_CMK to encrypt up to 500 delivery streams. If a CreateDeliveryStream or StartDeliveryStreamEncryption operation exceeds this limit, Firehose throws a LimitExceededException.

    To encrypt your delivery stream, use symmetric CMKs. Firehose doesn't support asymmetric CMKs. For information about symmetric and asymmetric CMKs, see About Symmetric and Asymmetric CMKs in the Amazon Web Services Key Management Service developer guide.

    " + "documentation":"

    Indicates the type of customer master key (CMK) to use for encryption. The default setting is Amazon Web Services_OWNED_CMK. For more information about CMKs, see Customer Master Keys (CMKs). When you invoke CreateDeliveryStream or StartDeliveryStreamEncryption with KeyType set to CUSTOMER_MANAGED_CMK, Firehose invokes the Amazon KMS operation CreateGrant to create a grant that allows the Firehose service to use the customer managed CMK to perform encryption and decryption. Firehose manages that grant.

    When you invoke StartDeliveryStreamEncryption to change the CMK for a Firehose stream that is encrypted with a customer managed CMK, Firehose schedules the grant it had on the old CMK for retirement.

    You can use a CMK of type CUSTOMER_MANAGED_CMK to encrypt up to 500 Firehose streams. If a CreateDeliveryStream or StartDeliveryStreamEncryption operation exceeds this limit, Firehose throws a LimitExceededException.

    To encrypt your Firehose stream, use symmetric CMKs. Firehose doesn't support asymmetric CMKs. For information about symmetric and asymmetric CMKs, see About Symmetric and Asymmetric CMKs in the Amazon Web Services Key Management Service developer guide.

    " } }, "documentation":"

    Specifies the type and Amazon Resource Name (ARN) of the CMK to use for Server-Side Encryption (SSE).

    " @@ -1006,6 +1280,8 @@ "DeliveryStreamFailureType":{ "type":"string", "enum":[ + "VPC_ENDPOINT_SERVICE_NAME_NOT_FOUND", + "VPC_INTERFACE_ENDPOINT_SERVICE_ACCESS_DENIED", "RETIRE_KMS_GRANT_FAILED", "CREATE_KMS_GRANT_FAILED", "KMS_ACCESS_DENIED", @@ -1048,7 +1324,8 @@ "enum":[ "DirectPut", "KinesisStreamAsSource", - "MSKAsSource" + "MSKAsSource", + "DatabaseAsSource" ] }, "DeliveryStreamVersionId":{ @@ -1063,15 +1340,15 @@ "members":{ "DeliveryStreamName":{ "shape":"DeliveryStreamName", - "documentation":"

    The name of the delivery stream.

    " + "documentation":"

    The name of the Firehose stream.

    " }, "Limit":{ "shape":"DescribeDeliveryStreamInputLimit", - "documentation":"

    The limit on the number of destinations to return. You can have one destination per delivery stream.

    " + "documentation":"

    The limit on the number of destinations to return. You can have one destination per Firehose stream.

    " }, "ExclusiveStartDestinationId":{ "shape":"DestinationId", - "documentation":"

    The ID of the destination to start returning the destination information. Firehose supports one destination per delivery stream.

    " + "documentation":"

    The ID of the destination to start returning the destination information. Firehose supports one destination per Firehose stream.

    " } } }, @@ -1086,7 +1363,7 @@ "members":{ "DeliveryStreamDescription":{ "shape":"DeliveryStreamDescription", - "documentation":"

    Information about the delivery stream.

    " + "documentation":"

    Information about the Firehose stream.

    " } } }, @@ -1150,10 +1427,10 @@ }, "IcebergDestinationDescription":{ "shape":"IcebergDestinationDescription", - "documentation":"

    Describes a destination in Apache Iceberg Tables.

    Amazon Data Firehose is in preview release and is subject to change.

    " + "documentation":"

    Describes a destination in Apache Iceberg Tables.

    " } }, - "documentation":"

    Describes the destination for a delivery stream.

    " + "documentation":"

    Describes the destination for a Firehose stream.

    " }, "DestinationDescriptionList":{ "type":"list", @@ -1173,23 +1450,27 @@ ], "members":{ "DestinationTableName":{ - "shape":"NonEmptyStringWithoutWhitespace", - "documentation":"

    Specifies the name of the Apache Iceberg Table.

    Amazon Data Firehose is in preview release and is subject to change.

    " + "shape":"StringWithLettersDigitsUnderscoresDots", + "documentation":"

    Specifies the name of the Apache Iceberg Table.

    " }, "DestinationDatabaseName":{ - "shape":"NonEmptyStringWithoutWhitespace", - "documentation":"

    The name of the Apache Iceberg database.

    Amazon Data Firehose is in preview release and is subject to change.

    " + "shape":"StringWithLettersDigitsUnderscoresDots", + "documentation":"

    The name of the Apache Iceberg database.

    " }, "UniqueKeys":{ "shape":"ListOfNonEmptyStringsWithoutWhitespace", - "documentation":"

    A list of unique keys for a given Apache Iceberg table. Firehose will use these for running Create/Update/Delete operations on the given Iceberg table.

    Amazon Data Firehose is in preview release and is subject to change.

    " + "documentation":"

    A list of unique keys for a given Apache Iceberg table. Firehose will use these for running Create, Update, or Delete operations on the given Iceberg table.

    " + }, + "PartitionSpec":{ + "shape":"PartitionSpec", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " }, "S3ErrorOutputPrefix":{ "shape":"ErrorOutputPrefix", - "documentation":"

    The table specific S3 error output prefix. All the errors that occurred while delivering to this table will be prefixed with this value in S3 destination.

    Amazon Data Firehose is in preview release and is subject to change.

    " + "documentation":"

    The table specific S3 error output prefix. All the errors that occurred while delivering to this table will be prefixed with this value in S3 destination.

    " } }, - "documentation":"

    Describes the configuration of a destination in Apache Iceberg Tables.

    Amazon Data Firehose is in preview release and is subject to change.

    " + "documentation":"

    Describes the configuration of a destination in Apache Iceberg Tables.

    " }, "DestinationTableConfigurationList":{ "type":"list", @@ -1215,7 +1496,7 @@ }, "Enabled":{ "shape":"BooleanObject", - "documentation":"

    Specifies that the dynamic partitioning is enabled for this Firehose delivery stream.

    " + "documentation":"

    Specifies that the dynamic partitioning is enabled for this Firehose stream.

    " } }, "documentation":"

    The configuration of the dynamic partitioning mechanism that creates smaller data sets from the streaming data by partitioning it based on partition keys. Currently, dynamic partitioning is only supported for Amazon S3 destinations.

    " @@ -1229,7 +1510,7 @@ }, "SizeInMBs":{ "shape":"ElasticsearchBufferingSizeInMBs", - "documentation":"

    Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.

    We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.

    " + "documentation":"

    Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.

    We recommend setting this parameter to a value greater than the amount of data you typically ingest into the Firehose stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.

    " } }, "documentation":"

    Describes the buffering to perform before delivering data to the Amazon ES destination.

    " @@ -1292,7 +1573,7 @@ }, "S3BackupMode":{ "shape":"ElasticsearchS3BackupMode", - "documentation":"

    Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix. For more information, see Amazon S3 Backup for the Amazon ES Destination. Default value is FailedDocumentsOnly.

    You can't change this backup mode after you create the delivery stream.

    " + "documentation":"

    Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix. For more information, see Amazon S3 Backup for the Amazon ES Destination. Default value is FailedDocumentsOnly.

    You can't change this backup mode after you create the Firehose stream.

    " }, "S3Configuration":{ "shape":"S3DestinationConfiguration", @@ -1304,7 +1585,7 @@ }, "CloudWatchLoggingOptions":{ "shape":"CloudWatchLoggingOptions", - "documentation":"

    The Amazon CloudWatch logging options for your delivery stream.

    " + "documentation":"

    The Amazon CloudWatch logging options for your Firehose stream.

    " }, "VpcConfiguration":{ "shape":"VpcConfiguration", @@ -1400,7 +1681,7 @@ }, "TypeName":{ "shape":"ElasticsearchTypeName", - "documentation":"

    The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Firehose returns an error during runtime.

    If you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery stream, Firehose still delivers data to Elasticsearch with the old index name and type name. If you want to update your delivery stream with a new index name, provide an empty string for TypeName.

    " + "documentation":"

    The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Firehose returns an error during runtime.

    If you upgrade Elasticsearch from 6.x to 7.x and don’t update your Firehose stream, Firehose still delivers data to Elasticsearch with the old index name and type name. If you want to update your Firehose stream with a new index name, provide an empty string for TypeName.

    " }, "IndexRotationPeriod":{ "shape":"ElasticsearchIndexRotationPeriod", @@ -1424,7 +1705,7 @@ }, "CloudWatchLoggingOptions":{ "shape":"CloudWatchLoggingOptions", - "documentation":"

    The CloudWatch logging options for your delivery stream.

    " + "documentation":"

    The CloudWatch logging options for your Firehose stream.

    " }, "DocumentIdOptions":{ "shape":"DocumentIdOptions", @@ -1437,7 +1718,7 @@ "type":"string", "max":512, "min":1, - "pattern":"arn:.*" + "pattern":"arn:.*:es:[a-zA-Z0-9\\-]+:\\d{12}:domain/[a-z][-0-9a-z]{2,27}" }, "ElasticsearchIndexName":{ "type":"string", @@ -1542,7 +1823,7 @@ }, "CloudWatchLoggingOptions":{ "shape":"CloudWatchLoggingOptions", - "documentation":"

    The Amazon CloudWatch logging options for your delivery stream.

    " + "documentation":"

    The Amazon CloudWatch logging options for your Firehose stream.

    " }, "ProcessingConfiguration":{ "shape":"ProcessingConfiguration", @@ -1550,7 +1831,7 @@ }, "S3BackupMode":{ "shape":"S3BackupMode", - "documentation":"

    The Amazon S3 backup mode. After you create a delivery stream, you can update it to enable Amazon S3 backup if it is disabled. If backup is enabled, you can't update the delivery stream to disable it.

    " + "documentation":"

    The Amazon S3 backup mode. After you create a Firehose stream, you can update it to enable Amazon S3 backup if it is disabled. If backup is enabled, you can't update the Firehose stream to disable it.

    " }, "S3BackupConfiguration":{ "shape":"S3DestinationConfiguration", @@ -1615,7 +1896,7 @@ }, "CloudWatchLoggingOptions":{ "shape":"CloudWatchLoggingOptions", - "documentation":"

    The Amazon CloudWatch logging options for your delivery stream.

    " + "documentation":"

    The Amazon CloudWatch logging options for your Firehose stream.

    " }, "ProcessingConfiguration":{ "shape":"ProcessingConfiguration", @@ -1681,7 +1962,7 @@ }, "CloudWatchLoggingOptions":{ "shape":"CloudWatchLoggingOptions", - "documentation":"

    The Amazon CloudWatch logging options for your delivery stream.

    " + "documentation":"

    The Amazon CloudWatch logging options for your Firehose stream.

    " }, "ProcessingConfiguration":{ "shape":"ProcessingConfiguration", @@ -1689,7 +1970,7 @@ }, "S3BackupMode":{ "shape":"S3BackupMode", - "documentation":"

    You can update a delivery stream to enable Amazon S3 backup if it is disabled. If backup is enabled, you can't update the delivery stream to disable it.

    " + "documentation":"

    You can update a Firehose stream to enable Amazon S3 backup if it is disabled. If backup is enabled, you can't update the Firehose stream to disable it.

    " }, "S3BackupUpdate":{ "shape":"S3DestinationUpdate", @@ -1742,7 +2023,7 @@ "type":"string", "max":512, "min":1, - "pattern":"arn:.*" + "pattern":"arn:.*:glue:.*:\\d{12}:catalog" }, "HECAcknowledgmentTimeoutInSeconds":{ "type":"integer", @@ -1804,7 +2085,7 @@ "members":{ "SizeInMBs":{ "shape":"HttpEndpointBufferingSizeInMBs", - "documentation":"

    Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.

    We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.

    " + "documentation":"

    Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.

    We recommend setting this parameter to a value greater than the amount of data you typically ingest into the Firehose stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.

    " }, "IntervalInSeconds":{ "shape":"HttpEndpointBufferingIntervalInSeconds", @@ -2054,81 +2335,105 @@ "members":{ "DestinationTableConfigurationList":{ "shape":"DestinationTableConfigurationList", - "documentation":"

    Provides a list of DestinationTableConfigurations which Firehose uses to deliver data to Apache Iceberg tables.

    Amazon Data Firehose is in preview release and is subject to change.

    " + "documentation":"

    Provides a list of DestinationTableConfigurations which Firehose uses to deliver data to Apache Iceberg Tables. Firehose will write data with insert if table specific configuration is not provided here.

    " + }, + "SchemaEvolutionConfiguration":{ + "shape":"SchemaEvolutionConfiguration", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "TableCreationConfiguration":{ + "shape":"TableCreationConfiguration", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " }, "BufferingHints":{"shape":"BufferingHints"}, "CloudWatchLoggingOptions":{"shape":"CloudWatchLoggingOptions"}, "ProcessingConfiguration":{"shape":"ProcessingConfiguration"}, "S3BackupMode":{ "shape":"IcebergS3BackupMode", - "documentation":"

    Describes how Firehose will backup records. Currently,Firehose only supports FailedDataOnly for preview.

    Amazon Data Firehose is in preview release and is subject to change.

    " + "documentation":"

    Describes how Firehose will backup records. Currently, S3 backup only supports FailedDataOnly.

    " }, "RetryOptions":{"shape":"RetryOptions"}, "RoleARN":{ "shape":"RoleARN", - "documentation":"

    The Amazon Resource Name (ARN) of the Apache Iceberg tables role.

    Amazon Data Firehose is in preview release and is subject to change.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling Apache Iceberg Tables.

    " }, "CatalogConfiguration":{ "shape":"CatalogConfiguration", - "documentation":"

    Configuration describing where the destination Apache Iceberg Tables are persisted.

    Amazon Data Firehose is in preview release and is subject to change.

    " + "documentation":"

    Configuration describing where the destination Apache Iceberg Tables are persisted.

    " }, "S3Configuration":{"shape":"S3DestinationConfiguration"} }, - "documentation":"

    Specifies the destination configure settings for Apache Iceberg Table.

    Amazon Data Firehose is in preview release and is subject to change.

    " + "documentation":"

    Specifies the destination configure settings for Apache Iceberg Table.

    " }, "IcebergDestinationDescription":{ "type":"structure", "members":{ "DestinationTableConfigurationList":{ "shape":"DestinationTableConfigurationList", - "documentation":"

    Provides a list of DestinationTableConfigurations which Firehose uses to deliver data to Apache Iceberg tables.

    Amazon Data Firehose is in preview release and is subject to change.

    " + "documentation":"

    Provides a list of DestinationTableConfigurations which Firehose uses to deliver data to Apache Iceberg Tables. Firehose will write data with insert if table specific configuration is not provided here.

    " + }, + "SchemaEvolutionConfiguration":{ + "shape":"SchemaEvolutionConfiguration", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "TableCreationConfiguration":{ + "shape":"TableCreationConfiguration", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " }, "BufferingHints":{"shape":"BufferingHints"}, "CloudWatchLoggingOptions":{"shape":"CloudWatchLoggingOptions"}, "ProcessingConfiguration":{"shape":"ProcessingConfiguration"}, "S3BackupMode":{ "shape":"IcebergS3BackupMode", - "documentation":"

    Describes how Firehose will backup records. Currently,Firehose only supports FailedDataOnly for preview.

    Amazon Data Firehose is in preview release and is subject to change.

    " + "documentation":"

    Describes how Firehose will backup records. Currently, Firehose only supports FailedDataOnly.

    " }, "RetryOptions":{"shape":"RetryOptions"}, "RoleARN":{ "shape":"RoleARN", - "documentation":"

    The Amazon Resource Name (ARN) of the Apache Iceberg Tables role.

    Amazon Data Firehose is in preview release and is subject to change.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling Apache Iceberg Tables.

    " }, "CatalogConfiguration":{ "shape":"CatalogConfiguration", - "documentation":"

    Configuration describing where the destination Iceberg tables are persisted.

    Amazon Data Firehose is in preview release and is subject to change.

    " + "documentation":"

    Configuration describing where the destination Iceberg tables are persisted.

    " }, "S3DestinationDescription":{"shape":"S3DestinationDescription"} }, - "documentation":"

    Describes a destination in Apache Iceberg Tables.

    Amazon Data Firehose is in preview release and is subject to change.

    " + "documentation":"

    Describes a destination in Apache Iceberg Tables.

    " }, "IcebergDestinationUpdate":{ "type":"structure", "members":{ "DestinationTableConfigurationList":{ "shape":"DestinationTableConfigurationList", - "documentation":"

    Provides a list of DestinationTableConfigurations which Firehose uses to deliver data to Apache Iceberg tables.

    Amazon Data Firehose is in preview release and is subject to change.

    " + "documentation":"

    Provides a list of DestinationTableConfigurations which Firehose uses to deliver data to Apache Iceberg Tables. Firehose will write data with insert if table specific configuration is not provided here.

    " + }, + "SchemaEvolutionConfiguration":{ + "shape":"SchemaEvolutionConfiguration", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "TableCreationConfiguration":{ + "shape":"TableCreationConfiguration", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " }, "BufferingHints":{"shape":"BufferingHints"}, "CloudWatchLoggingOptions":{"shape":"CloudWatchLoggingOptions"}, "ProcessingConfiguration":{"shape":"ProcessingConfiguration"}, "S3BackupMode":{ "shape":"IcebergS3BackupMode", - "documentation":"

    Describes how Firehose will backup records. Currently,Firehose only supports FailedDataOnly for preview.

    Amazon Data Firehose is in preview release and is subject to change.

    " + "documentation":"

    Describes how Firehose will backup records. Currently, Firehose only supports FailedDataOnly.

    " }, "RetryOptions":{"shape":"RetryOptions"}, "RoleARN":{ "shape":"RoleARN", - "documentation":"

    The Amazon Resource Name (ARN) of the Apache Iceberg Tables role.

    Amazon Data Firehose is in preview release and is subject to change.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling Apache Iceberg Tables.

    " }, "CatalogConfiguration":{ "shape":"CatalogConfiguration", - "documentation":"

    Configuration describing where the destination Iceberg tables are persisted.

    Amazon Data Firehose is in preview release and is subject to change.

    " + "documentation":"

    Configuration describing where the destination Iceberg tables are persisted.

    " }, "S3Configuration":{"shape":"S3DestinationConfiguration"} }, - "documentation":"

    Describes an update for a destination in Apache Iceberg Tables.

    Amazon Data Firehose is in preview release and is subject to change.

    " + "documentation":"

    Describes an update for a destination in Apache Iceberg Tables.

    " }, "IcebergS3BackupMode":{ "type":"string", @@ -2169,7 +2474,7 @@ "code":{"shape":"ErrorCode"}, "message":{"shape":"ErrorMessage"} }, - "documentation":"

    Firehose throws this exception when an attempt to put records or to start or stop delivery stream encryption fails. This happens when the KMS service throws one of the following exception types: AccessDeniedException, InvalidStateException, DisabledException, or NotFoundException.

    ", + "documentation":"

    Firehose throws this exception when an attempt to put records or to start or stop Firehose stream encryption fails. This happens when the KMS service throws one of the following exception types: AccessDeniedException, InvalidStateException, DisabledException, or NotFoundException.

    ", "exception":true }, "InvalidSourceException":{ @@ -2203,7 +2508,7 @@ "type":"string", "max":512, "min":1, - "pattern":"arn:.*" + "pattern":"arn:.*:kinesis:[a-zA-Z0-9\\-]+:\\d{12}:stream/[a-zA-Z0-9_.-]+" }, "KinesisStreamSourceConfiguration":{ "type":"structure", @@ -2221,7 +2526,7 @@ "documentation":"

    The ARN of the role that provides access to the source Kinesis data stream. For more information, see Amazon Web Services Identity and Access Management (IAM) ARN Format.

    " } }, - "documentation":"

    The stream and role Amazon Resource Names (ARNs) for a Kinesis data stream used as the source for a delivery stream.

    " + "documentation":"

    The stream and role Amazon Resource Names (ARNs) for a Kinesis data stream used as the source for a Firehose stream.

    " }, "KinesisStreamSourceDescription":{ "type":"structure", @@ -2239,7 +2544,7 @@ "documentation":"

    Firehose starts retrieving records from the Kinesis data stream starting with this timestamp.

    " } }, - "documentation":"

    Details about a Kinesis data stream used as the source for a Firehose delivery stream.

    " + "documentation":"

    Details about a Kinesis data stream used as the source for a Firehose stream.

    " }, "LimitExceededException":{ "type":"structure", @@ -2257,15 +2562,15 @@ "members":{ "Limit":{ "shape":"ListDeliveryStreamsInputLimit", - "documentation":"

    The maximum number of delivery streams to list. The default value is 10.

    " + "documentation":"

    The maximum number of Firehose streams to list. The default value is 10.

    " }, "DeliveryStreamType":{ "shape":"DeliveryStreamType", - "documentation":"

    The delivery stream type. This can be one of the following values:

    • DirectPut: Provider applications access the delivery stream directly.

    • KinesisStreamAsSource: The delivery stream uses a Kinesis data stream as a source.

    This parameter is optional. If this parameter is omitted, delivery streams of all types are returned.

    " + "documentation":"

    The Firehose stream type. This can be one of the following values:

    • DirectPut: Provider applications access the Firehose stream directly.

    • KinesisStreamAsSource: The Firehose stream uses a Kinesis data stream as a source.

    This parameter is optional. If this parameter is omitted, Firehose streams of all types are returned.

    " }, "ExclusiveStartDeliveryStreamName":{ "shape":"DeliveryStreamName", - "documentation":"

    The list of delivery streams returned by this call to ListDeliveryStreams will start with the delivery stream whose name comes alphabetically immediately after the name you specify in ExclusiveStartDeliveryStreamName.

    " + "documentation":"

    The list of Firehose streams returned by this call to ListDeliveryStreams will start with the Firehose stream whose name comes alphabetically immediately after the name you specify in ExclusiveStartDeliveryStreamName.

    " } } }, @@ -2283,11 +2588,11 @@ "members":{ "DeliveryStreamNames":{ "shape":"DeliveryStreamNameList", - "documentation":"

    The names of the delivery streams.

    " + "documentation":"

    The names of the Firehose streams.

    " }, "HasMoreDeliveryStreams":{ "shape":"BooleanObject", - "documentation":"

    Indicates whether there are more delivery streams available to list.

    " + "documentation":"

    Indicates whether there are more Firehose streams available to list.

    " } } }, @@ -2305,7 +2610,7 @@ "members":{ "DeliveryStreamName":{ "shape":"DeliveryStreamName", - "documentation":"

    The name of the delivery stream whose tags you want to list.

    " + "documentation":"

    The name of the Firehose stream whose tags you want to list.

    " }, "ExclusiveStartTagKey":{ "shape":"TagKey", @@ -2313,7 +2618,7 @@ }, "Limit":{ "shape":"ListTagsForDeliveryStreamInputLimit", - "documentation":"

    The number of tags to return. If this number is less than the total number of tags associated with the delivery stream, HasMoreTags is set to true in the response. To list additional tags, set ExclusiveStartTagKey to the last key in the response.

    " + "documentation":"

    The number of tags to return. If this number is less than the total number of tags associated with the Firehose stream, HasMoreTags is set to true in the response. To list additional tags, set ExclusiveStartTagKey to the last key in the response.

    " } } }, @@ -2414,7 +2719,7 @@ "documentation":"

    The start date and time in UTC for the offset position within your MSK topic from where Firehose begins to read. By default, this is set to timestamp when Firehose becomes Active.

    If you want to create a Firehose stream with Earliest start position from SDK or CLI, you need to set the ReadFromTimestampUTC parameter to Epoch (1970-01-01T00:00:00Z).

    " } }, - "documentation":"

    Details about the Amazon MSK cluster used as the source for a Firehose delivery stream.

    " + "documentation":"

    Details about the Amazon MSK cluster used as the source for a Firehose stream.

    " }, "NoEncryptionConfig":{ "type":"string", @@ -2582,6 +2887,31 @@ "V2" ] }, + "PartitionField":{ + "type":"structure", + "required":["SourceName"], + "members":{ + "SourceName":{ + "shape":"NonEmptyStringWithoutWhitespace", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, + "PartitionFields":{ + "type":"list", + "member":{"shape":"PartitionField"} + }, + "PartitionSpec":{ + "type":"structure", + "members":{ + "Identity":{ + "shape":"PartitionFields", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, "Password":{ "type":"string", "max":512, @@ -2697,7 +3027,7 @@ "members":{ "DeliveryStreamName":{ "shape":"DeliveryStreamName", - "documentation":"

    The name of the delivery stream.

    " + "documentation":"

    The name of the Firehose stream.

    " }, "Records":{ "shape":"PutRecordBatchRequestEntryList", @@ -2748,7 +3078,7 @@ "documentation":"

    The error message for an individual record result.

    " } }, - "documentation":"

    Contains the result for an individual record from a PutRecordBatch request. If the record is successfully added to your delivery stream, it receives a record ID. If the record fails to be added to your delivery stream, the result includes an error code and an error message.

    " + "documentation":"

    Contains the result for an individual record from a PutRecordBatch request. If the record is successfully added to your Firehose stream, it receives a record ID. If the record fails to be added to your Firehose stream, the result includes an error code and an error message.

    " }, "PutRecordBatchResponseEntryList":{ "type":"list", @@ -2765,7 +3095,7 @@ "members":{ "DeliveryStreamName":{ "shape":"DeliveryStreamName", - "documentation":"

    The name of the delivery stream.

    " + "documentation":"

    The name of the Firehose stream.

    " }, "Record":{ "shape":"Record", @@ -2801,7 +3131,7 @@ "documentation":"

    The data blob, which is base64-encoded when the blob is serialized. The maximum size of the data blob, before base64-encoding, is 1,000 KiB.

    " } }, - "documentation":"

    The unit of data in a delivery stream.

    " + "documentation":"

    The unit of data in a Firehose stream.

    " }, "RedshiftDestinationConfiguration":{ "type":"structure", @@ -2846,7 +3176,7 @@ }, "S3BackupMode":{ "shape":"RedshiftS3BackupMode", - "documentation":"

    The Amazon S3 backup mode. After you create a delivery stream, you can update it to enable Amazon S3 backup if it is disabled. If backup is enabled, you can't update the delivery stream to disable it.

    " + "documentation":"

    The Amazon S3 backup mode. After you create a Firehose stream, you can update it to enable Amazon S3 backup if it is disabled. If backup is enabled, you can't update the Firehose stream to disable it.

    " }, "S3BackupConfiguration":{ "shape":"S3DestinationConfiguration", @@ -2854,7 +3184,7 @@ }, "CloudWatchLoggingOptions":{ "shape":"CloudWatchLoggingOptions", - "documentation":"

    The CloudWatch logging options for your delivery stream.

    " + "documentation":"

    The CloudWatch logging options for your Firehose stream.

    " }, "SecretsManagerConfiguration":{ "shape":"SecretsManagerConfiguration", @@ -2910,7 +3240,7 @@ }, "CloudWatchLoggingOptions":{ "shape":"CloudWatchLoggingOptions", - "documentation":"

    The Amazon CloudWatch logging options for your delivery stream.

    " + "documentation":"

    The Amazon CloudWatch logging options for your Firehose stream.

    " }, "SecretsManagerConfiguration":{ "shape":"SecretsManagerConfiguration", @@ -2956,7 +3286,7 @@ }, "S3BackupMode":{ "shape":"RedshiftS3BackupMode", - "documentation":"

    You can update a delivery stream to enable Amazon S3 backup if it is disabled. If backup is enabled, you can't update the delivery stream to disable it.

    " + "documentation":"

    You can update a Firehose stream to enable Amazon S3 backup if it is disabled. If backup is enabled, you can't update the Firehose stream to disable it.

    " }, "S3BackupUpdate":{ "shape":"S3DestinationUpdate", @@ -2964,7 +3294,7 @@ }, "CloudWatchLoggingOptions":{ "shape":"CloudWatchLoggingOptions", - "documentation":"

    The Amazon CloudWatch logging options for your delivery stream.

    " + "documentation":"

    The Amazon CloudWatch logging options for your Firehose stream.

    " }, "SecretsManagerConfiguration":{ "shape":"SecretsManagerConfiguration", @@ -3027,16 +3357,16 @@ "members":{ "DurationInSeconds":{ "shape":"RetryDurationInSeconds", - "documentation":"

    The period of time during which Firehose retries to deliver data to the specified Amazon S3 prefix.

    " + "documentation":"

    The period of time during which Firehose retries to deliver data to the specified destination.

    " } }, - "documentation":"

    The retry behavior in case Firehose is unable to deliver data to an Amazon S3 prefix.

    " + "documentation":"

    The retry behavior in case Firehose is unable to deliver data to a destination.

    " }, "RoleARN":{ "type":"string", "max":512, "min":1, - "pattern":"arn:.*" + "pattern":"arn:.*:iam::\\d{12}:role/[a-zA-Z_0-9+=,.@\\-_/]+" }, "S3BackupMode":{ "type":"string", @@ -3082,7 +3412,7 @@ }, "CloudWatchLoggingOptions":{ "shape":"CloudWatchLoggingOptions", - "documentation":"

    The CloudWatch logging options for your delivery stream.

    " + "documentation":"

    The CloudWatch logging options for your Firehose stream.

    " } }, "documentation":"

    Describes the configuration of a destination in Amazon S3.

    " @@ -3127,7 +3457,7 @@ }, "CloudWatchLoggingOptions":{ "shape":"CloudWatchLoggingOptions", - "documentation":"

    The Amazon CloudWatch logging options for your delivery stream.

    " + "documentation":"

    The Amazon CloudWatch logging options for your Firehose stream.

    " } }, "documentation":"

    Describes a destination in Amazon S3.

    " @@ -3165,11 +3495,18 @@ }, "CloudWatchLoggingOptions":{ "shape":"CloudWatchLoggingOptions", - "documentation":"

    The CloudWatch logging options for your delivery stream.

    " + "documentation":"

    The CloudWatch logging options for your Firehose stream.

    " } }, "documentation":"

    Describes an update for a destination in Amazon S3.

    " }, + "SSLMode":{ + "type":"string", + "enum":[ + "Disabled", + "Enabled" + ] + }, "SchemaConfiguration":{ "type":"structure", "members":{ @@ -3200,11 +3537,22 @@ }, "documentation":"

    Specifies the schema to which you want Firehose to configure your data before it writes it to Amazon S3. This parameter is required if Enabled is set to true.

    " }, + "SchemaEvolutionConfiguration":{ + "type":"structure", + "required":["Enabled"], + "members":{ + "Enabled":{ + "shape":"BooleanObject", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, "SecretARN":{ "type":"string", "max":2048, "min":1, - "pattern":"arn:.*" + "pattern":"arn:.*:secretsmanager:[a-zA-Z0-9\\-]+:\\d{12}:secret:[a-zA-Z0-9\\-/_+=.@]+" }, "SecretsManagerConfiguration":{ "type":"structure", @@ -3212,7 +3560,7 @@ "members":{ "SecretARN":{ "shape":"SecretARN", - "documentation":"

    The ARN of the secret that stores your credentials. It must be in the same region as the Firehose stream and the role. The secret ARN can reside in a different account than the delivery stream and role as Firehose supports cross-account secret access. This parameter is required when Enabled is set to True.

    " + "documentation":"

    The ARN of the secret that stores your credentials. It must be in the same region as the Firehose stream and the role. The secret ARN can reside in a different account than the Firehose stream and role as Firehose supports cross-account secret access. This parameter is required when Enabled is set to True.

    " }, "RoleARN":{ "shape":"RoleARN", @@ -3220,7 +3568,7 @@ }, "Enabled":{ "shape":"BooleanObject", - "documentation":"

    Specifies whether you want to use the the secrets manager feature. When set as True the secrets manager configuration overwrites the existing secrets in the destination configuration. When it's set to False Firehose falls back to the credentials in the destination configuration.

    " + "documentation":"

    Specifies whether you want to use the secrets manager feature. When set as True the secrets manager configuration overwrites the existing secrets in the destination configuration. When it's set to False Firehose falls back to the credentials in the destination configuration.

    " } }, "documentation":"

    The structure that defines how Firehose accesses the secret.

    " @@ -3253,7 +3601,7 @@ "documentation":"

    A message that provides information about the error.

    " } }, - "documentation":"

    The service is unavailable. Back off and retry the operation. If you continue to see the exception, throughput limits for the delivery stream may have been exceeded. For more information about limits and how to request an increase, see Amazon Firehose Limits.

    ", + "documentation":"

    The service is unavailable. Back off and retry the operation. If you continue to see the exception, throughput limits for the Firehose stream may have been exceeded. For more information about limits and how to request an increase, see Amazon Firehose Limits.

    ", "exception":true, "fault":true }, @@ -3262,6 +3610,21 @@ "max":128, "min":1 }, + "SnapshotRequestedBy":{ + "type":"string", + "enum":[ + "USER", + "FIREHOSE" + ] + }, + "SnapshotStatus":{ + "type":"string", + "enum":[ + "IN_PROGRESS", + "COMPLETE", + "SUSPENDED" + ] + }, "SnowflakeAccountUrl":{ "type":"string", "max":2048, @@ -3274,7 +3637,7 @@ "members":{ "SizeInMBs":{ "shape":"SnowflakeBufferingSizeInMBs", - "documentation":"

    Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 1.

    " + "documentation":"

    Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 128.

    " }, "IntervalInSeconds":{ "shape":"SnowflakeBufferingIntervalInSeconds", @@ -3526,7 +3889,7 @@ }, "S3BackupMode":{ "shape":"SnowflakeS3BackupMode", - "documentation":"

    Choose an S3 backup mode

    " + "documentation":"

    Choose an S3 backup mode. Once you set the mode as AllData, you can not change it to FailedDataOnly.

    " }, "S3Update":{"shape":"S3DestinationUpdate"}, "SecretsManagerConfiguration":{ @@ -3647,9 +4010,13 @@ "MSKSourceDescription":{ "shape":"MSKSourceDescription", "documentation":"

    The configuration description for the Amazon MSK cluster to be used as the source for a delivery stream.

    " + }, + "DatabaseSourceDescription":{ + "shape":"DatabaseSourceDescription", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " } }, - "documentation":"

    Details about a Kinesis data stream used as the source for a Firehose delivery stream.

    " + "documentation":"

    Details about a Kinesis data stream used as the source for a Firehose stream.

    " }, "SplunkBufferingHints":{ "type":"structure", @@ -3717,7 +4084,7 @@ }, "CloudWatchLoggingOptions":{ "shape":"CloudWatchLoggingOptions", - "documentation":"

    The Amazon CloudWatch logging options for your delivery stream.

    " + "documentation":"

    The Amazon CloudWatch logging options for your Firehose stream.

    " }, "BufferingHints":{ "shape":"SplunkBufferingHints", @@ -3767,7 +4134,7 @@ }, "CloudWatchLoggingOptions":{ "shape":"CloudWatchLoggingOptions", - "documentation":"

    The Amazon CloudWatch logging options for your delivery stream.

    " + "documentation":"

    The Amazon CloudWatch logging options for your Firehose stream.

    " }, "BufferingHints":{ "shape":"SplunkBufferingHints", @@ -3817,7 +4184,7 @@ }, "CloudWatchLoggingOptions":{ "shape":"CloudWatchLoggingOptions", - "documentation":"

    The Amazon CloudWatch logging options for your delivery stream.

    " + "documentation":"

    The Amazon CloudWatch logging options for your Firehose stream.

    " }, "BufferingHints":{ "shape":"SplunkBufferingHints", @@ -3858,7 +4225,7 @@ "members":{ "DeliveryStreamName":{ "shape":"DeliveryStreamName", - "documentation":"

    The name of the delivery stream for which you want to enable server-side encryption (SSE).

    " + "documentation":"

    The name of the Firehose stream for which you want to enable server-side encryption (SSE).

    " }, "DeliveryStreamEncryptionConfigurationInput":{ "shape":"DeliveryStreamEncryptionConfigurationInput", @@ -3877,7 +4244,7 @@ "members":{ "DeliveryStreamName":{ "shape":"DeliveryStreamName", - "documentation":"

    The name of the delivery stream for which you want to disable server-side encryption (SSE).

    " + "documentation":"

    The name of the Firehose stream for which you want to disable server-side encryption (SSE).

    " } } }, @@ -3886,12 +4253,29 @@ "members":{ } }, + "StringWithLettersDigitsUnderscoresDots":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[a-zA-Z0-9\\.\\_]+" + }, "SubnetIdList":{ "type":"list", "member":{"shape":"NonEmptyStringWithoutWhitespace"}, "max":16, "min":1 }, + "TableCreationConfiguration":{ + "type":"structure", + "required":["Enabled"], + "members":{ + "Enabled":{ + "shape":"BooleanObject", + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "documentation":"

    Amazon Data Firehose is in preview release and is subject to change.

    " + }, "Tag":{ "type":"structure", "required":["Key"], @@ -3905,7 +4289,7 @@ "documentation":"

    An optional string, which you can use to describe or define the tag. Maximum length: 256 characters. Valid characters: Unicode letters, digits, white space, _ . / = + - % @

    " } }, - "documentation":"

    Metadata that you can assign to a delivery stream, consisting of a key-value pair.

    " + "documentation":"

    Metadata that you can assign to a Firehose stream, consisting of a key-value pair.

    " }, "TagDeliveryStreamInput":{ "type":"structure", @@ -3916,7 +4300,7 @@ "members":{ "DeliveryStreamName":{ "shape":"DeliveryStreamName", - "documentation":"

    The name of the delivery stream to which you want to add the tags.

    " + "documentation":"

    The name of the Firehose stream to which you want to add the tags.

    " }, "Tags":{ "shape":"TagDeliveryStreamInputTagList", @@ -3969,7 +4353,7 @@ "members":{ "DeliveryStreamName":{ "shape":"DeliveryStreamName", - "documentation":"

    The name of the delivery stream.

    " + "documentation":"

    The name of the Firehose stream.

    " }, "TagKeys":{ "shape":"TagKeyList", @@ -3992,7 +4376,7 @@ "members":{ "DeliveryStreamName":{ "shape":"DeliveryStreamName", - "documentation":"

    The name of the delivery stream.

    " + "documentation":"

    The name of the Firehose stream.

    " }, "CurrentDeliveryStreamVersionId":{ "shape":"DeliveryStreamVersionId", @@ -4041,7 +4425,7 @@ }, "IcebergDestinationUpdate":{ "shape":"IcebergDestinationUpdate", - "documentation":"

    Describes an update for a destination in Apache Iceberg Tables.

    Amazon Data Firehose is in preview release and is subject to change.

    " + "documentation":"

    Describes an update for a destination in Apache Iceberg Tables.

    " } } }, @@ -4067,11 +4451,11 @@ "members":{ "SubnetIds":{ "shape":"SubnetIdList", - "documentation":"

    The IDs of the subnets that you want Firehose to use to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.

    The number of ENIs that Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Firehose can create up to three ENIs for this delivery stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.

    " + "documentation":"

    The IDs of the subnets that you want Firehose to use to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.

    The number of ENIs that Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Firehose can create up to three ENIs for this Firehose stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.

    " }, "RoleARN":{ "shape":"RoleARN", - "documentation":"

    The ARN of the IAM role that you want the delivery stream to use to create endpoints in the destination VPC. You can use your existing Firehose delivery role or you can specify a new role. In either case, make sure that the role trusts the Firehose service principal and that it grants the following permissions:

    • ec2:DescribeVpcs

    • ec2:DescribeVpcAttribute

    • ec2:DescribeSubnets

    • ec2:DescribeSecurityGroups

    • ec2:DescribeNetworkInterfaces

    • ec2:CreateNetworkInterface

    • ec2:CreateNetworkInterfacePermission

    • ec2:DeleteNetworkInterface

    When you specify subnets for delivering data to the destination in a private VPC, make sure you have enough number of free IP addresses in chosen subnets. If there is no available free IP address in a specified subnet, Firehose cannot create or add ENIs for the data delivery in the private VPC, and the delivery will be degraded or fail.

    " + "documentation":"

    The ARN of the IAM role that you want the Firehose stream to use to create endpoints in the destination VPC. You can use your existing Firehose delivery role or you can specify a new role. In either case, make sure that the role trusts the Firehose service principal and that it grants the following permissions:

    • ec2:DescribeVpcs

    • ec2:DescribeVpcAttribute

    • ec2:DescribeSubnets

    • ec2:DescribeSecurityGroups

    • ec2:DescribeNetworkInterfaces

    • ec2:CreateNetworkInterface

    • ec2:CreateNetworkInterfacePermission

    • ec2:DeleteNetworkInterface

    When you specify subnets for delivering data to the destination in a private VPC, make sure you have enough number of free IP addresses in chosen subnets. If there is no available free IP address in a specified subnet, Firehose cannot create or add ENIs for the data delivery in the private VPC, and the delivery will be degraded or fail.

    " }, "SecurityGroupIds":{ "shape":"SecurityGroupIdList", @@ -4091,15 +4475,15 @@ "members":{ "SubnetIds":{ "shape":"SubnetIdList", - "documentation":"

    The IDs of the subnets that Firehose uses to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.

    The number of ENIs that Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Firehose can create up to three ENIs for this delivery stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.

    " + "documentation":"

    The IDs of the subnets that Firehose uses to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.

    The number of ENIs that Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Firehose can create up to three ENIs for this Firehose stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.

    " }, "RoleARN":{ "shape":"RoleARN", - "documentation":"

    The ARN of the IAM role that the delivery stream uses to create endpoints in the destination VPC. You can use your existing Firehose delivery role or you can specify a new role. In either case, make sure that the role trusts the Firehose service principal and that it grants the following permissions:

    • ec2:DescribeVpcs

    • ec2:DescribeVpcAttribute

    • ec2:DescribeSubnets

    • ec2:DescribeSecurityGroups

    • ec2:DescribeNetworkInterfaces

    • ec2:CreateNetworkInterface

    • ec2:CreateNetworkInterfacePermission

    • ec2:DeleteNetworkInterface

    If you revoke these permissions after you create the delivery stream, Firehose can't scale out by creating more ENIs when necessary. You might therefore see a degradation in performance.

    " + "documentation":"

    The ARN of the IAM role that the Firehose stream uses to create endpoints in the destination VPC. You can use your existing Firehose delivery role or you can specify a new role. In either case, make sure that the role trusts the Firehose service principal and that it grants the following permissions:

    • ec2:DescribeVpcs

    • ec2:DescribeVpcAttribute

    • ec2:DescribeSubnets

    • ec2:DescribeSecurityGroups

    • ec2:DescribeNetworkInterfaces

    • ec2:CreateNetworkInterface

    • ec2:CreateNetworkInterfacePermission

    • ec2:DeleteNetworkInterface

    If you revoke these permissions after you create the Firehose stream, Firehose can't scale out by creating more ENIs when necessary. You might therefore see a degradation in performance.

    " }, "SecurityGroupIds":{ "shape":"SecurityGroupIdList", - "documentation":"

    The IDs of the security groups that Firehose uses when it creates ENIs in the VPC of the Amazon ES destination. You can use the same security group that the Amazon ES domain uses or different ones. If you specify different security groups, ensure that they allow outbound HTTPS traffic to the Amazon ES domain's security group. Also ensure that the Amazon ES domain's security group allows HTTPS traffic from the security groups specified here. If you use the same security group for both your delivery stream and the Amazon ES domain, make sure the security group inbound rule allows HTTPS traffic. For more information about security group rules, see Security group rules in the Amazon VPC documentation.

    " + "documentation":"

    The IDs of the security groups that Firehose uses when it creates ENIs in the VPC of the Amazon ES destination. You can use the same security group that the Amazon ES domain uses or different ones. If you specify different security groups, ensure that they allow outbound HTTPS traffic to the Amazon ES domain's security group. Also ensure that the Amazon ES domain's security group allows HTTPS traffic from the security groups specified here. If you use the same security group for both your Firehose stream and the Amazon ES domain, make sure the security group inbound rule allows HTTPS traffic. For more information about security group rules, see Security group rules in the Amazon VPC documentation.

    " }, "VpcId":{ "shape":"NonEmptyStringWithoutWhitespace", @@ -4107,6 +4491,18 @@ } }, "documentation":"

    The details of the VPC of the Amazon ES destination.

    " + }, + "VpcEndpointServiceName":{ + "type":"string", + "max":255, + "min":47, + "pattern":"([a-zA-Z0-9\\-\\_]+\\.){2,3}vpce\\.[a-zA-Z0-9\\-]*\\.vpce-svc\\-[a-zA-Z0-9\\-]{17}$" + }, + "WarehouseLocation":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"s3:\\/\\/.*" } }, "documentation":"Amazon Data Firehose

    Amazon Data Firehose was previously known as Amazon Kinesis Data Firehose.

    Amazon Data Firehose is a fully managed service that delivers real-time streaming data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon OpenSearch Service, Amazon Redshift, Splunk, and various other supported destinations.

    " diff --git a/tools/code-generation/api-descriptions/lambda-2015-03-31.normal.json b/tools/code-generation/api-descriptions/lambda-2015-03-31.normal.json index bb88177b62f..27494f9cece 100644 --- a/tools/code-generation/api-descriptions/lambda-2015-03-31.normal.json +++ b/tools/code-generation/api-descriptions/lambda-2015-03-31.normal.json @@ -1945,7 +1945,7 @@ }, "KMSKeyArn":{ "shape":"KMSKeyArn", - "documentation":"

    The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt your function's environment variables. When Lambda SnapStart is activated, Lambda also uses this key is to encrypt your function's snapshot. If you deploy your function using a container image, Lambda also uses this key to encrypt your function when it's deployed. Note that this is not the same key that's used to protect your container image in the Amazon Elastic Container Registry (Amazon ECR). If you don't provide a customer managed key, Lambda uses a default service key.

    " + "documentation":"

    The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt the following resources:

    • The function's environment variables.

    • The function's Lambda SnapStart snapshots.

    • When used with SourceKMSKeyArn, the unzipped version of the .zip deployment package that's used for function invocations. For more information, see Specifying a customer managed key for Lambda.

    • The optimized version of the container image that's used for function invocations. Note that this is not the same key that's used to protect your container image in the Amazon Elastic Container Registry (Amazon ECR). For more information, see Function lifecycle.

    If you don't provide a customer managed key, Lambda uses an Amazon Web Services owned key or an Amazon Web Services managed key.

    " }, "TracingConfig":{ "shape":"TracingConfig", @@ -2726,6 +2726,10 @@ "ImageUri":{ "shape":"String", "documentation":"

    URI of a container image in the Amazon ECR registry.

    " + }, + "SourceKMSKeyArn":{ + "shape":"KMSKeyArn", + "documentation":"

    The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt your function's .zip deployment package. If you don't provide a customer managed key, Lambda uses an Amazon Web Services owned key.

    " } }, "documentation":"

    The code for the Lambda function. You can either specify an object in Amazon S3, upload a .zip file archive deployment package directly, or specify the URI of a container image.

    " @@ -2748,6 +2752,10 @@ "ResolvedImageUri":{ "shape":"String", "documentation":"

    The resolved URI for the image.

    " + }, + "SourceKMSKeyArn":{ + "shape":"String", + "documentation":"

    The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt your function's .zip deployment package. If you don't provide a customer managed key, Lambda uses an Amazon Web Services owned key.

    " } }, "documentation":"

    Details about a function's deployment package.

    " @@ -2817,7 +2825,7 @@ }, "KMSKeyArn":{ "shape":"KMSKeyArn", - "documentation":"

    The KMS key that's used to encrypt the function's environment variables. When Lambda SnapStart is activated, this key is also used to encrypt the function's snapshot. This key is returned only if you've configured a customer managed key.

    " + "documentation":"

    The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt the following resources:

    • The function's environment variables.

    • The function's Lambda SnapStart snapshots.

    • When used with SourceKMSKeyArn, the unzipped version of the .zip deployment package that's used for function invocations. For more information, see Specifying a customer managed key for Lambda.

    • The optimized version of the container image that's used for function invocations. Note that this is not the same key that's used to protect your container image in the Amazon Elastic Container Registry (Amazon ECR). For more information, see Function lifecycle.

    If you don't provide a customer managed key, Lambda uses an Amazon Web Services owned key or an Amazon Web Services managed key.

    " }, "TracingConfig":{ "shape":"TracingConfigResponse", @@ -3229,7 +3237,7 @@ }, "Tags":{ "shape":"Tags", - "documentation":"

    The function's tags. Lambda returns tag data only if you have explicit allow permissions for lambda:ListTags.

    " + "documentation":"

    The function's tags. Lambda returns tag data only if you have explicit allow permissions for lambda:ListTags.

    " }, "TagsError":{ "shape":"TagsError", @@ -6116,6 +6124,10 @@ "Architectures":{ "shape":"ArchitecturesList", "documentation":"

    The instruction set architecture that the function supports. Enter a string array with one of the valid values (arm64 or x86_64). The default value is x86_64.

    " + }, + "SourceKMSKeyArn":{ + "shape":"KMSKeyArn", + "documentation":"

    The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt your function's .zip deployment package. If you don't provide a customer managed key, Lambda uses an Amazon Web Services managed key.

    " } } }, @@ -6167,7 +6179,7 @@ }, "KMSKeyArn":{ "shape":"KMSKeyArn", - "documentation":"

    The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt your function's environment variables. When Lambda SnapStart is activated, Lambda also uses this key is to encrypt your function's snapshot. If you deploy your function using a container image, Lambda also uses this key to encrypt your function when it's deployed. Note that this is not the same key that's used to protect your container image in the Amazon Elastic Container Registry (Amazon ECR). If you don't provide a customer managed key, Lambda uses a default service key.

    " + "documentation":"

    The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt the following resources:

    • The function's environment variables.

    • The function's Lambda SnapStart snapshots.

    • When used with SourceKMSKeyArn, the unzipped version of the .zip deployment package that's used for function invocations. For more information, see Specifying a customer managed key for Lambda.

    • The optimized version of the container image that's used for function invocations. Note that this is not the same key that's used to protect your container image in the Amazon Elastic Container Registry (Amazon ECR). For more information, see Function lifecycle.

    If you don't provide a customer managed key, Lambda uses an Amazon Web Services owned key or an Amazon Web Services managed key.

    " }, "TracingConfig":{ "shape":"TracingConfig", diff --git a/tools/code-generation/api-descriptions/pinpoint-sms-voice-v2-2022-03-31.normal.json b/tools/code-generation/api-descriptions/pinpoint-sms-voice-v2-2022-03-31.normal.json index 009a8a2914b..40cb33c2aef 100644 --- a/tools/code-generation/api-descriptions/pinpoint-sms-voice-v2-2022-03-31.normal.json +++ b/tools/code-generation/api-descriptions/pinpoint-sms-voice-v2-2022-03-31.normal.json @@ -2358,7 +2358,7 @@ }, "RegistrationStatus":{ "shape":"RegistrationStatus", - "documentation":"

    The status of the registration.

    • CREATED: Your registration is created but not submitted.

    • SUBMITTED: Your registration has been submitted and is awaiting review.

    • REVIEWING: Your registration has been accepted and is being reviewed.

    • PROVISIONING: Your registration has been approved and your origination identity is being created.

    • COMPLETE: Your registration has been approved and and your origination identity has been created.

    • REQUIRES_UPDATES: You must fix your registration and resubmit it.

    • CLOSED: The phone number or sender ID has been deleted and you must also delete the registration for the number.

    • DELETED: The registration has been deleted.

    " + "documentation":"

    The status of the registration.

    • CLOSED: The phone number or sender ID has been deleted and you must also delete the registration for the number.

    • CREATED: Your registration is created but not submitted.

    • COMPLETE: Your registration has been approved and your origination identity has been created.

    • DELETED: The registration has been deleted.

    • PROVISIONING: Your registration has been approved and your origination identity is being created.

    • REQUIRES_AUTHENTICATION: You need to complete email authentication.

    • REQUIRES_UPDATES: You must fix your registration and resubmit it.

    • REVIEWING: Your registration has been accepted and is being reviewed.

    • SUBMITTED: Your registration has been submitted and is awaiting review.

    " }, "CurrentVersionNumber":{ "shape":"RegistrationVersionNumber", @@ -2412,7 +2412,7 @@ }, "RegistrationVersionStatus":{ "shape":"RegistrationVersionStatus", - "documentation":"

    The status of the registration.

    • DRAFT: The initial status of a registration version after it’s created.

    • SUBMITTED: Your registration has been submitted.

    • REVIEWING: Your registration has been accepted and is being reviewed.

    • APPROVED: Your registration has been approved.

    • DISCARDED: You've abandon this version of their registration to start over with a new version.

    • DENIED: You must fix your registration and resubmit it.

    • REVOKED: Your previously approved registration has been revoked.

    • ARCHIVED: Your previously approved registration version moves into this status when a more recently submitted version is approved.

    " + "documentation":"

    The status of the registration.

    • APPROVED: Your registration has been approved.

    • ARCHIVED: Your previously approved registration version moves into this status when a more recently submitted version is approved.

    • DENIED: You must fix your registration and resubmit it.

    • DISCARDED: You've abandoned this version of your registration to start over with a new version.

    • DRAFT: The initial status of a registration version after it’s created.

    • REQUIRES_AUTHENTICATION: You need to complete email authentication.

    • REVIEWING: Your registration has been accepted and is being reviewed.

    • REVOKED: Your previously approved registration has been revoked.

    • SUBMITTED: Your registration has been submitted.

    " }, "RegistrationVersionStatusHistory":{ "shape":"RegistrationVersionStatusHistory", @@ -2982,7 +2982,7 @@ }, "RegistrationStatus":{ "shape":"RegistrationStatus", - "documentation":"

    The status of the registration.

    • CREATED: Your registration is created but not submitted.

    • SUBMITTED: Your registration has been submitted and is awaiting review.

    • REVIEWING: Your registration has been accepted and is being reviewed.

    • PROVISIONING: Your registration has been approved and your origination identity is being created.

    • COMPLETE: Your registration has been approved and and your origination identity has been created.

    • REQUIRES_UPDATES: You must fix your registration and resubmit it.

    • CLOSED: The phone number or sender ID has been deleted and you must also delete the registration for the number.

    • DELETED: The registration has been deleted.

    " + "documentation":"

    The status of the registration.

    • CLOSED: The phone number or sender ID has been deleted and you must also delete the registration for the number.

    • CREATED: Your registration is created but not submitted.

    • COMPLETE: Your registration has been approved and your origination identity has been created.

    • DELETED: The registration has been deleted.

    • PROVISIONING: Your registration has been approved and your origination identity is being created.

    • REQUIRES_AUTHENTICATION: You need to complete email authentication.

    • REQUIRES_UPDATES: You must fix your registration and resubmit it.

    • REVIEWING: Your registration has been accepted and is being reviewed.

    • SUBMITTED: Your registration has been submitted and is awaiting review.

    " }, "CurrentVersionNumber":{ "shape":"RegistrationVersionNumber", @@ -3995,7 +3995,7 @@ }, "RegistrationVersionStatus":{ "shape":"RegistrationVersionStatus", - "documentation":"

    The status of the registration version.

    • DRAFT: The initial status of a registration version after it’s created.

    • SUBMITTED: Your registration has been submitted.

    • REVIEWING: Your registration has been accepted and is being reviewed.

    • APPROVED: Your registration has been approved.

    • DISCARDED: You've abandon this version of their registration to start over with a new version.

    • DENIED: You must fix your registration and resubmit it.

    • REVOKED: Your previously approved registration has been revoked.

    • ARCHIVED: Your previously approved registration version moves into this status when a more recently submitted version is approved.

    " + "documentation":"

    The status of the registration version.

    • APPROVED: Your registration has been approved.

    • ARCHIVED: Your previously approved registration version moves into this status when a more recently submitted version is approved.

    • DENIED: You must fix your registration and resubmit it.

    • DISCARDED: You've abandoned this version of your registration to start over with a new version.

    • DRAFT: The initial status of a registration version after it’s created.

    • REQUIRES_AUTHENTICATION: You need to complete email authentication.

    • REVIEWING: Your registration has been accepted and is being reviewed.

    • REVOKED: Your previously approved registration has been revoked.

    • SUBMITTED: Your registration has been submitted.

    " }, "RegistrationVersionStatusHistory":{ "shape":"RegistrationVersionStatusHistory", @@ -5746,7 +5746,7 @@ }, "RegistrationStatus":{ "shape":"RegistrationStatus", - "documentation":"

    The status of the registration.

    • CREATED: Your registration is created but not submitted.

    • SUBMITTED: Your registration has been submitted and is awaiting review.

    • REVIEWING: Your registration has been accepted and is being reviewed.

    • PROVISIONING: Your registration has been approved and your origination identity is being created.

    • COMPLETE: Your registration has been approved and and your origination identity has been created.

    • REQUIRES_UPDATES: You must fix your registration and resubmit it.

    • CLOSED: The phone number or sender ID has been deleted and you must also delete the registration for the number.

    • DELETED: The registration has been deleted.

    " + "documentation":"

    The status of the registration.

    • CLOSED: The phone number or sender ID has been deleted and you must also delete the registration for the number.

    • CREATED: Your registration is created but not submitted.

    • COMPLETE: Your registration has been approved and your origination identity has been created.

    • DELETED: The registration has been deleted.

    • PROVISIONING: Your registration has been approved and your origination identity is being created.

    • REQUIRES_AUTHENTICATION: You need to complete email authentication.

    • REQUIRES_UPDATES: You must fix your registration and resubmit it.

    • REVIEWING: Your registration has been accepted and is being reviewed.

    • SUBMITTED: Your registration has been submitted and is awaiting review.

    " }, "CurrentVersionNumber":{ "shape":"RegistrationVersionNumber", @@ -5979,7 +5979,7 @@ }, "RegistrationVersionStatus":{ "shape":"RegistrationVersionStatus", - "documentation":"

    The status of the registration.

    • DRAFT: The initial status of a registration version after it’s created.

    • SUBMITTED: Your registration has been submitted.

    • REVIEWING: Your registration has been accepted and is being reviewed.

    • APPROVED: Your registration has been approved.

    • DISCARDED: You've abandon this version of their registration to start over with a new version.

    • DENIED: You must fix your registration and resubmit it.

    • REVOKED: Your previously approved registration has been revoked.

    • ARCHIVED: Your previously approved registration version moves into this status when a more recently submitted version is approved.

    " + "documentation":"

    The status of the registration.

    • APPROVED: Your registration has been approved.

    • ARCHIVED: Your previously approved registration version moves into this status when a more recently submitted version is approved.

    • DENIED: You must fix your registration and resubmit it.

    • DISCARDED: You've abandoned this version of your registration to start over with a new version.

    • DRAFT: The initial status of a registration version after it’s created.

    • REQUIRES_AUTHENTICATION: You need to complete email authentication.

    • REVIEWING: Your registration has been accepted and is being reviewed.

    • REVOKED: Your previously approved registration has been revoked.

    • SUBMITTED: Your registration has been submitted.

    " }, "RegistrationVersionStatusHistory":{ "shape":"RegistrationVersionStatusHistory", @@ -6038,6 +6038,10 @@ "shape":"Timestamp", "documentation":"

    The time when the registration was in the reviewing state, in UNIX epoch time format.

    " }, + "RequiresAuthenticationTimestamp":{ + "shape":"Timestamp", + "documentation":"

    The time when the registration was in the requires authentication state, in UNIX epoch time format.

    " + }, "ApprovedTimestamp":{ "shape":"Timestamp", "documentation":"

    The time when the registration was in the approved state, in UNIX epoch time format.

    " @@ -7179,7 +7183,7 @@ }, "RegistrationVersionStatus":{ "shape":"RegistrationVersionStatus", - "documentation":"

    The status of the registration version.

    • DRAFT: The initial status of a registration version after it’s created.

    • SUBMITTED: Your registration has been submitted.

    • REVIEWING: Your registration has been accepted and is being reviewed.

    • APPROVED: Your registration has been approved.

    • DISCARDED: You've abandoned this version of your registration to start over with a new version.

    • DENIED: You must fix your registration and resubmit it.

    • REVOKED: Your previously approved registration has been revoked.

    • ARCHIVED: Your previously approved registration version moves into this status when a more recently submitted version is approved.

    " + "documentation":"

    The status of the registration version.

    • APPROVED: Your registration has been approved.

    • ARCHIVED: Your previously approved registration version moves into this status when a more recently submitted version is approved.

    • DENIED: You must fix your registration and resubmit it.

    • DISCARDED: You've abandoned this version of your registration to start over with a new version.

    • DRAFT: The initial status of a registration version after it’s created.

    • REQUIRES_AUTHENTICATION: You need to complete email authentication.

    • REVIEWING: Your registration has been accepted and is being reviewed.

    • REVOKED: Your previously approved registration has been revoked.

    • SUBMITTED: Your registration has been submitted.

    " }, "RegistrationVersionStatusHistory":{ "shape":"RegistrationVersionStatusHistory", diff --git a/tools/code-generation/api-descriptions/qbusiness-2023-11-27.normal.json b/tools/code-generation/api-descriptions/qbusiness-2023-11-27.normal.json index ed1a62d2295..532272576cd 100644 --- a/tools/code-generation/api-descriptions/qbusiness-2023-11-27.normal.json +++ b/tools/code-generation/api-descriptions/qbusiness-2023-11-27.normal.json @@ -111,7 +111,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

    Creates an Amazon Q Business application.

    There are new tiers for Amazon Q Business. Not all features in Amazon Q Business Pro are also available in Amazon Q Business Lite. For information on what's included in Amazon Q Business Lite and what's included in Amazon Q Business Pro, see Amazon Q Business tiers. You must use the Amazon Q Business console to assign subscription tiers to users.

    ", + "documentation":"

    Creates an Amazon Q Business application.

    There are new tiers for Amazon Q Business. Not all features in Amazon Q Business Pro are also available in Amazon Q Business Lite. For information on what's included in Amazon Q Business Lite and what's included in Amazon Q Business Pro, see Amazon Q Business tiers. You must use the Amazon Q Business console to assign subscription tiers to users.

    An Amazon Q Apps service-linked role will be created if it's absent in the Amazon Web Services account when the QAppsConfiguration is enabled in the request. For more information, see Using service-linked roles for Q Apps.

    ", "idempotent":true }, "CreateDataSource":{ @@ -950,7 +950,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

    Updates an existing Amazon Q Business application.

    ", + "documentation":"

    Updates an existing Amazon Q Business application.

    An Amazon Q Apps service-linked role will be created if it's absent in the Amazon Web Services account when the QAppsConfiguration is enabled in the request. For more information, see Using service-linked roles for Q Apps.

    ", "idempotent":true }, "UpdateChatControlsConfiguration":{ @@ -4192,7 +4192,8 @@ "memberUsers":{ "shape":"MemberUsers", "documentation":"

    A list of users that belong to a group. For example, a list of interns all belong to the \"Interns\" group.

    " - } + }, + "s3PathForGroupMembers":{"shape":"S3"} }, "documentation":"

    A list of users or sub groups that belong to a group. This is for generating Amazon Q Business chat results only from document a user has access to.

    " }, @@ -5645,7 +5646,11 @@ "shape":"MembershipType", "documentation":"

    The type of the group.

    " }, - "groupMembers":{"shape":"GroupMembers"} + "groupMembers":{"shape":"GroupMembers"}, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

    The Amazon Resource Name (ARN) of an IAM role that has access to the S3 file that contains your list of users that belong to a group.

    " + } } }, "PutGroupResponse":{