diff --git a/VERSION b/VERSION
index 17e6550092a..298115c6ebf 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.11.440
\ No newline at end of file
+1.11.441
\ No newline at end of file
diff --git a/generated/src/aws-cpp-sdk-codebuild/include/aws/codebuild/model/ComputeConfiguration.h b/generated/src/aws-cpp-sdk-codebuild/include/aws/codebuild/model/ComputeConfiguration.h
new file mode 100644
index 00000000000..233f52cb983
--- /dev/null
+++ b/generated/src/aws-cpp-sdk-codebuild/include/aws/codebuild/model/ComputeConfiguration.h
@@ -0,0 +1,100 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+#include <aws/codebuild/CodeBuild_EXPORTS.h>
+#include <aws/codebuild/model/MachineType.h>
+#include <utility>
+
+namespace Aws
+{
+namespace CodeBuild
+{
+namespace Model
+{
+
+  /**
+   * Contains compute attributes. These attributes only need to be specified when
+   * your project's or fleet's computeType is set to
+   * ATTRIBUTE_BASED_COMPUTE.
+   *
+   * See Also: AWS API Reference
+   */
+  class ComputeConfiguration
+  {
+  public:
+
+    ///@{
+    /**
+     * The number of vCPUs of the instance type included in your fleet.
+ */ + inline long long GetVCpu() const{ return m_vCpu; } + inline bool VCpuHasBeenSet() const { return m_vCpuHasBeenSet; } + inline void SetVCpu(long long value) { m_vCpuHasBeenSet = true; m_vCpu = value; } + inline ComputeConfiguration& WithVCpu(long long value) { SetVCpu(value); return *this;} + ///@} + + ///@{ + /** + *The amount of memory of the instance type included in your fleet.
+ */ + inline long long GetMemory() const{ return m_memory; } + inline bool MemoryHasBeenSet() const { return m_memoryHasBeenSet; } + inline void SetMemory(long long value) { m_memoryHasBeenSet = true; m_memory = value; } + inline ComputeConfiguration& WithMemory(long long value) { SetMemory(value); return *this;} + ///@} + + ///@{ + /** + *The amount of disk space of the instance type included in your fleet.
+ */ + inline long long GetDisk() const{ return m_disk; } + inline bool DiskHasBeenSet() const { return m_diskHasBeenSet; } + inline void SetDisk(long long value) { m_diskHasBeenSet = true; m_disk = value; } + inline ComputeConfiguration& WithDisk(long long value) { SetDisk(value); return *this;} + ///@} + + ///@{ + /** + *The machine type of the instance type included in your fleet.
+ */ + inline const MachineType& GetMachineType() const{ return m_machineType; } + inline bool MachineTypeHasBeenSet() const { return m_machineTypeHasBeenSet; } + inline void SetMachineType(const MachineType& value) { m_machineTypeHasBeenSet = true; m_machineType = value; } + inline void SetMachineType(MachineType&& value) { m_machineTypeHasBeenSet = true; m_machineType = std::move(value); } + inline ComputeConfiguration& WithMachineType(const MachineType& value) { SetMachineType(value); return *this;} + inline ComputeConfiguration& WithMachineType(MachineType&& value) { SetMachineType(std::move(value)); return *this;} + ///@} + private: + + long long m_vCpu; + bool m_vCpuHasBeenSet = false; + + long long m_memory; + bool m_memoryHasBeenSet = false; + + long long m_disk; + bool m_diskHasBeenSet = false; + + MachineType m_machineType; + bool m_machineTypeHasBeenSet = false; + }; + +} // namespace Model +} // namespace CodeBuild +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-codebuild/include/aws/codebuild/model/ComputeType.h b/generated/src/aws-cpp-sdk-codebuild/include/aws/codebuild/model/ComputeType.h index f8704530a0a..ee2e5f2d195 100644 --- a/generated/src/aws-cpp-sdk-codebuild/include/aws/codebuild/model/ComputeType.h +++ b/generated/src/aws-cpp-sdk-codebuild/include/aws/codebuild/model/ComputeType.h @@ -25,7 +25,8 @@ namespace Model BUILD_LAMBDA_2GB, BUILD_LAMBDA_4GB, BUILD_LAMBDA_8GB, - BUILD_LAMBDA_10GB + BUILD_LAMBDA_10GB, + ATTRIBUTE_BASED_COMPUTE }; namespace ComputeTypeMapper diff --git a/generated/src/aws-cpp-sdk-codebuild/include/aws/codebuild/model/CreateFleetRequest.h b/generated/src/aws-cpp-sdk-codebuild/include/aws/codebuild/model/CreateFleetRequest.h index 7e0af5ec349..72d21cec6f6 100644 --- a/generated/src/aws-cpp-sdk-codebuild/include/aws/codebuild/model/CreateFleetRequest.h +++ b/generated/src/aws-cpp-sdk-codebuild/include/aws/codebuild/model/CreateFleetRequest.h @@ -9,6 +9,7 @@ #includeInformation about the compute resources the compute fleet uses. Available - * values include:
- * BUILD_GENERAL1_SMALL: Use up to 3 GB memory and 2 vCPUs for builds.
- * BUILD_GENERAL1_MEDIUM: Use up to 7 GB memory and 4 vCPUs for builds.
- * BUILD_GENERAL1_LARGE: Use up to 16 GB memory and 8 vCPUs for builds, depending
- * on your environment type.
- * BUILD_GENERAL1_XLARGE: Use up to 70 GB memory and 36 vCPUs for builds,
- * depending on your environment type.
- * BUILD_GENERAL1_2XLARGE: Use up to 145 GB memory, 72 vCPUs, and 824
+ * values include:
+ * ATTRIBUTE_BASED_COMPUTE: Specify the amount of vCPUs, memory, disk space, and
+ * the type of machine. If you use ATTRIBUTE_BASED_COMPUTE, you must define your
+ * attributes by using computeConfiguration. CodeBuild will select the cheapest
+ * instance that satisfies your specified attributes. For more information, see
+ * Reserved capacity environment types in the CodeBuild User Guide.
+ * BUILD_GENERAL1_SMALL: Use up to 4 GiB memory and 2 vCPUs for builds.
+ * BUILD_GENERAL1_MEDIUM: Use up to 8 GiB memory and 4 vCPUs for builds.
+ * BUILD_GENERAL1_LARGE: Use up to 16 GiB memory and 8 vCPUs for builds,
+ * depending on your environment type.
+ * BUILD_GENERAL1_XLARGE: Use up to 72 GiB memory and 36 vCPUs for builds,
+ * depending on your environment type.
+ * BUILD_GENERAL1_2XLARGE: Use up to 144 GiB memory, 72 vCPUs, and 824
 * GB of SSD storage for builds. This compute type supports Docker images up to 100
- * GB uncompressed. If you use
+ * GB uncompressed.
+ * BUILD_LAMBDA_1GB: Use up to 1 GiB memory for builds. Only available for
+ * environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.
+ * BUILD_LAMBDA_2GB: Use up to 2 GiB memory for builds. Only available for
+ * environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.
+ * BUILD_LAMBDA_4GB: Use up to 4 GiB memory for builds. Only available for
+ * environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.
+ * BUILD_LAMBDA_8GB: Use up to 8 GiB memory for builds. Only available for
+ * environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.
+ * BUILD_LAMBDA_10GB: Use up to 10 GiB memory for builds. Only available for
+ * environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.
+ * If you use
 * BUILD_GENERAL1_SMALL: For environment type
- * LINUX_CONTAINER, you can use up to 3 GB memory and 2 vCPUs for
+ * LINUX_CONTAINER, you can use up to 4 GiB memory and 2 vCPUs for
 * builds. For environment type
- * LINUX_GPU_CONTAINER, you can use up to 16 GB memory, 4 vCPUs, and 1
- * NVIDIA A10G Tensor Core GPU for builds. For environment type
- * ARM_CONTAINER, you can use up to 4 GB memory and 2 vCPUs on
+ * LINUX_GPU_CONTAINER, you can use up to 16 GiB memory, 4 vCPUs, and
+ * 1 NVIDIA A10G Tensor Core GPU for builds. For environment
+ * type ARM_CONTAINER, you can use up to 4 GiB memory and 2 vCPUs on
 * ARM-based processors for builds. If you use
 * BUILD_GENERAL1_LARGE: For environment type
- * LINUX_CONTAINER, you can use up to 15 GB memory and 8 vCPUs for
+ * LINUX_CONTAINER, you can use up to 16 GiB memory and 8 vCPUs for
 * builds. For environment type
- * LINUX_GPU_CONTAINER, you can use up to 255 GB memory, 32 vCPUs, and
- * 4 NVIDIA Tesla V100 GPUs for builds. For environment type
- * ARM_CONTAINER, you can use up to 16 GB memory and 8 vCPUs on
+ * LINUX_GPU_CONTAINER, you can use up to 255 GiB memory, 32 vCPUs,
+ * and 4 NVIDIA Tesla V100 GPUs for builds. For environment type
+ * ARM_CONTAINER, you can use up to 16 GiB memory and 8 vCPUs on
 * ARM-based processors for builds. For more information, see
- * Build environment compute types in the CodeBuild User Guide.
+ * href="https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html#environment.types">On-demand + * environment types in the CodeBuild User Guide. */ inline const ComputeType& GetComputeType() const{ return m_computeType; } inline bool ComputeTypeHasBeenSet() const { return m_computeTypeHasBeenSet; } @@ -143,6 +165,19 @@ namespace Model inline CreateFleetRequest& WithComputeType(ComputeType&& value) { SetComputeType(std::move(value)); return *this;} ///@} + ///@{ + /** + *The compute configuration of the compute fleet. This is only required if
+ * computeType
is set to ATTRIBUTE_BASED_COMPUTE
.
The scaling configuration of the compute fleet.
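A minimal usage sketch (not part of the generated sources; the fleet name, capacity, and attribute values are assumptions) showing how the new ATTRIBUTE_BASED_COMPUTE compute type and the ComputeConfiguration shape introduced above fit together when creating a reserved-capacity fleet:

```cpp
#include <aws/core/Aws.h>
#include <aws/codebuild/CodeBuildClient.h>
#include <aws/codebuild/model/CreateFleetRequest.h>
#include <aws/codebuild/model/ComputeConfiguration.h>
#include <aws/codebuild/model/MachineType.h>
#include <iostream>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        using namespace Aws::CodeBuild::Model;
        Aws::CodeBuild::CodeBuildClient client;

        // Describe the attributes you need; CodeBuild selects the cheapest
        // instance type that satisfies them.
        ComputeConfiguration attributes;
        attributes.SetVCpu(4);
        attributes.SetMemory(8);   // memory/disk units as defined by the service docs
        attributes.SetDisk(128);
        attributes.SetMachineType(MachineType::GENERAL);

        CreateFleetRequest request;
        request.SetName("my-attribute-based-fleet");   // assumed fleet name
        request.SetBaseCapacity(2);
        request.SetEnvironmentType(EnvironmentType::LINUX_CONTAINER);
        request.SetComputeType(ComputeType::ATTRIBUTE_BASED_COMPUTE);
        request.SetComputeConfiguration(attributes);

        auto outcome = client.CreateFleet(request);
        if (!outcome.IsSuccess())
        {
            std::cerr << "CreateFleet failed: "
                      << outcome.GetError().GetMessage() << std::endl;
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}
```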
@@ -259,6 +294,9 @@ namespace Model ComputeType m_computeType; bool m_computeTypeHasBeenSet = false; + ComputeConfiguration m_computeConfiguration; + bool m_computeConfigurationHasBeenSet = false; + ScalingConfigurationInput m_scalingConfiguration; bool m_scalingConfigurationHasBeenSet = false; diff --git a/generated/src/aws-cpp-sdk-codebuild/include/aws/codebuild/model/Fleet.h b/generated/src/aws-cpp-sdk-codebuild/include/aws/codebuild/model/Fleet.h index 6a2b4fd81ce..6d4f230f47e 100644 --- a/generated/src/aws-cpp-sdk-codebuild/include/aws/codebuild/model/Fleet.h +++ b/generated/src/aws-cpp-sdk-codebuild/include/aws/codebuild/model/Fleet.h @@ -10,6 +10,7 @@ #includeInformation about the compute resources the compute fleet uses. Available - * values include:
- * BUILD_GENERAL1_SMALL: Use up to 3 GB memory and 2 vCPUs for builds.
- * BUILD_GENERAL1_MEDIUM: Use up to 7 GB memory and 4 vCPUs for builds.
- * BUILD_GENERAL1_LARGE: Use up to 16 GB memory and 8 vCPUs for builds, depending
- * on your environment type.
- * BUILD_GENERAL1_XLARGE: Use up to 70 GB memory and 36 vCPUs for builds,
- * depending on your environment type.
- * BUILD_GENERAL1_2XLARGE: Use up to 145 GB memory, 72 vCPUs, and 824
+ * values include:
+ * ATTRIBUTE_BASED_COMPUTE: Specify the amount of vCPUs, memory, disk space, and
+ * the type of machine. If you use ATTRIBUTE_BASED_COMPUTE, you must define your
+ * attributes by using computeConfiguration. CodeBuild will select the cheapest
+ * instance that satisfies your specified attributes. For more information, see
+ * Reserved capacity environment types in the CodeBuild User Guide.
+ * BUILD_GENERAL1_SMALL: Use up to 4 GiB memory and 2 vCPUs for builds.
+ * BUILD_GENERAL1_MEDIUM: Use up to 8 GiB memory and 4 vCPUs for builds.
+ * BUILD_GENERAL1_LARGE: Use up to 16 GiB memory and 8 vCPUs for builds,
+ * depending on your environment type.
+ * BUILD_GENERAL1_XLARGE: Use up to 72 GiB memory and 36 vCPUs for builds,
+ * depending on your environment type.
+ * BUILD_GENERAL1_2XLARGE: Use up to 144 GiB memory, 72 vCPUs, and 824
 * GB of SSD storage for builds. This compute type supports Docker images up to 100
- * GB uncompressed. If you use
+ * GB uncompressed.
+ * BUILD_LAMBDA_1GB: Use up to 1 GiB memory for builds. Only available for
+ * environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.
+ * BUILD_LAMBDA_2GB: Use up to 2 GiB memory for builds. Only available for
+ * environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.
+ * BUILD_LAMBDA_4GB: Use up to 4 GiB memory for builds. Only available for
+ * environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.
+ * BUILD_LAMBDA_8GB: Use up to 8 GiB memory for builds. Only available for
+ * environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.
+ * BUILD_LAMBDA_10GB: Use up to 10 GiB memory for builds. Only available for
+ * environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.
+ * If you use
 * BUILD_GENERAL1_SMALL: For environment type
- * LINUX_CONTAINER, you can use up to 3 GB memory and 2 vCPUs for
+ * LINUX_CONTAINER, you can use up to 4 GiB memory and 2 vCPUs for
 * builds. For environment type
- * LINUX_GPU_CONTAINER, you can use up to 16 GB memory, 4 vCPUs, and 1
- * NVIDIA A10G Tensor Core GPU for builds. For environment type
- * ARM_CONTAINER, you can use up to 4 GB memory and 2 vCPUs on
+ * LINUX_GPU_CONTAINER, you can use up to 16 GiB memory, 4 vCPUs, and
+ * 1 NVIDIA A10G Tensor Core GPU for builds. For environment
+ * type ARM_CONTAINER, you can use up to 4 GiB memory and 2 vCPUs on
 * ARM-based processors for builds. If you use
 * BUILD_GENERAL1_LARGE: For environment type
- * LINUX_CONTAINER, you can use up to 15 GB memory and 8 vCPUs for
+ * LINUX_CONTAINER, you can use up to 16 GiB memory and 8 vCPUs for
 * builds. For environment type
- * LINUX_GPU_CONTAINER, you can use up to 255 GB memory, 32 vCPUs, and
- * 4 NVIDIA Tesla V100 GPUs for builds. For environment type
- * ARM_CONTAINER, you can use up to 16 GB memory and 8 vCPUs on
+ * LINUX_GPU_CONTAINER, you can use up to 255 GiB memory, 32 vCPUs,
+ * and 4 NVIDIA Tesla V100 GPUs for builds. For environment type
+ * ARM_CONTAINER, you can use up to 16 GiB memory and 8 vCPUs on
 * ARM-based processors for builds. For more information, see
- * Build environment compute types in the CodeBuild User Guide.
+ * href="https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html#environment.types">On-demand + * environment types in the CodeBuild User Guide. */ inline const ComputeType& GetComputeType() const{ return m_computeType; } inline bool ComputeTypeHasBeenSet() const { return m_computeTypeHasBeenSet; } @@ -213,6 +235,19 @@ namespace Model inline Fleet& WithComputeType(ComputeType&& value) { SetComputeType(std::move(value)); return *this;} ///@} + ///@{ + /** + *The compute configuration of the compute fleet. This is only required if
+ * computeType
is set to ATTRIBUTE_BASED_COMPUTE
.
The scaling configuration of the compute fleet.
@@ -344,6 +379,9 @@ namespace Model ComputeType m_computeType; bool m_computeTypeHasBeenSet = false; + ComputeConfiguration m_computeConfiguration; + bool m_computeConfigurationHasBeenSet = false; + ScalingConfigurationOutput m_scalingConfiguration; bool m_scalingConfigurationHasBeenSet = false; diff --git a/generated/src/aws-cpp-sdk-codebuild/include/aws/codebuild/model/MachineType.h b/generated/src/aws-cpp-sdk-codebuild/include/aws/codebuild/model/MachineType.h new file mode 100644 index 00000000000..1b784dc5856 --- /dev/null +++ b/generated/src/aws-cpp-sdk-codebuild/include/aws/codebuild/model/MachineType.h @@ -0,0 +1,31 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#includeInformation about the compute resources the build project uses. Available - * values include:
- * BUILD_GENERAL1_SMALL: Use up to 3 GB memory and 2 vCPUs for builds.
- * BUILD_GENERAL1_MEDIUM: Use up to 7 GB memory and 4 vCPUs for builds.
- * BUILD_GENERAL1_LARGE: Use up to 16 GB memory and 8 vCPUs for builds, depending
- * on your environment type.
- * BUILD_GENERAL1_XLARGE: Use up to 70 GB memory and 36 vCPUs for builds,
- * depending on your environment type.
- * BUILD_GENERAL1_2XLARGE: Use up to 145 GB memory, 72 vCPUs, and 824
+ * values include:
+ * ATTRIBUTE_BASED_COMPUTE: Specify the amount of vCPUs, memory, disk space, and
+ * the type of machine. If you use ATTRIBUTE_BASED_COMPUTE, you must define your
+ * attributes by using computeConfiguration. CodeBuild will select the cheapest
+ * instance that satisfies your specified attributes. For more information, see
+ * Reserved capacity environment types in the CodeBuild User Guide.
+ * BUILD_GENERAL1_SMALL: Use up to 4 GiB memory and 2 vCPUs for builds.
+ * BUILD_GENERAL1_MEDIUM: Use up to 8 GiB memory and 4 vCPUs for builds.
+ * BUILD_GENERAL1_LARGE: Use up to 16 GiB memory and 8 vCPUs for builds,
+ * depending on your environment type.
+ * BUILD_GENERAL1_XLARGE: Use up to 72 GiB memory and 36 vCPUs for builds,
+ * depending on your environment type.
+ * BUILD_GENERAL1_2XLARGE: Use up to 144 GiB memory, 72 vCPUs, and 824
 * GB of SSD storage for builds. This compute type supports Docker images up to 100
 * GB uncompressed.
- * BUILD_LAMBDA_1GB: Use up to 1 GB memory for builds. Only available for
+ * BUILD_LAMBDA_1GB: Use up to 1 GiB memory for builds. Only available for
 * environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.
- * BUILD_LAMBDA_2GB: Use up to 2 GB memory for builds. Only available for
+ * BUILD_LAMBDA_2GB: Use up to 2 GiB memory for builds. Only available for
 * environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.
- * BUILD_LAMBDA_4GB: Use up to 4 GB memory for builds. Only available for
+ * BUILD_LAMBDA_4GB: Use up to 4 GiB memory for builds. Only available for
 * environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.
- * BUILD_LAMBDA_8GB: Use up to 8 GB memory for builds. Only available for
+ * BUILD_LAMBDA_8GB: Use up to 8 GiB memory for builds. Only available for
 * environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.
- * BUILD_LAMBDA_10GB: Use up to 10 GB memory for builds. Only available for
+ * BUILD_LAMBDA_10GB: Use up to 10 GiB memory for builds. Only available for
 * environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.
 * If you use BUILD_GENERAL1_SMALL: For environment type
- * LINUX_CONTAINER, you can use up to 3 GB memory and 2 vCPUs for
+ * LINUX_CONTAINER, you can use up to 4 GiB memory and 2 vCPUs for
 * builds. For environment type
- * LINUX_GPU_CONTAINER, you can use up to 16 GB memory, 4 vCPUs, and 1
- * NVIDIA A10G Tensor Core GPU for builds. For environment type
- * ARM_CONTAINER, you can use up to 4 GB memory and 2 vCPUs on
+ * LINUX_GPU_CONTAINER, you can use up to 16 GiB memory, 4 vCPUs, and
+ * 1 NVIDIA A10G Tensor Core GPU for builds. For environment
+ * type ARM_CONTAINER, you can use up to 4 GiB memory and 2 vCPUs on
 * ARM-based processors for builds. If you use
 * BUILD_GENERAL1_LARGE: For environment type
- * LINUX_CONTAINER, you can use up to 15 GB memory and 8 vCPUs for
+ * LINUX_CONTAINER, you can use up to 16 GiB memory and 8 vCPUs for
 * builds. For environment type
- * LINUX_GPU_CONTAINER, you can use up to 255 GB memory, 32 vCPUs, and
- * 4 NVIDIA Tesla V100 GPUs for builds. For environment type
- * ARM_CONTAINER, you can use up to 16 GB memory and 8 vCPUs on
- * ARM-based processors for builds. If you're using
- * compute fleets during project creation, computeType will be
- * ignored. For more information, see Build
- * Environment Compute Types in the CodeBuild User Guide.
+ * LINUX_GPU_CONTAINER, you can use up to 255 GiB memory, 32 vCPUs,
+ * and 4 NVIDIA Tesla V100 GPUs for builds. For environment type
+ * ARM_CONTAINER, you can use up to 16 GiB memory and 8 vCPUs on
+ * ARM-based processors for builds. For more information, see On-demand
+ * environment types in the CodeBuild User Guide.
*/ inline const ComputeType& GetComputeType() const{ return m_computeType; } inline bool ComputeTypeHasBeenSet() const { return m_computeTypeHasBeenSet; } @@ -161,6 +167,19 @@ namespace Model inline ProjectEnvironment& WithComputeType(ComputeType&& value) { SetComputeType(std::move(value)); return *this;} ///@} + ///@{ + /** + *The compute configuration of the build project. This is only required if
+ * computeType
is set to ATTRIBUTE_BASED_COMPUTE
.
A ProjectFleet object to use for this build project.
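The same attribute-based pattern applies to on-demand projects, since ProjectEnvironment now carries an optional computeConfiguration. A hedged sketch (not part of the generated sources; the project name, image, and attribute values are assumptions) using UpdateProject:

```cpp
#include <aws/core/Aws.h>
#include <aws/codebuild/CodeBuildClient.h>
#include <aws/codebuild/model/UpdateProjectRequest.h>
#include <aws/codebuild/model/ProjectEnvironment.h>
#include <aws/codebuild/model/ComputeConfiguration.h>
#include <iostream>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        using namespace Aws::CodeBuild::Model;
        Aws::CodeBuild::CodeBuildClient client;

        // Describe the compute attributes instead of picking a fixed
        // BUILD_GENERAL1_* size.
        ComputeConfiguration attributes;
        attributes.SetVCpu(8);
        attributes.SetMemory(16);
        attributes.SetMachineType(MachineType::GENERAL);

        ProjectEnvironment environment;
        environment.SetType(EnvironmentType::LINUX_CONTAINER);
        environment.SetImage("aws/codebuild/standard:7.0");   // assumed image
        environment.SetComputeType(ComputeType::ATTRIBUTE_BASED_COMPUTE);
        environment.SetComputeConfiguration(attributes);

        UpdateProjectRequest request;
        request.SetName("my-build-project");                  // assumed project name
        request.SetEnvironment(environment);

        auto outcome = client.UpdateProject(request);
        if (!outcome.IsSuccess())
        {
            std::cerr << "UpdateProject failed: "
                      << outcome.GetError().GetMessage() << std::endl;
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}
```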
@@ -270,6 +289,9 @@ namespace Model ComputeType m_computeType; bool m_computeTypeHasBeenSet = false; + ComputeConfiguration m_computeConfiguration; + bool m_computeConfigurationHasBeenSet = false; + ProjectFleet m_fleet; bool m_fleetHasBeenSet = false; diff --git a/generated/src/aws-cpp-sdk-codebuild/include/aws/codebuild/model/UpdateFleetRequest.h b/generated/src/aws-cpp-sdk-codebuild/include/aws/codebuild/model/UpdateFleetRequest.h index b3e0c486155..3ffb89d25a9 100644 --- a/generated/src/aws-cpp-sdk-codebuild/include/aws/codebuild/model/UpdateFleetRequest.h +++ b/generated/src/aws-cpp-sdk-codebuild/include/aws/codebuild/model/UpdateFleetRequest.h @@ -9,6 +9,7 @@ #includeInformation about the compute resources the compute fleet uses. Available - * values include:
- * BUILD_GENERAL1_SMALL: Use up to 3 GB memory and 2 vCPUs for builds.
- * BUILD_GENERAL1_MEDIUM: Use up to 7 GB memory and 4 vCPUs for builds.
- * BUILD_GENERAL1_LARGE: Use up to 16 GB memory and 8 vCPUs for builds, depending
- * on your environment type.
- * BUILD_GENERAL1_XLARGE: Use up to 70 GB memory and 36 vCPUs for builds,
- * depending on your environment type.
- * BUILD_GENERAL1_2XLARGE: Use up to 145 GB memory, 72 vCPUs, and 824
+ * values include:
+ * ATTRIBUTE_BASED_COMPUTE: Specify the amount of vCPUs, memory, disk space, and
+ * the type of machine. If you use ATTRIBUTE_BASED_COMPUTE, you must define your
+ * attributes by using computeConfiguration. CodeBuild will select the cheapest
+ * instance that satisfies your specified attributes. For more information, see
+ * Reserved capacity environment types in the CodeBuild User Guide.
+ * BUILD_GENERAL1_SMALL: Use up to 4 GiB memory and 2 vCPUs for builds.
+ * BUILD_GENERAL1_MEDIUM: Use up to 8 GiB memory and 4 vCPUs for builds.
+ * BUILD_GENERAL1_LARGE: Use up to 16 GiB memory and 8 vCPUs for builds,
+ * depending on your environment type.
+ * BUILD_GENERAL1_XLARGE: Use up to 72 GiB memory and 36 vCPUs for builds,
+ * depending on your environment type.
+ * BUILD_GENERAL1_2XLARGE: Use up to 144 GiB memory, 72 vCPUs, and 824
 * GB of SSD storage for builds. This compute type supports Docker images up to 100
- * GB uncompressed. If you use
+ * GB uncompressed.
+ * BUILD_LAMBDA_1GB: Use up to 1 GiB memory for builds. Only available for
+ * environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.
+ * BUILD_LAMBDA_2GB: Use up to 2 GiB memory for builds. Only available for
+ * environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.
+ * BUILD_LAMBDA_4GB: Use up to 4 GiB memory for builds. Only available for
+ * environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.
+ * BUILD_LAMBDA_8GB: Use up to 8 GiB memory for builds. Only available for
+ * environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.
+ * BUILD_LAMBDA_10GB: Use up to 10 GiB memory for builds. Only available for
+ * environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.
+ * If you use
 * BUILD_GENERAL1_SMALL: For environment type
- * LINUX_CONTAINER, you can use up to 3 GB memory and 2 vCPUs for
+ * LINUX_CONTAINER, you can use up to 4 GiB memory and 2 vCPUs for
 * builds. For environment type
- * LINUX_GPU_CONTAINER, you can use up to 16 GB memory, 4 vCPUs, and 1
- * NVIDIA A10G Tensor Core GPU for builds. For environment type
- * ARM_CONTAINER, you can use up to 4 GB memory and 2 vCPUs on
+ * LINUX_GPU_CONTAINER, you can use up to 16 GiB memory, 4 vCPUs, and
+ * 1 NVIDIA A10G Tensor Core GPU for builds. For environment
+ * type ARM_CONTAINER, you can use up to 4 GiB memory and 2 vCPUs on
 * ARM-based processors for builds. If you use
 * BUILD_GENERAL1_LARGE: For environment type
- * LINUX_CONTAINER, you can use up to 15 GB memory and 8 vCPUs for
+ * LINUX_CONTAINER, you can use up to 16 GiB memory and 8 vCPUs for
 * builds. For environment type
- * LINUX_GPU_CONTAINER, you can use up to 255 GB memory, 32 vCPUs, and
- * 4 NVIDIA Tesla V100 GPUs for builds. For environment type
- * ARM_CONTAINER, you can use up to 16 GB memory and 8 vCPUs on
+ * LINUX_GPU_CONTAINER, you can use up to 255 GiB memory, 32 vCPUs,
+ * and 4 NVIDIA Tesla V100 GPUs for builds. For environment type
+ * ARM_CONTAINER, you can use up to 16 GiB memory and 8 vCPUs on
 * ARM-based processors for builds. For more information, see
- * Build environment compute types in the CodeBuild User Guide.
+ * href="https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html#environment.types">On-demand + * environment types in the CodeBuild User Guide. */ inline const ComputeType& GetComputeType() const{ return m_computeType; } inline bool ComputeTypeHasBeenSet() const { return m_computeTypeHasBeenSet; } @@ -143,6 +165,19 @@ namespace Model inline UpdateFleetRequest& WithComputeType(ComputeType&& value) { SetComputeType(std::move(value)); return *this;} ///@} + ///@{ + /** + *The compute configuration of the compute fleet. This is only required if
+ * computeType
is set to ATTRIBUTE_BASED_COMPUTE
.
The scaling configuration of the compute fleet.
@@ -259,6 +294,9 @@ namespace Model ComputeType m_computeType; bool m_computeTypeHasBeenSet = false; + ComputeConfiguration m_computeConfiguration; + bool m_computeConfigurationHasBeenSet = false; + ScalingConfigurationInput m_scalingConfiguration; bool m_scalingConfigurationHasBeenSet = false; diff --git a/generated/src/aws-cpp-sdk-codebuild/source/model/ComputeConfiguration.cpp b/generated/src/aws-cpp-sdk-codebuild/source/model/ComputeConfiguration.cpp new file mode 100644 index 00000000000..d24d66eeac8 --- /dev/null +++ b/generated/src/aws-cpp-sdk-codebuild/source/model/ComputeConfiguration.cpp @@ -0,0 +1,104 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#includeThe ID of the detector that specifies the GuardDuty service whose findings - * you want to archive.
+ * you want to archive.To find the detectorId
in the current
+ * Region, see the Settings page in the GuardDuty console, or run the ListDetectors
+ * API.
The detector ID associated with the GuardDuty account for which you want to - * create a filter.
+ * create a filter.To find the detectorId
in the current
+ * Region, see the Settings page in the GuardDuty console, or run the ListDetectors
+ * API.
The unique ID of the detector of the GuardDuty account for which you want to - * create an IPSet.
+ * create an IPSet.To find the detectorId
in the current
+ * Region, see the Settings page in the GuardDuty console, or run the ListDetectors
+ * API.
The unique ID of the detector of the GuardDuty account for which you want to - * associate member accounts.
+ * associate member accounts.To find the detectorId
in the
+ * current Region, see the Settings page in the GuardDuty console, or run the ListDetectors
+ * API.
The ID of the GuardDuty detector associated with the publishing - * destination.
+ * destination.To find the detectorId
in the current Region,
+ * see the Settings page in the GuardDuty console, or run the ListDetectors
+ * API.
The ID of the detector for which you need to create sample findings.
+ *To find the detectorId
in the current Region, see the Settings
+ * page in the GuardDuty console, or run the ListDetectors
+ * API.
The unique ID of the detector of the GuardDuty account for which you want to
- * create a ThreatIntelSet
.
ThreatIntelSet
. To find the detectorId
+ * in the current Region, see the Settings page in the GuardDuty console, or run
+ * the ListDetectors
+ * API.
The unique ID of the detector that you want to delete.
+ *The unique ID of the detector that you want to delete.
To find the
+ * detectorId
in the current Region, see the Settings page in the
+ * GuardDuty console, or run the ListDetectors
+ * API.
The unique ID of the detector that is associated with the filter.
+ *The unique ID of the detector that is associated with the filter.
To
+ * find the detectorId
in the current Region, see the Settings page in
+ * the GuardDuty console, or run the ListDetectors
+ * API.
The unique ID of the detector associated with the IPSet.
+ *The unique ID of the detector associated with the IPSet.
To find the
+ * detectorId
in the current Region, see the Settings page in the
+ * GuardDuty console, or run the ListDetectors
+ * API.
The unique ID of the detector of the GuardDuty account whose members you want - * to delete.
+ * to delete.To find the detectorId
in the current Region, see
+ * the Settings page in the GuardDuty console, or run the ListDetectors
+ * API.
The unique ID of the detector associated with the publishing destination to - * delete.
+ * delete.To find the detectorId
in the current Region, see
+ * the Settings page in the GuardDuty console, or run the ListDetectors
+ * API.
The unique ID of the detector that is associated with the threatIntelSet.
+ *To find the detectorId
in the current Region, see the Settings
+ * page in the GuardDuty console, or run the ListDetectors
+ * API.
The unique ID of the detector that the request is associated with.
+ *The unique ID of the detector that the request is associated with.
To
+ * find the detectorId
in the current Region, see the Settings page in
+ * the GuardDuty console, or run the ListDetectors
+ * API.
The detector ID of the delegated administrator for which you need to retrieve - * the information.
+ * the information.To find the detectorId
in the current
+ * Region, see the Settings page in the GuardDuty console, or run the ListDetectors
+ * API.
The unique ID of the detector associated with the publishing destination to - * retrieve.
+ * retrieve.To find the detectorId
in the current Region, see
+ * the Settings page in the GuardDuty console, or run the ListDetectors
+ * API.
The unique ID of the GuardDuty detector.
+ *The unique ID of the GuardDuty detector.
To find the
+ * detectorId
in the current Region, see the Settings page in the
+ * GuardDuty console, or run the ListDetectors
+ * API.
The unique ID of the detector that you want to get.
+ *The unique ID of the detector that you want to get.
To find the
+ * detectorId
in the current Region, see the Settings page in the
+ * GuardDuty console, or run the ListDetectors
+ * API.
The unique ID of the detector that is associated with this filter.
+ *The unique ID of the detector that is associated with this filter.
To
+ * find the detectorId
in the current Region, see the Settings page in
+ * the GuardDuty console, or run the ListDetectors
+ * API.
The ID of the detector that specifies the GuardDuty service whose findings - * you want to retrieve.
+ * you want to retrieve.To find the detectorId
in the current
+ * Region, see the Settings page in the GuardDuty console, or run the ListDetectors
+ * API.
The ID of the detector whose findings statistics you want to retrieve.
+ *To find the detectorId
in the current Region, see the Settings
+ * page in the GuardDuty console, or run the ListDetectors
+ * API.
The unique ID of the detector that is associated with the IPSet.
+ *The unique ID of the detector that is associated with the IPSet.
To
+ * find the detectorId
in the current Region, see the Settings page in
+ * the GuardDuty console, or run the ListDetectors
+ * API.
The unique ID of the detector that is associated with this scan.
+ *The unique ID of the detector that is associated with this scan.
To
+ * find the detectorId
in the current Region, see the Settings page in
+ * the GuardDuty console, or run the ListDetectors
+ * API.
The detector ID for the administrator account.
+ *The detector ID for the administrator account.
To find the
+ * detectorId
in the current Region, see the Settings page in the
+ * GuardDuty console, or run the ListDetectors
+ * API.
The unique ID of the detector of the GuardDuty account whose members you want - * to retrieve.
+ * to retrieve.To find the detectorId
in the current Region,
+ * see the Settings page in the GuardDuty console, or run the ListDetectors
+ * API.
The unique ID of the detector of the GuardDuty member account.
+ *The unique ID of the detector of the GuardDuty member account.
To find
+ * the detectorId
in the current Region, see the Settings page in the
+ * GuardDuty console, or run the ListDetectors
+ * API.
The unique ID of the detector that is associated with the threatIntelSet.
+ *To find the detectorId
in the current Region, see the Settings
+ * page in the GuardDuty console, or run the ListDetectors
+ * API.
The ID of the detector that specifies the GuardDuty service whose usage - * statistics you want to retrieve.
+ * statistics you want to retrieve.To find the detectorId
in
+ * the current Region, see the Settings page in the GuardDuty console, or run the
+ * ListDetectors
+ * API.
The unique ID of the detector of the GuardDuty account with which you want to - * invite members.
+ * invite members.To find the detectorId
in the current
+ * Region, see the Settings page in the GuardDuty console, or run the ListDetectors
+ * API.
The unique ID of the detector whose coverage details you want to - * retrieve.
+ * retrieve.To find the detectorId
in the current Region, see
+ * the Settings page in the GuardDuty console, or run the ListDetectors
+ * API.
The unique ID of the detector that is associated with the filter.
+ *The unique ID of the detector that is associated with the filter.
To
+ * find the detectorId
in the current Region, see the Settings page in
+ * the GuardDuty console, or run the ListDetectors
+ * API.
The ID of the detector that specifies the GuardDuty service whose findings - * you want to list.
+ * you want to list.To find the detectorId
in the current
+ * Region, see the Settings page in the GuardDuty console, or run the ListDetectors
+ * API.
The unique ID of the detector that is associated with IPSet.
+ *The unique ID of the detector that is associated with IPSet.
To find
+ * the detectorId
in the current Region, see the Settings page in the
+ * GuardDuty console, or run the ListDetectors
+ * API.
The unique ID of the detector that is associated with the member.
+ *The unique ID of the detector that is associated with the member.
To
+ * find the detectorId
in the current Region, see the Settings page in
+ * the GuardDuty console, or run the ListDetectors
+ * API.
The detector ID for which you want to retrieve the publishing - * destination.
+ * destination.To find the detectorId
in the current Region,
+ * see the Settings page in the GuardDuty console, or run the ListDetectors
+ * API.
The unique ID of the detector that is associated with the threatIntelSet.
+ *To find the detectorId
in the current Region, see the Settings
+ * page in the GuardDuty console, or run the ListDetectors
+ * API.
Instance tag key-value pairs associated with the database instance ID.
+ *Information about the tag key-value pairs.
*/ inline const Aws::VectorContains information about the resource type RDSLimitlessDB
that
+ * is involved in a GuardDuty finding.
The name associated with the Limitless DB shard group.
+ */ + inline const Aws::String& GetDbShardGroupIdentifier() const{ return m_dbShardGroupIdentifier; } + inline bool DbShardGroupIdentifierHasBeenSet() const { return m_dbShardGroupIdentifierHasBeenSet; } + inline void SetDbShardGroupIdentifier(const Aws::String& value) { m_dbShardGroupIdentifierHasBeenSet = true; m_dbShardGroupIdentifier = value; } + inline void SetDbShardGroupIdentifier(Aws::String&& value) { m_dbShardGroupIdentifierHasBeenSet = true; m_dbShardGroupIdentifier = std::move(value); } + inline void SetDbShardGroupIdentifier(const char* value) { m_dbShardGroupIdentifierHasBeenSet = true; m_dbShardGroupIdentifier.assign(value); } + inline RdsLimitlessDbDetails& WithDbShardGroupIdentifier(const Aws::String& value) { SetDbShardGroupIdentifier(value); return *this;} + inline RdsLimitlessDbDetails& WithDbShardGroupIdentifier(Aws::String&& value) { SetDbShardGroupIdentifier(std::move(value)); return *this;} + inline RdsLimitlessDbDetails& WithDbShardGroupIdentifier(const char* value) { SetDbShardGroupIdentifier(value); return *this;} + ///@} + + ///@{ + /** + *The resource identifier of the DB shard group within the Limitless + * Database.
+ */ + inline const Aws::String& GetDbShardGroupResourceId() const{ return m_dbShardGroupResourceId; } + inline bool DbShardGroupResourceIdHasBeenSet() const { return m_dbShardGroupResourceIdHasBeenSet; } + inline void SetDbShardGroupResourceId(const Aws::String& value) { m_dbShardGroupResourceIdHasBeenSet = true; m_dbShardGroupResourceId = value; } + inline void SetDbShardGroupResourceId(Aws::String&& value) { m_dbShardGroupResourceIdHasBeenSet = true; m_dbShardGroupResourceId = std::move(value); } + inline void SetDbShardGroupResourceId(const char* value) { m_dbShardGroupResourceIdHasBeenSet = true; m_dbShardGroupResourceId.assign(value); } + inline RdsLimitlessDbDetails& WithDbShardGroupResourceId(const Aws::String& value) { SetDbShardGroupResourceId(value); return *this;} + inline RdsLimitlessDbDetails& WithDbShardGroupResourceId(Aws::String&& value) { SetDbShardGroupResourceId(std::move(value)); return *this;} + inline RdsLimitlessDbDetails& WithDbShardGroupResourceId(const char* value) { SetDbShardGroupResourceId(value); return *this;} + ///@} + + ///@{ + /** + *The Amazon Resource Name (ARN) that identifies the DB shard group.
+ */ + inline const Aws::String& GetDbShardGroupArn() const{ return m_dbShardGroupArn; } + inline bool DbShardGroupArnHasBeenSet() const { return m_dbShardGroupArnHasBeenSet; } + inline void SetDbShardGroupArn(const Aws::String& value) { m_dbShardGroupArnHasBeenSet = true; m_dbShardGroupArn = value; } + inline void SetDbShardGroupArn(Aws::String&& value) { m_dbShardGroupArnHasBeenSet = true; m_dbShardGroupArn = std::move(value); } + inline void SetDbShardGroupArn(const char* value) { m_dbShardGroupArnHasBeenSet = true; m_dbShardGroupArn.assign(value); } + inline RdsLimitlessDbDetails& WithDbShardGroupArn(const Aws::String& value) { SetDbShardGroupArn(value); return *this;} + inline RdsLimitlessDbDetails& WithDbShardGroupArn(Aws::String&& value) { SetDbShardGroupArn(std::move(value)); return *this;} + inline RdsLimitlessDbDetails& WithDbShardGroupArn(const char* value) { SetDbShardGroupArn(value); return *this;} + ///@} + + ///@{ + /** + *The database engine of the database instance involved in the finding.
+ */ + inline const Aws::String& GetEngine() const{ return m_engine; } + inline bool EngineHasBeenSet() const { return m_engineHasBeenSet; } + inline void SetEngine(const Aws::String& value) { m_engineHasBeenSet = true; m_engine = value; } + inline void SetEngine(Aws::String&& value) { m_engineHasBeenSet = true; m_engine = std::move(value); } + inline void SetEngine(const char* value) { m_engineHasBeenSet = true; m_engine.assign(value); } + inline RdsLimitlessDbDetails& WithEngine(const Aws::String& value) { SetEngine(value); return *this;} + inline RdsLimitlessDbDetails& WithEngine(Aws::String&& value) { SetEngine(std::move(value)); return *this;} + inline RdsLimitlessDbDetails& WithEngine(const char* value) { SetEngine(value); return *this;} + ///@} + + ///@{ + /** + *The version of the database engine.
+ */ + inline const Aws::String& GetEngineVersion() const{ return m_engineVersion; } + inline bool EngineVersionHasBeenSet() const { return m_engineVersionHasBeenSet; } + inline void SetEngineVersion(const Aws::String& value) { m_engineVersionHasBeenSet = true; m_engineVersion = value; } + inline void SetEngineVersion(Aws::String&& value) { m_engineVersionHasBeenSet = true; m_engineVersion = std::move(value); } + inline void SetEngineVersion(const char* value) { m_engineVersionHasBeenSet = true; m_engineVersion.assign(value); } + inline RdsLimitlessDbDetails& WithEngineVersion(const Aws::String& value) { SetEngineVersion(value); return *this;} + inline RdsLimitlessDbDetails& WithEngineVersion(Aws::String&& value) { SetEngineVersion(std::move(value)); return *this;} + inline RdsLimitlessDbDetails& WithEngineVersion(const char* value) { SetEngineVersion(value); return *this;} + ///@} + + ///@{ + /** + *The name of the database cluster that is a part of the Limitless + * Database.
+ */ + inline const Aws::String& GetDbClusterIdentifier() const{ return m_dbClusterIdentifier; } + inline bool DbClusterIdentifierHasBeenSet() const { return m_dbClusterIdentifierHasBeenSet; } + inline void SetDbClusterIdentifier(const Aws::String& value) { m_dbClusterIdentifierHasBeenSet = true; m_dbClusterIdentifier = value; } + inline void SetDbClusterIdentifier(Aws::String&& value) { m_dbClusterIdentifierHasBeenSet = true; m_dbClusterIdentifier = std::move(value); } + inline void SetDbClusterIdentifier(const char* value) { m_dbClusterIdentifierHasBeenSet = true; m_dbClusterIdentifier.assign(value); } + inline RdsLimitlessDbDetails& WithDbClusterIdentifier(const Aws::String& value) { SetDbClusterIdentifier(value); return *this;} + inline RdsLimitlessDbDetails& WithDbClusterIdentifier(Aws::String&& value) { SetDbClusterIdentifier(std::move(value)); return *this;} + inline RdsLimitlessDbDetails& WithDbClusterIdentifier(const char* value) { SetDbClusterIdentifier(value); return *this;} + ///@} + + ///@{ + /** + *Information about the tag-key value pair.
+ */ + inline const Aws::VectorContains information about the RDS Limitless database that was involved in a + * GuardDuty finding.
+ */ + inline const RdsLimitlessDbDetails& GetRdsLimitlessDbDetails() const{ return m_rdsLimitlessDbDetails; } + inline bool RdsLimitlessDbDetailsHasBeenSet() const { return m_rdsLimitlessDbDetailsHasBeenSet; } + inline void SetRdsLimitlessDbDetails(const RdsLimitlessDbDetails& value) { m_rdsLimitlessDbDetailsHasBeenSet = true; m_rdsLimitlessDbDetails = value; } + inline void SetRdsLimitlessDbDetails(RdsLimitlessDbDetails&& value) { m_rdsLimitlessDbDetailsHasBeenSet = true; m_rdsLimitlessDbDetails = std::move(value); } + inline Resource& WithRdsLimitlessDbDetails(const RdsLimitlessDbDetails& value) { SetRdsLimitlessDbDetails(value); return *this;} + inline Resource& WithRdsLimitlessDbDetails(RdsLimitlessDbDetails&& value) { SetRdsLimitlessDbDetails(std::move(value)); return *this;} + ///@} + ///@{ /** *Contains information about the user details through which anomalous login @@ -234,6 +248,9 @@ namespace Model RdsDbInstanceDetails m_rdsDbInstanceDetails; bool m_rdsDbInstanceDetailsHasBeenSet = false; + RdsLimitlessDbDetails m_rdsLimitlessDbDetails; + bool m_rdsLimitlessDbDetailsHasBeenSet = false; + RdsDbUserDetails m_rdsDbUserDetails; bool m_rdsDbUserDetailsHasBeenSet = false; diff --git a/generated/src/aws-cpp-sdk-guardduty/include/aws/guardduty/model/Scan.h b/generated/src/aws-cpp-sdk-guardduty/include/aws/guardduty/model/Scan.h index 5b91f6946f9..c5f61100902 100644 --- a/generated/src/aws-cpp-sdk-guardduty/include/aws/guardduty/model/Scan.h +++ b/generated/src/aws-cpp-sdk-guardduty/include/aws/guardduty/model/Scan.h @@ -47,7 +47,11 @@ namespace Model ///@{ /** - *
The unique ID of the detector that the request is associated with.
+ *The unique ID of the detector that the request is associated with.
To
+ * find the detectorId
in the current Region, see the Settings page in
+ * the GuardDuty console, or run the ListDetectors
+ * API.
The unique detector ID of the administrator account that the request is
* associated with. If the account is an administrator, the
* AdminDetectorId
will be the same as the one used for
- * DetectorId
.
DetectorId
. To find the detectorId
in the
+ * current Region, see the Settings page in the GuardDuty console, or run the ListDetectors
+ * API.
The unique ID of the detector of the GuardDuty administrator account - * associated with the member accounts to monitor.
+ * associated with the member accounts to monitor.To find the
+ * detectorId
in the current Region, see the Settings page in the
+ * GuardDuty console, or run the ListDetectors
+ * API.
The unique ID of the detector associated with the GuardDuty administrator - * account that is monitoring member accounts.
+ * account that is monitoring member accounts.To find the
+ * detectorId
in the current Region, see the Settings page in the
+ * GuardDuty console, or run the ListDetectors
+ * API.
Contains information about a tag associated with the EC2 - * instance.
Contains information about a tag key-value pair.
The EC2 instance tag key.
+ *Describes the key associated with the tag.
*/ inline const Aws::String& GetKey() const{ return m_key; } inline bool KeyHasBeenSet() const { return m_keyHasBeenSet; } @@ -54,7 +53,7 @@ namespace Model ///@{ /** - *The EC2 instance tag value.
+ *Describes the value associated with the tag key.
*/ inline const Aws::String& GetValue() const{ return m_value; } inline bool ValueHasBeenSet() const { return m_valueHasBeenSet; } diff --git a/generated/src/aws-cpp-sdk-guardduty/include/aws/guardduty/model/UnarchiveFindingsRequest.h b/generated/src/aws-cpp-sdk-guardduty/include/aws/guardduty/model/UnarchiveFindingsRequest.h index 867d91ffb97..5d3d3010775 100644 --- a/generated/src/aws-cpp-sdk-guardduty/include/aws/guardduty/model/UnarchiveFindingsRequest.h +++ b/generated/src/aws-cpp-sdk-guardduty/include/aws/guardduty/model/UnarchiveFindingsRequest.h @@ -35,7 +35,11 @@ namespace Model ///@{ /** - *The ID of the detector associated with the findings to unarchive.
+ *The ID of the detector associated with the findings to unarchive.
To
+ * find the detectorId
in the current Region, see the Settings page in
+ * the GuardDuty console, or run the ListDetectors
+ * API.
The unique ID of the detector to update.
+ *The unique ID of the detector to update.
To find the
+ * detectorId
in the current Region, see the Settings page in the
+ * GuardDuty console, or run the ListDetectors
+ * API.
The unique ID of the detector that specifies the GuardDuty service where you - * want to update a filter.
+ * want to update a filter.To find the detectorId
in the
+ * current Region, see the Settings page in the GuardDuty console, or run the ListDetectors
+ * API.
The ID of the detector that is associated with the findings for which you - * want to update the feedback.
+ * want to update the feedback.To find the detectorId
in the
+ * current Region, see the Settings page in the GuardDuty console, or run the ListDetectors
+ * API.
The detectorID that specifies the GuardDuty service whose IPSet you want to - * update.
+ * update.To find the detectorId
in the current Region, see
+ * the Settings page in the GuardDuty console, or run the ListDetectors
+ * API.
The unique ID of the detector that specifies the GuardDuty service where you - * want to update scan settings.
+ * want to update scan settings.To find the detectorId
in the
+ * current Region, see the Settings page in the GuardDuty console, or run the ListDetectors
+ * API.
The detector ID of the administrator account.
+ *The detector ID of the administrator account.
To find the
+ * detectorId
in the current Region, see the Settings page in the
+ * GuardDuty console, or run the ListDetectors
+ * API.
The ID of the detector that configures the delegated administrator.
+ *The ID of the detector that configures the delegated administrator.
To
+ * find the detectorId
in the current Region, see the Settings page in
+ * the GuardDuty console, or run the ListDetectors
+ * API.
The ID of the detector associated with the publishing destinations to - * update.
+ * update.To find the detectorId
in the current Region, see
+ * the Settings page in the GuardDuty console, or run the ListDetectors
+ * API.
The detectorID that specifies the GuardDuty service whose ThreatIntelSet you - * want to update.
+ * want to update.To find the detectorId
in the current
+ * Region, see the Settings page in the GuardDuty console, or run the ListDetectors
+ * API.
Creates a new LF-Tag expression with the provided name, description, catalog + * ID, and expression body. This call fails if a LF-Tag expression with the same + * name already exists in the caller’s account or if the underlying LF-Tags don't + * exist. To call this API operation, caller needs the following Lake Formation + * permissions:
CREATE_LF_TAG_EXPRESSION
on the root catalog
+ * resource.
GRANT_WITH_LF_TAG_EXPRESSION
on all underlying
+ * LF-Tag key:value pairs included in the expression.
Creates an IAM Identity Center connection with Lake Formation to allow IAM * Identity Center users and groups to access Data Catalog resources.
Deletes the LF-Tag expression. The caller must be a data lake admin or have
+ * DROP
permissions on the LF-Tag expression. Deleting a LF-Tag
+ * expression will also delete all LFTagPolicy
permissions referencing
+ * the LF-Tag expression.
Deletes an IAM Identity Center connection with Lake Formation.
DESCRIBE
permission on the LF-Tag
+ * expression resource. Returns the state of a query previously submitted. Clients are expected to
* poll GetQueryState
to monitor the current state of the planning
@@ -892,8 +979,10 @@ namespace LakeFormation
*
Allows a caller in a secure environment to assume a role with permission to * access Amazon S3. In order to vend such credentials, Lake Formation assumes the * role associated with a registered location, for example an Amazon S3 bucket, - * with a scope down policy which restricts the access to a single - * prefix.
To call this API, the role that the service assumes must have
+ * lakeformation:GetDataAccess
permission on the
+ * resource.
Returns the LF-Tag expressions in caller’s account filtered based on caller's + * permissions. Data Lake and read only admins implicitly can see all tag + * expressions in their account, else caller needs DESCRIBE permissions on tag + * expression.
Lists LF-tags that the requester has permission to view.
The following * request registers a new location and gives Lake Formation permission to use the * service-linked role to access that location.
ResourceArn =
- * arn:aws:s3:::my-bucket UseServiceLinkedRole = true
If + * arn:aws:s3:::my-bucket/ UseServiceLinkedRole = true
If
* UseServiceLinkedRole
is not set to true, you must provide or set
* the RoleArn
:
* arn:aws:iam::12345:role/my-data-access-role
Updates the name of the LF-Tag expression to the new description and
+ * expression body provided. Updating a LF-Tag expression immediately changes the
+ * permission boundaries of all existing LFTagPolicy
permission grants
+ * that reference the given LF-Tag expression.
Updates the IAM Identity Center connection parameters.
A name for the expression.
+ */ + inline const Aws::String& GetName() const{ return m_name; } + inline bool NameHasBeenSet() const { return m_nameHasBeenSet; } + inline void SetName(const Aws::String& value) { m_nameHasBeenSet = true; m_name = value; } + inline void SetName(Aws::String&& value) { m_nameHasBeenSet = true; m_name = std::move(value); } + inline void SetName(const char* value) { m_nameHasBeenSet = true; m_name.assign(value); } + inline CreateLFTagExpressionRequest& WithName(const Aws::String& value) { SetName(value); return *this;} + inline CreateLFTagExpressionRequest& WithName(Aws::String&& value) { SetName(std::move(value)); return *this;} + inline CreateLFTagExpressionRequest& WithName(const char* value) { SetName(value); return *this;} + ///@} + + ///@{ + /** + *A description with information about the LF-Tag expression.
+ */ + inline const Aws::String& GetDescription() const{ return m_description; } + inline bool DescriptionHasBeenSet() const { return m_descriptionHasBeenSet; } + inline void SetDescription(const Aws::String& value) { m_descriptionHasBeenSet = true; m_description = value; } + inline void SetDescription(Aws::String&& value) { m_descriptionHasBeenSet = true; m_description = std::move(value); } + inline void SetDescription(const char* value) { m_descriptionHasBeenSet = true; m_description.assign(value); } + inline CreateLFTagExpressionRequest& WithDescription(const Aws::String& value) { SetDescription(value); return *this;} + inline CreateLFTagExpressionRequest& WithDescription(Aws::String&& value) { SetDescription(std::move(value)); return *this;} + inline CreateLFTagExpressionRequest& WithDescription(const char* value) { SetDescription(value); return *this;} + ///@} + + ///@{ + /** + *The identifier for the Data Catalog. By default, the account ID. The Data + * Catalog is the persistent metadata store. It contains database definitions, + * table definitions, and other control information to manage your Lake Formation + * environment.
+ */ + inline const Aws::String& GetCatalogId() const{ return m_catalogId; } + inline bool CatalogIdHasBeenSet() const { return m_catalogIdHasBeenSet; } + inline void SetCatalogId(const Aws::String& value) { m_catalogIdHasBeenSet = true; m_catalogId = value; } + inline void SetCatalogId(Aws::String&& value) { m_catalogIdHasBeenSet = true; m_catalogId = std::move(value); } + inline void SetCatalogId(const char* value) { m_catalogIdHasBeenSet = true; m_catalogId.assign(value); } + inline CreateLFTagExpressionRequest& WithCatalogId(const Aws::String& value) { SetCatalogId(value); return *this;} + inline CreateLFTagExpressionRequest& WithCatalogId(Aws::String&& value) { SetCatalogId(std::move(value)); return *this;} + inline CreateLFTagExpressionRequest& WithCatalogId(const char* value) { SetCatalogId(value); return *this;} + ///@} + + ///@{ + /** + *A list of LF-Tag conditions (key-value pairs).
+ */ + inline const Aws::VectorThe name for the LF-Tag expression.
+ */ + inline const Aws::String& GetName() const{ return m_name; } + inline bool NameHasBeenSet() const { return m_nameHasBeenSet; } + inline void SetName(const Aws::String& value) { m_nameHasBeenSet = true; m_name = value; } + inline void SetName(Aws::String&& value) { m_nameHasBeenSet = true; m_name = std::move(value); } + inline void SetName(const char* value) { m_nameHasBeenSet = true; m_name.assign(value); } + inline DeleteLFTagExpressionRequest& WithName(const Aws::String& value) { SetName(value); return *this;} + inline DeleteLFTagExpressionRequest& WithName(Aws::String&& value) { SetName(std::move(value)); return *this;} + inline DeleteLFTagExpressionRequest& WithName(const char* value) { SetName(value); return *this;} + ///@} + + ///@{ + /** + *The identifier for the Data Catalog. By default, the account ID in which the + * LF-Tag expression is saved.
+ */ + inline const Aws::String& GetCatalogId() const{ return m_catalogId; } + inline bool CatalogIdHasBeenSet() const { return m_catalogIdHasBeenSet; } + inline void SetCatalogId(const Aws::String& value) { m_catalogIdHasBeenSet = true; m_catalogId = value; } + inline void SetCatalogId(Aws::String&& value) { m_catalogIdHasBeenSet = true; m_catalogId = std::move(value); } + inline void SetCatalogId(const char* value) { m_catalogIdHasBeenSet = true; m_catalogId.assign(value); } + inline DeleteLFTagExpressionRequest& WithCatalogId(const Aws::String& value) { SetCatalogId(value); return *this;} + inline DeleteLFTagExpressionRequest& WithCatalogId(Aws::String&& value) { SetCatalogId(std::move(value)); return *this;} + inline DeleteLFTagExpressionRequest& WithCatalogId(const char* value) { SetCatalogId(value); return *this;} + ///@} + private: + + Aws::String m_name; + bool m_nameHasBeenSet = false; + + Aws::String m_catalogId; + bool m_catalogIdHasBeenSet = false; + }; + +} // namespace Model +} // namespace LakeFormation +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-lakeformation/include/aws/lakeformation/model/DeleteLFTagExpressionResult.h b/generated/src/aws-cpp-sdk-lakeformation/include/aws/lakeformation/model/DeleteLFTagExpressionResult.h new file mode 100644 index 00000000000..432b020bada --- /dev/null +++ b/generated/src/aws-cpp-sdk-lakeformation/include/aws/lakeformation/model/DeleteLFTagExpressionResult.h @@ -0,0 +1,52 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#includeThe name for the LF-Tag expression
+ */ + inline const Aws::String& GetName() const{ return m_name; } + inline bool NameHasBeenSet() const { return m_nameHasBeenSet; } + inline void SetName(const Aws::String& value) { m_nameHasBeenSet = true; m_name = value; } + inline void SetName(Aws::String&& value) { m_nameHasBeenSet = true; m_name = std::move(value); } + inline void SetName(const char* value) { m_nameHasBeenSet = true; m_name.assign(value); } + inline GetLFTagExpressionRequest& WithName(const Aws::String& value) { SetName(value); return *this;} + inline GetLFTagExpressionRequest& WithName(Aws::String&& value) { SetName(std::move(value)); return *this;} + inline GetLFTagExpressionRequest& WithName(const char* value) { SetName(value); return *this;} + ///@} + + ///@{ + /** + *The identifier for the Data Catalog. By default, the account ID.
+ */ + inline const Aws::String& GetCatalogId() const{ return m_catalogId; } + inline bool CatalogIdHasBeenSet() const { return m_catalogIdHasBeenSet; } + inline void SetCatalogId(const Aws::String& value) { m_catalogIdHasBeenSet = true; m_catalogId = value; } + inline void SetCatalogId(Aws::String&& value) { m_catalogIdHasBeenSet = true; m_catalogId = std::move(value); } + inline void SetCatalogId(const char* value) { m_catalogIdHasBeenSet = true; m_catalogId.assign(value); } + inline GetLFTagExpressionRequest& WithCatalogId(const Aws::String& value) { SetCatalogId(value); return *this;} + inline GetLFTagExpressionRequest& WithCatalogId(Aws::String&& value) { SetCatalogId(std::move(value)); return *this;} + inline GetLFTagExpressionRequest& WithCatalogId(const char* value) { SetCatalogId(value); return *this;} + ///@} + private: + + Aws::String m_name; + bool m_nameHasBeenSet = false; + + Aws::String m_catalogId; + bool m_catalogIdHasBeenSet = false; + }; + +} // namespace Model +} // namespace LakeFormation +} // namespace Aws diff --git a/generated/src/aws-cpp-sdk-lakeformation/include/aws/lakeformation/model/GetLFTagExpressionResult.h b/generated/src/aws-cpp-sdk-lakeformation/include/aws/lakeformation/model/GetLFTagExpressionResult.h new file mode 100644 index 00000000000..00880b1cdc4 --- /dev/null +++ b/generated/src/aws-cpp-sdk-lakeformation/include/aws/lakeformation/model/GetLFTagExpressionResult.h @@ -0,0 +1,116 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#pragma once +#include