GuardDuty RDS Protection expands support for Amazon Aurora PostgreSQL Limitless Databases.

Fix ListStorageLensConfigurations and ListStorageLensGroups deserialization for Smithy SDKs.
API changes for the new named tag expressions feature.
AWS CodeBuild now adds additional compute types for reserved capacity fleets.
Adding the BatchGetPolicy API, which supports the retrieval of multiple policies across multiple policy stores within a single request.
Introduces category APIs in AmazonQApps. Web experience users use Categories to tag and filter library items.
aws-sdk-cpp-automation committed Nov 6, 2024
1 parent 3670e57 commit 01f7ee3
Showing 155 changed files with 6,595 additions and 247 deletions.
VERSION (2 changes: 1 addition, 1 deletion)
@@ -1 +1 @@
- 1.11.440
+ 1.11.441
aws/codebuild/model/ComputeConfiguration.h (new file)
@@ -0,0 +1,100 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/

#pragma once
#include <aws/codebuild/CodeBuild_EXPORTS.h>
#include <aws/codebuild/model/MachineType.h>
#include <utility>

namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace CodeBuild
{
namespace Model
{

/**
* <p>Contains compute attributes. These attributes only need be specified when
* your project's or fleet's <code>computeType</code> is set to
* <code>ATTRIBUTE_BASED_COMPUTE</code>.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/ComputeConfiguration">AWS
* API Reference</a></p>
*/
class ComputeConfiguration
{
public:
AWS_CODEBUILD_API ComputeConfiguration();
AWS_CODEBUILD_API ComputeConfiguration(Aws::Utils::Json::JsonView jsonValue);
AWS_CODEBUILD_API ComputeConfiguration& operator=(Aws::Utils::Json::JsonView jsonValue);
AWS_CODEBUILD_API Aws::Utils::Json::JsonValue Jsonize() const;


///@{
/**
* <p>The number of vCPUs of the instance type included in your fleet.</p>
*/
inline long long GetVCpu() const{ return m_vCpu; }
inline bool VCpuHasBeenSet() const { return m_vCpuHasBeenSet; }
inline void SetVCpu(long long value) { m_vCpuHasBeenSet = true; m_vCpu = value; }
inline ComputeConfiguration& WithVCpu(long long value) { SetVCpu(value); return *this;}
///@}

///@{
/**
* <p>The amount of memory of the instance type included in your fleet.</p>
*/
inline long long GetMemory() const{ return m_memory; }
inline bool MemoryHasBeenSet() const { return m_memoryHasBeenSet; }
inline void SetMemory(long long value) { m_memoryHasBeenSet = true; m_memory = value; }
inline ComputeConfiguration& WithMemory(long long value) { SetMemory(value); return *this;}
///@}

///@{
/**
* <p>The amount of disk space of the instance type included in your fleet.</p>
*/
inline long long GetDisk() const{ return m_disk; }
inline bool DiskHasBeenSet() const { return m_diskHasBeenSet; }
inline void SetDisk(long long value) { m_diskHasBeenSet = true; m_disk = value; }
inline ComputeConfiguration& WithDisk(long long value) { SetDisk(value); return *this;}
///@}

///@{
/**
* <p>The machine type of the instance type included in your fleet.</p>
*/
inline const MachineType& GetMachineType() const{ return m_machineType; }
inline bool MachineTypeHasBeenSet() const { return m_machineTypeHasBeenSet; }
inline void SetMachineType(const MachineType& value) { m_machineTypeHasBeenSet = true; m_machineType = value; }
inline void SetMachineType(MachineType&& value) { m_machineTypeHasBeenSet = true; m_machineType = std::move(value); }
inline ComputeConfiguration& WithMachineType(const MachineType& value) { SetMachineType(value); return *this;}
inline ComputeConfiguration& WithMachineType(MachineType&& value) { SetMachineType(std::move(value)); return *this;}
///@}
private:

long long m_vCpu;
bool m_vCpuHasBeenSet = false;

long long m_memory;
bool m_memoryHasBeenSet = false;

long long m_disk;
bool m_diskHasBeenSet = false;

MachineType m_machineType;
bool m_machineTypeHasBeenSet = false;
};

} // namespace Model
} // namespace CodeBuild
} // namespace Aws
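
The new model can be driven entirely through the fluent setters declared above. A minimal sketch (the attribute values are illustrative placeholders, not part of this commit):

#include <aws/codebuild/model/ComputeConfiguration.h>
#include <aws/codebuild/model/MachineType.h>

using namespace Aws::CodeBuild::Model;

// Build an attribute-based compute specification. Each setter records that the
// field was provided via the corresponding *HasBeenSet flag shown above.
ComputeConfiguration MakeExampleComputeConfiguration()
{
    return ComputeConfiguration()
        .WithVCpu(4)                            // illustrative value
        .WithMemory(8)                          // illustrative value; see the CodeBuild docs for units
        .WithDisk(64)                           // illustrative value
        .WithMachineType(MachineType::GENERAL); // or MachineType::NVME
}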
aws/codebuild/model/ComputeType.h
@@ -25,7 +25,8 @@ namespace Model
BUILD_LAMBDA_2GB,
BUILD_LAMBDA_4GB,
BUILD_LAMBDA_8GB,
- BUILD_LAMBDA_10GB
+ BUILD_LAMBDA_10GB,
+ ATTRIBUTE_BASED_COMPUTE
};

namespace ComputeTypeMapper
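
As with the other generated enums, the new value can be mapped to and from its wire name. A small sketch, assuming ComputeTypeMapper exposes the same GetComputeTypeForName/GetNameForComputeType pair that MachineTypeMapper declares later in this commit:

#include <aws/codebuild/model/ComputeType.h>
#include <aws/core/utils/memory/stl/AWSString.h>

using namespace Aws::CodeBuild::Model;

// Returns true when a compute type name refers to the new attribute-based option.
inline bool IsAttributeBasedCompute(const Aws::String& computeTypeName)
{
    return ComputeTypeMapper::GetComputeTypeForName(computeTypeName)
        == ComputeType::ATTRIBUTE_BASED_COMPUTE;
}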
aws/codebuild/model/CreateFleetRequest.h
@@ -9,6 +9,7 @@
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/codebuild/model/EnvironmentType.h>
#include <aws/codebuild/model/ComputeType.h>
#include <aws/codebuild/model/ComputeConfiguration.h>
#include <aws/codebuild/model/ScalingConfigurationInput.h>
#include <aws/codebuild/model/FleetOverflowBehavior.h>
#include <aws/codebuild/model/VpcConfig.h>
@@ -108,32 +109,53 @@
///@{
/**
* <p>Information about the compute resources the compute fleet uses. Available
- * values include:</p> <ul> <li> <p> <code>BUILD_GENERAL1_SMALL</code>: Use up to 3
- * GB memory and 2 vCPUs for builds.</p> </li> <li> <p>
- * <code>BUILD_GENERAL1_MEDIUM</code>: Use up to 7 GB memory and 4 vCPUs for
- * builds.</p> </li> <li> <p> <code>BUILD_GENERAL1_LARGE</code>: Use up to 16 GB
- * memory and 8 vCPUs for builds, depending on your environment type.</p> </li>
- * <li> <p> <code>BUILD_GENERAL1_XLARGE</code>: Use up to 70 GB memory and 36 vCPUs
- * for builds, depending on your environment type.</p> </li> <li> <p>
- * <code>BUILD_GENERAL1_2XLARGE</code>: Use up to 145 GB memory, 72 vCPUs, and 824
+ * values include:</p> <ul> <li> <p> <code>ATTRIBUTE_BASED_COMPUTE</code>: Specify
+ * the amount of vCPUs, memory, disk space, and the type of machine.</p> <p>
+ * If you use <code>ATTRIBUTE_BASED_COMPUTE</code>, you must define your attributes
+ * by using <code>computeConfiguration</code>. CodeBuild will select the cheapest
+ * instance that satisfies your specified attributes. For more information, see <a
+ * href="https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html#environment-reserved-capacity.types">Reserved
+ * capacity environment types</a> in the <i>CodeBuild User Guide</i>.</p>
+ * </li> <li> <p> <code>BUILD_GENERAL1_SMALL</code>: Use up to 4 GiB memory and 2
+ * vCPUs for builds.</p> </li> <li> <p> <code>BUILD_GENERAL1_MEDIUM</code>: Use up
+ * to 8 GiB memory and 4 vCPUs for builds.</p> </li> <li> <p>
+ * <code>BUILD_GENERAL1_LARGE</code>: Use up to 16 GiB memory and 8 vCPUs for
+ * builds, depending on your environment type.</p> </li> <li> <p>
+ * <code>BUILD_GENERAL1_XLARGE</code>: Use up to 72 GiB memory and 36 vCPUs for
+ * builds, depending on your environment type.</p> </li> <li> <p>
+ * <code>BUILD_GENERAL1_2XLARGE</code>: Use up to 144 GiB memory, 72 vCPUs, and 824
* GB of SSD storage for builds. This compute type supports Docker images up to 100
- * GB uncompressed.</p> </li> </ul> <p> If you use
+ * GB uncompressed.</p> </li> <li> <p> <code>BUILD_LAMBDA_1GB</code>: Use up to 1
+ * GiB memory for builds. Only available for environment type
+ * <code>LINUX_LAMBDA_CONTAINER</code> and <code>ARM_LAMBDA_CONTAINER</code>.</p>
+ * </li> <li> <p> <code>BUILD_LAMBDA_2GB</code>: Use up to 2 GiB memory for builds.
+ * Only available for environment type <code>LINUX_LAMBDA_CONTAINER</code> and
+ * <code>ARM_LAMBDA_CONTAINER</code>.</p> </li> <li> <p>
+ * <code>BUILD_LAMBDA_4GB</code>: Use up to 4 GiB memory for builds. Only available
+ * for environment type <code>LINUX_LAMBDA_CONTAINER</code> and
+ * <code>ARM_LAMBDA_CONTAINER</code>.</p> </li> <li> <p>
+ * <code>BUILD_LAMBDA_8GB</code>: Use up to 8 GiB memory for builds. Only available
+ * for environment type <code>LINUX_LAMBDA_CONTAINER</code> and
+ * <code>ARM_LAMBDA_CONTAINER</code>.</p> </li> <li> <p>
+ * <code>BUILD_LAMBDA_10GB</code>: Use up to 10 GiB memory for builds. Only
+ * available for environment type <code>LINUX_LAMBDA_CONTAINER</code> and
+ * <code>ARM_LAMBDA_CONTAINER</code>.</p> </li> </ul> <p> If you use
* <code>BUILD_GENERAL1_SMALL</code>: </p> <ul> <li> <p> For environment type
- * <code>LINUX_CONTAINER</code>, you can use up to 3 GB memory and 2 vCPUs for
+ * <code>LINUX_CONTAINER</code>, you can use up to 4 GiB memory and 2 vCPUs for
* builds. </p> </li> <li> <p> For environment type
- * <code>LINUX_GPU_CONTAINER</code>, you can use up to 16 GB memory, 4 vCPUs, and 1
- * NVIDIA A10G Tensor Core GPU for builds.</p> </li> <li> <p> For environment type
- * <code>ARM_CONTAINER</code>, you can use up to 4 GB memory and 2 vCPUs on
+ * <code>LINUX_GPU_CONTAINER</code>, you can use up to 16 GiB memory, 4 vCPUs, and
+ * 1 NVIDIA A10G Tensor Core GPU for builds.</p> </li> <li> <p> For environment
+ * type <code>ARM_CONTAINER</code>, you can use up to 4 GiB memory and 2 vCPUs on
* ARM-based processors for builds.</p> </li> </ul> <p> If you use
* <code>BUILD_GENERAL1_LARGE</code>: </p> <ul> <li> <p> For environment type
- * <code>LINUX_CONTAINER</code>, you can use up to 15 GB memory and 8 vCPUs for
+ * <code>LINUX_CONTAINER</code>, you can use up to 16 GiB memory and 8 vCPUs for
* builds. </p> </li> <li> <p> For environment type
- * <code>LINUX_GPU_CONTAINER</code>, you can use up to 255 GB memory, 32 vCPUs, and
- * 4 NVIDIA Tesla V100 GPUs for builds.</p> </li> <li> <p> For environment type
- * <code>ARM_CONTAINER</code>, you can use up to 16 GB memory and 8 vCPUs on
+ * <code>LINUX_GPU_CONTAINER</code>, you can use up to 255 GiB memory, 32 vCPUs,
+ * and 4 NVIDIA Tesla V100 GPUs for builds.</p> </li> <li> <p> For environment type
+ * <code>ARM_CONTAINER</code>, you can use up to 16 GiB memory and 8 vCPUs on
* ARM-based processors for builds.</p> </li> </ul> <p>For more information, see <a
- * href="https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html">Build
- * environment compute types</a> in the <i>CodeBuild User Guide.</i> </p>
+ * href="https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html#environment.types">On-demand
+ * environment types</a> in the <i>CodeBuild User Guide.</i> </p>
*/
inline const ComputeType& GetComputeType() const{ return m_computeType; }
inline bool ComputeTypeHasBeenSet() const { return m_computeTypeHasBeenSet; }
@@ -143,6 +165,19 @@
inline CreateFleetRequest& WithComputeType(ComputeType&& value) { SetComputeType(std::move(value)); return *this;}
///@}

///@{
/**
* <p>The compute configuration of the compute fleet. This is only required if
* <code>computeType</code> is set to <code>ATTRIBUTE_BASED_COMPUTE</code>.</p>
*/
inline const ComputeConfiguration& GetComputeConfiguration() const{ return m_computeConfiguration; }
inline bool ComputeConfigurationHasBeenSet() const { return m_computeConfigurationHasBeenSet; }
inline void SetComputeConfiguration(const ComputeConfiguration& value) { m_computeConfigurationHasBeenSet = true; m_computeConfiguration = value; }
inline void SetComputeConfiguration(ComputeConfiguration&& value) { m_computeConfigurationHasBeenSet = true; m_computeConfiguration = std::move(value); }
inline CreateFleetRequest& WithComputeConfiguration(const ComputeConfiguration& value) { SetComputeConfiguration(value); return *this;}
inline CreateFleetRequest& WithComputeConfiguration(ComputeConfiguration&& value) { SetComputeConfiguration(std::move(value)); return *this;}
///@}

///@{
/**
* <p>The scaling configuration of the compute fleet.</p>
@@ -259,6 +294,9 @@
ComputeType m_computeType;
bool m_computeTypeHasBeenSet = false;

ComputeConfiguration m_computeConfiguration;
bool m_computeConfigurationHasBeenSet = false;

ScalingConfigurationInput m_scalingConfiguration;
bool m_scalingConfigurationHasBeenSet = false;

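
Putting the pieces together, a reserved-capacity fleet that uses attribute-based compute might be created along these lines. This is a sketch: the name, base capacity, and environment-type setters are assumed from the rest of the generated request class (they are not shown in this excerpt), and the attribute values are placeholders.

#include <aws/core/Aws.h>
#include <aws/codebuild/CodeBuildClient.h>
#include <aws/codebuild/model/CreateFleetRequest.h>
#include <aws/codebuild/model/ComputeConfiguration.h>
#include <iostream>

using namespace Aws::CodeBuild;
using namespace Aws::CodeBuild::Model;

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        CodeBuildClient client;

        CreateFleetRequest request;
        request.SetName("attribute-based-fleet");                      // assumed setter, not in this excerpt
        request.SetBaseCapacity(1);                                    // assumed setter, not in this excerpt
        request.SetEnvironmentType(EnvironmentType::LINUX_CONTAINER);  // assumed setter, not in this excerpt
        request.SetComputeType(ComputeType::ATTRIBUTE_BASED_COMPUTE);
        request.SetComputeConfiguration(ComputeConfiguration()
                                            .WithVCpu(2)
                                            .WithMemory(4)
                                            .WithMachineType(MachineType::GENERAL));

        auto outcome = client.CreateFleet(request);
        if (!outcome.IsSuccess())
        {
            std::cerr << "CreateFleet failed: " << outcome.GetError().GetMessage() << std::endl;
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}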
aws/codebuild/model/Fleet.h
@@ -10,6 +10,7 @@
#include <aws/codebuild/model/FleetStatus.h>
#include <aws/codebuild/model/EnvironmentType.h>
#include <aws/codebuild/model/ComputeType.h>
#include <aws/codebuild/model/ComputeConfiguration.h>
#include <aws/codebuild/model/ScalingConfigurationOutput.h>
#include <aws/codebuild/model/FleetOverflowBehavior.h>
#include <aws/codebuild/model/VpcConfig.h>
@@ -178,32 +179,53 @@
///@{
/**
* <p>Information about the compute resources the compute fleet uses. Available
- * values include:</p> <ul> <li> <p> <code>BUILD_GENERAL1_SMALL</code>: Use up to 3
- * GB memory and 2 vCPUs for builds.</p> </li> <li> <p>
- * <code>BUILD_GENERAL1_MEDIUM</code>: Use up to 7 GB memory and 4 vCPUs for
- * builds.</p> </li> <li> <p> <code>BUILD_GENERAL1_LARGE</code>: Use up to 16 GB
- * memory and 8 vCPUs for builds, depending on your environment type.</p> </li>
- * <li> <p> <code>BUILD_GENERAL1_XLARGE</code>: Use up to 70 GB memory and 36 vCPUs
- * for builds, depending on your environment type.</p> </li> <li> <p>
- * <code>BUILD_GENERAL1_2XLARGE</code>: Use up to 145 GB memory, 72 vCPUs, and 824
+ * values include:</p> <ul> <li> <p> <code>ATTRIBUTE_BASED_COMPUTE</code>: Specify
+ * the amount of vCPUs, memory, disk space, and the type of machine.</p> <p>
+ * If you use <code>ATTRIBUTE_BASED_COMPUTE</code>, you must define your attributes
+ * by using <code>computeConfiguration</code>. CodeBuild will select the cheapest
+ * instance that satisfies your specified attributes. For more information, see <a
+ * href="https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html#environment-reserved-capacity.types">Reserved
+ * capacity environment types</a> in the <i>CodeBuild User Guide</i>.</p>
+ * </li> <li> <p> <code>BUILD_GENERAL1_SMALL</code>: Use up to 4 GiB memory and 2
+ * vCPUs for builds.</p> </li> <li> <p> <code>BUILD_GENERAL1_MEDIUM</code>: Use up
+ * to 8 GiB memory and 4 vCPUs for builds.</p> </li> <li> <p>
+ * <code>BUILD_GENERAL1_LARGE</code>: Use up to 16 GiB memory and 8 vCPUs for
+ * builds, depending on your environment type.</p> </li> <li> <p>
+ * <code>BUILD_GENERAL1_XLARGE</code>: Use up to 72 GiB memory and 36 vCPUs for
+ * builds, depending on your environment type.</p> </li> <li> <p>
+ * <code>BUILD_GENERAL1_2XLARGE</code>: Use up to 144 GiB memory, 72 vCPUs, and 824
* GB of SSD storage for builds. This compute type supports Docker images up to 100
- * GB uncompressed.</p> </li> </ul> <p> If you use
+ * GB uncompressed.</p> </li> <li> <p> <code>BUILD_LAMBDA_1GB</code>: Use up to 1
+ * GiB memory for builds. Only available for environment type
+ * <code>LINUX_LAMBDA_CONTAINER</code> and <code>ARM_LAMBDA_CONTAINER</code>.</p>
+ * </li> <li> <p> <code>BUILD_LAMBDA_2GB</code>: Use up to 2 GiB memory for builds.
+ * Only available for environment type <code>LINUX_LAMBDA_CONTAINER</code> and
+ * <code>ARM_LAMBDA_CONTAINER</code>.</p> </li> <li> <p>
+ * <code>BUILD_LAMBDA_4GB</code>: Use up to 4 GiB memory for builds. Only available
+ * for environment type <code>LINUX_LAMBDA_CONTAINER</code> and
+ * <code>ARM_LAMBDA_CONTAINER</code>.</p> </li> <li> <p>
+ * <code>BUILD_LAMBDA_8GB</code>: Use up to 8 GiB memory for builds. Only available
+ * for environment type <code>LINUX_LAMBDA_CONTAINER</code> and
+ * <code>ARM_LAMBDA_CONTAINER</code>.</p> </li> <li> <p>
+ * <code>BUILD_LAMBDA_10GB</code>: Use up to 10 GiB memory for builds. Only
+ * available for environment type <code>LINUX_LAMBDA_CONTAINER</code> and
+ * <code>ARM_LAMBDA_CONTAINER</code>.</p> </li> </ul> <p> If you use
* <code>BUILD_GENERAL1_SMALL</code>: </p> <ul> <li> <p> For environment type
- * <code>LINUX_CONTAINER</code>, you can use up to 3 GB memory and 2 vCPUs for
+ * <code>LINUX_CONTAINER</code>, you can use up to 4 GiB memory and 2 vCPUs for
* builds. </p> </li> <li> <p> For environment type
- * <code>LINUX_GPU_CONTAINER</code>, you can use up to 16 GB memory, 4 vCPUs, and 1
- * NVIDIA A10G Tensor Core GPU for builds.</p> </li> <li> <p> For environment type
- * <code>ARM_CONTAINER</code>, you can use up to 4 GB memory and 2 vCPUs on
+ * <code>LINUX_GPU_CONTAINER</code>, you can use up to 16 GiB memory, 4 vCPUs, and
+ * 1 NVIDIA A10G Tensor Core GPU for builds.</p> </li> <li> <p> For environment
+ * type <code>ARM_CONTAINER</code>, you can use up to 4 GiB memory and 2 vCPUs on
* ARM-based processors for builds.</p> </li> </ul> <p> If you use
* <code>BUILD_GENERAL1_LARGE</code>: </p> <ul> <li> <p> For environment type
- * <code>LINUX_CONTAINER</code>, you can use up to 15 GB memory and 8 vCPUs for
+ * <code>LINUX_CONTAINER</code>, you can use up to 16 GiB memory and 8 vCPUs for
* builds. </p> </li> <li> <p> For environment type
- * <code>LINUX_GPU_CONTAINER</code>, you can use up to 255 GB memory, 32 vCPUs, and
- * 4 NVIDIA Tesla V100 GPUs for builds.</p> </li> <li> <p> For environment type
- * <code>ARM_CONTAINER</code>, you can use up to 16 GB memory and 8 vCPUs on
+ * <code>LINUX_GPU_CONTAINER</code>, you can use up to 255 GiB memory, 32 vCPUs,
+ * and 4 NVIDIA Tesla V100 GPUs for builds.</p> </li> <li> <p> For environment type
+ * <code>ARM_CONTAINER</code>, you can use up to 16 GiB memory and 8 vCPUs on
* ARM-based processors for builds.</p> </li> </ul> <p>For more information, see <a
- * href="https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html">Build
- * environment compute types</a> in the <i>CodeBuild User Guide.</i> </p>
+ * href="https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html#environment.types">On-demand
+ * environment types</a> in the <i>CodeBuild User Guide.</i> </p>
*/
inline const ComputeType& GetComputeType() const{ return m_computeType; }
inline bool ComputeTypeHasBeenSet() const { return m_computeTypeHasBeenSet; }
@@ -213,6 +235,19 @@
inline Fleet& WithComputeType(ComputeType&& value) { SetComputeType(std::move(value)); return *this;}
///@}

///@{
/**
* <p>The compute configuration of the compute fleet. This is only required if
* <code>computeType</code> is set to <code>ATTRIBUTE_BASED_COMPUTE</code>.</p>
*/
inline const ComputeConfiguration& GetComputeConfiguration() const{ return m_computeConfiguration; }
inline bool ComputeConfigurationHasBeenSet() const { return m_computeConfigurationHasBeenSet; }
inline void SetComputeConfiguration(const ComputeConfiguration& value) { m_computeConfigurationHasBeenSet = true; m_computeConfiguration = value; }
inline void SetComputeConfiguration(ComputeConfiguration&& value) { m_computeConfigurationHasBeenSet = true; m_computeConfiguration = std::move(value); }
inline Fleet& WithComputeConfiguration(const ComputeConfiguration& value) { SetComputeConfiguration(value); return *this;}
inline Fleet& WithComputeConfiguration(ComputeConfiguration&& value) { SetComputeConfiguration(std::move(value)); return *this;}
///@}

///@{
/**
* <p>The scaling configuration of the compute fleet.</p>
@@ -344,6 +379,9 @@
ComputeType m_computeType;
bool m_computeTypeHasBeenSet = false;

ComputeConfiguration m_computeConfiguration;
bool m_computeConfigurationHasBeenSet = false;

ScalingConfigurationOutput m_scalingConfiguration;
bool m_scalingConfigurationHasBeenSet = false;

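
On the read side, the same attributes can be inspected on a Fleet returned by the service, using only the accessors shown above (the surrounding BatchGetFleets call and error handling are omitted):

#include <aws/codebuild/model/Fleet.h>
#include <iostream>

using namespace Aws::CodeBuild::Model;

// Print the attribute-based compute settings of a fleet, if it has any.
void PrintComputeAttributes(const Fleet& fleet)
{
    if (fleet.GetComputeType() != ComputeType::ATTRIBUTE_BASED_COMPUTE)
    {
        std::cout << "Fleet does not use attribute-based compute." << std::endl;
        return;
    }
    const ComputeConfiguration& config = fleet.GetComputeConfiguration();
    std::cout << "vCPU: "      << config.GetVCpu()
              << ", memory: "  << config.GetMemory()
              << ", disk: "    << config.GetDisk()
              << ", machine: " << MachineTypeMapper::GetNameForMachineType(config.GetMachineType())
              << std::endl;
}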
aws/codebuild/model/MachineType.h (new file)
@@ -0,0 +1,31 @@
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/

#pragma once
#include <aws/codebuild/CodeBuild_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>

namespace Aws
{
namespace CodeBuild
{
namespace Model
{
enum class MachineType
{
NOT_SET,
GENERAL,
NVME
};

namespace MachineTypeMapper
{
AWS_CODEBUILD_API MachineType GetMachineTypeForName(const Aws::String& name);

AWS_CODEBUILD_API Aws::String GetNameForMachineType(MachineType value);
} // namespace MachineTypeMapper
} // namespace Model
} // namespace CodeBuild
} // namespace Aws
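
The mapper above converts between the enum and its wire name; combined with ComputeConfiguration::Jsonize(), it is easy to see what a configuration will serialize to. A sketch using the SDK's standard JSON helpers:

#include <aws/codebuild/model/MachineType.h>
#include <aws/codebuild/model/ComputeConfiguration.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <iostream>

using namespace Aws::CodeBuild::Model;

int main()
{
    // String <-> enum round trip through the generated mapper.
    MachineType machine = MachineTypeMapper::GetMachineTypeForName("NVME");
    std::cout << MachineTypeMapper::GetNameForMachineType(machine) << std::endl; // NVME

    // Inspect the JSON payload a ComputeConfiguration would produce.
    Aws::Utils::Json::JsonValue json =
        ComputeConfiguration().WithVCpu(8).WithMachineType(machine).Jsonize();
    std::cout << json.View().WriteReadable() << std::endl;
    return 0;
}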
