Skip to content

Commit

Permalink
obtain credentials from ec2
Browse files Browse the repository at this point in the history
  • Loading branch information
anik3tra0 committed Mar 8, 2024
1 parent 605e379 commit 47fd2bc
Show file tree
Hide file tree
Showing 8 changed files with 68 additions and 191 deletions.
27 changes: 7 additions & 20 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -1,33 +1,20 @@
# Use an official Python runtime as a parent image
FROM python:3.11-slim
FROM python:3.11

RUN apt-get update
RUN apt-get install -y cron

# Install Poetry
RUN pip install poetry

ENV POETRY_NO_INTERACTION=1 \
POETRY_VIRTUALENVS_IN_PROJECT=1 \
POETRY_VIRTUALENVS_CREATE=1 \
POETRY_CACHE_DIR=/tmp/poetry_cache

# Set the working directory in the container to /app
# Set the working directory inside the container
WORKDIR /app

# Copy the Python script and poetry files (pyproject.toml, poetry.lock) into the container at /app
COPY main.py pyproject.toml poetry.lock crontab /app/
# Copy the requirements file to the working directory
COPY requirements.txt .

# Install project dependencies
RUN poetry install && rm -rf $POETRY_CACHE_DIR
RUN pip install --no-cache-dir -r requirements.txt

### Enable this if you want to run this as a one off process
## Run main.py when the container launches
#CMD ["python", "./main.py"]

COPY main.py crontab /app/

RUN poetry install
# Copy the Python script and poetry files (pyproject.toml, poetry.lock) into the container at /app
COPY main.py crontab ./

# Add crontab file in the cron directory
ADD crontab /etc/cron.d/ecs-service-discovery-cron
Expand Down
2 changes: 1 addition & 1 deletion crontab
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# Run /app/main.py every minute
*/3 * * * * python /app/main.py --cluster_name="<cluster_name>" --output_dir /shared_volume/ --scrape_port 9097 >> /var/log/cron.log 2>&1
* * * * * /usr/local/bin/python /app/main.py --cluster_name="<ecs_cluster_name>" --output_dir /shared_volume/ --scrape_port 9097 --region "ap-south-1" >> /var/log/cron.log 2>&1

# An empty line is required at the end of this file for a valid cron file.
5 changes: 3 additions & 2 deletions docker-compose.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ services:
context: .
dockerfile: Dockerfile
volumes:
- ~/.aws:/root/.aws:ro
- ./shared_volume:/shared_volume
network_mode: host

Expand All @@ -18,8 +19,8 @@ services:
- "8429:8429"
volumes:
- ./vmagentdata:/vmagentdata
- ./prometheus.yml:/etc/vmagent/vmagent.yaml
- ./shared_volume:/shared_volume/ecs_file_sd_config.json
- ./vmagent.yaml:/etc/vmagent/vmagent.yaml
- ./shared_volume:/shared_volume/
command:
- "--promscrape.config=/etc/vmagent/vmagent.yaml"
- "--remoteWrite.url=<remote_write_url>"
Expand Down
64 changes: 38 additions & 26 deletions main.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,14 +2,14 @@
import json
import argparse
import os
from botocore.exceptions import ClientError
from botocore.exceptions import ClientError, NoCredentialsError


def assume_role(arn, session_name):
def assume_role(arn, session_name, region):
if not arn:
# If no role ARN provided, return None to use default credentials
return None
sts_client = boto3.client('sts')
sts_client = boto3.client('sts', region_name=region)
try:
assumed_role = sts_client.assume_role(RoleArn=arn, RoleSessionName=session_name)
credentials = assumed_role['Credentials']
Expand All @@ -19,32 +19,35 @@ def assume_role(arn, session_name):
return None


def get_ecs_services(cluster_name, region, credentials=None):
    """Return detailed descriptions of every ECS service in a cluster.

    Args:
        cluster_name: Name or ARN of the ECS cluster to inspect.
        region: AWS region the cluster lives in.
        credentials: Optional temporary STS credentials dict (keys
            'AccessKeyId', 'SecretAccessKey', 'SessionToken') from an
            assumed role. When None, the default provider chain is used
            (e.g. the EC2 instance profile).

    Returns:
        A list of service-description dicts as returned by
        ``ecs.describe_services``; empty when the cluster has no services.
    """
    if credentials:
        ecs = boto3.client(
            'ecs',
            region_name=region,
            aws_access_key_id=credentials['AccessKeyId'],
            aws_secret_access_key=credentials['SecretAccessKey'],
            aws_session_token=credentials['SessionToken']
        )
    else:
        ecs = boto3.client('ecs', region_name=region)

    # list_services is paginated (10 ARNs per page by default); a single
    # call would silently drop services beyond the first page.
    service_arns = []
    for page in ecs.get_paginator('list_services').paginate(cluster=cluster_name):
        service_arns.extend(page['serviceArns'])

    # describe_services rejects an empty services list and accepts at most
    # 10 ARNs per call, so batch the lookups.
    detailed_services = []
    for i in range(0, len(service_arns), 10):
        batch = service_arns[i:i + 10]
        detailed_services.extend(
            ecs.describe_services(cluster=cluster_name, services=batch)['services']
        )
    return detailed_services


def get_task_ips(cluster_name, service_name, credentials=None):
def get_task_ips(cluster_name, service_name, region, credentials=None):
if credentials:
ecs = boto3.client(
'ecs',
region_name=region,
aws_access_key_id=credentials['AccessKeyId'],
aws_secret_access_key=credentials['SecretAccessKey'],
aws_session_token=credentials['SessionToken']
aws_session_token=credentials['SessionToken'],
aws_region=credentials['Region']
)
else:
ecs = boto3.client('ecs')
ecs = boto3.client('ecs', region_name=region )

# List tasks for the given service
task_arns = ecs.list_tasks(cluster=cluster_name, serviceName=service_name)['taskArns']
Expand All @@ -68,29 +71,38 @@ def main():
parser.add_argument('--output_dir', type=str, help='Directory to output the JSON file', required=True)
parser.add_argument('--role_arn', type=str, default=None, help='ARN of the role to assume (optional)')
parser.add_argument('--scrape_port', type=str, default=None, help='Port number of the Scrape service', required=True)
parser.add_argument('--region', type=str, default=None, help='AWS Region', required=True)

args = parser.parse_args()

credentials = assume_role(args.role_arn, 'ecs_sd_script') if args.role_arn else None
credentials = assume_role(args.role_arn, args.region, 'ecs_sd_script') if args.role_arn else None

file_sd_config = []

for service in get_ecs_services(args.cluster_name, credentials):
service_name = service['serviceName']
ips = get_task_ips(args.cluster_name, service_name, credentials)
targets = [f"{ip}:{args.scrape_port}" for ip in ips]

file_sd_config.append({
"targets": targets,
"labels": {
"job": service_name,
"ecs_cluster": args.cluster_name,
"ecs_service_name": service_name
}
})

output_file = os.path.join(args.output_dir, 'ecs_file_sd_config.json')
with open(output_file, 'w') as file:
json.dump(file_sd_config, file, indent=4)
for service in get_ecs_services(args.cluster_name, args.region, credentials):
try:
service_name = service['serviceName']
ips = get_task_ips(args.cluster_name, service_name, args.region, credentials)
targets = [f"{ip}:{args.scrape_port}" for ip in ips]

file_sd_config.append({
"targets": targets,
"labels": {
"job": service_name,
"ecs_cluster": args.cluster_name,
"ecs_service_name": service_name
}
})
except NoCredentialsError as e:
print(f"An error occurred: {e}")
file_sd_config.append({
"targets": [],
"labels": {}
})
finally:
output_file = os.path.join(args.output_dir, 'ecs_file_sd_config.json')
with open(output_file, 'w') as file:
json.dump(file_sd_config, file, indent=4)


if __name__ == "__main__":
Expand Down
125 changes: 0 additions & 125 deletions poetry.lock

This file was deleted.

17 changes: 0 additions & 17 deletions pyproject.toml

This file was deleted.

7 changes: 7 additions & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
boto3==1.34.58
botocore==1.34.58
jmespath==1.0.1
python-dateutil==2.9.0.post0
s3transfer==0.10.0
six==1.16.0
urllib3==2.0.7
12 changes: 12 additions & 0 deletions vmagent.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
global:
  scrape_interval: 1m
  scrape_timeout: 20s
# Check https://prometheus.io/docs/prometheus/latest/configuration/configuration for more details
scrape_configs:
  # Targets are discovered from the JSON file written by main.py into the
  # shared volume (ECS file-based service discovery).
  - job_name: "ecs_service_scraper"
    file_sd_configs:
      - files:
          - /shared_volume/ecs_file_sd_config.json
  # vmagent scrapes its own metrics endpoint.
  - job_name: "vmagent"
    static_configs:
      - targets: [ "localhost:8429" ]

0 comments on commit 47fd2bc

Please sign in to comment.