Commit 13eff20

Merge remote-tracking branch 'origin' into 26-as-a-devops-i-want-to-create-unit-tests-for-the-remove-previous-imagepy-script

ThomasCardin committed Nov 22, 2024
2 parents b17581c + 7d46bc8 commit 13eff20
Showing 17 changed files with 337 additions and 2 deletions.
4 changes: 4 additions & 0 deletions .github/workflows/custom-dockerfile-push.yml
@@ -13,6 +13,10 @@ on:
          - otel-auto
          - webtop-fedora-kde
          - webtop-ubuntu-kde
          - alloy
          - tempo
          - loki
          - prometheus
      tag:
        required: true
        description: Version to tag the image
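For reference, the updated workflow can be exercised from the CLI as well as the Actions tab. A hedged sketch using the GitHub CLI, assuming the choice input that carries these image names is called `image` (its definition sits above the visible hunk):

```sh
# Dispatch the image-build workflow for one of the newly added images.
# 'image' is an assumed input name; 'tag' is the required input shown above.
gh workflow run custom-dockerfile-push.yml -f image=alloy -f tag=v1.0.0
```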
21 changes: 21 additions & 0 deletions .github/workflows/integration-workflow.yml
@@ -0,0 +1,21 @@
name: Integration workflow

on:
  workflow_dispatch:
    inputs:
      username:
        required: true
        description: "Enter your username"
        type: string
      email:
        required: true
        description: "Enter your email address"
        type: string

jobs:
  integration:
    uses: ai-cfia/github-workflows/.github/workflows/workflow-integration.yml@158-implement-a-workflow-to-capture-user-inputs-and-trigger-template-rendering
    with:
      username: ${{ github.event.inputs.username }}
      email: ${{ github.event.inputs.email }}
    secrets: inherit
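This workflow is dispatch-only, so it can also be triggered from the GitHub CLI with the two required inputs:

```sh
# Kick off the integration workflow with sample values.
gh workflow run integration-workflow.yml \
  -f username=octocat \
  -f email=octocat@example.com
```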
6 changes: 6 additions & 0 deletions dockerfiles/alloy/Dockerfile
@@ -0,0 +1,6 @@
FROM grafana/alloy:latest

COPY config.alloy /etc/alloy/config.alloy
COPY endpoints.json /etc/alloy/endpoints.json

CMD ["run", "--server.http.listen-addr=0.0.0.0:12345", "--stability.level=public-preview", "/etc/alloy/config.alloy"]
87 changes: 87 additions & 0 deletions dockerfiles/alloy/config.alloy
@@ -0,0 +1,87 @@
// Load endpoint credentials and options
local.file "endpoints" {
  filename = "/etc/alloy/endpoints.json"
}

// Metrics scraping configuration
prometheus.scrape "infrastructure" {
  targets = [
    {"__address__" = "prometheus:9090", group = "infrastructure", service = "prometheus"},
    {"__address__" = "tempo:3200", group = "infrastructure", service = "tempo"},
    {"__address__" = "loki:3100", group = "infrastructure", service = "loki"},
    {"__address__" = "grafana:3000", group = "infrastructure", service = "grafana"},
  ]
  scrape_interval = "15s"
  forward_to      = [prometheus.remote_write.default.receiver]
}

// OTLP receiver for OpenTelemetry data. Incoming signals flow through the
// memory limiter and batch processors below before reaching the exporters.
otelcol.receiver.otlp "default" {
  grpc { }
  http { }

  output {
    metrics = [otelcol.processor.memory_limiter.default.input]
    logs    = [otelcol.processor.memory_limiter.default.input]
    traces  = [otelcol.processor.memory_limiter.default.input]
  }
}

// Memory limiter processor to keep memory usage bounded
otelcol.processor.memory_limiter "default" {
  check_interval = "1s"
  limit          = "1GiB"

  output {
    metrics = [otelcol.processor.batch.default.input]
    logs    = [otelcol.processor.batch.default.input]
    traces  = [otelcol.processor.batch.default.input]
  }
}

// Batch processor for batching telemetry before export
otelcol.processor.batch "default" {
  output {
    metrics = [otelcol.exporter.prometheus.default.input]
    logs    = [otelcol.exporter.loki.default.input]
    traces  = [otelcol.exporter.otlp.tempo.input]
  }
}

// Logging configuration
logging {
  level  = "info"
  format = "logfmt"
}

// Loki exporter for logs
otelcol.exporter.loki "default" {
  forward_to = [loki.write.default.receiver]
}

// Write logs to the local Loki instance
loki.write "default" {
  endpoint {
    url = "http://loki:3100/loki/api/v1/push"
  }
}

// Tempo exporter for trace data
otelcol.exporter.otlp "tempo" {
  client {
    endpoint = "http://tempo:4317"

    tls {
      insecure = true
    }
  }
}

// Convert OTLP metrics to Prometheus format and remote-write them
otelcol.exporter.prometheus "default" {
  forward_to = [prometheus.remote_write.default.receiver]
}

// Remote write configuration to Prometheus instance
prometheus.remote_write "default" {
  endpoint {
    url = "http://prometheus:9090/api/v1/write"
  }
}
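To feed this pipeline, an application's OpenTelemetry SDK only needs to point at the OTLP receiver. A sketch using the standard OTLP exporter environment variables; the `alloy` hostname assumes the containers share a network:

```sh
# Point any OTel SDK at Alloy's OTLP gRPC receiver (default port 4317).
export OTEL_EXPORTER_OTLP_ENDPOINT="http://alloy:4317"
export OTEL_EXPORTER_OTLP_PROTOCOL="grpc"

# From the host, Alloy's readiness endpoint answers on the HTTP server port.
curl -sf http://localhost:12345/-/ready && echo "alloy is ready"
```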
24 changes: 24 additions & 0 deletions dockerfiles/alloy/endpoints.json
@@ -0,0 +1,24 @@
{
  "metrics": {
    "url": "http://prometheus:9009/api/v1/push",
    "basicAuth": {
      "username": "",
      "password": ""
    }
  },
  "logs": {
    "url": "http://loki:3100/loki/api/v1/push",
    "basicAuth": {
      "username": "",
      "password": ""
    }
  },
  "traces": {
    "url": "http://tempo:4317",
    "basicAuthToken": "",
    "tls": {
      "insecure": true,
      "insecureSkipVerify": true
    }
  }
}
5 changes: 5 additions & 0 deletions dockerfiles/loki/Dockerfile
@@ -0,0 +1,5 @@
FROM grafana/loki:latest

COPY loki.yaml /etc/loki/loki.yaml

CMD ["--pattern-ingester.enabled=true", "-config.file=/etc/loki/loki.yaml"]
36 changes: 36 additions & 0 deletions dockerfiles/loki/loki.yaml
@@ -0,0 +1,36 @@
auth_enabled: false

server:
  http_listen_port: 3100

common:
  path_prefix: /data/loki
  replication_factor: 1
  ring:
    kvstore:
      store: inmemory

ingester:
  wal:
    enabled: false

schema_config:
  configs:
    - from: "2023-01-05"
      index:
        period: 24h
        prefix: index_
      object_store: filesystem
      schema: v13
      store: tsdb

storage_config:
  filesystem:
    directory: /data/loki/chunks
  tsdb_shipper:
    active_index_directory: /data/loki/tsdb-index
    cache_location: /data/loki/tsdb-cache

limits_config:
  reject_old_samples: true
  reject_old_samples_max_age: 168h
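With auth disabled and the filesystem backend above, a hand-rolled push verifies the write path end to end; the `job` label here is arbitrary:

```sh
# Push one test log line through Loki's HTTP API, then query it back.
curl -s -X POST http://localhost:3100/loki/api/v1/push \
  -H "Content-Type: application/json" \
  -d "{\"streams\":[{\"stream\":{\"job\":\"smoke-test\"},\"values\":[[\"$(date +%s)000000000\",\"hello loki\"]]}]}"

curl -s -G http://localhost:3100/loki/api/v1/query \
  --data-urlencode 'query={job="smoke-test"}'
```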
7 changes: 7 additions & 0 deletions dockerfiles/prometheus/Dockerfile
@@ -0,0 +1,7 @@
FROM prom/prometheus:latest
COPY prometheus.yml.tmpl /etc/prometheus/prometheus.yml.tmpl
COPY entrypoint.sh /entrypoint.sh

ENV ENABLE_BACKEND_METRICS=false

ENTRYPOINT ["/entrypoint.sh"]
13 changes: 13 additions & 0 deletions dockerfiles/prometheus/entrypoint.sh
@@ -0,0 +1,13 @@
#!/bin/sh

if [ "$ENABLE_BACKEND_METRICS" = "true" ]; then
  BACKEND_JOB="- job_name: 'backend'\n    metrics_path: '/metrics'\n    static_configs:\n      - targets: ['backend:5000']"
else
  BACKEND_JOB=""
fi

# Replace the placeholder in the template; sed expands the \n sequences into real newlines
sed "s|{{BACKEND_JOB}}|$BACKEND_JOB|" /etc/prometheus/prometheus.yml.tmpl > /etc/prometheus/prometheus.yml

# Start Prometheus
exec /bin/prometheus --config.file=/etc/prometheus/prometheus.yml --web.enable-remote-write-receiver
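The substitution relies on GNU/busybox sed expanding the literal \n sequences in the replacement into real newlines, which turns the one-line variable into a multi-line scrape job. The rendering can be reproduced locally from dockerfiles/prometheus/ without Docker:

```sh
# Reproduce the entrypoint's template rendering and inspect the result.
BACKEND_JOB="- job_name: 'backend'\n    metrics_path: '/metrics'\n    static_configs:\n      - targets: ['backend:5000']"
sed "s|{{BACKEND_JOB}}|$BACKEND_JOB|" prometheus.yml.tmpl
```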
10 changes: 10 additions & 0 deletions dockerfiles/prometheus/prometheus.yml.tmpl
@@ -0,0 +1,10 @@
global:
  scrape_interval: 15s
  evaluation_interval: 15s

scrape_configs:
  - job_name: 'prometheus'
    static_configs:
      - targets: ['localhost:9090']

  {{BACKEND_JOB}}
5 changes: 5 additions & 0 deletions dockerfiles/tempo/Dockerfile
@@ -0,0 +1,5 @@
FROM grafana/tempo:latest

COPY tempo.yaml /etc/tempo.yaml

CMD ["-config.file=/etc/tempo.yaml"]
94 changes: 94 additions & 0 deletions dockerfiles/tempo/tempo.yaml
@@ -0,0 +1,94 @@
# For more information on this configuration, see the complete reference guide at
# https://grafana.com/docs/tempo/latest/configuration/

# Enables result streaming from Tempo (to Grafana) via HTTP.
stream_over_http_enabled: true

# Configure the server block.
server:
  # Listen for all incoming requests on port 3200.
  http_listen_port: 3200

# The distributor receives incoming trace span data for the system.
distributor:
  receivers:           # This configuration will listen on all ports and protocols that tempo is capable of.
    jaeger:            # The receivers all come from the OpenTelemetry collector. More configuration information can
      protocols:       # be found there: https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver
        thrift_http:   #
        grpc:          # For a production deployment you should only enable the receivers you need!
        thrift_binary: #
        thrift_compact:
    otlp:
      protocols:
        http:
        grpc:          # This example repository only utilises the OTLP gRPC receiver on port 4317.
    zipkin:            # Receive trace data in any supported Zipkin format.

# The ingester receives data from the distributor and processes it into indices and blocks.
ingester:
  trace_idle_period: 10s     # The length of time after a trace has not received spans to consider it complete and flush it.
  max_block_bytes: 1_000_000 # Cut the head block when it hits this size or
  max_block_duration: 5m     # this much time passes.

# The compactor block configures the compactor responsible for compacting TSDB blocks.
compactor:
  compaction:
    compaction_window: 1h          # Blocks in this time window will be compacted together.
    max_block_bytes: 100_000_000   # Maximum size of a compacted block.
    block_retention: 1h            # How long to keep blocks. The default is 14 days; this demo system is short-lived.
    compacted_block_retention: 10m # How long to keep compacted blocks stored elsewhere.

# Configuration block to determine where to store TSDB blocks.
storage:
  trace:
    backend: local # Use the local filesystem for block storage. Not recommended for production systems.
    block:
      bloom_filter_false_positive: .05 # Bloom filter false positive rate. Lower values create larger filters but fewer false positives.
    # Write Ahead Log (WAL) configuration.
    wal:
      path: /tmp/tempo/wal # Directory to store the WAL locally.
    # Local configuration for filesystem storage.
    local:
      path: /tmp/tempo/blocks # Directory to store the TSDB blocks.
    # Pool used for finding trace IDs.
    pool:
      max_workers: 100   # Worker pool determines the number of parallel requests to the object store backend.
      queue_depth: 10000 # Maximum depth for the querier queue jobs. A job is required for each block searched.

# Configures the metrics generator component of Tempo.
metrics_generator:
  # Specifies which processors to use.
  processor:
    # Span metrics create metrics based on span type, duration, name and service.
    span_metrics:
      # Configure extra dimensions to add as metric labels.
      dimensions:
        - http.method
        - http.target
        - http.status_code
        - service.version
    # Service graph metrics create node and edge metrics for determining service interactions.
    service_graphs:
      # Configure extra dimensions to add as metric labels.
      dimensions:
        - http.method
        - http.target
        - http.status_code
        - service.version
    # Configure the local blocks processor.
    local_blocks:
      # Ensure that metrics blocks are flushed to storage so TraceQL metrics queries can run against historical data.
      flush_to_storage: true
  # The registry configuration determines how to process metrics.
  registry:
    collection_interval: 5s
    external_labels:
      source: tempo
  storage:
    path: /tmp/tempo/generator/wal
  traces_storage:
    path: /tmp/tempo/generator/traces

# Global override configuration.
overrides:
  metrics_generator_processors: ['service-graphs', 'span-metrics', 'local-blocks'] # The types of metrics generation to enable for each tenant.
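Once spans are flowing in, Tempo's search API gives a quick end-to-end check; a sketch with an unscoped TraceQL query that matches any recent trace:

```sh
# Search recent traces over the HTTP port configured above.
curl -s -G http://localhost:3200/api/search \
  --data-urlencode 'q={}' \
  --data-urlencode 'limit=5'
```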
27 changes: 25 additions & 2 deletions github-repository-creation-guide.md
@@ -1,4 +1,4 @@
# GitHub repository creation guide
# GitHub Repository Creation Guide

When creating a GitHub repository, you need to follow a few organizational standardization rules:
@@ -11,6 +11,7 @@ standardization rules :
- Add a description.
- Create a LICENSE.md file.
- Protect your branches.
- Enable secret scanning and push protection.

## How to Create a LICENSE.md File

@@ -26,7 +27,7 @@ License](https://github.com/ai-cfia/devops/assets/9827730/f7d4576f-1a3e-4a95-98e

4. Click on "Review and submit" to create your file.

## Why you need to protect your branches
## Why You Need to Protect Your Branches

Protecting branches in GitHub is essential for maintaining code integrity,
ensuring a consistent commit history, and safeguarding against disruptive
@@ -49,3 +50,25 @@ Tab](https://github.com/ai-cfia/devops/assets/9827730/5b5d85ef-5713-4c60-a519-66
number of required approvals.
- ![Branch Protection
Settings](https://github.com/ai-cfia/devops/assets/9827730/fe2a4a22-19af-4f3b-96e1-03095c26ddeb)

## How to Enable Secret Scanning and Push Protection

Enabling secret scanning and push protection in GitHub repositories helps
prevent sensitive information, such as API keys, passwords, and tokens, from
being inadvertently exposed in your codebase. Secret scanning detects exposed
secrets, while push protection actively blocks commits containing known secrets.
If a secret is leaked, repository administrators receive an alert.

To enable these functionalities (an API-based alternative follows these steps):

1. From the main page of your repository, click **Settings**. ![Repository
Settings](./images/{186D1DE0-B70F-4DAA-8267-D8029BB90F66}.png)

1. In the sidebar, scroll down to the **Security** section and click **Code security**.

1. Scroll down to the **Secret scanning** section and click **Enable**. ![Enable
Secret scanning](./images/{88B79545-E575-41D6-AAB5-EBD53195E25F}.png)

1. After enabling Secret scanning, the option to enable Push protection will
appear. Click **Enable**. ![Enable Push
protection](./images/{EE4585DB-1219-43A3-BDF0-B8E6F0ADCEDB}.png)
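
For repositories managed in bulk, the same two settings can be flipped through the REST API instead of the UI. A hedged sketch with curl; OWNER/REPO are placeholders, and the token needs admin rights on the repository:

```sh
# Enable secret scanning and push protection via the "update a repository" endpoint.
curl -X PATCH \
  -H "Authorization: Bearer $GITHUB_TOKEN" \
  -H "Accept: application/vnd.github+json" \
  https://api.github.com/repos/OWNER/REPO \
  -d '{"security_and_analysis":{"secret_scanning":{"status":"enabled"},"secret_scanning_push_protection":{"status":"enabled"}}}'
```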
Binary file added images/{186D1DE0-B70F-4DAA-8267-D8029BB90F66}.png
Binary file added images/{2E6531D8-47AA-4584-BA62-07F41E068DA3}.png
Binary file added images/{88B79545-E575-41D6-AAB5-EBD53195E25F}.png
Binary file added images/{EE4585DB-1219-43A3-BDF0-B8E6F0ADCEDB}.png
